Merge Android U (ab/10368041)

Bug: 291102124
Merged-In: I9b175092d433fc6d7b22b437a09d76d3d2e0ce14
Change-Id: If8a2897a99b111ba107f33c19537d40bcbdc802e
diff --git a/libs/attestation/Android.bp b/libs/attestation/Android.bp
index 2bf15d4..fddecc0 100644
--- a/libs/attestation/Android.bp
+++ b/libs/attestation/Android.bp
@@ -22,6 +22,7 @@
 
 cc_library_static {
     name: "libattestation",
+    host_supported: true,
     cflags: [
         "-Wall",
         "-Wextra",
diff --git a/libs/binder/ActivityManager.cpp b/libs/binder/ActivityManager.cpp
index e45a656..aca5009 100644
--- a/libs/binder/ActivityManager.cpp
+++ b/libs/binder/ActivityManager.cpp
@@ -75,6 +75,20 @@
     return DEAD_OBJECT;
 }
 
+status_t ActivityManager::registerUidObserverForUids(const sp<IUidObserver>& observer,
+                                                     const int32_t event, const int32_t cutpoint,
+                                                     const String16& callingPackage,
+                                                     const int32_t uids[], size_t nUids,
+                                                     /*out*/ sp<IBinder>& observerToken) {
+    sp<IActivityManager> service = getService();
+    if (service != nullptr) {
+        return service->registerUidObserverForUids(observer, event, cutpoint, callingPackage, uids,
+                                                   nUids, observerToken);
+    }
+    // ActivityManagerService appears dead. Return usual error code for dead service.
+    return DEAD_OBJECT;
+}
+
 status_t ActivityManager::unregisterUidObserver(const sp<IUidObserver>& observer)
 {
     sp<IActivityManager> service = getService();
@@ -85,6 +99,26 @@
     return DEAD_OBJECT;
 }
 
+status_t ActivityManager::addUidToObserver(const sp<IBinder>& observerToken,
+                                           const String16& callingPackage, int32_t uid) {
+    sp<IActivityManager> service = getService();
+    if (service != nullptr) {
+        return service->addUidToObserver(observerToken, callingPackage, uid);
+    }
+    // ActivityManagerService appears dead. Return usual error code for dead service.
+    return DEAD_OBJECT;
+}
+
+status_t ActivityManager::removeUidFromObserver(const sp<IBinder>& observerToken,
+                                                const String16& callingPackage, int32_t uid) {
+    sp<IActivityManager> service = getService();
+    if (service != nullptr) {
+        return service->removeUidFromObserver(observerToken, callingPackage, uid);
+    }
+    // ActivityManagerService appears dead. Return usual error code for dead service.
+    return DEAD_OBJECT;
+}
+
 bool ActivityManager::isUidActive(const uid_t uid, const String16& callingPackage)
 {
     sp<IActivityManager> service = getService();
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index f634c1d..6c2b313 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -342,10 +342,6 @@
     },
 
     afdo: true,
-
-    header_abi_checker: {
-        diff_flags: ["-allow-adding-removing-weak-symbols"],
-    },
 }
 
 cc_library_static {
@@ -486,9 +482,7 @@
     local_include_dir: "aidl",
     host_supported: true,
     srcs: [
-        "aidl/android/content/pm/IPackageChangeObserver.aidl",
         "aidl/android/content/pm/IPackageManagerNative.aidl",
-        "aidl/android/content/pm/PackageChangeEvent.aidl",
         "aidl/android/content/pm/IStagedApexObserver.aidl",
         "aidl/android/content/pm/ApexStagedEvent.aidl",
         "aidl/android/content/pm/StagedApexInfo.aidl",
diff --git a/libs/binder/IActivityManager.cpp b/libs/binder/IActivityManager.cpp
index 5ec4e8b..2897561 100644
--- a/libs/binder/IActivityManager.cpp
+++ b/libs/binder/IActivityManager.cpp
@@ -77,6 +77,30 @@
          return OK;
     }
 
+    virtual status_t registerUidObserverForUids(const sp<IUidObserver>& observer,
+                                                const int32_t event, const int32_t cutpoint,
+                                                const String16& callingPackage,
+                                                const int32_t uids[], size_t nUids,
+                                                /*out*/ sp<IBinder>& observerToken) {
+         Parcel data, reply;
+         data.writeInterfaceToken(IActivityManager::getInterfaceDescriptor());
+         data.writeStrongBinder(IInterface::asBinder(observer));
+         data.writeInt32(event);
+         data.writeInt32(cutpoint);
+         data.writeString16(callingPackage);
+         data.writeInt32Array(nUids, uids);
+         status_t err =
+                 remote()->transact(REGISTER_UID_OBSERVER_FOR_UIDS_TRANSACTION, data, &reply);
+         if (err != NO_ERROR || ((err = reply.readExceptionCode()) != NO_ERROR)) {
+             return err;
+         }
+         err = reply.readStrongBinder(&observerToken);
+         if (err != NO_ERROR || ((err = reply.readExceptionCode()) != NO_ERROR)) {
+             return err;
+         }
+         return OK;
+    }
+
     virtual status_t unregisterUidObserver(const sp<IUidObserver>& observer)
     {
          Parcel data, reply;
@@ -89,6 +113,34 @@
          return OK;
     }
 
+    virtual status_t addUidToObserver(const sp<IBinder>& observerToken,
+                                      const String16& callingPackage, int32_t uid) {
+         Parcel data, reply;
+         data.writeInterfaceToken(IActivityManager::getInterfaceDescriptor());
+         data.writeStrongBinder(observerToken);
+         data.writeString16(callingPackage);
+         data.writeInt32(uid);
+         status_t err = remote()->transact(ADD_UID_TO_OBSERVER_TRANSACTION, data, &reply);
+         if (err != NO_ERROR || ((err = reply.readExceptionCode()) != NO_ERROR)) {
+             return err;
+         }
+         return OK;
+    }
+
+    virtual status_t removeUidFromObserver(const sp<IBinder>& observerToken,
+                                           const String16& callingPackage, int32_t uid) {
+         Parcel data, reply;
+         data.writeInterfaceToken(IActivityManager::getInterfaceDescriptor());
+         data.writeStrongBinder(observerToken);
+         data.writeString16(callingPackage);
+         data.writeInt32(uid);
+         status_t err = remote()->transact(REMOVE_UID_FROM_OBSERVER_TRANSACTION, data, &reply);
+         if (err != NO_ERROR || ((err = reply.readExceptionCode()) != NO_ERROR)) {
+             return err;
+         }
+         return OK;
+    }
+
     virtual bool isUidActive(const uid_t uid, const String16& callingPackage)
     {
          Parcel data, reply;
@@ -131,6 +183,56 @@
         *outResult = reply.readInt32();
         return NO_ERROR;
     }
+
+    virtual status_t logFgsApiBegin(int32_t apiType, int32_t appUid, int32_t appPid) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IActivityManager::getInterfaceDescriptor());
+        data.writeInt32(apiType);
+        data.writeInt32(appUid);
+        data.writeInt32(appPid);
+        status_t err = remote()->transact(LOG_FGS_API_BEGIN_TRANSACTION, data, &reply,
+                                          IBinder::FLAG_ONEWAY);
+        if (err != NO_ERROR || ((err = reply.readExceptionCode()) != NO_ERROR)) {
+            ALOGD("FGS Logger Transaction failed");
+            ALOGD("%d", err);
+            return err;
+        }
+        return NO_ERROR;
+    }
+
+    virtual status_t logFgsApiEnd(int32_t apiType, int32_t appUid, int32_t appPid) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IActivityManager::getInterfaceDescriptor());
+        data.writeInt32(apiType);
+        data.writeInt32(appUid);
+        data.writeInt32(appPid);
+        status_t err =
+                remote()->transact(LOG_FGS_API_END_TRANSACTION, data, &reply, IBinder::FLAG_ONEWAY);
+        if (err != NO_ERROR || ((err = reply.readExceptionCode()) != NO_ERROR)) {
+            ALOGD("FGS Logger Transaction failed");
+            ALOGD("%d", err);
+            return err;
+        }
+        return NO_ERROR;
+    }
+
+    virtual status_t logFgsApiStateChanged(int32_t apiType, int32_t state, int32_t appUid,
+                                           int32_t appPid) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IActivityManager::getInterfaceDescriptor());
+        data.writeInt32(apiType);
+        data.writeInt32(state);
+        data.writeInt32(appUid);
+        data.writeInt32(appPid);
+        status_t err = remote()->transact(LOG_FGS_API_STATE_CHANGED_TRANSACTION, data, &reply,
+                                          IBinder::FLAG_ONEWAY);
+        if (err != NO_ERROR || ((err = reply.readExceptionCode()) != NO_ERROR)) {
+            ALOGD("FGS Logger Transaction failed");
+            ALOGD("%d", err);
+            return err;
+        }
+        return NO_ERROR;
+    }
 };
 
 // ------------------------------------------------------------------------------------
diff --git a/libs/binder/IBatteryStats.cpp b/libs/binder/IBatteryStats.cpp
index 0de804c..69b11c0 100644
--- a/libs/binder/IBatteryStats.cpp
+++ b/libs/binder/IBatteryStats.cpp
@@ -128,6 +128,15 @@
         remote()->transact(NOTE_RESET_FLASHLIGHT_TRANSACTION, data, &reply);
     }
 
+    virtual binder::Status noteWakeupSensorEvent(int64_t elapsedNanos, int uid, int handle) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IBatteryStats::getInterfaceDescriptor());
+        data.writeInt64(elapsedNanos);
+        data.writeInt32(uid);
+        data.writeInt32(handle);
+        status_t ret = remote()->transact(NOTE_WAKEUP_SENSOR_EVENT_TRANSACTION, data, &reply);
+        return binder::Status::fromStatusT(ret);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(BatteryStats, "com.android.internal.app.IBatteryStats")
@@ -235,6 +244,16 @@
             reply->writeNoException();
             return NO_ERROR;
         } break;
+        case NOTE_WAKEUP_SENSOR_EVENT_TRANSACTION: {
+            CHECK_INTERFACE(IBatteryStats, data, reply);
+            int64_t elapsedNanos = data.readInt64();
+            int uid = data.readInt32();
+            int handle = data.readInt32();
+            noteWakeupSensorEvent(elapsedNanos, uid, handle);
+            reply->writeNoException();
+            return NO_ERROR;
+        } break;
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/libs/binder/IUidObserver.cpp b/libs/binder/IUidObserver.cpp
index d952dc7..1c35f53 100644
--- a/libs/binder/IUidObserver.cpp
+++ b/libs/binder/IUidObserver.cpp
@@ -67,9 +67,10 @@
         remote()->transact(ON_UID_STATE_CHANGED_TRANSACTION, data, &reply, IBinder::FLAG_ONEWAY);
     }
 
-    virtual void onUidProcAdjChanged(uid_t uid) {
+    virtual void onUidProcAdjChanged(uid_t uid, int32_t adj) {
         Parcel data, reply;
         data.writeInt32((int32_t)uid);
+        data.writeInt32((int32_t)adj);
         remote()->transact(ON_UID_PROC_ADJ_CHANGED_TRANSACTION, data, &reply, IBinder::FLAG_ONEWAY);
     }
 };
@@ -121,7 +122,8 @@
         case ON_UID_PROC_ADJ_CHANGED_TRANSACTION: {
             CHECK_INTERFACE(IUidObserver, data, reply);
             uid_t uid = data.readInt32();
-            onUidProcAdjChanged(uid);
+            int32_t adj = data.readInt32();
+            onUidProcAdjChanged(uid, adj);
             return NO_ERROR;
         } break;
 
diff --git a/libs/binder/aidl/android/content/pm/IPackageManagerNative.aidl b/libs/binder/aidl/android/content/pm/IPackageManagerNative.aidl
index 7c99f76..f8a8843 100644
--- a/libs/binder/aidl/android/content/pm/IPackageManagerNative.aidl
+++ b/libs/binder/aidl/android/content/pm/IPackageManagerNative.aidl
@@ -17,7 +17,6 @@
 
 package android.content.pm;
 
-import android.content.pm.IPackageChangeObserver;
 import android.content.pm.IStagedApexObserver;
 import android.content.pm.StagedApexInfo;
 
@@ -92,18 +91,6 @@
      */
     @utf8InCpp String getModuleMetadataPackageName();
 
-    /* Returns the names of all packages. */
-    @utf8InCpp String[] getAllPackages();
-
-    /** Register an extra package change observer to receive the multi-cast. */
-    void registerPackageChangeObserver(in IPackageChangeObserver observer);
-
-    /**
-     * Unregister an existing package change observer.
-     * This does nothing if this observer was not already registered.
-     */
-    void unregisterPackageChangeObserver(in IPackageChangeObserver observer);
-
     /**
      * Returns true if the package has the SHA 256 version of the signing certificate.
      * @see PackageManager#hasSigningCertificate(String, byte[], int), where type
diff --git a/libs/binder/aidl/android/content/pm/PackageChangeEvent.aidl b/libs/binder/aidl/android/content/pm/PackageChangeEvent.aidl
deleted file mode 100644
index e30e907..0000000
--- a/libs/binder/aidl/android/content/pm/PackageChangeEvent.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.content.pm;
-
-/**
- * This event is designed for notification to native code listener about
- * any changes on a package including update, deletion and etc.
- *
- * @hide
- */
-parcelable PackageChangeEvent {
-  @utf8InCpp String packageName;
-  long version;
-  long lastUpdateTimeMillis;
-  boolean newInstalled;
-  boolean dataRemoved;
-  boolean isDeleted;
-}
diff --git a/libs/binder/include/binder/IInterface.h b/libs/binder/include/binder/IInterface.h
index 9f7e2c8..dc572ac 100644
--- a/libs/binder/include/binder/IInterface.h
+++ b/libs/binder/include/binder/IInterface.h
@@ -219,80 +219,79 @@
 
 namespace internal {
 constexpr const char* const kManualInterfaces[] = {
-  "android.app.IActivityManager",
-  "android.app.IUidObserver",
-  "android.drm.IDrm",
-  "android.dvr.IVsyncCallback",
-  "android.dvr.IVsyncService",
-  "android.gfx.tests.ICallback",
-  "android.gfx.tests.IIPCTest",
-  "android.gfx.tests.ISafeInterfaceTest",
-  "android.graphicsenv.IGpuService",
-  "android.gui.IConsumerListener",
-  "android.gui.IGraphicBufferConsumer",
-  "android.gui.ITransactionComposerListener",
-  "android.gui.SensorEventConnection",
-  "android.gui.SensorServer",
-  "android.hardware.ICamera",
-  "android.hardware.ICameraClient",
-  "android.hardware.ICameraRecordingProxy",
-  "android.hardware.ICameraRecordingProxyListener",
-  "android.hardware.ICrypto",
-  "android.hardware.IOMXObserver",
-  "android.hardware.IStreamListener",
-  "android.hardware.IStreamSource",
-  "android.media.IAudioService",
-  "android.media.IDataSource",
-  "android.media.IDrmClient",
-  "android.media.IMediaCodecList",
-  "android.media.IMediaDrmService",
-  "android.media.IMediaExtractor",
-  "android.media.IMediaExtractorService",
-  "android.media.IMediaHTTPConnection",
-  "android.media.IMediaHTTPService",
-  "android.media.IMediaLogService",
-  "android.media.IMediaMetadataRetriever",
-  "android.media.IMediaMetricsService",
-  "android.media.IMediaPlayer",
-  "android.media.IMediaPlayerClient",
-  "android.media.IMediaPlayerService",
-  "android.media.IMediaRecorder",
-  "android.media.IMediaRecorderClient",
-  "android.media.IMediaResourceMonitor",
-  "android.media.IMediaSource",
-  "android.media.IRemoteDisplay",
-  "android.media.IRemoteDisplayClient",
-  "android.media.IResourceManagerClient",
-  "android.media.IResourceManagerService",
-  "android.os.IComplexTypeInterface",
-  "android.os.IPermissionController",
-  "android.os.IPingResponder",
-  "android.os.IProcessInfoService",
-  "android.os.ISchedulingPolicyService",
-  "android.os.IStringConstants",
-  "android.os.storage.IObbActionListener",
-  "android.os.storage.IStorageEventListener",
-  "android.os.storage.IStorageManager",
-  "android.os.storage.IStorageShutdownObserver",
-  "android.service.vr.IPersistentVrStateCallbacks",
-  "android.service.vr.IVrManager",
-  "android.service.vr.IVrStateCallbacks",
-  "android.ui.ISurfaceComposer",
-  "android.ui.ISurfaceComposerClient",
-  "android.utils.IMemory",
-  "android.utils.IMemoryHeap",
-  "com.android.car.procfsinspector.IProcfsInspector",
-  "com.android.internal.app.IAppOpsCallback",
-  "com.android.internal.app.IAppOpsService",
-  "com.android.internal.app.IBatteryStats",
-  "com.android.internal.os.IResultReceiver",
-  "com.android.internal.os.IShellCallback",
-  "drm.IDrmManagerService",
-  "drm.IDrmServiceListener",
-  "IAAudioClient",
-  "IAAudioService",
-  "VtsFuzzer",
-  nullptr,
+        "android.app.IActivityManager",
+        "android.app.IUidObserver",
+        "android.drm.IDrm",
+        "android.dvr.IVsyncCallback",
+        "android.dvr.IVsyncService",
+        "android.gfx.tests.ICallback",
+        "android.gfx.tests.IIPCTest",
+        "android.gfx.tests.ISafeInterfaceTest",
+        "android.graphicsenv.IGpuService",
+        "android.gui.IConsumerListener",
+        "android.gui.IGraphicBufferConsumer",
+        "android.gui.ITransactionComposerListener",
+        "android.gui.SensorEventConnection",
+        "android.gui.SensorServer",
+        "android.hardware.ICamera",
+        "android.hardware.ICameraClient",
+        "android.hardware.ICameraRecordingProxy",
+        "android.hardware.ICameraRecordingProxyListener",
+        "android.hardware.ICrypto",
+        "android.hardware.IOMXObserver",
+        "android.hardware.IStreamListener",
+        "android.hardware.IStreamSource",
+        "android.media.IAudioService",
+        "android.media.IDataSource",
+        "android.media.IDrmClient",
+        "android.media.IMediaCodecList",
+        "android.media.IMediaDrmService",
+        "android.media.IMediaExtractor",
+        "android.media.IMediaExtractorService",
+        "android.media.IMediaHTTPConnection",
+        "android.media.IMediaHTTPService",
+        "android.media.IMediaLogService",
+        "android.media.IMediaMetadataRetriever",
+        "android.media.IMediaMetricsService",
+        "android.media.IMediaPlayer",
+        "android.media.IMediaPlayerClient",
+        "android.media.IMediaPlayerService",
+        "android.media.IMediaRecorder",
+        "android.media.IMediaRecorderClient",
+        "android.media.IMediaResourceMonitor",
+        "android.media.IMediaSource",
+        "android.media.IRemoteDisplay",
+        "android.media.IRemoteDisplayClient",
+        "android.media.IResourceManagerClient",
+        "android.media.IResourceManagerService",
+        "android.os.IComplexTypeInterface",
+        "android.os.IPermissionController",
+        "android.os.IPingResponder",
+        "android.os.IProcessInfoService",
+        "android.os.ISchedulingPolicyService",
+        "android.os.IStringConstants",
+        "android.os.storage.IObbActionListener",
+        "android.os.storage.IStorageEventListener",
+        "android.os.storage.IStorageManager",
+        "android.os.storage.IStorageShutdownObserver",
+        "android.service.vr.IPersistentVrStateCallbacks",
+        "android.service.vr.IVrManager",
+        "android.service.vr.IVrStateCallbacks",
+        "android.ui.ISurfaceComposer",
+        "android.utils.IMemory",
+        "android.utils.IMemoryHeap",
+        "com.android.car.procfsinspector.IProcfsInspector",
+        "com.android.internal.app.IAppOpsCallback",
+        "com.android.internal.app.IAppOpsService",
+        "com.android.internal.app.IBatteryStats",
+        "com.android.internal.os.IResultReceiver",
+        "com.android.internal.os.IShellCallback",
+        "drm.IDrmManagerService",
+        "drm.IDrmServiceListener",
+        "IAAudioClient",
+        "IAAudioService",
+        "VtsFuzzer",
+        nullptr,
 };
 
 constexpr const char* const kDownstreamManualInterfaces[] = {
diff --git a/libs/binder/include_activitymanager/binder/ActivityManager.h b/libs/binder/include_activitymanager/binder/ActivityManager.h
index 5dfbd44..9c634c7 100644
--- a/libs/binder/include_activitymanager/binder/ActivityManager.h
+++ b/libs/binder/include_activitymanager/binder/ActivityManager.h
@@ -82,7 +82,15 @@
                              const int32_t event,
                              const int32_t cutpoint,
                              const String16& callingPackage);
+    status_t registerUidObserverForUids(const sp<IUidObserver>& observer, const int32_t event,
+                                        const int32_t cutpoint, const String16& callingPackage,
+                                        const int32_t uids[], size_t nUids,
+                                        /*out*/ sp<IBinder>& observerToken);
     status_t unregisterUidObserver(const sp<IUidObserver>& observer);
+    status_t addUidToObserver(const sp<IBinder>& observerToken, const String16& callingPackage,
+                              int32_t uid);
+    status_t removeUidFromObserver(const sp<IBinder>& observerToken, const String16& callingPackage,
+                                   int32_t uid);
     bool isUidActive(const uid_t uid, const String16& callingPackage);
     int getUidProcessState(const uid_t uid, const String16& callingPackage);
     status_t checkPermission(const String16& permission, const pid_t pid, const uid_t uid, int32_t* outResult);
diff --git a/libs/binder/include_activitymanager/binder/IActivityManager.h b/libs/binder/include_activitymanager/binder/IActivityManager.h
index 4632b2e..07450c6 100644
--- a/libs/binder/include_activitymanager/binder/IActivityManager.h
+++ b/libs/binder/include_activitymanager/binder/IActivityManager.h
@@ -35,21 +35,40 @@
                                      const int32_t event,
                                      const int32_t cutpoint,
                                      const String16& callingPackage) = 0;
+    virtual status_t registerUidObserverForUids(const sp<IUidObserver>& observer,
+                                                const int32_t event, const int32_t cutpoint,
+                                                const String16& callingPackage,
+                                                const int32_t uids[], size_t nUids,
+                                                /*out*/ sp<IBinder>& observerToken) = 0;
     virtual status_t unregisterUidObserver(const sp<IUidObserver>& observer) = 0;
+    virtual status_t addUidToObserver(const sp<IBinder>& observerToken,
+                                      const String16& callingPackage, int32_t uid) = 0;
+    virtual status_t removeUidFromObserver(const sp<IBinder>& observerToken,
+                                           const String16& callingPackage, int32_t uid) = 0;
     virtual bool isUidActive(const uid_t uid, const String16& callingPackage) = 0;
     virtual int32_t getUidProcessState(const uid_t uid, const String16& callingPackage) = 0;
     virtual status_t checkPermission(const String16& permission,
                                     const pid_t pid,
                                     const uid_t uid,
                                     int32_t* outResult) = 0;
+    virtual status_t logFgsApiBegin(int32_t apiType, int32_t appUid, int32_t appPid) = 0;
+    virtual status_t logFgsApiEnd(int32_t apiType, int32_t appUid, int32_t appPid) = 0;
+    virtual status_t logFgsApiStateChanged(int32_t apiType, int32_t state, int32_t appUid,
+                                           int32_t appPid) = 0;
 
     enum {
         OPEN_CONTENT_URI_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
         REGISTER_UID_OBSERVER_TRANSACTION,
         UNREGISTER_UID_OBSERVER_TRANSACTION,
+        REGISTER_UID_OBSERVER_FOR_UIDS_TRANSACTION,
+        ADD_UID_TO_OBSERVER_TRANSACTION,
+        REMOVE_UID_FROM_OBSERVER_TRANSACTION,
         IS_UID_ACTIVE_TRANSACTION,
         GET_UID_PROCESS_STATE_TRANSACTION,
         CHECK_PERMISSION_TRANSACTION,
+        LOG_FGS_API_BEGIN_TRANSACTION,
+        LOG_FGS_API_END_TRANSACTION,
+        LOG_FGS_API_STATE_CHANGED_TRANSACTION
     };
 };
 
diff --git a/libs/binder/include_activitymanager/binder/IUidObserver.h b/libs/binder/include_activitymanager/binder/IUidObserver.h
index 17f03a9..5ea7447 100644
--- a/libs/binder/include_activitymanager/binder/IUidObserver.h
+++ b/libs/binder/include_activitymanager/binder/IUidObserver.h
@@ -34,7 +34,7 @@
     virtual void onUidIdle(uid_t uid, bool disabled) = 0;
     virtual void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq,
                                    int32_t capability) = 0;
-    virtual void onUidProcAdjChanged(uid_t uid) = 0;
+    virtual void onUidProcAdjChanged(uid_t uid, int32_t adj) = 0;
 
     enum {
         ON_UID_GONE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
diff --git a/libs/binder/include_batterystats/batterystats/IBatteryStats.h b/libs/binder/include_batterystats/batterystats/IBatteryStats.h
index 6defc7f..5bb01dd 100644
--- a/libs/binder/include_batterystats/batterystats/IBatteryStats.h
+++ b/libs/binder/include_batterystats/batterystats/IBatteryStats.h
@@ -19,6 +19,7 @@
 #ifndef __ANDROID_VNDK__
 
 #include <binder/IInterface.h>
+#include <binder/Status.h>
 
 namespace android {
 
@@ -43,6 +44,7 @@
     virtual void noteStopCamera(int uid) = 0;
     virtual void noteResetCamera() = 0;
     virtual void noteResetFlashlight() = 0;
+    virtual binder::Status noteWakeupSensorEvent(int64_t elapsedNanos, int uid, int sensor) = 0;
 
     enum {
         NOTE_START_SENSOR_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
@@ -58,7 +60,8 @@
         NOTE_START_CAMERA_TRANSACTION,
         NOTE_STOP_CAMERA_TRANSACTION,
         NOTE_RESET_CAMERA_TRANSACTION,
-        NOTE_RESET_FLASHLIGHT_TRANSACTION
+        NOTE_RESET_FLASHLIGHT_TRANSACTION,
+        NOTE_WAKEUP_SENSOR_EVENT_TRANSACTION
     };
 };
 
diff --git a/libs/bufferqueueconverter/Android.bp b/libs/bufferqueueconverter/Android.bp
index c5d3a32..d4605ea 100644
--- a/libs/bufferqueueconverter/Android.bp
+++ b/libs/bufferqueueconverter/Android.bp
@@ -13,7 +13,7 @@
     export_include_dirs: ["include"],
 }
 
-cc_library_shared {
+cc_library {
     name: "libbufferqueueconverter",
     vendor_available: true,
     vndk: {
@@ -22,6 +22,7 @@
     double_loadable: true,
 
     srcs: [
+        ":libgui_frame_event_aidl",
         "BufferQueueConverter.cpp",
     ],
 
diff --git a/libs/dumputils/dump_utils.cpp b/libs/dumputils/dump_utils.cpp
index 97cb810..5eb3308 100644
--- a/libs/dumputils/dump_utils.cpp
+++ b/libs/dumputils/dump_utils.cpp
@@ -62,7 +62,10 @@
         "android.hardware.audio@7.0::IDevicesFactory",
         "android.hardware.automotive.audiocontrol@1.0::IAudioControl",
         "android.hardware.automotive.audiocontrol@2.0::IAudioControl",
+        "android.hardware.automotive.can@1.0::ICanBus",
+        "android.hardware.automotive.can@1.0::ICanController",
         "android.hardware.automotive.evs@1.0::IEvsCamera",
+        "android.hardware.automotive.sv@1.0::ISurroundViewService",
         "android.hardware.automotive.vehicle@2.0::IVehicle",
         "android.hardware.biometrics.face@1.0::IBiometricsFace",
         "android.hardware.biometrics.fingerprint@2.1::IBiometricsFingerprint",
@@ -87,7 +90,12 @@
 /* list of hal interface to dump containing process during native dumps */
 static const std::vector<std::string> aidl_interfaces_to_dump {
         "android.hardware.automotive.audiocontrol.IAudioControl",
+        "android.hardware.automotive.can.ICanController",
         "android.hardware.automotive.evs.IEvsEnumerator",
+        "android.hardware.automotive.ivn.IIvnAndroidDevice",
+        "android.hardware.automotive.occupant_awareness.IOccupantAwareness",
+        "android.hardware.automotive.remoteaccess.IRemoteAccess",
+        "android.hardware.automotive.vehicle.IVehicle",
         "android.hardware.biometrics.face.IBiometricsFace",
         "android.hardware.biometrics.fingerprint.IBiometricsFingerprint",
         "android.hardware.camera.provider.ICameraProvider",
diff --git a/libs/ftl/Android.bp b/libs/ftl/Android.bp
index 09422d3..ea1b5e4 100644
--- a/libs/ftl/Android.bp
+++ b/libs/ftl/Android.bp
@@ -11,12 +11,18 @@
     name: "ftl_test",
     test_suites: ["device-tests"],
     srcs: [
+        "algorithm_test.cpp",
         "cast_test.cpp",
         "concat_test.cpp",
         "enum_test.cpp",
         "fake_guard_test.cpp",
         "flags_test.cpp",
         "future_test.cpp",
+        "match_test.cpp",
+        "mixins_test.cpp",
+        "non_null_test.cpp",
+        "optional_test.cpp",
+        "shared_mutex_test.cpp",
         "small_map_test.cpp",
         "small_vector_test.cpp",
         "static_vector_test.cpp",
diff --git a/libs/ftl/algorithm_test.cpp b/libs/ftl/algorithm_test.cpp
new file mode 100644
index 0000000..487b1b8
--- /dev/null
+++ b/libs/ftl/algorithm_test.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ftl/algorithm.h>
+#include <ftl/small_map.h>
+#include <ftl/static_vector.h>
+#include <gtest/gtest.h>
+
+#include <string_view>
+
+namespace android::test {
+
+// Keep in sync with example usage in header file.
+TEST(Algorithm, FindIf) {
+  using namespace std::string_view_literals;
+
+  const ftl::StaticVector vector = {"upside"sv, "down"sv, "cake"sv};
+  EXPECT_EQ(ftl::find_if(vector, [](const auto& str) { return str.front() == 'c'; }), "cake"sv);
+
+  const ftl::SmallMap map = ftl::init::map<int, ftl::StaticVector<std::string_view, 3>>(
+      12, "snow"sv, "cone"sv)(13, "tiramisu"sv)(14, "upside"sv, "down"sv, "cake"sv);
+
+  using Map = decltype(map);
+
+  EXPECT_EQ(14, ftl::find_if(map, [](const auto& pair) {
+                  return pair.second.size() == 3;
+                }).transform(ftl::to_key<Map>));
+
+  const auto opt = ftl::find_if(map, [](const auto& pair) {
+                     return pair.second.size() == 1;
+                   }).transform(ftl::to_mapped_ref<Map>);
+
+  ASSERT_TRUE(opt);
+  EXPECT_EQ(opt->get(), ftl::StaticVector("tiramisu"sv));
+}
+
+TEST(Algorithm, StaticRef) {
+  using namespace std::string_view_literals;
+
+  const ftl::SmallMap map = ftl::init::map(13, "tiramisu"sv)(14, "upside-down cake"sv);
+  ASSERT_EQ("???"sv,
+            map.get(20).or_else(ftl::static_ref<std::string_view>([] { return "???"sv; }))->get());
+
+  using Map = decltype(map);
+
+  ASSERT_EQ("snow cone"sv,
+            ftl::find_if(map, [](const auto& pair) { return pair.second.front() == 's'; })
+                .transform(ftl::to_mapped_ref<Map>)
+                .or_else(ftl::static_ref<std::string_view>([] { return "snow cone"sv; }))
+                ->get());
+}
+
+}  // namespace android::test
diff --git a/libs/ftl/concat_test.cpp b/libs/ftl/concat_test.cpp
index 8ecb1b2..771f054 100644
--- a/libs/ftl/concat_test.cpp
+++ b/libs/ftl/concat_test.cpp
@@ -28,8 +28,25 @@
   EXPECT_EQ(string.c_str()[string.size()], '\0');
 }
 
+TEST(Concat, Characters) {
+  EXPECT_EQ(ftl::Concat(u'a', ' ', U'b').str(), "97 98");
+}
+
+TEST(Concat, References) {
+  int i[] = {-1, 2};
+  unsigned u = 3;
+  EXPECT_EQ(ftl::Concat(i[0], std::as_const(i[1]), u).str(), "-123");
+
+  const bool b = false;
+  const char c = 'o';
+  EXPECT_EQ(ftl::Concat(b, "tt", c).str(), "falsetto");
+}
+
 namespace {
 
+static_assert(ftl::Concat{true, false, true}.str() == "truefalsetrue");
+static_assert(ftl::Concat{':', '-', ')'}.str() == ":-)");
+
 static_assert(ftl::Concat{"foo"}.str() == "foo");
 static_assert(ftl::Concat{ftl::truncated<3>("foobar")}.str() == "foo");
 
diff --git a/libs/ftl/flags_test.cpp b/libs/ftl/flags_test.cpp
index eea052b..1279d11 100644
--- a/libs/ftl/flags_test.cpp
+++ b/libs/ftl/flags_test.cpp
@@ -35,6 +35,7 @@
 
 TEST(Flags, Any) {
     Flags<TestFlags> flags = TestFlags::ONE | TestFlags::TWO;
+    ASSERT_TRUE(flags.any());
     ASSERT_TRUE(flags.any(TestFlags::ONE));
     ASSERT_TRUE(flags.any(TestFlags::TWO));
     ASSERT_FALSE(flags.any(TestFlags::THREE));
@@ -42,6 +43,9 @@
     ASSERT_TRUE(flags.any(TestFlags::TWO | TestFlags::THREE));
     ASSERT_TRUE(flags.any(TestFlags::ONE | TestFlags::THREE));
     ASSERT_TRUE(flags.any(TestFlags::ONE | TestFlags::TWO | TestFlags::THREE));
+
+    Flags<TestFlags> emptyFlags;
+    ASSERT_FALSE(emptyFlags.any());
 }
 
 TEST(Flags, All) {
diff --git a/libs/ftl/match_test.cpp b/libs/ftl/match_test.cpp
new file mode 100644
index 0000000..a6cff2e
--- /dev/null
+++ b/libs/ftl/match_test.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ftl/match.h>
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <string>
+#include <variant>
+
+namespace android::test {
+
+// Keep in sync with example usage in header file.
+TEST(Match, Example) {
+  using namespace std::chrono;
+  using namespace std::chrono_literals;
+  using namespace std::string_literals;
+
+  std::variant<seconds, minutes, hours> duration = 119min;
+
+  // Mutable match.
+  ftl::match(duration, [](auto& d) { ++d; });
+
+  // Immutable match. Exhaustive due to minutes being convertible to seconds.
+  EXPECT_EQ("2 hours"s,
+            ftl::match(
+                duration,
+                [](const seconds& s) {
+                  const auto h = duration_cast<hours>(s);
+                  return std::to_string(h.count()) + " hours"s;
+                },
+                [](const hours& h) { return std::to_string(h.count() / 24) + " days"s; }));
+}
+
+}  // namespace android::test
diff --git a/libs/ftl/mixins_test.cpp b/libs/ftl/mixins_test.cpp
new file mode 100644
index 0000000..2c9f9df
--- /dev/null
+++ b/libs/ftl/mixins_test.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ftl/mixins.h>
+#include <gtest/gtest.h>
+
+#include <chrono>
+#include <functional>
+#include <type_traits>
+#include <utility>
+
+namespace android::test {
+namespace {
+
+// Keep in sync with example usage in header file.
+
+struct Id : ftl::Constructible<Id, std::int32_t>, ftl::Equatable<Id> {
+  using Constructible::Constructible;
+};
+
+static_assert(!std::is_default_constructible_v<Id>);
+
+struct Color : ftl::DefaultConstructible<Color, std::uint8_t>,
+               ftl::Equatable<Color>,
+               ftl::Orderable<Color> {
+  using DefaultConstructible::DefaultConstructible;
+};
+
+static_assert(Color() == Color(0u));
+static_assert(ftl::to_underlying(Color(-1)) == 255u);
+static_assert(Color(1u) < Color(2u));
+
+struct Sequence : ftl::DefaultConstructible<Sequence, std::int8_t, -1>,
+                  ftl::Equatable<Sequence>,
+                  ftl::Orderable<Sequence>,
+                  ftl::Incrementable<Sequence> {
+  using DefaultConstructible::DefaultConstructible;
+};
+
+static_assert(Sequence() == Sequence(-1));
+
+struct Timeout : ftl::DefaultConstructible<Timeout, std::chrono::seconds, 10>,
+                 ftl::Equatable<Timeout>,
+                 ftl::Addable<Timeout> {
+  using DefaultConstructible::DefaultConstructible;
+};
+
+using namespace std::chrono_literals;
+static_assert(Timeout() + Timeout(5s) == Timeout(15s));
+
+// Construction.
+constexpr Id kId{1234};
+constexpr Sequence kSequence;
+
+// Underlying value.
+static_assert(ftl::to_underlying(Id(-42)) == -42);
+static_assert(ftl::to_underlying(kSequence) == -1);
+
+// Casting.
+static_assert(static_cast<std::int32_t>(Id(-1)) == -1);
+static_assert(static_cast<std::int8_t>(kSequence) == -1);
+
+static_assert(!std::is_convertible_v<std::int32_t, Id>);
+static_assert(!std::is_convertible_v<Id, std::int32_t>);
+
+// Equality.
+static_assert(kId == Id(1234));
+static_assert(kId != Id(123));
+static_assert(kSequence == Sequence(-1));
+
+// Ordering.
+static_assert(Sequence(1) < Sequence(2));
+static_assert(Sequence(2) > Sequence(1));
+static_assert(Sequence(3) <= Sequence(4));
+static_assert(Sequence(4) >= Sequence(3));
+static_assert(Sequence(5) <= Sequence(5));
+static_assert(Sequence(6) >= Sequence(6));
+
+// Incrementing.
+template <typename Op, typename T, typename... Ts>
+constexpr auto mutable_op(Op op, T lhs, Ts... rhs) {
+  const T result = op(lhs, rhs...);
+  return std::make_pair(lhs, result);
+}
+
+static_assert(mutable_op([](auto& lhs) { return ++lhs; }, Sequence()) ==
+              std::make_pair(Sequence(0), Sequence(0)));
+
+static_assert(mutable_op([](auto& lhs) { return lhs++; }, Sequence()) ==
+              std::make_pair(Sequence(0), Sequence(-1)));
+
+// Addition.
+
+// `Addable` implies `Incrementable`.
+static_assert(mutable_op([](auto& lhs) { return ++lhs; }, Timeout()) ==
+              std::make_pair(Timeout(11s), Timeout(11s)));
+
+static_assert(mutable_op([](auto& lhs) { return lhs++; }, Timeout()) ==
+              std::make_pair(Timeout(11s), Timeout(10s)));
+
+static_assert(Timeout(5s) + Timeout(6s) == Timeout(11s));
+
+static_assert(mutable_op([](auto& lhs, const auto& rhs) { return lhs += rhs; }, Timeout(7s),
+                         Timeout(8s)) == std::make_pair(Timeout(15s), Timeout(15s)));
+
+// Type safety.
+
+namespace traits {
+
+template <typename, typename = void>
+struct is_incrementable : std::false_type {};
+
+template <typename T>
+struct is_incrementable<T, std::void_t<decltype(++std::declval<T&>())>> : std::true_type {};
+
+template <typename T>
+constexpr bool is_incrementable_v = is_incrementable<T>{};
+
+template <typename, typename, typename, typename = void>
+struct has_binary_op : std::false_type {};
+
+template <typename Op, typename T, typename U>
+struct has_binary_op<Op, T, U, std::void_t<decltype(Op{}(std::declval<T&>(), std::declval<U&>()))>>
+    : std::true_type {};
+
+template <typename T, typename U>
+constexpr bool is_equatable_v =
+    has_binary_op<std::equal_to<void>, T, U>{} && has_binary_op<std::not_equal_to<void>, T, U>{};
+
+template <typename T, typename U>
+constexpr bool is_orderable_v =
+    has_binary_op<std::less<void>, T, U>{} && has_binary_op<std::less_equal<void>, T, U>{} &&
+    has_binary_op<std::greater<void>, T, U>{} && has_binary_op<std::greater_equal<void>, T, U>{};
+
+template <typename T, typename U>
+constexpr bool is_addable_v = has_binary_op<std::plus<void>, T, U>{};
+
+}  // namespace traits
+
+struct Real : ftl::Constructible<Real, float> {
+  using Constructible::Constructible;
+};
+
+static_assert(traits::is_equatable_v<Id, Id>);
+static_assert(!traits::is_equatable_v<Real, Real>);
+static_assert(!traits::is_equatable_v<Id, Color>);
+static_assert(!traits::is_equatable_v<Sequence, Id>);
+static_assert(!traits::is_equatable_v<Id, std::int32_t>);
+static_assert(!traits::is_equatable_v<std::chrono::seconds, Timeout>);
+
+static_assert(traits::is_orderable_v<Color, Color>);
+static_assert(!traits::is_orderable_v<Id, Id>);
+static_assert(!traits::is_orderable_v<Real, Real>);
+static_assert(!traits::is_orderable_v<Color, Sequence>);
+static_assert(!traits::is_orderable_v<Color, std::uint8_t>);
+static_assert(!traits::is_orderable_v<std::chrono::seconds, Timeout>);
+
+static_assert(traits::is_incrementable_v<Sequence>);
+static_assert(traits::is_incrementable_v<Timeout>);
+static_assert(!traits::is_incrementable_v<Id>);
+static_assert(!traits::is_incrementable_v<Color>);
+static_assert(!traits::is_incrementable_v<Real>);
+
+static_assert(traits::is_addable_v<Timeout, Timeout>);
+static_assert(!traits::is_addable_v<Id, Id>);
+static_assert(!traits::is_addable_v<Real, Real>);
+static_assert(!traits::is_addable_v<Sequence, Sequence>);
+static_assert(!traits::is_addable_v<Timeout, Sequence>);
+static_assert(!traits::is_addable_v<Color, Timeout>);
+
+}  // namespace
+}  // namespace android::test
diff --git a/libs/ftl/non_null_test.cpp b/libs/ftl/non_null_test.cpp
new file mode 100644
index 0000000..bd0462b
--- /dev/null
+++ b/libs/ftl/non_null_test.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ftl/non_null.h>
+#include <gtest/gtest.h>
+
+#include <memory>
+#include <string>
+#include <string_view>
+
+namespace android::test {
+namespace {
+
+void get_length(const ftl::NonNull<std::shared_ptr<std::string>>& string_ptr,
+                ftl::NonNull<std::size_t*> length_ptr) {
+  // No need for `nullptr` checks.
+  *length_ptr = string_ptr->length();
+}
+
+using Pair = std::pair<ftl::NonNull<std::shared_ptr<int>>, std::shared_ptr<int>>;
+
+Pair dupe_if(ftl::NonNull<std::unique_ptr<int>> non_null_ptr, bool condition) {
+  // Move the underlying pointer out, so `non_null_ptr` must not be accessed after this point.
+  auto unique_ptr = std::move(non_null_ptr).take();
+
+  auto non_null_shared_ptr = ftl::as_non_null(std::shared_ptr<int>(std::move(unique_ptr)));
+  auto nullable_shared_ptr = condition ? non_null_shared_ptr.get() : nullptr;
+
+  return {std::move(non_null_shared_ptr), std::move(nullable_shared_ptr)};
+}
+
+}  // namespace
+
+// Keep in sync with example usage in header file.
+TEST(NonNull, Example) {
+  const auto string_ptr = ftl::as_non_null(std::make_shared<std::string>("android"));
+  std::size_t size;
+  get_length(string_ptr, ftl::as_non_null(&size));
+  EXPECT_EQ(size, 7u);
+
+  auto ptr = ftl::as_non_null(std::make_unique<int>(42));
+  const auto [ptr1, ptr2] = dupe_if(std::move(ptr), true);
+  EXPECT_EQ(ptr1.get(), ptr2);
+}
+
+namespace {
+
+constexpr std::string_view kApple = "apple";
+constexpr std::string_view kOrange = "orange";
+
+using StringViewPtr = ftl::NonNull<const std::string_view*>;
+constexpr StringViewPtr kApplePtr = ftl::as_non_null(&kApple);
+constexpr StringViewPtr kOrangePtr = ftl::as_non_null(&kOrange);
+
+constexpr StringViewPtr longest(StringViewPtr ptr1, StringViewPtr ptr2) {
+  return ptr1->length() > ptr2->length() ? ptr1 : ptr2;
+}
+
+static_assert(longest(kApplePtr, kOrangePtr) == kOrangePtr);
+
+}  // namespace
+}  // namespace android::test
diff --git a/libs/ftl/optional_test.cpp b/libs/ftl/optional_test.cpp
new file mode 100644
index 0000000..91bf7bc
--- /dev/null
+++ b/libs/ftl/optional_test.cpp
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ftl/optional.h>
+#include <ftl/static_vector.h>
+#include <ftl/string.h>
+#include <ftl/unit.h>
+#include <gtest/gtest.h>
+
+#include <cstdlib>
+#include <functional>
+#include <numeric>
+#include <utility>
+
+using namespace std::placeholders;
+using namespace std::string_literals;
+
+namespace android::test {
+
+using ftl::Optional;
+using ftl::StaticVector;
+
+TEST(Optional, Construct) {
+  // Empty.
+  EXPECT_EQ(std::nullopt, Optional<int>());
+  EXPECT_EQ(std::nullopt, Optional<std::string>(std::nullopt));
+
+  // Value.
+  EXPECT_EQ('?', Optional('?'));
+  EXPECT_EQ(""s, Optional(std::string()));
+
+  // In place.
+  EXPECT_EQ("???"s, Optional<std::string>(std::in_place, 3u, '?'));
+  EXPECT_EQ("abc"s, Optional<std::string>(std::in_place, {'a', 'b', 'c'}));
+
+  // Implicit downcast.
+  {
+    Optional opt = std::optional("test"s);
+    static_assert(std::is_same_v<decltype(opt), Optional<std::string>>);
+
+    ASSERT_TRUE(opt);
+    EXPECT_EQ(opt.value(), "test"s);
+  }
+}
+
+TEST(Optional, Transform) {
+  // Empty.
+  EXPECT_EQ(std::nullopt, Optional<int>().transform([](int) { return 0; }));
+
+  // By value.
+  EXPECT_EQ(0, Optional(0).transform([](int x) { return x; }));
+  EXPECT_EQ(100, Optional(99).transform([](int x) { return x + 1; }));
+  EXPECT_EQ("0b100"s, Optional(4).transform(std::bind(ftl::to_string<int>, _1, ftl::Radix::kBin)));
+
+  // By reference.
+  {
+    Optional opt = 'x';
+    EXPECT_EQ('z', opt.transform([](char& c) {
+      c = 'y';
+      return 'z';
+    }));
+
+    EXPECT_EQ('y', opt);
+  }
+
+  // By rvalue reference.
+  {
+    std::string out;
+    EXPECT_EQ("xyz"s, Optional("abc"s).transform([&out](std::string&& str) {
+      out = std::move(str);
+      return "xyz"s;
+    }));
+
+    EXPECT_EQ(out, "abc"s);
+  }
+
+  // No return value.
+  {
+    Optional opt = "food"s;
+    EXPECT_EQ(ftl::unit, opt.transform(ftl::unit_fn([](std::string& str) { str.pop_back(); })));
+    EXPECT_EQ(opt, "foo"s);
+  }
+
+  // Chaining.
+  EXPECT_EQ(14u, Optional(StaticVector{"upside"s, "down"s})
+                     .transform([](StaticVector<std::string, 3>&& v) {
+                       v.push_back("cake"s);
+                       return v;
+                     })
+                     .transform([](const StaticVector<std::string, 3>& v) {
+                       return std::accumulate(v.begin(), v.end(), std::string());
+                     })
+                     .transform([](const std::string& s) { return s.length(); }));
+}
+
+namespace {
+
+Optional<int> parse_int(const std::string& str) {
+  if (const int i = std::atoi(str.c_str())) return i;
+  return std::nullopt;
+}
+
+}  // namespace
+
+TEST(Optional, AndThen) {
+  // Empty.
+  EXPECT_EQ(std::nullopt, Optional<int>().and_then([](int) -> Optional<int> { return 0; }));
+  EXPECT_EQ(std::nullopt, Optional<int>().and_then([](int) { return Optional<int>(); }));
+
+  // By value.
+  EXPECT_EQ(0, Optional(0).and_then([](int x) { return Optional(x); }));
+  EXPECT_EQ(123, Optional("123").and_then(parse_int));
+  EXPECT_EQ(std::nullopt, Optional("abc").and_then(parse_int));
+
+  // By reference.
+  {
+    Optional opt = 'x';
+    EXPECT_EQ('z', opt.and_then([](char& c) {
+      c = 'y';
+      return Optional('z');
+    }));
+
+    EXPECT_EQ('y', opt);
+  }
+
+  // By rvalue reference.
+  {
+    std::string out;
+    EXPECT_EQ("xyz"s, Optional("abc"s).and_then([&out](std::string&& str) {
+      out = std::move(str);
+      return Optional("xyz"s);
+    }));
+
+    EXPECT_EQ(out, "abc"s);
+  }
+
+  // Chaining.
+  using StringVector = StaticVector<std::string, 3>;
+  EXPECT_EQ(14u, Optional(StaticVector{"-"s, "1"s})
+                     .and_then([](StringVector&& v) -> Optional<StringVector> {
+                       if (v.push_back("4"s)) return v;
+                       return {};
+                     })
+                     .and_then([](const StringVector& v) -> Optional<std::string> {
+                       if (v.full()) return std::accumulate(v.begin(), v.end(), std::string());
+                       return {};
+                     })
+                     .and_then(parse_int)
+                     .and_then([](int i) {
+                       return i > 0 ? std::nullopt : std::make_optional(static_cast<unsigned>(-i));
+                     }));
+}
+
+TEST(Optional, OrElse) {
+  // Non-empty.
+  {
+    const Optional opt = false;
+    EXPECT_EQ(false, opt.or_else([] { return Optional(true); }));
+    EXPECT_EQ('x', Optional('x').or_else([] { return std::make_optional('y'); }));
+  }
+
+  // Empty.
+  {
+    const Optional<int> opt;
+    EXPECT_EQ(123, opt.or_else([]() -> Optional<int> { return 123; }));
+    EXPECT_EQ("abc"s, Optional<std::string>().or_else([] { return Optional("abc"s); }));
+  }
+  {
+    bool empty = false;
+    EXPECT_EQ(Optional<float>(), Optional<float>().or_else([&empty]() -> Optional<float> {
+      empty = true;
+      return std::nullopt;
+    }));
+    EXPECT_TRUE(empty);
+  }
+
+  // Chaining.
+  using StringVector = StaticVector<std::string, 3>;
+  EXPECT_EQ(999, Optional(StaticVector{"1"s, "0"s, "0"s})
+                     .and_then([](StringVector&& v) -> Optional<StringVector> {
+                       if (v.push_back("0"s)) return v;
+                       return {};
+                     })
+                     .or_else([] {
+                       return Optional(StaticVector{"9"s, "9"s, "9"s});
+                     })
+                     .transform([](const StringVector& v) {
+                       return std::accumulate(v.begin(), v.end(), std::string());
+                     })
+                     .and_then(parse_int)
+                     .or_else([] { return Optional(-1); }));
+}
+
+// Comparison.
+namespace {
+
+constexpr Optional<int> kOptional1 = 1;
+constexpr Optional<int> kAnotherOptional1 = 1;
+constexpr Optional<int> kOptional2 = 2;
+constexpr Optional<int> kOptionalEmpty, kAnotherOptionalEmpty;
+
+constexpr std::optional<int> kStdOptional1 = 1;
+
+static_assert(kOptional1 == kAnotherOptional1);
+
+static_assert(kOptional1 != kOptional2);
+static_assert(kOptional2 != kOptional1);
+
+static_assert(kOptional1 != kOptionalEmpty);
+static_assert(kOptionalEmpty != kOptional1);
+
+static_assert(kOptionalEmpty == kAnotherOptionalEmpty);
+
+static_assert(kOptional1 == kStdOptional1);
+static_assert(kStdOptional1 == kOptional1);
+
+static_assert(kOptional2 != kStdOptional1);
+static_assert(kStdOptional1 != kOptional2);
+
+static_assert(kOptional2 != kOptionalEmpty);
+static_assert(kOptionalEmpty != kOptional2);
+
+}  // namespace
+
+}  // namespace android::test
diff --git a/libs/ftl/shared_mutex_test.cpp b/libs/ftl/shared_mutex_test.cpp
new file mode 100644
index 0000000..6da7061
--- /dev/null
+++ b/libs/ftl/shared_mutex_test.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ftl/shared_mutex.h>
+#include <gtest/gtest.h>
+#include <ftl/fake_guard.h>
+
+namespace android::test {
+
+TEST(SharedMutex, SharedLock) {
+  ftl::SharedMutex mutex;
+  std::shared_lock shared_lock(mutex);
+
+  { std::shared_lock shared_lock2(mutex); }
+}
+
+TEST(SharedMutex, ExclusiveLock) {
+  ftl::SharedMutex mutex;
+  std::unique_lock unique_lock(mutex);
+}
+
+TEST(SharedMutex, Annotations) {
+  struct {
+    void foo() FTL_ATTRIBUTE(requires_shared_capability(mutex)) { num++; }
+    void bar() FTL_ATTRIBUTE(requires_capability(mutex)) { num++; }
+    void baz() {
+      std::shared_lock shared_lock(mutex);
+      num++;
+    }
+    ftl::SharedMutex mutex;
+    int num = 0;
+
+  } s;
+
+  {
+    // TODO(b/257958323): Use an RAII class instead of locking manually.
+    s.mutex.lock_shared();
+    s.foo();
+    s.baz();
+    s.mutex.unlock_shared();
+  }
+  s.mutex.lock();
+  s.bar();
+  s.mutex.unlock();
+}
+
+}  // namespace android::test
diff --git a/libs/ftl/small_map_test.cpp b/libs/ftl/small_map_test.cpp
index 1740a2b..634877f 100644
--- a/libs/ftl/small_map_test.cpp
+++ b/libs/ftl/small_map_test.cpp
@@ -15,12 +15,15 @@
  */
 
 #include <ftl/small_map.h>
+#include <ftl/unit.h>
 #include <gtest/gtest.h>
 
 #include <cctype>
 #include <string>
+#include <string_view>
 
 using namespace std::string_literals;
+using namespace std::string_view_literals;
 
 namespace android::test {
 
@@ -38,7 +41,7 @@
 
   EXPECT_TRUE(map.contains(123));
 
-  EXPECT_EQ(map.get(42, [](const std::string& s) { return s.size(); }), 3u);
+  EXPECT_EQ(map.get(42).transform([](const std::string& s) { return s.size(); }), 3u);
 
   const auto opt = map.get(-1);
   ASSERT_TRUE(opt);
@@ -50,7 +53,7 @@
   map.emplace_or_replace(0, "vanilla", 2u, 3u);
   EXPECT_TRUE(map.dynamic());
 
-  EXPECT_EQ(map, SmallMap(ftl::init::map(-1, "xyz")(0, "nil")(42, "???")(123, "abc")));
+  EXPECT_EQ(map, SmallMap(ftl::init::map(-1, "xyz"sv)(0, "nil"sv)(42, "???"sv)(123, "abc"sv)));
 }
 
 TEST(SmallMap, Construct) {
@@ -70,7 +73,7 @@
     EXPECT_EQ(map.max_size(), 5u);
     EXPECT_FALSE(map.dynamic());
 
-    EXPECT_EQ(map, SmallMap(ftl::init::map(123, "abc")(456, "def")(789, "ghi")));
+    EXPECT_EQ(map, SmallMap(ftl::init::map(123, "abc"sv)(456, "def"sv)(789, "ghi"sv)));
   }
   {
     // In-place constructor with different types.
@@ -81,7 +84,7 @@
     EXPECT_EQ(map.max_size(), 5u);
     EXPECT_FALSE(map.dynamic());
 
-    EXPECT_EQ(map, SmallMap(ftl::init::map(42, "???")(123, "abc")(-1, "\0\0\0")));
+    EXPECT_EQ(map, SmallMap(ftl::init::map(42, "???"sv)(123, "abc"sv)(-1, ""sv)));
   }
   {
     // In-place constructor with implicit size.
@@ -92,7 +95,7 @@
     EXPECT_EQ(map.max_size(), 3u);
     EXPECT_FALSE(map.dynamic());
 
-    EXPECT_EQ(map, SmallMap(ftl::init::map(-1, "\0\0\0")(42, "???")(123, "abc")));
+    EXPECT_EQ(map, SmallMap(ftl::init::map(-1, ""sv)(42, "???"sv)(123, "abc"sv)));
   }
 }
 
@@ -108,7 +111,7 @@
   {
     // Convertible types; same capacity.
     SmallMap map1 = ftl::init::map<char, std::string>('M', "mega")('G', "giga");
-    const SmallMap map2 = ftl::init::map('T', "tera")('P', "peta");
+    const SmallMap map2 = ftl::init::map('T', "tera"sv)('P', "peta"sv);
 
     map1 = map2;
     EXPECT_EQ(map1, map2);
@@ -147,7 +150,7 @@
   }
 }
 
-TEST(SmallMap, Find) {
+TEST(SmallMap, Get) {
   {
     // Constant reference.
     const SmallMap map = ftl::init::map('a', 'A')('b', 'B')('c', 'C');
@@ -172,14 +175,15 @@
     EXPECT_EQ(d, 'D');
   }
   {
-    // Constant unary operation.
+    // Immutable transform operation.
     const SmallMap map = ftl::init::map('a', 'x')('b', 'y')('c', 'z');
-    EXPECT_EQ(map.get('c', [](char c) { return std::toupper(c); }), 'Z');
+    EXPECT_EQ(map.get('c').transform([](char c) { return std::toupper(c); }), 'Z');
   }
   {
-    // Mutable unary operation.
+    // Mutable transform operation.
     SmallMap map = ftl::init::map('a', 'x')('b', 'y')('c', 'z');
-    EXPECT_TRUE(map.get('c', [](char& c) { c = std::toupper(c); }));
+    EXPECT_EQ(map.get('c').transform(ftl::unit_fn([](char& c) { c = std::toupper(c); })),
+              ftl::unit);
 
     EXPECT_EQ(map, SmallMap(ftl::init::map('c', 'Z')('b', 'y')('a', 'x')));
   }
@@ -247,7 +251,7 @@
   }
   {
     // Replacement arguments can refer to the replaced mapping.
-    const auto ref = map.get(2, [](const auto& s) { return s.str[0]; });
+    const auto ref = map.get(2).transform([](const String& s) { return s.str[0]; });
     ASSERT_TRUE(ref);
 
     // Construct std::string from one character.
@@ -292,7 +296,7 @@
   }
   {
     // Replacement arguments can refer to the replaced mapping.
-    const auto ref = map.get(2, [](const auto& s) { return s.str[0]; });
+    const auto ref = map.get(2).transform([](const String& s) { return s.str[0]; });
     ASSERT_TRUE(ref);
 
     // Construct std::string from one character.
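
The replacements above track an ftl::SmallMap API change: get() no longer takes a unary operation and instead returns an optional-like wrapper, so callers chain transform(), wrapping void lambdas in ftl::unit_fn so the result compares against ftl::unit. A minimal usage sketch under that assumption, using only the headers already included by the test:

    #include <cctype>

    #include <ftl/small_map.h>
    #include <ftl/unit.h>

    bool transformExample() {
        android::ftl::SmallMap map = android::ftl::init::map('a', 'x')('b', 'y');

        // Read-only lookup: transform the mapped value only if the key is present.
        const auto upper = map.get('a').transform([](char c) { return std::toupper(c); });

        // Mutating lookup: unit_fn turns a void lambda into one returning ftl::unit,
        // so transform() still yields a value to test against.
        const auto flag = map.get('b').transform(
                android::ftl::unit_fn([](char& c) { c = std::toupper(c); }));

        return upper.has_value() && flag.has_value();
    }
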
diff --git a/libs/gralloc/types/Android.bp b/libs/gralloc/types/Android.bp
index f5af425..6d1dfe8 100644
--- a/libs/gralloc/types/Android.bp
+++ b/libs/gralloc/types/Android.bp
@@ -23,6 +23,7 @@
 
 cc_library {
     name: "libgralloctypes",
+    defaults: ["android.hardware.graphics.common-ndk_shared"],
     cflags: [
         "-Wall",
         "-Werror",
@@ -51,7 +52,6 @@
     ],
 
     shared_libs: [
-        "android.hardware.graphics.common-V4-ndk",
         "android.hardware.graphics.mapper@4.0",
         "libhidlbase",
         "liblog",
diff --git a/libs/graphicsenv/GpuStatsInfo.cpp b/libs/graphicsenv/GpuStatsInfo.cpp
index 858739c..7b74214 100644
--- a/libs/graphicsenv/GpuStatsInfo.cpp
+++ b/libs/graphicsenv/GpuStatsInfo.cpp
@@ -89,6 +89,14 @@
     if ((status = parcel->writeBool(falsePrerotation)) != OK) return status;
     if ((status = parcel->writeBool(gles1InUse)) != OK) return status;
     if ((status = parcel->writeBool(angleInUse)) != OK) return status;
+    if ((status = parcel->writeBool(createdGlesContext)) != OK) return status;
+    if ((status = parcel->writeBool(createdVulkanDevice)) != OK) return status;
+    if ((status = parcel->writeBool(createdVulkanSwapchain)) != OK) return status;
+    if ((status = parcel->writeUint32(vulkanApiVersion)) != OK) return status;
+    if ((status = parcel->writeUint64(vulkanDeviceFeaturesEnabled)) != OK) return status;
+    if ((status = parcel->writeInt32Vector(vulkanInstanceExtensions)) != OK) return status;
+    if ((status = parcel->writeInt32Vector(vulkanDeviceExtensions)) != OK) return status;
+
     return OK;
 }
 
@@ -103,6 +111,14 @@
     if ((status = parcel->readBool(&falsePrerotation)) != OK) return status;
     if ((status = parcel->readBool(&gles1InUse)) != OK) return status;
     if ((status = parcel->readBool(&angleInUse)) != OK) return status;
+    if ((status = parcel->readBool(&createdGlesContext)) != OK) return status;
+    if ((status = parcel->readBool(&createdVulkanDevice)) != OK) return status;
+    if ((status = parcel->readBool(&createdVulkanSwapchain)) != OK) return status;
+    if ((status = parcel->readUint32(&vulkanApiVersion)) != OK) return status;
+    if ((status = parcel->readUint64(&vulkanDeviceFeaturesEnabled)) != OK) return status;
+    if ((status = parcel->readInt32Vector(&vulkanInstanceExtensions)) != OK) return status;
+    if ((status = parcel->readInt32Vector(&vulkanDeviceExtensions)) != OK) return status;
+
     return OK;
 }
 
@@ -114,6 +130,12 @@
     StringAppendF(&result, "falsePrerotation = %d\n", falsePrerotation);
     StringAppendF(&result, "gles1InUse = %d\n", gles1InUse);
     StringAppendF(&result, "angleInUse = %d\n", angleInUse);
+    StringAppendF(&result, "createdGlesContext = %d\n", createdGlesContext);
+    StringAppendF(&result, "createdVulkanDevice = %d\n", createdVulkanDevice);
+    StringAppendF(&result, "createdVulkanSwapchain = %d\n", createdVulkanSwapchain);
+    StringAppendF(&result, "vulkanApiVersion = 0x%" PRIx32 "\n", vulkanApiVersion);
+    StringAppendF(&result, "vulkanDeviceFeaturesEnabled = 0x%" PRIx64 "\n",
+                  vulkanDeviceFeaturesEnabled);
     result.append("glDriverLoadingTime:");
     for (int32_t loadingTime : glDriverLoadingTime) {
         StringAppendF(&result, " %d", loadingTime);
@@ -129,6 +151,16 @@
         StringAppendF(&result, " %d", loadingTime);
     }
     result.append("\n");
+    result.append("vulkanInstanceExtensions:");
+    for (int32_t extension : vulkanInstanceExtensions) {
+        StringAppendF(&result, " 0x%x", extension);
+    }
+    result.append("\n");
+    result.append("vulkanDeviceExtensions:");
+    for (int32_t extension : vulkanDeviceExtensions) {
+        StringAppendF(&result, " 0x%x", extension);
+    }
+    result.append("\n");
     return result;
 }
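
The new GpuStatsAppInfo fields are appended at the end of both writeToParcel() and readFromParcel() in the same order; a Parcel carries no field tags, so the read sequence must mirror the write sequence exactly or every later field is corrupted. A minimal sketch of that pattern for a hypothetical two-field parcelable (names invented for illustration):

    #include <binder/Parcel.h>
    #include <binder/Parcelable.h>

    // Hypothetical parcelable used only to illustrate the write/read symmetry.
    struct ExampleInfo : public android::Parcelable {
        bool flag = false;
        uint32_t version = 0;

        android::status_t writeToParcel(android::Parcel* parcel) const override {
            android::status_t status;
            if ((status = parcel->writeBool(flag)) != android::OK) return status;
            if ((status = parcel->writeUint32(version)) != android::OK) return status;
            return android::OK;
        }

        android::status_t readFromParcel(const android::Parcel* parcel) override {
            android::status_t status;
            // Reads must happen in exactly the order the fields were written.
            if ((status = parcel->readBool(&flag)) != android::OK) return status;
            if ((status = parcel->readUint32(&version)) != android::OK) return status;
            return android::OK;
        }
    };
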
 
diff --git a/libs/graphicsenv/GraphicsEnv.cpp b/libs/graphicsenv/GraphicsEnv.cpp
index 64f8704..9d3e674 100644
--- a/libs/graphicsenv/GraphicsEnv.cpp
+++ b/libs/graphicsenv/GraphicsEnv.cpp
@@ -278,6 +278,57 @@
     sendGpuStatsLocked(api, isDriverLoaded, driverLoadingTime);
 }
 
+// Hash function for null-terminated Vulkan extension names.
+// We store hash values of the extensions, rather than the actual names or
+// indices, to support new extensions easily, avoid creating a table of
+// 'known' extensions inside Android, and reduce the runtime overhead.
+static uint64_t calculateExtensionHash(const char* word) {
+    if (!word) {
+        return 0;
+    }
+    const size_t wordLen = strlen(word);
+    const uint32_t seed = 167;
+    uint64_t hash = 0;
+    for (size_t i = 0; i < wordLen; i++) {
+        hash = (hash * seed) + word[i];
+    }
+    return hash;
+}
+
+void GraphicsEnv::setVulkanInstanceExtensions(uint32_t enabledExtensionCount,
+                                              const char* const* ppEnabledExtensionNames) {
+    ATRACE_CALL();
+    if (enabledExtensionCount == 0 || ppEnabledExtensionNames == nullptr) {
+        return;
+    }
+
+    const uint32_t maxNumStats = android::GpuStatsAppInfo::MAX_NUM_EXTENSIONS;
+    uint64_t extensionHashes[maxNumStats];
+    const uint32_t numStats = std::min(enabledExtensionCount, maxNumStats);
+    for (uint32_t i = 0; i < numStats; i++) {
+        extensionHashes[i] = calculateExtensionHash(ppEnabledExtensionNames[i]);
+    }
+    setTargetStatsArray(android::GpuStatsInfo::Stats::VULKAN_INSTANCE_EXTENSION,
+                        extensionHashes, numStats);
+}
+
+void GraphicsEnv::setVulkanDeviceExtensions(uint32_t enabledExtensionCount,
+                                            const char* const* ppEnabledExtensionNames) {
+    ATRACE_CALL();
+    if (enabledExtensionCount == 0 || ppEnabledExtensionNames == nullptr) {
+        return;
+    }
+
+    const uint32_t maxNumStats = android::GpuStatsAppInfo::MAX_NUM_EXTENSIONS;
+    uint64_t extensionHashes[maxNumStats];
+    const uint32_t numStats = std::min(enabledExtensionCount, maxNumStats);
+    for (uint32_t i = 0; i < numStats; i++) {
+        extensionHashes[i] = calculateExtensionHash(ppEnabledExtensionNames[i]);
+    }
+    setTargetStatsArray(android::GpuStatsInfo::Stats::VULKAN_DEVICE_EXTENSION,
+                        extensionHashes, numStats);
+}
+
 static sp<IGpuService> getGpuService() {
     static const sp<IBinder> binder = defaultServiceManager()->checkService(String16("gpu"));
     if (!binder) {
@@ -295,6 +346,11 @@
 }
 
 void GraphicsEnv::setTargetStats(const GpuStatsInfo::Stats stats, const uint64_t value) {
+    return setTargetStatsArray(stats, &value, 1);
+}
+
+void GraphicsEnv::setTargetStatsArray(const GpuStatsInfo::Stats stats, const uint64_t* values,
+                                      const uint32_t valueCount) {
     ATRACE_CALL();
 
     std::lock_guard<std::mutex> lock(mStatsLock);
@@ -302,8 +358,8 @@
 
     const sp<IGpuService> gpuService = getGpuService();
     if (gpuService) {
-        gpuService->setTargetStats(mGpuStats.appPackageName, mGpuStats.driverVersionCode, stats,
-                                   value);
+        gpuService->setTargetStatsArray(mGpuStats.appPackageName, mGpuStats.driverVersionCode,
+                                        stats, values, valueCount);
     }
 }
 
@@ -396,61 +452,24 @@
     return (mUseAngle == YES) ? true : false;
 }
 
-bool GraphicsEnv::angleIsSystemDriver() {
-    // Make sure we are init'ed
-    if (mAngleAppName.empty()) {
-        ALOGV("App name is empty. setAngleInfo() has not been called to enable ANGLE.");
-        return false;
-    }
-
-    return (mAngleIsSystemDriver == YES) ? true : false;
-}
-
-bool GraphicsEnv::shouldForceLegacyDriver() {
-    // Make sure we are init'ed
-    if (mAngleAppName.empty()) {
-        ALOGV("App name is empty. setAngleInfo() has not been called to enable ANGLE.");
-        return false;
-    }
-
-    return (mAngleIsSystemDriver == YES && mUseAngle == NO) ? true : false;
-}
-
-std::string GraphicsEnv::getLegacySuffix() {
-    return mLegacyDriverSuffix;
-}
-
 void GraphicsEnv::updateUseAngle() {
-    mUseAngle = NO;
-
     const char* ANGLE_PREFER_ANGLE = "angle";
-    const char* ANGLE_PREFER_LEGACY = "legacy";
-    // The following is a deprecated version of "legacy"
     const char* ANGLE_PREFER_NATIVE = "native";
 
     mUseAngle = NO;
     if (mAngleDeveloperOptIn == ANGLE_PREFER_ANGLE) {
-        ALOGI("Using ANGLE, the %s GLES driver for package '%s'",
-              mAngleIsSystemDriver == YES ? "system" : "optional", mAngleAppName.c_str());
+        ALOGV("User set \"Developer Options\" to force the use of ANGLE");
         mUseAngle = YES;
-    } else if (mAngleDeveloperOptIn == ANGLE_PREFER_LEGACY ||
-               mAngleDeveloperOptIn == ANGLE_PREFER_NATIVE) {
-        ALOGI("Using the (%s) Legacy GLES driver for package '%s'",
-              mAngleIsSystemDriver == YES ? "optional" : "system", mAngleAppName.c_str());
+    } else if (mAngleDeveloperOptIn == ANGLE_PREFER_NATIVE) {
+        ALOGV("User set \"Developer Options\" to force the use of Native");
     } else {
         ALOGV("User set invalid \"Developer Options\": '%s'", mAngleDeveloperOptIn.c_str());
     }
 }
 
 void GraphicsEnv::setAngleInfo(const std::string path, const std::string appName,
-                               const bool angleIsSystemDriver, const std::string developerOptIn,
+                               const std::string developerOptIn,
                                const std::vector<std::string> eglFeatures) {
-    // Set whether ANGLE is the system driver:
-    mAngleIsSystemDriver = angleIsSystemDriver ? YES : NO;
-
-    // Note: Given the current logic and lack of the old rules file processing,
-    // there seems to be little chance that mUseAngle != UNKNOWN.  Leave this
-    // for now, even though it seems outdated.
     if (mUseAngle != UNKNOWN) {
         // We've already figured out an answer for this app, so just return.
         ALOGV("Already evaluated the rules file for '%s': use ANGLE = %s", appName.c_str(),
@@ -471,25 +490,6 @@
     updateUseAngle();
 }
 
-void GraphicsEnv::setLegacyDriverInfo(const std::string appName, const bool angleIsSystemDriver,
-                                      const std::string legacyDriverName) {
-    ALOGV("setting legacy app name to '%s'", appName.c_str());
-    mAngleAppName = appName;
-
-    // Force the use of the legacy driver instead of ANGLE
-    const char* ANGLE_PREFER_LEGACY = "legacy";
-    mAngleDeveloperOptIn = ANGLE_PREFER_LEGACY;
-    ALOGV("setting ANGLE application opt-in to 'legacy'");
-
-    // Set whether ANGLE is the system driver:
-    mAngleIsSystemDriver = angleIsSystemDriver ? YES : NO;
-
-    mLegacyDriverSuffix = legacyDriverName;
-
-    // Update the current status of whether we should use ANGLE or not
-    updateUseAngle();
-}
-
 void GraphicsEnv::setLayerPaths(NativeLoaderNamespace* appNamespace, const std::string layerPaths) {
     if (mLayerPaths.empty()) {
         mLayerPaths = layerPaths;
@@ -652,4 +652,13 @@
     return mAngleNamespace;
 }
 
+void GraphicsEnv::nativeToggleAngleAsSystemDriver(bool enabled) {
+    const sp<IGpuService> gpuService = getGpuService();
+    if (!gpuService) {
+        ALOGE("No GPU service");
+        return;
+    }
+    gpuService->toggleAngleAsSystemDriver(enabled);
+}
+
 } // namespace android
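
setVulkanInstanceExtensions() and setVulkanDeviceExtensions() expect the count and name array in the same shape Vulkan passes them to vkCreateInstance()/vkCreateDevice(), so a caller can forward the create-info fields directly; each name is reduced to the 64-bit polynomial hash computed by calculateExtensionHash() before being reported. A minimal caller sketch; the exact hook point in the Vulkan loader is an assumption here:

    #include <graphicsenv/GraphicsEnv.h>
    #include <vulkan/vulkan_core.h>

    // Hypothetical hook, called before vkCreateInstance() is forwarded to the driver.
    void reportInstanceExtensions(const VkInstanceCreateInfo* pCreateInfo) {
        if (pCreateInfo == nullptr) return;
        android::GraphicsEnv::getInstance().setVulkanInstanceExtensions(
                pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
    }
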
diff --git a/libs/graphicsenv/IGpuService.cpp b/libs/graphicsenv/IGpuService.cpp
index fa25c55..4c070ae 100644
--- a/libs/graphicsenv/IGpuService.cpp
+++ b/libs/graphicsenv/IGpuService.cpp
@@ -61,6 +61,14 @@
         remote()->transact(BnGpuService::SET_TARGET_STATS, data, &reply, IBinder::FLAG_ONEWAY);
     }
 
+    void setTargetStatsArray(const std::string& appPackageName, const uint64_t driverVersionCode,
+                             const GpuStatsInfo::Stats stats, const uint64_t* values,
+                             const uint32_t valueCount) override {
+        for (uint32_t i = 0; i < valueCount; i++) {
+            setTargetStats(appPackageName, driverVersionCode, stats, values[i]);
+        }
+    }
+
     void setUpdatableDriverPath(const std::string& driverPath) override {
         Parcel data, reply;
         data.writeInterfaceToken(IGpuService::getInterfaceDescriptor());
@@ -70,6 +78,15 @@
                            IBinder::FLAG_ONEWAY);
     }
 
+    void toggleAngleAsSystemDriver(bool enabled) override {
+        Parcel data, reply;
+        data.writeInterfaceToken(IGpuService::getInterfaceDescriptor());
+        data.writeBool(enabled);
+
+        remote()->transact(BnGpuService::TOGGLE_ANGLE_AS_SYSTEM_DRIVER, data, &reply,
+                           IBinder::FLAG_ONEWAY);
+    }
+
     std::string getUpdatableDriverPath() override {
         Parcel data, reply;
         data.writeInterfaceToken(IGpuService::getInterfaceDescriptor());
@@ -181,6 +198,15 @@
 
             return OK;
         }
+        case TOGGLE_ANGLE_AS_SYSTEM_DRIVER: {
+            CHECK_INTERFACE(IGpuService, data, reply);
+
+            bool enableAngleAsSystemDriver;
+            if ((status = data.readBool(&enableAngleAsSystemDriver)) != OK) return status;
+
+            toggleAngleAsSystemDriver(enableAngleAsSystemDriver);
+            return OK;
+        }
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/libs/graphicsenv/include/graphicsenv/GpuStatsInfo.h b/libs/graphicsenv/include/graphicsenv/GpuStatsInfo.h
index 5b513d2..47607a0 100644
--- a/libs/graphicsenv/include/graphicsenv/GpuStatsInfo.h
+++ b/libs/graphicsenv/include/graphicsenv/GpuStatsInfo.h
@@ -58,6 +58,9 @@
  */
 class GpuStatsAppInfo : public Parcelable {
 public:
+    // This limits the worst-case number of extensions to be tracked.
+    static const uint32_t MAX_NUM_EXTENSIONS = 100;
+
     GpuStatsAppInfo() = default;
     GpuStatsAppInfo(const GpuStatsAppInfo&) = default;
     virtual ~GpuStatsAppInfo() = default;
@@ -74,6 +77,13 @@
     bool falsePrerotation = false;
     bool gles1InUse = false;
     bool angleInUse = false;
+    bool createdGlesContext = false;
+    bool createdVulkanDevice = false;
+    bool createdVulkanSwapchain = false;
+    uint32_t vulkanApiVersion = 0;
+    uint64_t vulkanDeviceFeaturesEnabled = 0;
+    std::vector<int32_t> vulkanInstanceExtensions = {};
+    std::vector<int32_t> vulkanDeviceExtensions = {};
 
     std::chrono::time_point<std::chrono::system_clock> lastAccessTime;
 };
@@ -101,6 +111,13 @@
         CPU_VULKAN_IN_USE = 0,
         FALSE_PREROTATION = 1,
         GLES_1_IN_USE = 2,
+        CREATED_GLES_CONTEXT = 3,
+        CREATED_VULKAN_API_VERSION = 4,
+        CREATED_VULKAN_DEVICE = 5,
+        CREATED_VULKAN_SWAPCHAIN = 6,
+        VULKAN_DEVICE_FEATURES_ENABLED = 7,
+        VULKAN_INSTANCE_EXTENSION = 8,
+        VULKAN_DEVICE_EXTENSION = 9,
     };
 
     GpuStatsInfo() = default;
diff --git a/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h b/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h
index 73d3196..f9b234a 100644
--- a/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h
+++ b/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h
@@ -71,10 +71,19 @@
                      const std::string& appPackageName, const int32_t vulkanVersion);
     // Set stats for target GpuStatsInfo::Stats type.
     void setTargetStats(const GpuStatsInfo::Stats stats, const uint64_t value = 0);
+    // Set array of stats for target GpuStatsInfo::Stats type.
+    void setTargetStatsArray(const GpuStatsInfo::Stats stats, const uint64_t* values,
+                             const uint32_t valueCount);
     // Set which driver is intended to load.
     void setDriverToLoad(GpuStatsInfo::Driver driver);
     // Set which driver is actually loaded.
     void setDriverLoaded(GpuStatsInfo::Api api, bool isDriverLoaded, int64_t driverLoadingTime);
+    // Set which instance extensions are enabled for the app.
+    void setVulkanInstanceExtensions(uint32_t enabledExtensionCount,
+                                     const char* const* ppEnabledExtensionNames);
+    // Set which device extensions are enabled for the app.
+    void setVulkanDeviceExtensions(uint32_t enabledExtensionCount,
+                                   const char* const* ppEnabledExtensionNames);
 
     /*
      * Api for Vk/GL layer injection.  Presently, drivers enable certain
@@ -91,28 +100,17 @@
     bool shouldUseAngle(std::string appName);
     // Check if this app process should use ANGLE.
     bool shouldUseAngle();
-    // If ANGLE is the system GLES driver
-    bool angleIsSystemDriver();
-    // If should use legacy driver instead of a system ANGLE driver
-    bool shouldForceLegacyDriver();
     // Set a search path for loading ANGLE libraries. The path is a list of
     // directories separated by ':'. A directory can be contained in a zip file
     // (libraries must be stored uncompressed and page aligned); such elements
     // in the search path must have a '!' after the zip filename, e.g.
     //     /system/app/ANGLEPrebuilt/ANGLEPrebuilt.apk!/lib/arm64-v8a
-    void setAngleInfo(const std::string path, const std::string appName,
-                      const bool angleIsSystemDriver, std::string devOptIn,
+    void setAngleInfo(const std::string path, const std::string appName, std::string devOptIn,
                       const std::vector<std::string> eglFeatures);
-    // Set the state so that the legacy driver will be used, and in case ANGLE
-    // is the system driver, provide the name of the legacy driver.
-    void setLegacyDriverInfo(const std::string appName, const bool angleIsSystemDriver,
-                             const std::string legacyDriverName);
     // Get the ANGLE driver namespace.
     android_namespace_t* getAngleNamespace();
     // Get the app name for ANGLE debug message.
     std::string& getAngleAppName();
-    // Get the legacy driver's suffix name.
-    std::string getLegacySuffix();
 
     const std::vector<std::string>& getAngleEglFeatures();
 
@@ -133,6 +131,8 @@
     const std::string& getDebugLayers();
     // Get the debug layers to load.
     const std::string& getDebugLayersGLES();
+    // Set the persist.graphics.egl system property value.
+    void nativeToggleAngleAsSystemDriver(bool enabled);
 
 private:
     enum UseAngle { UNKNOWN, YES, NO };
@@ -167,10 +167,6 @@
     std::string mAngleDeveloperOptIn;
     // ANGLE EGL features;
     std::vector<std::string> mAngleEglFeatures;
-    // ANGLE is System Driver flag.
-    UseAngle mAngleIsSystemDriver = UNKNOWN;
-    // Legacy driver name to use when ANGLE is the system driver.
-    std::string mLegacyDriverSuffix;
     // Use ANGLE flag.
     UseAngle mUseAngle = UNKNOWN;
     // Vulkan debug layers libs.
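
nativeToggleAngleAsSystemDriver() gives native callers a way to flip the persist.graphics.egl plumbing without going through Java; the underlying binder call is one-way, so the request is fire-and-forget. A minimal caller sketch, assuming the usual GraphicsEnv singleton accessor:

    #include <graphicsenv/GraphicsEnv.h>

    // Request that ANGLE be used as the system GLES driver. Passing false reverts
    // to the native GLES driver. There is no return value to check.
    void setAngleAsSystemDriver(bool enabled) {
        android::GraphicsEnv::getInstance().nativeToggleAngleAsSystemDriver(enabled);
    }
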
diff --git a/libs/graphicsenv/include/graphicsenv/IGpuService.h b/libs/graphicsenv/include/graphicsenv/IGpuService.h
index 2d59fa0..e3857d2 100644
--- a/libs/graphicsenv/include/graphicsenv/IGpuService.h
+++ b/libs/graphicsenv/include/graphicsenv/IGpuService.h
@@ -42,10 +42,17 @@
     // set target stats.
     virtual void setTargetStats(const std::string& appPackageName, const uint64_t driverVersionCode,
                                 const GpuStatsInfo::Stats stats, const uint64_t value = 0) = 0;
+    virtual void setTargetStatsArray(const std::string& appPackageName,
+                                     const uint64_t driverVersionCode,
+                                     const GpuStatsInfo::Stats stats, const uint64_t* values,
+                                     const uint32_t valueCount) = 0;
 
     // setter and getter for updatable driver path.
     virtual void setUpdatableDriverPath(const std::string& driverPath) = 0;
     virtual std::string getUpdatableDriverPath() = 0;
+
+    // Sets ANGLE as the system GLES driver when enabled is true, by updating the
+    // persist.graphics.egl system property.
+    virtual void toggleAngleAsSystemDriver(bool enabled) = 0;
 };
 
 class BnGpuService : public BnInterface<IGpuService> {
@@ -55,6 +62,7 @@
         SET_TARGET_STATS,
         SET_UPDATABLE_DRIVER_PATH,
         GET_UPDATABLE_DRIVER_PATH,
+        TOGGLE_ANGLE_AS_SYSTEM_DRIVER,
         // Always append new enum to the end.
     };
 
diff --git a/libs/gui/Android.bp b/libs/gui/Android.bp
index 0fe6f24..342f132 100644
--- a/libs/gui/Android.bp
+++ b/libs/gui/Android.bp
@@ -66,6 +66,19 @@
     ],
 }
 
+filegroup {
+    name: "android_gui_aidl",
+    srcs: [
+        "android/gui/DisplayInfo.aidl",
+        "android/gui/FocusRequest.aidl",
+        "android/gui/InputApplicationInfo.aidl",
+        "android/gui/IWindowInfosListener.aidl",
+        "android/gui/IWindowInfosReportedListener.aidl",
+        "android/gui/WindowInfo.aidl",
+        "android/gui/WindowInfosUpdate.aidl",
+    ],
+}
+
 cc_library_static {
     name: "libgui_window_info_static",
     vendor_available: true,
@@ -78,9 +91,11 @@
         "android/gui/InputApplicationInfo.aidl",
         "android/gui/IWindowInfosListener.aidl",
         "android/gui/IWindowInfosReportedListener.aidl",
+        "android/gui/WindowInfosUpdate.aidl",
         "android/gui/WindowInfo.aidl",
         "DisplayInfo.cpp",
         "WindowInfo.cpp",
+        "WindowInfosUpdate.cpp",
     ],
 
     shared_libs: [
@@ -114,18 +129,35 @@
     },
 }
 
-filegroup {
+aidl_library {
+    name: "libgui_aidl_hdrs",
+    hdrs: [
+        "android/gui/DisplayInfo.aidl",
+        "android/gui/FocusRequest.aidl",
+        "android/gui/InputApplicationInfo.aidl",
+        "android/gui/IWindowInfosListener.aidl",
+        "android/gui/IWindowInfosReportedListener.aidl",
+        "android/gui/WindowInfo.aidl",
+        "android/gui/WindowInfosUpdate.aidl",
+    ],
+}
+
+aidl_library {
     name: "libgui_aidl",
     srcs: ["aidl/**/*.aidl"],
+    strip_import_prefix: "aidl",
+    deps: ["libgui_aidl_hdrs"],
+}
+
+filegroup {
+    name: "libgui_frame_event_aidl",
+    srcs: ["aidl/android/gui/FrameEvent.aidl"],
     path: "aidl/",
 }
 
 cc_library_static {
     name: "libgui_aidl_static",
     vendor_available: true,
-    srcs: [
-        ":libgui_aidl",
-    ],
 
     shared_libs: [
         "libbinder",
@@ -136,16 +168,22 @@
         "include",
     ],
 
+    include_dirs: [
+        "frameworks/native/include",
+    ],
+
     export_shared_lib_headers: [
         "libbinder",
     ],
 
     static_libs: [
         "libui-types",
+        "libgui_window_info_static",
     ],
 
     aidl: {
         export_aidl_headers: true,
+        libs: ["libgui_aidl"],
     },
 }
 
@@ -178,22 +216,24 @@
         "BitTube.cpp",
         "BLASTBufferQueue.cpp",
         "BufferItemConsumer.cpp",
+        "Choreographer.cpp",
+        "CompositorTiming.cpp",
         "ConsumerBase.cpp",
         "CpuConsumer.cpp",
         "DebugEGLImageTracker.cpp",
         "DisplayEventDispatcher.cpp",
         "DisplayEventReceiver.cpp",
-        "FrameTimelineInfo.cpp",
+        "FenceMonitor.cpp",
         "GLConsumer.cpp",
         "IConsumerListener.cpp",
         "IGraphicBufferConsumer.cpp",
         "IGraphicBufferProducer.cpp",
         "IProducerListener.cpp",
         "ISurfaceComposer.cpp",
-        "ISurfaceComposerClient.cpp",
         "ITransactionCompletedListener.cpp",
         "LayerDebugInfo.cpp",
         "LayerMetadata.cpp",
+        "LayerStatePermissions.cpp",
         "LayerState.cpp",
         "OccupancyTracker.cpp",
         "StreamSplitter.cpp",
@@ -202,7 +242,6 @@
         "SurfaceControl.cpp",
         "SurfaceComposerClient.cpp",
         "SyncFeatures.cpp",
-        "TransactionTracing.cpp",
         "VsyncEventData.cpp",
         "view/Surface.cpp",
         "WindowInfosListenerReporter.cpp",
@@ -223,6 +262,7 @@
 
     export_header_lib_headers: [
         "libgui_aidl_headers",
+        "jni_headers",
     ],
 
     aidl: {
@@ -230,6 +270,7 @@
     },
 
     header_libs: [
+        "jni_headers",
         "libdvr_headers",
         "libgui_aidl_headers",
         "libpdx_headers",
@@ -240,6 +281,10 @@
     lto: {
         thin: true,
     },
+
+    cflags: [
+        "-Wthread-safety",
+    ],
 }
 
 // Used by media codec services exclusively as a static lib for
@@ -260,10 +305,16 @@
     defaults: ["libgui_bufferqueue-defaults"],
 
     srcs: [
+        ":libgui_frame_event_aidl",
         ":inputconstants_aidl",
         ":libgui_bufferqueue_sources",
-        ":libgui_aidl",
     ],
+
+    aidl: {
+        include_dirs: [
+            "frameworks/native/libs/gui",
+        ],
+    },
 }
 
 filegroup {
@@ -294,6 +345,8 @@
 cc_defaults {
     name: "libgui_bufferqueue-defaults",
 
+    defaults: ["android.hardware.graphics.common-ndk_shared"],
+
     cflags: [
         "-Wall",
         "-Werror",
@@ -322,7 +375,6 @@
         "android.hardware.graphics.bufferqueue@2.0",
         "android.hardware.graphics.common@1.1",
         "android.hardware.graphics.common@1.2",
-        "android.hardware.graphics.common-V4-ndk",
         "android.hidl.token@1.0-utils",
         "libbase",
         "libcutils",
@@ -381,6 +433,7 @@
     ],
 
     srcs: [
+        ":libgui_frame_event_aidl",
         "mock/GraphicBufferConsumer.cpp",
         "mock/GraphicBufferProducer.cpp",
     ],
diff --git a/libs/gui/BLASTBufferQueue.cpp b/libs/gui/BLASTBufferQueue.cpp
index 000f458..5c324b2 100644
--- a/libs/gui/BLASTBufferQueue.cpp
+++ b/libs/gui/BLASTBufferQueue.cpp
@@ -20,6 +20,7 @@
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 //#define LOG_NDEBUG 0
 
+#include <cutils/atomic.h>
 #include <gui/BLASTBufferQueue.h>
 #include <gui/BufferItemConsumer.h>
 #include <gui/BufferQueueConsumer.h>
@@ -33,7 +34,9 @@
 #include <utils/Trace.h>
 
 #include <private/gui/ComposerService.h>
+#include <private/gui/ComposerServiceAIDL.h>
 
+#include <android-base/thread_annotations.h>
 #include <chrono>
 
 using namespace std::chrono_literals;
@@ -62,6 +65,10 @@
     ATRACE_FORMAT("%s - %s(f:%u,a:%u)" x, __FUNCTION__, mName.c_str(), mNumFrameAvailable, \
                   mNumAcquired, ##__VA_ARGS__)
 
+#define UNIQUE_LOCK_WITH_ASSERTION(mutex) \
+    std::unique_lock _lock{mutex};        \
+    base::ScopedLockAssertion assumeLocked(mutex);
+
 void BLASTBufferItemConsumer::onDisconnect() {
     Mutex::Autolock lock(mMutex);
     mPreviouslyConnected = mCurrentlyConnected;
@@ -156,30 +163,30 @@
                                                       GraphicBuffer::USAGE_HW_COMPOSER |
                                                               GraphicBuffer::USAGE_HW_TEXTURE,
                                                       1, false, this);
-    static int32_t id = 0;
-    mName = name + "#" + std::to_string(id);
-    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(id);
-    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(id);
-    id++;
+    static std::atomic<uint32_t> nextId = 0;
+    mProducerId = nextId++;
+    mName = name + "#" + std::to_string(mProducerId);
+    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(mProducerId);
+    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(mProducerId);
     mBufferItemConsumer->setName(String8(consumerName.c_str()));
     mBufferItemConsumer->setFrameAvailableListener(this);
-    mBufferItemConsumer->setBufferFreedListener(this);
 
-    ComposerService::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
+    ComposerServiceAIDL::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
     mBufferItemConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBuffers);
     mCurrentMaxAcquiredBufferCount = mMaxAcquiredBuffers;
     mNumAcquired = 0;
     mNumFrameAvailable = 0;
 
     TransactionCompletedListener::getInstance()->addQueueStallListener(
-        [&]() {
-            std::function<void(bool)> callbackCopy;
-            {
-                std::unique_lock _lock{mMutex};
-                callbackCopy = mTransactionHangCallback;
-            }
-            if (callbackCopy) callbackCopy(true);
-        }, this);
+            [&](const std::string& reason) {
+                std::function<void(const std::string&)> callbackCopy;
+                {
+                    std::unique_lock _lock{mMutex};
+                    callbackCopy = mTransactionHangCallback;
+                }
+                if (callbackCopy) callbackCopy(reason);
+            },
+            this);
 
     BQA_LOGV("BLASTBufferQueue created");
 }
@@ -211,7 +218,7 @@
                               int32_t format) {
     LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");
 
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     if (mFormat != format) {
         mFormat = format;
         mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
@@ -281,7 +288,7 @@
                                                     const sp<Fence>& /*presentFence*/,
                                                     const std::vector<SurfaceControlStats>& stats) {
     {
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         BBQ_TRACE();
         BQA_LOGV("transactionCommittedCallback");
         if (!mSurfaceControlsWithPendingCallback.empty()) {
@@ -329,7 +336,7 @@
 void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                            const std::vector<SurfaceControlStats>& stats) {
     {
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         BBQ_TRACE();
         BQA_LOGV("transactionCallback");
 
@@ -339,9 +346,11 @@
             std::optional<SurfaceControlStats> statsOptional = findMatchingStat(stats, pendingSC);
             if (statsOptional) {
                 SurfaceControlStats stat = *statsOptional;
-                mTransformHint = stat.transformHint;
-                mBufferItemConsumer->setTransformHint(mTransformHint);
-                BQA_LOGV("updated mTransformHint=%d", mTransformHint);
+                if (stat.transformHint) {
+                    mTransformHint = *stat.transformHint;
+                    mBufferItemConsumer->setTransformHint(mTransformHint);
+                    BQA_LOGV("updated mTransformHint=%d", mTransformHint);
+                }
                 // Update frametime stamps if the frame was latched and presented, indicated by a
                 // valid latch time.
                 if (stat.latchTime > 0) {
@@ -408,9 +417,8 @@
 void BLASTBufferQueue::releaseBufferCallback(
         const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
         std::optional<uint32_t> currentMaxAcquiredBufferCount) {
+    std::lock_guard _lock{mMutex};
     BBQ_TRACE();
-
-    std::unique_lock _lock{mMutex};
     releaseBufferCallbackLocked(id, releaseFence, currentMaxAcquiredBufferCount,
                                 false /* fakeRelease */);
 }
@@ -425,10 +433,8 @@
     // to the buffer queue. This will prevent higher latency when we are running
     // on a lower refresh rate than the max supported. We only do that for EGL
     // clients as others don't care about latency
-    const bool isEGL = [&] {
-        const auto it = mSubmitted.find(id);
-        return it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
-    }();
+    const auto it = mSubmitted.find(id);
+    const bool isEGL = it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
 
     if (currentMaxAcquiredBufferCount) {
         mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
@@ -485,6 +491,17 @@
     mSyncedFrameNumbers.erase(callbackId.framenumber);
 }
 
+static ui::Size getBufferSize(const BufferItem& item) {
+    uint32_t bufWidth = item.mGraphicBuffer->getWidth();
+    uint32_t bufHeight = item.mGraphicBuffer->getHeight();
+
+    // Take the buffer's orientation into account
+    if (item.mTransform & ui::Transform::ROT_90) {
+        std::swap(bufWidth, bufHeight);
+    }
+    return ui::Size(bufWidth, bufHeight);
+}
+
 status_t BLASTBufferQueue::acquireNextBufferLocked(
         const std::optional<SurfaceComposerClient::Transaction*> transaction) {
     // Check if we have frames available and we have not acquired the maximum number of buffers.
@@ -562,7 +579,13 @@
     // Ensure BLASTBufferQueue stays alive until we receive the transaction complete callback.
     incStrong((void*)transactionCallbackThunk);
 
-    mSize = mRequestedSize;
+    // Only update mSize for destination bounds if the incoming buffer matches the requested size.
+    // Otherwise, it could cause stretching since the destination bounds will update before the
+    // buffer with the new size is acquired.
+    if (mRequestedSize == getBufferSize(bufferItem) ||
+        bufferItem.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
+        mSize = mRequestedSize;
+    }
     Rect crop = computeCrop(bufferItem);
     mLastBufferInfo.update(true /* hasBuffer */, bufferItem.mGraphicBuffer->getWidth(),
                            bufferItem.mGraphicBuffer->getHeight(), bufferItem.mTransform,
@@ -572,7 +595,8 @@
             std::bind(releaseBufferCallbackThunk, wp<BLASTBufferQueue>(this) /* callbackContext */,
                       std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
     sp<Fence> fence = bufferItem.mFence ? new Fence(bufferItem.mFence->dup()) : Fence::NO_FENCE;
-    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, releaseBufferCallback);
+    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, mProducerId,
+                 releaseBufferCallback);
     t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
     t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
     t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
@@ -617,12 +641,12 @@
     }
 
     {
-        std::unique_lock _lock{mTimestampMutex};
+        std::lock_guard _lock{mTimestampMutex};
         auto dequeueTime = mDequeueTimestamps.find(buffer->getId());
         if (dequeueTime != mDequeueTimestamps.end()) {
             Parcel p;
             p.writeInt64(dequeueTime->second);
-            t->setMetadata(mSurfaceControl, METADATA_DEQUEUE_TIME, p);
+            t->setMetadata(mSurfaceControl, gui::METADATA_DEQUEUE_TIME, p);
             mDequeueTimestamps.erase(dequeueTime);
         }
     }
@@ -656,6 +680,7 @@
 }
 
 void BLASTBufferQueue::acquireAndReleaseBuffer() {
+    BBQ_TRACE();
     BufferItem bufferItem;
     status_t status =
             mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
@@ -673,10 +698,10 @@
     SurfaceComposerClient::Transaction* prevTransaction = nullptr;
 
     {
-        std::unique_lock _lock{mMutex};
+        UNIQUE_LOCK_WITH_ASSERTION(mMutex);
         BBQ_TRACE();
-
         bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();
+
         const bool syncTransactionSet = mTransactionReadyCallback != nullptr;
         BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));
 
@@ -767,44 +792,33 @@
 }
 
 void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
-    std::unique_lock _lock{mTimestampMutex};
+    std::lock_guard _lock{mTimestampMutex};
     mDequeueTimestamps[bufferId] = systemTime();
 };
 
 void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
-    std::unique_lock _lock{mTimestampMutex};
+    std::lock_guard _lock{mTimestampMutex};
     mDequeueTimestamps.erase(bufferId);
 };
 
-void BLASTBufferQueue::syncNextTransaction(
+bool BLASTBufferQueue::syncNextTransaction(
         std::function<void(SurfaceComposerClient::Transaction*)> callback,
         bool acquireSingleBuffer) {
+    LOG_ALWAYS_FATAL_IF(!callback,
+                        "BLASTBufferQueue: callback passed in to syncNextTransaction must not be "
+                        "NULL");
+
+    std::lock_guard _lock{mMutex};
     BBQ_TRACE();
-
-    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
-    SurfaceComposerClient::Transaction* prevTransaction = nullptr;
-
-    {
-        std::lock_guard _lock{mMutex};
-        // We're about to overwrite the previous call so we should invoke that callback
-        // immediately.
-        if (mTransactionReadyCallback) {
-            prevCallback = mTransactionReadyCallback;
-            prevTransaction = mSyncTransaction;
-        }
-
-        mTransactionReadyCallback = callback;
-        if (callback) {
-            mSyncTransaction = new SurfaceComposerClient::Transaction();
-        } else {
-            mSyncTransaction = nullptr;
-        }
-        mAcquireSingleBuffer = mTransactionReadyCallback ? acquireSingleBuffer : true;
+    if (mTransactionReadyCallback) {
+        ALOGW("Attempting to overwrite transaction callback in syncNextTransaction");
+        return false;
     }
 
-    if (prevCallback) {
-        prevCallback(prevTransaction);
-    }
+    mTransactionReadyCallback = callback;
+    mSyncTransaction = new SurfaceComposerClient::Transaction();
+    mAcquireSingleBuffer = acquireSingleBuffer;
+    return true;
 }
 
 void BLASTBufferQueue::stopContinuousSyncTransaction() {
@@ -812,34 +826,42 @@
     SurfaceComposerClient::Transaction* prevTransaction = nullptr;
     {
         std::lock_guard _lock{mMutex};
-        bool invokeCallback = mTransactionReadyCallback && !mAcquireSingleBuffer;
-        if (invokeCallback) {
-            prevCallback = mTransactionReadyCallback;
-            prevTransaction = mSyncTransaction;
+        if (mAcquireSingleBuffer || !mTransactionReadyCallback) {
+            ALOGW("Attempting to stop continuous sync when none are active");
+            return;
         }
+
+        prevCallback = mTransactionReadyCallback;
+        prevTransaction = mSyncTransaction;
+
         mTransactionReadyCallback = nullptr;
         mSyncTransaction = nullptr;
         mAcquireSingleBuffer = true;
     }
+
     if (prevCallback) {
         prevCallback(prevTransaction);
     }
 }
 
+void BLASTBufferQueue::clearSyncTransaction() {
+    std::lock_guard _lock{mMutex};
+    if (!mAcquireSingleBuffer) {
+        ALOGW("Attempting to clear sync transaction while a continuous sync is active");
+        return;
+    }
+
+    mTransactionReadyCallback = nullptr;
+    mSyncTransaction = nullptr;
+}
+
 bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
     if (item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
         // Only reject buffers if scaling mode is freeze.
         return false;
     }
 
-    uint32_t bufWidth = item.mGraphicBuffer->getWidth();
-    uint32_t bufHeight = item.mGraphicBuffer->getHeight();
-
-    // Take the buffer's orientation into account
-    if (item.mTransform & ui::Transform::ROT_90) {
-        std::swap(bufWidth, bufHeight);
-    }
-    ui::Size bufferSize(bufWidth, bufHeight);
+    ui::Size bufferSize = getBufferSize(item);
     if (mRequestedSize != mSize && mRequestedSize == bufferSize) {
         return false;
     }
@@ -851,8 +873,8 @@
 class BBQSurface : public Surface {
 private:
     std::mutex mMutex;
-    sp<BLASTBufferQueue> mBbq;
-    bool mDestroyed = false;
+    sp<BLASTBufferQueue> mBbq GUARDED_BY(mMutex);
+    bool mDestroyed GUARDED_BY(mMutex) = false;
 
 public:
     BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
@@ -873,7 +895,7 @@
 
     status_t setFrameRate(float frameRate, int8_t compatibility,
                           int8_t changeFrameRateStrategy) override {
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         if (mDestroyed) {
             return DEAD_OBJECT;
         }
@@ -886,7 +908,7 @@
 
     status_t setFrameTimelineInfo(uint64_t frameNumber,
                                   const FrameTimelineInfo& frameTimelineInfo) override {
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         if (mDestroyed) {
             return DEAD_OBJECT;
         }
@@ -896,7 +918,7 @@
     void destroy() override {
         Surface::destroy();
 
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         mDestroyed = true;
         mBbq = nullptr;
     }
@@ -906,7 +928,7 @@
 // no timing issues.
 status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
                                         bool shouldBeSeamless) {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     SurfaceComposerClient::Transaction t;
 
     return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
@@ -916,20 +938,20 @@
                                                 const FrameTimelineInfo& frameTimelineInfo) {
     ATRACE_FORMAT("%s(%s) frameNumber: %" PRIu64 " vsyncId: %" PRId64, __func__, mName.c_str(),
                   frameNumber, frameTimelineInfo.vsyncId);
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     mPendingFrameTimelines.push({frameNumber, frameTimelineInfo});
     return OK;
 }
 
 void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     SurfaceComposerClient::Transaction t;
 
     t.setSidebandStream(mSurfaceControl, stream).apply();
 }
 
 sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     sp<IBinder> scHandle = nullptr;
     if (includeSurfaceControlHandle && mSurfaceControl) {
         scHandle = mSurfaceControl->getHandle();
@@ -1154,6 +1176,7 @@
 }
 
 uint32_t BLASTBufferQueue::getLastTransformHint() const {
+    std::lock_guard _lock{mMutex};
     if (mSurfaceControl != nullptr) {
         return mSurfaceControl->getTransformHint();
     } else {
@@ -1162,62 +1185,18 @@
 }
 
 uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     return mLastAcquiredFrameNumber;
 }
 
-void BLASTBufferQueue::abandon() {
-    std::unique_lock _lock{mMutex};
-    // flush out the shadow queue
-    while (mNumFrameAvailable > 0) {
-        acquireAndReleaseBuffer();
-    }
-
-    // Clear submitted buffer states
-    mNumAcquired = 0;
-    mSubmitted.clear();
-    mPendingRelease.clear();
-
-    if (!mPendingTransactions.empty()) {
-        BQA_LOGD("Applying pending transactions on abandon %d",
-                 static_cast<uint32_t>(mPendingTransactions.size()));
-        SurfaceComposerClient::Transaction t;
-        mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
-        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
-        t.setApplyToken(mApplyToken).apply(false, true);
-    }
-
-    // Clear sync states
-    if (!mSyncedFrameNumbers.empty()) {
-        BQA_LOGD("mSyncedFrameNumbers cleared");
-        mSyncedFrameNumbers.clear();
-    }
-
-    if (mSyncTransaction != nullptr) {
-        BQA_LOGD("mSyncTransaction cleared mAcquireSingleBuffer=%s",
-                 mAcquireSingleBuffer ? "true" : "false");
-        mSyncTransaction = nullptr;
-        mAcquireSingleBuffer = false;
-    }
-
-    // abandon buffer queue
-    if (mBufferItemConsumer != nullptr) {
-        mBufferItemConsumer->abandon();
-        mBufferItemConsumer->setFrameAvailableListener(nullptr);
-        mBufferItemConsumer->setBufferFreedListener(nullptr);
-    }
-    mBufferItemConsumer = nullptr;
-    mConsumer = nullptr;
-    mProducer = nullptr;
-}
-
 bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
 }
 
-void BLASTBufferQueue::setTransactionHangCallback(std::function<void(bool)> callback) {
-    std::unique_lock _lock{mMutex};
+void BLASTBufferQueue::setTransactionHangCallback(
+        std::function<void(const std::string&)> callback) {
+    std::lock_guard _lock{mMutex};
     mTransactionHangCallback = callback;
 }
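
The BLASTBufferQueue changes pair the new -Wthread-safety flag with GUARDED_BY annotations and swap std::unique_lock for std::lock_guard wherever the lock is held for the whole scope; UNIQUE_LOCK_WITH_ASSERTION covers the remaining cases where a std::unique_lock is still needed (for example around a condition wait) but clang's analysis cannot see that it holds the mutex. A minimal sketch of the pattern, assuming android-base's thread_annotations.h:

    #include <mutex>

    #include <android-base/thread_annotations.h>

    class GuardedCounter {
    public:
        void increment() {
            std::lock_guard _lock(mMutex);  // Scoped lock the analyzer can follow.
            ++mValue;                       // OK: mMutex is held here.
        }

        int read() {
            // std::unique_lock is opaque to the analysis, so assert that it holds mMutex.
            std::unique_lock _lock(mMutex);
            android::base::ScopedLockAssertion assumeLocked(mMutex);
            return mValue;
        }

    private:
        std::mutex mMutex;
        int mValue GUARDED_BY(mMutex) = 0;
    };
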
 
diff --git a/libs/gui/BufferQueueConsumer.cpp b/libs/gui/BufferQueueConsumer.cpp
index db51356..5b34ba1 100644
--- a/libs/gui/BufferQueueConsumer.cpp
+++ b/libs/gui/BufferQueueConsumer.cpp
@@ -33,6 +33,7 @@
 #include <gui/BufferQueueCore.h>
 #include <gui/IConsumerListener.h>
 #include <gui/IProducerListener.h>
+#include <gui/TraceUtils.h>
 
 #include <private/gui/BufferQueueThreadState.h>
 #ifndef __ANDROID_VNDK__
@@ -645,7 +646,7 @@
 
 status_t BufferQueueConsumer::setMaxAcquiredBufferCount(
         int maxAcquiredBuffers) {
-    ATRACE_CALL();
+    ATRACE_FORMAT("%s(%d)", __func__, maxAcquiredBuffers);
 
     if (maxAcquiredBuffers < 1 ||
             maxAcquiredBuffers > BufferQueueCore::MAX_MAX_ACQUIRED_BUFFERS) {
diff --git a/libs/gui/BufferQueueProducer.cpp b/libs/gui/BufferQueueProducer.cpp
index 36c2e58..cf5ad7b 100644
--- a/libs/gui/BufferQueueProducer.cpp
+++ b/libs/gui/BufferQueueProducer.cpp
@@ -35,6 +35,7 @@
 #include <gui/GLConsumer.h>
 #include <gui/IConsumerListener.h>
 #include <gui/IProducerListener.h>
+#include <gui/TraceUtils.h>
 #include <private/gui/BufferQueueThreadState.h>
 
 #include <utils/Log.h>
@@ -125,7 +126,7 @@
 
 status_t BufferQueueProducer::setMaxDequeuedBufferCount(int maxDequeuedBuffers,
                                                         int* maxBufferCount) {
-    ATRACE_CALL();
+    ATRACE_FORMAT("%s(%d)", __func__, maxDequeuedBuffers);
     BQ_LOGV("setMaxDequeuedBufferCount: maxDequeuedBuffers = %d",
             maxDequeuedBuffers);
 
@@ -502,6 +503,20 @@
         if ((buffer == nullptr) ||
                 buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage))
         {
+            if (CC_UNLIKELY(ATRACE_ENABLED())) {
+                if (buffer == nullptr) {
+                    ATRACE_FORMAT_INSTANT("%s buffer reallocation: null", mConsumerName.string());
+                } else {
+                    ATRACE_FORMAT_INSTANT("%s buffer reallocation requested %dx%d format:%d "
+                                          "layerCount:%d "
+                                          "usage:%d actual: %dx%d format:%d layerCount:%d "
+                                          "usage:%d ",
+                                          mConsumerName.string(), width, height, format,
+                                          BQ_LAYER_COUNT, usage, buffer->getWidth(),
+                                          buffer->getHeight(), buffer->getPixelFormat(),
+                                          buffer->getLayerCount(), buffer->getUsage());
+                }
+            }
             mSlots[found].mAcquireCalled = false;
             mSlots[found].mGraphicBuffer = nullptr;
             mSlots[found].mRequestBufferCalled = false;
diff --git a/libs/gui/Choreographer.cpp b/libs/gui/Choreographer.cpp
new file mode 100644
index 0000000..46fb068
--- /dev/null
+++ b/libs/gui/Choreographer.cpp
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include <gui/Choreographer.h>
+#include <gui/TraceUtils.h>
+#include <jni.h>
+
+#undef LOG_TAG
+#define LOG_TAG "AChoreographer"
+
+namespace {
+struct {
+    // Global JVM that is provided by zygote
+    JavaVM* jvm = nullptr;
+    struct {
+        jclass clazz;
+        jmethodID getInstance;
+        jmethodID registerNativeChoreographerForRefreshRateCallbacks;
+        jmethodID unregisterNativeChoreographerForRefreshRateCallbacks;
+    } displayManagerGlobal;
+} gJni;
+
+// Gets the JNIEnv* for this thread, and performs one-off initialization if we
+// have never retrieved a JNIEnv* pointer before.
+JNIEnv* getJniEnv() {
+    if (gJni.jvm == nullptr) {
+        ALOGW("AChoreographer: No JVM provided!");
+        return nullptr;
+    }
+
+    JNIEnv* env = nullptr;
+    if (gJni.jvm->GetEnv((void**)&env, JNI_VERSION_1_4) != JNI_OK) {
+        ALOGD("Attaching thread to JVM for AChoreographer");
+        JavaVMAttachArgs args = {JNI_VERSION_1_4, "AChoreographer_env", NULL};
+        jint attachResult = gJni.jvm->AttachCurrentThreadAsDaemon(&env, (void*)&args);
+        if (attachResult != JNI_OK) {
+            ALOGE("Unable to attach thread. Error: %d", attachResult);
+            return nullptr;
+        }
+    }
+    if (env == nullptr) {
+        ALOGW("AChoreographer: No JNI env available!");
+    }
+    return env;
+}
+
+inline const char* toString(bool value) {
+    return value ? "true" : "false";
+}
+} // namespace
+
+namespace android {
+
+Choreographer::Context Choreographer::gChoreographers;
+
+static thread_local Choreographer* gChoreographer;
+
+void Choreographer::initJVM(JNIEnv* env) {
+    env->GetJavaVM(&gJni.jvm);
+    // Now we need to find the java classes.
+    jclass dmgClass = env->FindClass("android/hardware/display/DisplayManagerGlobal");
+    gJni.displayManagerGlobal.clazz = static_cast<jclass>(env->NewGlobalRef(dmgClass));
+    gJni.displayManagerGlobal.getInstance =
+            env->GetStaticMethodID(dmgClass, "getInstance",
+                                   "()Landroid/hardware/display/DisplayManagerGlobal;");
+    gJni.displayManagerGlobal.registerNativeChoreographerForRefreshRateCallbacks =
+            env->GetMethodID(dmgClass, "registerNativeChoreographerForRefreshRateCallbacks", "()V");
+    gJni.displayManagerGlobal.unregisterNativeChoreographerForRefreshRateCallbacks =
+            env->GetMethodID(dmgClass, "unregisterNativeChoreographerForRefreshRateCallbacks",
+                             "()V");
+}
+
+Choreographer* Choreographer::getForThread() {
+    if (gChoreographer == nullptr) {
+        sp<Looper> looper = Looper::getForThread();
+        if (!looper.get()) {
+            ALOGW("No looper prepared for thread");
+            return nullptr;
+        }
+        gChoreographer = new Choreographer(looper);
+        status_t result = gChoreographer->initialize();
+        if (result != OK) {
+            ALOGW("Failed to initialize");
+            return nullptr;
+        }
+    }
+    return gChoreographer;
+}
+
+Choreographer::Choreographer(const sp<Looper>& looper, const sp<IBinder>& layerHandle)
+      : DisplayEventDispatcher(looper, gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp, {},
+                               layerHandle),
+        mLooper(looper),
+        mThreadId(std::this_thread::get_id()) {
+    std::lock_guard<std::mutex> _l(gChoreographers.lock);
+    gChoreographers.ptrs.push_back(this);
+}
+
+Choreographer::~Choreographer() {
+    std::lock_guard<std::mutex> _l(gChoreographers.lock);
+    gChoreographers.ptrs.erase(std::remove_if(gChoreographers.ptrs.begin(),
+                                              gChoreographers.ptrs.end(),
+                                              [=](Choreographer* c) { return c == this; }),
+                               gChoreographers.ptrs.end());
+    // Only poke DisplayManagerGlobal to unregister if we previously registered
+    // callbacks.
+    if (gChoreographers.ptrs.empty() && gChoreographers.registeredToDisplayManager) {
+        gChoreographers.registeredToDisplayManager = false;
+        JNIEnv* env = getJniEnv();
+        if (env == nullptr) {
+            ALOGW("JNI environment is unavailable, skipping choreographer cleanup");
+            return;
+        }
+        jobject dmg = env->CallStaticObjectMethod(gJni.displayManagerGlobal.clazz,
+                                                  gJni.displayManagerGlobal.getInstance);
+        if (dmg == nullptr) {
+            ALOGW("DMS is not initialized yet, skipping choreographer cleanup");
+        } else {
+            env->CallVoidMethod(dmg,
+                                gJni.displayManagerGlobal
+                                        .unregisterNativeChoreographerForRefreshRateCallbacks);
+            env->DeleteLocalRef(dmg);
+        }
+    }
+}
+
+void Choreographer::postFrameCallbackDelayed(AChoreographer_frameCallback cb,
+                                             AChoreographer_frameCallback64 cb64,
+                                             AChoreographer_vsyncCallback vsyncCallback, void* data,
+                                             nsecs_t delay) {
+    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+    FrameCallback callback{cb, cb64, vsyncCallback, data, now + delay};
+    {
+        std::lock_guard<std::mutex> _l{mLock};
+        mFrameCallbacks.push(callback);
+    }
+    if (callback.dueTime <= now) {
+        if (std::this_thread::get_id() != mThreadId) {
+            if (mLooper != nullptr) {
+                Message m{MSG_SCHEDULE_VSYNC};
+                mLooper->sendMessage(this, m);
+            } else {
+                scheduleVsync();
+            }
+        } else {
+            scheduleVsync();
+        }
+    } else {
+        if (mLooper != nullptr) {
+            Message m{MSG_SCHEDULE_CALLBACKS};
+            mLooper->sendMessageDelayed(delay, this, m);
+        } else {
+            scheduleCallbacks();
+        }
+    }
+}
+
+void Choreographer::registerRefreshRateCallback(AChoreographer_refreshRateCallback cb, void* data) {
+    std::lock_guard<std::mutex> _l{mLock};
+    for (const auto& callback : mRefreshRateCallbacks) {
+        // Don't re-add callbacks.
+        if (cb == callback.callback && data == callback.data) {
+            return;
+        }
+    }
+    mRefreshRateCallbacks.emplace_back(
+            RefreshRateCallback{.callback = cb, .data = data, .firstCallbackFired = false});
+    bool needsRegistration = false;
+    {
+        std::lock_guard<std::mutex> _l2(gChoreographers.lock);
+        needsRegistration = !gChoreographers.registeredToDisplayManager;
+    }
+    if (needsRegistration) {
+        JNIEnv* env = getJniEnv();
+        if (env == nullptr) {
+            ALOGW("JNI environment is unavailable, skipping registration");
+            return;
+        }
+        jobject dmg = env->CallStaticObjectMethod(gJni.displayManagerGlobal.clazz,
+                                                  gJni.displayManagerGlobal.getInstance);
+        if (dmg == nullptr) {
+            ALOGW("DMS is not initialized yet: skipping registration");
+            return;
+        } else {
+            env->CallVoidMethod(dmg,
+                                gJni.displayManagerGlobal
+                                        .registerNativeChoreographerForRefreshRateCallbacks,
+                                reinterpret_cast<int64_t>(this));
+            env->DeleteLocalRef(dmg);
+            {
+                std::lock_guard<std::mutex> _l2(gChoreographers.lock);
+                gChoreographers.registeredToDisplayManager = true;
+            }
+        }
+    } else {
+        scheduleLatestConfigRequest();
+    }
+}
+
+void Choreographer::unregisterRefreshRateCallback(AChoreographer_refreshRateCallback cb,
+                                                  void* data) {
+    std::lock_guard<std::mutex> _l{mLock};
+    mRefreshRateCallbacks.erase(std::remove_if(mRefreshRateCallbacks.begin(),
+                                               mRefreshRateCallbacks.end(),
+                                               [&](const RefreshRateCallback& callback) {
+                                                   return cb == callback.callback &&
+                                                           data == callback.data;
+                                               }),
+                                mRefreshRateCallbacks.end());
+}
+
+void Choreographer::scheduleLatestConfigRequest() {
+    if (mLooper != nullptr) {
+        Message m{MSG_HANDLE_REFRESH_RATE_UPDATES};
+        mLooper->sendMessage(this, m);
+    } else {
+        // If the looper thread is detached from Choreographer, then refresh rate
+        // changes will be handled in AChoreographer_handlePendingEvents, so we
+        // need to wake up the looper thread by writing to the write-end of the
+        // socket the looper is listening on.
+        // Fortunately, these events are small so sending packets across the
+        // socket should be atomic across processes.
+        DisplayEventReceiver::Event event;
+        event.header =
+                DisplayEventReceiver::Event::Header{DisplayEventReceiver::DISPLAY_EVENT_NULL,
+                                                    PhysicalDisplayId::fromPort(0), systemTime()};
+        injectEvent(event);
+    }
+}
+
+void Choreographer::scheduleCallbacks() {
+    const nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+    nsecs_t dueTime;
+    {
+        std::lock_guard<std::mutex> _l{mLock};
+        // If there are no pending callbacks then don't schedule a vsync
+        if (mFrameCallbacks.empty()) {
+            return;
+        }
+        dueTime = mFrameCallbacks.top().dueTime;
+    }
+
+    if (dueTime <= now) {
+        ALOGV("choreographer %p ~ scheduling vsync", this);
+        scheduleVsync();
+        return;
+    }
+}
+
+void Choreographer::handleRefreshRateUpdates() {
+    std::vector<RefreshRateCallback> callbacks{};
+    const nsecs_t pendingPeriod = gChoreographers.mLastKnownVsync.load();
+    const nsecs_t lastPeriod = mLatestVsyncPeriod;
+    if (pendingPeriod > 0) {
+        mLatestVsyncPeriod = pendingPeriod;
+    }
+    {
+        std::lock_guard<std::mutex> _l{mLock};
+        for (auto& cb : mRefreshRateCallbacks) {
+            callbacks.push_back(cb);
+            cb.firstCallbackFired = true;
+        }
+    }
+
+    for (auto& cb : callbacks) {
+        if (!cb.firstCallbackFired || (pendingPeriod > 0 && pendingPeriod != lastPeriod)) {
+            cb.callback(pendingPeriod, cb.data);
+        }
+    }
+}
+
+void Choreographer::dispatchVsync(nsecs_t timestamp, PhysicalDisplayId, uint32_t,
+                                  VsyncEventData vsyncEventData) {
+    std::vector<FrameCallback> callbacks{};
+    {
+        std::lock_guard<std::mutex> _l{mLock};
+        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+        while (!mFrameCallbacks.empty() && mFrameCallbacks.top().dueTime < now) {
+            callbacks.push_back(mFrameCallbacks.top());
+            mFrameCallbacks.pop();
+        }
+    }
+    mLastVsyncEventData = vsyncEventData;
+    for (const auto& cb : callbacks) {
+        if (cb.vsyncCallback != nullptr) {
+            ATRACE_FORMAT("AChoreographer_vsyncCallback %" PRId64,
+                          vsyncEventData.preferredVsyncId());
+            const ChoreographerFrameCallbackDataImpl frameCallbackData =
+                    createFrameCallbackData(timestamp);
+            registerStartTime();
+            mInCallback = true;
+            cb.vsyncCallback(reinterpret_cast<const AChoreographerFrameCallbackData*>(
+                                     &frameCallbackData),
+                             cb.data);
+            mInCallback = false;
+        } else if (cb.callback64 != nullptr) {
+            ATRACE_FORMAT("AChoreographer_frameCallback64");
+            cb.callback64(timestamp, cb.data);
+        } else if (cb.callback != nullptr) {
+            ATRACE_FORMAT("AChoreographer_frameCallback");
+            cb.callback(timestamp, cb.data);
+        }
+    }
+}
+
+void Choreographer::dispatchHotplug(nsecs_t, PhysicalDisplayId displayId, bool connected) {
+    ALOGV("choreographer %p ~ received hotplug event (displayId=%s, connected=%s), ignoring.", this,
+          to_string(displayId).c_str(), toString(connected));
+}
+
+void Choreographer::dispatchModeChanged(nsecs_t, PhysicalDisplayId, int32_t, nsecs_t) {
+    LOG_ALWAYS_FATAL("dispatchModeChanged was called but was never registered");
+}
+
+void Choreographer::dispatchFrameRateOverrides(nsecs_t, PhysicalDisplayId,
+                                               std::vector<FrameRateOverride>) {
+    LOG_ALWAYS_FATAL("dispatchFrameRateOverrides was called but was never registered");
+}
+
+void Choreographer::dispatchNullEvent(nsecs_t, PhysicalDisplayId) {
+    ALOGV("choreographer %p ~ received null event.", this);
+    handleRefreshRateUpdates();
+}
+
+void Choreographer::handleMessage(const Message& message) {
+    switch (message.what) {
+        case MSG_SCHEDULE_CALLBACKS:
+            scheduleCallbacks();
+            break;
+        case MSG_SCHEDULE_VSYNC:
+            scheduleVsync();
+            break;
+        case MSG_HANDLE_REFRESH_RATE_UPDATES:
+            handleRefreshRateUpdates();
+            break;
+    }
+}
+
+int64_t Choreographer::getFrameInterval() const {
+    return mLastVsyncEventData.frameInterval;
+}
+
+bool Choreographer::inCallback() const {
+    return mInCallback;
+}
+
+ChoreographerFrameCallbackDataImpl Choreographer::createFrameCallbackData(nsecs_t timestamp) const {
+    return {.frameTimeNanos = timestamp,
+            .vsyncEventData = mLastVsyncEventData,
+            .choreographer = this};
+}
+
+void Choreographer::registerStartTime() const {
+    std::scoped_lock _l(gChoreographers.lock);
+    for (VsyncEventData::FrameTimeline frameTimeline : mLastVsyncEventData.frameTimelines) {
+        while (gChoreographers.startTimes.size() >= kMaxStartTimes) {
+            gChoreographers.startTimes.erase(gChoreographers.startTimes.begin());
+        }
+        gChoreographers.startTimes[frameTimeline.vsyncId] = systemTime(SYSTEM_TIME_MONOTONIC);
+    }
+}
+
+void Choreographer::signalRefreshRateCallbacks(nsecs_t vsyncPeriod) {
+    std::lock_guard<std::mutex> _l(gChoreographers.lock);
+    gChoreographers.mLastKnownVsync.store(vsyncPeriod);
+    for (auto c : gChoreographers.ptrs) {
+        c->scheduleLatestConfigRequest();
+    }
+}
+
+int64_t Choreographer::getStartTimeNanosForVsyncId(AVsyncId vsyncId) {
+    std::scoped_lock _l(gChoreographers.lock);
+    const auto iter = gChoreographers.startTimes.find(vsyncId);
+    if (iter == gChoreographers.startTimes.end()) {
+        ALOGW("Start time was not found for vsync id: %" PRId64, vsyncId);
+        return 0;
+    }
+    return iter->second;
+}
+
+} // namespace android
\ No newline at end of file
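
For reference, the Choreographer methods above back the public NDK AChoreographer entry points. A minimal standalone sketch (not part of this patch) of how an app-side caller might exercise the frame-callback and refresh-rate paths, assuming <android/choreographer.h> at API level 33 and a thread that already runs an ALooper:

    // Hypothetical client-side sketch; names here are illustrative, not from the patch.
    #include <android/choreographer.h>
    #include <android/log.h>

    static void onVsync(const AChoreographerFrameCallbackData* data, void*) {
        // Delivered through Choreographer::dispatchVsync() -> cb.vsyncCallback above.
        const int64_t frameTimeNanos = AChoreographerFrameCallbackData_getFrameTimeNanos(data);
        __android_log_print(ANDROID_LOG_VERBOSE, "VsyncDemo", "frame at %lld ns",
                            static_cast<long long>(frameTimeNanos));
    }

    static void onRefreshRate(int64_t vsyncPeriodNanos, void*) {
        // Delivered through Choreographer::handleRefreshRateUpdates() above.
        __android_log_print(ANDROID_LOG_VERBOSE, "VsyncDemo", "period %lld ns",
                            static_cast<long long>(vsyncPeriodNanos));
    }

    void scheduleOneFrame() {
        AChoreographer* choreographer = AChoreographer_getInstance();  // requires a looper thread
        AChoreographer_registerRefreshRateCallback(choreographer, onRefreshRate, nullptr);
        AChoreographer_postVsyncCallback(choreographer, onVsync, nullptr);
    }
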
diff --git a/libs/gui/CompositorTiming.cpp b/libs/gui/CompositorTiming.cpp
new file mode 100644
index 0000000..50f7b25
--- /dev/null
+++ b/libs/gui/CompositorTiming.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CompositorTiming"
+
+#include <cutils/compiler.h>
+#include <gui/CompositorTiming.h>
+#include <log/log.h>
+
+namespace android::gui {
+
+CompositorTiming::CompositorTiming(nsecs_t vsyncDeadline, nsecs_t vsyncPeriod, nsecs_t vsyncPhase,
+                                   nsecs_t presentLatency) {
+    if (CC_UNLIKELY(vsyncPeriod <= 0)) {
+        ALOGE("Invalid VSYNC period");
+        return;
+    }
+
+    const nsecs_t idealLatency = [=] {
+        // Modulo rounds toward 0 not INT64_MIN, so treat signs separately.
+        if (vsyncPhase < 0) return -vsyncPhase % vsyncPeriod;
+
+        const nsecs_t latency = (vsyncPeriod - vsyncPhase) % vsyncPeriod;
+        return latency > 0 ? latency : vsyncPeriod;
+    }();
+
+    // Snap the latency to a value that removes scheduling jitter from the composite and present
+    // times, which often have >1ms of jitter. Reducing jitter is important if an app attempts to
+    // extrapolate something like user input to an accurate present time. Snapping also allows an
+    // app to precisely calculate vsyncPhase with (presentLatency % interval).
+    const nsecs_t bias = vsyncPeriod / 2;
+    const nsecs_t extraVsyncs = (presentLatency - idealLatency + bias) / vsyncPeriod;
+    const nsecs_t snappedLatency =
+            extraVsyncs > 0 ? idealLatency + extraVsyncs * vsyncPeriod : idealLatency;
+
+    this->deadline = vsyncDeadline - idealLatency;
+    this->interval = vsyncPeriod;
+    this->presentLatency = snappedLatency;
+}
+
+} // namespace android::gui
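
To make the latency snapping above concrete, a small worked example with illustrative numbers (a 60 Hz period, a 4 ms phase, and a jittery measured present latency of about 29 ms), written as a standalone sketch:

    // Illustrative values only; they are not taken from this patch.
    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t vsyncPeriod = 16666667;     // ~60 Hz
        const int64_t vsyncPhase = 4000000;       // 4 ms, positive-phase path
        const int64_t presentLatency = 29000000;  // measured, with jitter

        const int64_t idealLatency = (vsyncPeriod - vsyncPhase) % vsyncPeriod;             // ~12.67 ms
        const int64_t bias = vsyncPeriod / 2;
        const int64_t extraVsyncs = (presentLatency - idealLatency + bias) / vsyncPeriod;  // 1
        const int64_t snapped = extraVsyncs > 0 ? idealLatency + extraVsyncs * vsyncPeriod
                                                : idealLatency;                            // ~29.33 ms
        std::printf("snapped presentLatency = %lld ns\n", static_cast<long long>(snapped));
        return 0;
    }

The ~29 ms measurement snaps to idealLatency plus exactly one vsync period, so repeated measurements that jitter by a millisecond or two all land on the same reported latency.
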
diff --git a/libs/gui/DisplayEventDispatcher.cpp b/libs/gui/DisplayEventDispatcher.cpp
index dfdce20..8a88377 100644
--- a/libs/gui/DisplayEventDispatcher.cpp
+++ b/libs/gui/DisplayEventDispatcher.cpp
@@ -35,11 +35,15 @@
 
 static constexpr nsecs_t WAITING_FOR_VSYNC_TIMEOUT = ms2ns(300);
 
-DisplayEventDispatcher::DisplayEventDispatcher(
-        const sp<Looper>& looper, ISurfaceComposer::VsyncSource vsyncSource,
-        ISurfaceComposer::EventRegistrationFlags eventRegistration)
-      : mLooper(looper), mReceiver(vsyncSource, eventRegistration), mWaitingForVsync(false),
-        mLastVsyncCount(0), mLastScheduleVsyncTime(0) {
+DisplayEventDispatcher::DisplayEventDispatcher(const sp<Looper>& looper,
+                                               gui::ISurfaceComposer::VsyncSource vsyncSource,
+                                               EventRegistrationFlags eventRegistration,
+                                               const sp<IBinder>& layerHandle)
+      : mLooper(looper),
+        mReceiver(vsyncSource, eventRegistration, layerHandle),
+        mWaitingForVsync(false),
+        mLastVsyncCount(0),
+        mLastScheduleVsyncTime(0) {
     ALOGV("dispatcher %p ~ Initializing display event dispatcher.", this);
 }
 
diff --git a/libs/gui/DisplayEventReceiver.cpp b/libs/gui/DisplayEventReceiver.cpp
index bfb7769..6849a95 100644
--- a/libs/gui/DisplayEventReceiver.cpp
+++ b/libs/gui/DisplayEventReceiver.cpp
@@ -14,15 +14,16 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "DisplayEventReceiver"
+
 #include <string.h>
 
 #include <utils/Errors.h>
 
 #include <gui/DisplayEventReceiver.h>
-#include <gui/ISurfaceComposer.h>
 #include <gui/VsyncEventData.h>
 
-#include <private/gui/ComposerService.h>
+#include <private/gui/ComposerServiceAIDL.h>
 
 #include <private/gui/BitTube.h>
 
@@ -32,21 +33,29 @@
 
 // ---------------------------------------------------------------------------
 
-DisplayEventReceiver::DisplayEventReceiver(
-        ISurfaceComposer::VsyncSource vsyncSource,
-        ISurfaceComposer::EventRegistrationFlags eventRegistration) {
-    sp<ISurfaceComposer> sf(ComposerService::getComposerService());
+DisplayEventReceiver::DisplayEventReceiver(gui::ISurfaceComposer::VsyncSource vsyncSource,
+                                           EventRegistrationFlags eventRegistration,
+                                           const sp<IBinder>& layerHandle) {
+    sp<gui::ISurfaceComposer> sf(ComposerServiceAIDL::getComposerService());
     if (sf != nullptr) {
-        mEventConnection = sf->createDisplayEventConnection(vsyncSource, eventRegistration);
-        if (mEventConnection != nullptr) {
+        mEventConnection = nullptr;
+        binder::Status status =
+                sf->createDisplayEventConnection(vsyncSource,
+                                                 static_cast<
+                                                         gui::ISurfaceComposer::EventRegistration>(
+                                                         eventRegistration.get()),
+                                                 layerHandle, &mEventConnection);
+        if (status.isOk() && mEventConnection != nullptr) {
             mDataChannel = std::make_unique<gui::BitTube>();
-            const auto status = mEventConnection->stealReceiveChannel(mDataChannel.get());
+            status = mEventConnection->stealReceiveChannel(mDataChannel.get());
             if (!status.isOk()) {
                 ALOGE("stealReceiveChannel failed: %s", status.toString8().c_str());
                 mInitError = std::make_optional<status_t>(status.transactionError());
                 mDataChannel.reset();
                 mEventConnection.clear();
             }
+        } else {
+            ALOGE("DisplayEventConnection creation failed: status=%s", status.toString8().c_str());
         }
     }
 }
diff --git a/libs/gui/DisplayInfo.cpp b/libs/gui/DisplayInfo.cpp
index 52d9540..bd640df 100644
--- a/libs/gui/DisplayInfo.cpp
+++ b/libs/gui/DisplayInfo.cpp
@@ -20,8 +20,13 @@
 #include <gui/DisplayInfo.h>
 #include <private/gui/ParcelUtils.h>
 
+#include <android-base/stringprintf.h>
 #include <log/log.h>
 
+#include <inttypes.h>
+
+#define INDENT "  "
+
 namespace android::gui {
 
 // --- DisplayInfo ---
@@ -67,4 +72,17 @@
     return OK;
 }
 
+void DisplayInfo::dump(std::string& out, const char* prefix) const {
+    using android::base::StringAppendF;
+
+    out += prefix;
+    StringAppendF(&out, "DisplayViewport[id=%" PRId32 "]\n", displayId);
+    out += prefix;
+    StringAppendF(&out, INDENT "Width=%" PRId32 ", Height=%" PRId32 "\n", logicalWidth,
+                  logicalHeight);
+    std::string transformPrefix(prefix);
+    transformPrefix.append(INDENT);
+    transform.dump(out, "Transform", transformPrefix.c_str());
+}
+
 } // namespace android::gui
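
A consumer-side sketch of the new dump helper (the DisplayInfo instance and the two-space prefix are assumptions for illustration):

    #include <gui/DisplayInfo.h>
    #include <string>

    std::string dumpDisplay(const android::gui::DisplayInfo& info) {
        std::string out;
        info.dump(out, "  ");  // appends the "DisplayViewport[id=...]" lines built above
        return out;
    }
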
diff --git a/libs/gui/FenceMonitor.cpp b/libs/gui/FenceMonitor.cpp
new file mode 100644
index 0000000..230c81a
--- /dev/null
+++ b/libs/gui/FenceMonitor.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include <gui/FenceMonitor.h>
+#include <gui/TraceUtils.h>
+#include <log/log.h>
+
+#include <thread>
+
+namespace android::gui {
+
+FenceMonitor::FenceMonitor(const char* name) : mName(name), mFencesQueued(0), mFencesSignaled(0) {
+    std::thread thread(&FenceMonitor::loop, this);
+    pthread_setname_np(thread.native_handle(), mName);
+    thread.detach();
+}
+
+void FenceMonitor::queueFence(const sp<Fence>& fence) {
+    char message[64];
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (fence->getSignalTime() != Fence::SIGNAL_TIME_PENDING) {
+        snprintf(message, sizeof(message), "%s fence %u has signaled", mName, mFencesQueued);
+        ATRACE_NAME(message);
+        // Need an increment on both to make the trace number correct.
+        mFencesQueued++;
+        mFencesSignaled++;
+        return;
+    }
+    snprintf(message, sizeof(message), "Trace %s fence %u", mName, mFencesQueued);
+    ATRACE_NAME(message);
+
+    mQueue.push_back(fence);
+    mCondition.notify_one();
+    mFencesQueued++;
+    ATRACE_INT(mName, int32_t(mQueue.size()));
+}
+
+void FenceMonitor::loop() {
+    while (true) {
+        threadLoop();
+    }
+}
+
+void FenceMonitor::threadLoop() {
+    sp<Fence> fence;
+    uint32_t fenceNum;
+    {
+        std::unique_lock<std::mutex> lock(mMutex);
+        while (mQueue.empty()) {
+            mCondition.wait(lock);
+        }
+        fence = mQueue[0];
+        fenceNum = mFencesSignaled;
+    }
+    {
+        char message[64];
+        snprintf(message, sizeof(message), "waiting for %s %u", mName, fenceNum);
+        ATRACE_NAME(message);
+
+        status_t result = fence->waitForever(message);
+        if (result != OK) {
+            ALOGE("Error waiting for fence: %d", result);
+        }
+    }
+    {
+        std::lock_guard<std::mutex> lock(mMutex);
+        mQueue.pop_front();
+        mFencesSignaled++;
+        ATRACE_INT(mName, int32_t(mQueue.size()));
+    }
+}
+
+} // namespace android::gui
\ No newline at end of file
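
A minimal usage sketch for the new gui::FenceMonitor helper, assuming <gui/FenceMonitor.h> declares the class defined above; the monitor name and the fence source are illustrative:

    #include <gui/FenceMonitor.h>
    #include <ui/Fence.h>

    void tracePresentFence(const android::sp<android::Fence>& presentFence) {
        // One long-lived monitor per fence stream; its constructor spawns the
        // detached worker thread that waits on queued fences.
        static android::gui::FenceMonitor monitor("presentFence");
        if (presentFence != nullptr && presentFence->isValid()) {
            monitor.queueFence(presentFence);  // emits an ATRACE counter per the code above
        }
    }
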
diff --git a/libs/gui/FrameTimelineInfo.cpp b/libs/gui/FrameTimelineInfo.cpp
deleted file mode 100644
index 3800b88..0000000
--- a/libs/gui/FrameTimelineInfo.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "FrameTimelineInfo"
-
-#include <inttypes.h>
-
-#include <android/os/IInputConstants.h>
-#include <gui/FrameTimelineInfo.h>
-#include <gui/LayerState.h>
-#include <private/gui/ParcelUtils.h>
-#include <utils/Errors.h>
-
-#include <cmath>
-
-using android::os::IInputConstants;
-
-namespace android {
-
-status_t FrameTimelineInfo::write(Parcel& output) const {
-    SAFE_PARCEL(output.writeInt64, vsyncId);
-    SAFE_PARCEL(output.writeInt32, inputEventId);
-    SAFE_PARCEL(output.writeInt64, startTimeNanos);
-    return NO_ERROR;
-}
-
-status_t FrameTimelineInfo::read(const Parcel& input) {
-    SAFE_PARCEL(input.readInt64, &vsyncId);
-    SAFE_PARCEL(input.readInt32, &inputEventId);
-    SAFE_PARCEL(input.readInt64, &startTimeNanos);
-    return NO_ERROR;
-}
-
-void FrameTimelineInfo::merge(const FrameTimelineInfo& other) {
-    // When merging vsync Ids we take the oldest valid one
-    if (vsyncId != INVALID_VSYNC_ID && other.vsyncId != INVALID_VSYNC_ID) {
-        if (other.vsyncId > vsyncId) {
-            vsyncId = other.vsyncId;
-            inputEventId = other.inputEventId;
-            startTimeNanos = other.startTimeNanos;
-        }
-    } else if (vsyncId == INVALID_VSYNC_ID) {
-        vsyncId = other.vsyncId;
-        inputEventId = other.inputEventId;
-        startTimeNanos = other.startTimeNanos;
-    }
-}
-
-void FrameTimelineInfo::clear() {
-    vsyncId = INVALID_VSYNC_ID;
-    inputEventId = IInputConstants::INVALID_INPUT_EVENT_ID;
-    startTimeNanos = 0;
-}
-
-}; // namespace android
diff --git a/libs/gui/GLConsumerUtils.cpp b/libs/gui/GLConsumerUtils.cpp
index 7a06c3d..a1c69e7 100644
--- a/libs/gui/GLConsumerUtils.cpp
+++ b/libs/gui/GLConsumerUtils.cpp
@@ -27,6 +27,13 @@
 void GLConsumer::computeTransformMatrix(float outTransform[16],
         const sp<GraphicBuffer>& buf, const Rect& cropRect, uint32_t transform,
         bool filtering) {
+    computeTransformMatrix(outTransform, buf->getWidth(), buf->getHeight(), buf->getPixelFormat(),
+                           cropRect, transform, filtering);
+}
+
+void GLConsumer::computeTransformMatrix(float outTransform[16], float bufferWidth,
+                                        float bufferHeight, PixelFormat pixelFormat,
+                                        const Rect& cropRect, uint32_t transform, bool filtering) {
     // Transform matrices
     static const mat4 mtxFlipH(
         -1, 0, 0, 0,
@@ -60,8 +67,6 @@
 
     if (!cropRect.isEmpty()) {
         float tx = 0.0f, ty = 0.0f, sx = 1.0f, sy = 1.0f;
-        float bufferWidth = buf->getWidth();
-        float bufferHeight = buf->getHeight();
         float shrinkAmount = 0.0f;
         if (filtering) {
             // In order to prevent bilinear sampling beyond the edge of the
@@ -70,7 +75,7 @@
             // off each end, but because the chroma channels of YUV420 images
             // are subsampled we may need to shrink the crop region by a whole
             // texel on each side.
-            switch (buf->getPixelFormat()) {
+            switch (pixelFormat) {
                 case PIXEL_FORMAT_RGBA_8888:
                 case PIXEL_FORMAT_RGBX_8888:
                 case PIXEL_FORMAT_RGBA_FP16:
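
For the new GLConsumer::computeTransformMatrix overload added above, a hedged call-site sketch that supplies buffer geometry directly instead of a GraphicBuffer (dimensions, crop, and transform flag are illustrative, and the method is assumed to remain static as before):

    #include <gui/GLConsumer.h>
    #include <system/window.h>  // NATIVE_WINDOW_TRANSFORM_* flags
    #include <ui/PixelFormat.h>
    #include <ui/Rect.h>

    void buildTexMatrix(float outTransform[16]) {
        const android::Rect crop(0, 0, 1280, 720);
        android::GLConsumer::computeTransformMatrix(outTransform,
                                                    /*bufferWidth=*/1280.0f,
                                                    /*bufferHeight=*/720.0f,
                                                    android::PIXEL_FORMAT_RGBA_8888,
                                                    crop,
                                                    NATIVE_WINDOW_TRANSFORM_FLIP_H,
                                                    /*filtering=*/true);
    }
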
diff --git a/libs/gui/ISurfaceComposer.cpp b/libs/gui/ISurfaceComposer.cpp
index 24d39fe..b526a6c 100644
--- a/libs/gui/ISurfaceComposer.cpp
+++ b/libs/gui/ISurfaceComposer.cpp
@@ -19,14 +19,11 @@
 
 #include <android/gui/IDisplayEventConnection.h>
 #include <android/gui/IRegionSamplingListener.h>
-#include <android/gui/ITransactionTraceListener.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/Parcel.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/ISurfaceComposer.h>
-#include <gui/ISurfaceComposerClient.h>
-#include <gui/LayerDebugInfo.h>
 #include <gui/LayerState.h>
 #include <private/gui/ParcelUtils.h>
 #include <stdint.h>
@@ -37,7 +34,6 @@
 #include <ui/DisplayState.h>
 #include <ui/DynamicDisplayInfo.h>
 #include <ui/HdrCapabilities.h>
-#include <ui/StaticDisplayInfo.h>
 #include <utils/Log.h>
 
 // ---------------------------------------------------------------------------
@@ -63,26 +59,17 @@
 
     virtual ~BpSurfaceComposer();
 
-    virtual sp<ISurfaceComposerClient> createConnection()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        remote()->transact(BnSurfaceComposer::CREATE_CONNECTION, data, &reply);
-        return interface_cast<ISurfaceComposerClient>(reply.readStrongBinder());
-    }
-
-    status_t setTransactionState(const FrameTimelineInfo& frameTimelineInfo,
-                                 const Vector<ComposerState>& state,
-                                 const Vector<DisplayState>& displays, uint32_t flags,
-                                 const sp<IBinder>& applyToken, const InputWindowCommands& commands,
-                                 int64_t desiredPresentTime, bool isAutoTimestamp,
-                                 const client_cache_t& uncacheBuffer, bool hasListenerCallbacks,
-                                 const std::vector<ListenerCallbacks>& listenerCallbacks,
-                                 uint64_t transactionId) override {
+    status_t setTransactionState(
+            const FrameTimelineInfo& frameTimelineInfo, Vector<ComposerState>& state,
+            const Vector<DisplayState>& displays, uint32_t flags, const sp<IBinder>& applyToken,
+            InputWindowCommands commands, int64_t desiredPresentTime, bool isAutoTimestamp,
+            const std::vector<client_cache_t>& uncacheBuffers, bool hasListenerCallbacks,
+            const std::vector<ListenerCallbacks>& listenerCallbacks, uint64_t transactionId,
+            const std::vector<uint64_t>& mergedTransactionIds) override {
         Parcel data, reply;
         data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
 
-        SAFE_PARCEL(frameTimelineInfo.write, data);
+        frameTimelineInfo.writeToParcel(&data);
 
         SAFE_PARCEL(data.writeUint32, static_cast<uint32_t>(state.size()));
         for (const auto& s : state) {
@@ -99,8 +86,11 @@
         SAFE_PARCEL(commands.write, data);
         SAFE_PARCEL(data.writeInt64, desiredPresentTime);
         SAFE_PARCEL(data.writeBool, isAutoTimestamp);
-        SAFE_PARCEL(data.writeStrongBinder, uncacheBuffer.token.promote());
-        SAFE_PARCEL(data.writeUint64, uncacheBuffer.id);
+        SAFE_PARCEL(data.writeUint32, static_cast<uint32_t>(uncacheBuffers.size()));
+        for (const client_cache_t& uncacheBuffer : uncacheBuffers) {
+            SAFE_PARCEL(data.writeStrongBinder, uncacheBuffer.token.promote());
+            SAFE_PARCEL(data.writeUint64, uncacheBuffer.id);
+        }
         SAFE_PARCEL(data.writeBool, hasListenerCallbacks);
 
         SAFE_PARCEL(data.writeVectorSize, listenerCallbacks);
@@ -111,6 +101,11 @@
 
         SAFE_PARCEL(data.writeUint64, transactionId);
 
+        SAFE_PARCEL(data.writeUint32, static_cast<uint32_t>(mergedTransactionIds.size()));
+        for (auto mergedTransactionId : mergedTransactionIds) {
+            SAFE_PARCEL(data.writeUint64, mergedTransactionId);
+        }
+
         if (flags & ISurfaceComposer::eOneWay) {
             return remote()->transact(BnSurfaceComposer::SET_TRANSACTION_STATE,
                     data, &reply, IBinder::FLAG_ONEWAY);
@@ -119,905 +114,6 @@
                     data, &reply);
         }
     }
-
-    void bootFinished() override {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        remote()->transact(BnSurfaceComposer::BOOT_FINISHED, data, &reply);
-    }
-
-    bool authenticateSurfaceTexture(
-            const sp<IGraphicBufferProducer>& bufferProducer) const override {
-        Parcel data, reply;
-        int err = NO_ERROR;
-        err = data.writeInterfaceToken(
-                ISurfaceComposer::getInterfaceDescriptor());
-        if (err != NO_ERROR) {
-            ALOGE("ISurfaceComposer::authenticateSurfaceTexture: error writing "
-                    "interface descriptor: %s (%d)", strerror(-err), -err);
-            return false;
-        }
-        err = data.writeStrongBinder(IInterface::asBinder(bufferProducer));
-        if (err != NO_ERROR) {
-            ALOGE("ISurfaceComposer::authenticateSurfaceTexture: error writing "
-                    "strong binder to parcel: %s (%d)", strerror(-err), -err);
-            return false;
-        }
-        err = remote()->transact(BnSurfaceComposer::AUTHENTICATE_SURFACE, data,
-                &reply);
-        if (err != NO_ERROR) {
-            ALOGE("ISurfaceComposer::authenticateSurfaceTexture: error "
-                    "performing transaction: %s (%d)", strerror(-err), -err);
-            return false;
-        }
-        int32_t result = 0;
-        err = reply.readInt32(&result);
-        if (err != NO_ERROR) {
-            ALOGE("ISurfaceComposer::authenticateSurfaceTexture: error "
-                    "retrieving result: %s (%d)", strerror(-err), -err);
-            return false;
-        }
-        return result != 0;
-    }
-
-    status_t getSupportedFrameTimestamps(std::vector<FrameEvent>* outSupported) const override {
-        if (!outSupported) {
-            return UNEXPECTED_NULL;
-        }
-        outSupported->clear();
-
-        Parcel data, reply;
-
-        status_t err = data.writeInterfaceToken(
-                ISurfaceComposer::getInterfaceDescriptor());
-        if (err != NO_ERROR) {
-            return err;
-        }
-
-        err = remote()->transact(
-                BnSurfaceComposer::GET_SUPPORTED_FRAME_TIMESTAMPS,
-                data, &reply);
-        if (err != NO_ERROR) {
-            return err;
-        }
-
-        int32_t result = 0;
-        err = reply.readInt32(&result);
-        if (err != NO_ERROR) {
-            return err;
-        }
-        if (result != NO_ERROR) {
-            return result;
-        }
-
-        std::vector<int32_t> supported;
-        err = reply.readInt32Vector(&supported);
-        if (err != NO_ERROR) {
-            return err;
-        }
-
-        outSupported->reserve(supported.size());
-        for (int32_t s : supported) {
-            outSupported->push_back(static_cast<FrameEvent>(s));
-        }
-        return NO_ERROR;
-    }
-
-    sp<IDisplayEventConnection> createDisplayEventConnection(
-            VsyncSource vsyncSource, EventRegistrationFlags eventRegistration) override {
-        Parcel data, reply;
-        sp<IDisplayEventConnection> result;
-        int err = data.writeInterfaceToken(
-                ISurfaceComposer::getInterfaceDescriptor());
-        if (err != NO_ERROR) {
-            return result;
-        }
-        data.writeInt32(static_cast<int32_t>(vsyncSource));
-        data.writeUint32(eventRegistration.get());
-        err = remote()->transact(
-                BnSurfaceComposer::CREATE_DISPLAY_EVENT_CONNECTION,
-                data, &reply);
-        if (err != NO_ERROR) {
-            ALOGE("ISurfaceComposer::createDisplayEventConnection: error performing "
-                    "transaction: %s (%d)", strerror(-err), -err);
-            return result;
-        }
-        result = interface_cast<IDisplayEventConnection>(reply.readStrongBinder());
-        return result;
-    }
-
-    status_t getStaticDisplayInfo(const sp<IBinder>& display,
-                                  ui::StaticDisplayInfo* info) override {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        data.writeStrongBinder(display);
-        remote()->transact(BnSurfaceComposer::GET_STATIC_DISPLAY_INFO, data, &reply);
-        const status_t result = reply.readInt32();
-        if (result != NO_ERROR) return result;
-        return reply.read(*info);
-    }
-
-    status_t getDynamicDisplayInfo(const sp<IBinder>& display,
-                                   ui::DynamicDisplayInfo* info) override {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        data.writeStrongBinder(display);
-        remote()->transact(BnSurfaceComposer::GET_DYNAMIC_DISPLAY_INFO, data, &reply);
-        const status_t result = reply.readInt32();
-        if (result != NO_ERROR) return result;
-        return reply.read(*info);
-    }
-
-    status_t getDisplayNativePrimaries(const sp<IBinder>& display,
-                                       ui::DisplayPrimaries& primaries) override {
-        Parcel data, reply;
-        status_t result = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (result != NO_ERROR) {
-            ALOGE("getDisplayNativePrimaries failed to writeInterfaceToken: %d", result);
-            return result;
-        }
-        result = data.writeStrongBinder(display);
-        if (result != NO_ERROR) {
-            ALOGE("getDisplayNativePrimaries failed to writeStrongBinder: %d", result);
-            return result;
-        }
-        result = remote()->transact(BnSurfaceComposer::GET_DISPLAY_NATIVE_PRIMARIES, data, &reply);
-        if (result != NO_ERROR) {
-            ALOGE("getDisplayNativePrimaries failed to transact: %d", result);
-            return result;
-        }
-        result = reply.readInt32();
-        if (result == NO_ERROR) {
-            memcpy(&primaries, reply.readInplace(sizeof(ui::DisplayPrimaries)),
-                    sizeof(ui::DisplayPrimaries));
-        }
-        return result;
-    }
-
-    status_t setActiveColorMode(const sp<IBinder>& display, ColorMode colorMode) override {
-        Parcel data, reply;
-        status_t result = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (result != NO_ERROR) {
-            ALOGE("setActiveColorMode failed to writeInterfaceToken: %d", result);
-            return result;
-        }
-        result = data.writeStrongBinder(display);
-        if (result != NO_ERROR) {
-            ALOGE("setActiveColorMode failed to writeStrongBinder: %d", result);
-            return result;
-        }
-        result = data.writeInt32(static_cast<int32_t>(colorMode));
-        if (result != NO_ERROR) {
-            ALOGE("setActiveColorMode failed to writeInt32: %d", result);
-            return result;
-        }
-        result = remote()->transact(BnSurfaceComposer::SET_ACTIVE_COLOR_MODE, data, &reply);
-        if (result != NO_ERROR) {
-            ALOGE("setActiveColorMode failed to transact: %d", result);
-            return result;
-        }
-        return static_cast<status_t>(reply.readInt32());
-    }
-
-    status_t setBootDisplayMode(const sp<IBinder>& display,
-                                ui::DisplayModeId displayModeId) override {
-        Parcel data, reply;
-        status_t result = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (result != NO_ERROR) {
-            ALOGE("setBootDisplayMode failed to writeInterfaceToken: %d", result);
-            return result;
-        }
-        result = data.writeStrongBinder(display);
-        if (result != NO_ERROR) {
-            ALOGE("setBootDisplayMode failed to writeStrongBinder: %d", result);
-            return result;
-        }
-        result = data.writeInt32(displayModeId);
-        if (result != NO_ERROR) {
-            ALOGE("setBootDisplayMode failed to writeIint32: %d", result);
-            return result;
-        }
-        result = remote()->transact(BnSurfaceComposer::SET_BOOT_DISPLAY_MODE, data, &reply);
-        if (result != NO_ERROR) {
-            ALOGE("setBootDisplayMode failed to transact: %d", result);
-        }
-        return result;
-    }
-
-    status_t clearAnimationFrameStats() override {
-        Parcel data, reply;
-        status_t result = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (result != NO_ERROR) {
-            ALOGE("clearAnimationFrameStats failed to writeInterfaceToken: %d", result);
-            return result;
-        }
-        result = remote()->transact(BnSurfaceComposer::CLEAR_ANIMATION_FRAME_STATS, data, &reply);
-        if (result != NO_ERROR) {
-            ALOGE("clearAnimationFrameStats failed to transact: %d", result);
-            return result;
-        }
-        return reply.readInt32();
-    }
-
-    status_t getAnimationFrameStats(FrameStats* outStats) const override {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        remote()->transact(BnSurfaceComposer::GET_ANIMATION_FRAME_STATS, data, &reply);
-        reply.read(*outStats);
-        return reply.readInt32();
-    }
-
-    virtual status_t overrideHdrTypes(const sp<IBinder>& display,
-                                      const std::vector<ui::Hdr>& hdrTypes) {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeStrongBinder, display);
-
-        std::vector<int32_t> hdrTypesVector;
-        for (ui::Hdr i : hdrTypes) {
-            hdrTypesVector.push_back(static_cast<int32_t>(i));
-        }
-        SAFE_PARCEL(data.writeInt32Vector, hdrTypesVector);
-
-        status_t result = remote()->transact(BnSurfaceComposer::OVERRIDE_HDR_TYPES, data, &reply);
-        if (result != NO_ERROR) {
-            ALOGE("overrideHdrTypes failed to transact: %d", result);
-            return result;
-        }
-        return result;
-    }
-
-    status_t onPullAtom(const int32_t atomId, std::string* pulledData, bool* success) {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeInt32, atomId);
-
-        status_t err = remote()->transact(BnSurfaceComposer::ON_PULL_ATOM, data, &reply);
-        if (err != NO_ERROR) {
-            ALOGE("onPullAtom failed to transact: %d", err);
-            return err;
-        }
-
-        int32_t size = 0;
-        SAFE_PARCEL(reply.readInt32, &size);
-        const void* dataPtr = reply.readInplace(size);
-        if (dataPtr == nullptr) {
-            return UNEXPECTED_NULL;
-        }
-        pulledData->assign((const char*)dataPtr, size);
-        SAFE_PARCEL(reply.readBool, success);
-        return NO_ERROR;
-    }
-
-    status_t enableVSyncInjections(bool enable) override {
-        Parcel data, reply;
-        status_t result = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (result != NO_ERROR) {
-            ALOGE("enableVSyncInjections failed to writeInterfaceToken: %d", result);
-            return result;
-        }
-        result = data.writeBool(enable);
-        if (result != NO_ERROR) {
-            ALOGE("enableVSyncInjections failed to writeBool: %d", result);
-            return result;
-        }
-        result = remote()->transact(BnSurfaceComposer::ENABLE_VSYNC_INJECTIONS, data, &reply,
-                                    IBinder::FLAG_ONEWAY);
-        if (result != NO_ERROR) {
-            ALOGE("enableVSyncInjections failed to transact: %d", result);
-            return result;
-        }
-        return result;
-    }
-
-    status_t injectVSync(nsecs_t when) override {
-        Parcel data, reply;
-        status_t result = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (result != NO_ERROR) {
-            ALOGE("injectVSync failed to writeInterfaceToken: %d", result);
-            return result;
-        }
-        result = data.writeInt64(when);
-        if (result != NO_ERROR) {
-            ALOGE("injectVSync failed to writeInt64: %d", result);
-            return result;
-        }
-        result = remote()->transact(BnSurfaceComposer::INJECT_VSYNC, data, &reply,
-                                    IBinder::FLAG_ONEWAY);
-        if (result != NO_ERROR) {
-            ALOGE("injectVSync failed to transact: %d", result);
-            return result;
-        }
-        return result;
-    }
-
-    status_t getLayerDebugInfo(std::vector<LayerDebugInfo>* outLayers) override {
-        if (!outLayers) {
-            return UNEXPECTED_NULL;
-        }
-
-        Parcel data, reply;
-
-        status_t err = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (err != NO_ERROR) {
-            return err;
-        }
-
-        err = remote()->transact(BnSurfaceComposer::GET_LAYER_DEBUG_INFO, data, &reply);
-        if (err != NO_ERROR) {
-            return err;
-        }
-
-        int32_t result = 0;
-        err = reply.readInt32(&result);
-        if (err != NO_ERROR) {
-            return err;
-        }
-        if (result != NO_ERROR) {
-            return result;
-        }
-
-        outLayers->clear();
-        return reply.readParcelableVector(outLayers);
-    }
-
-    status_t getCompositionPreference(ui::Dataspace* defaultDataspace,
-                                      ui::PixelFormat* defaultPixelFormat,
-                                      ui::Dataspace* wideColorGamutDataspace,
-                                      ui::PixelFormat* wideColorGamutPixelFormat) const override {
-        Parcel data, reply;
-        status_t error = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (error != NO_ERROR) {
-            return error;
-        }
-        error = remote()->transact(BnSurfaceComposer::GET_COMPOSITION_PREFERENCE, data, &reply);
-        if (error != NO_ERROR) {
-            return error;
-        }
-        error = static_cast<status_t>(reply.readInt32());
-        if (error == NO_ERROR) {
-            *defaultDataspace = static_cast<ui::Dataspace>(reply.readInt32());
-            *defaultPixelFormat = static_cast<ui::PixelFormat>(reply.readInt32());
-            *wideColorGamutDataspace = static_cast<ui::Dataspace>(reply.readInt32());
-            *wideColorGamutPixelFormat = static_cast<ui::PixelFormat>(reply.readInt32());
-        }
-        return error;
-    }
-
-    status_t getColorManagement(bool* outGetColorManagement) const override {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        remote()->transact(BnSurfaceComposer::GET_COLOR_MANAGEMENT, data, &reply);
-        bool result;
-        status_t err = reply.readBool(&result);
-        if (err == NO_ERROR) {
-            *outGetColorManagement = result;
-        }
-        return err;
-    }
-
-    status_t getDisplayedContentSamplingAttributes(const sp<IBinder>& display,
-                                                   ui::PixelFormat* outFormat,
-                                                   ui::Dataspace* outDataspace,
-                                                   uint8_t* outComponentMask) const override {
-        if (!outFormat || !outDataspace || !outComponentMask) return BAD_VALUE;
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        data.writeStrongBinder(display);
-
-        status_t error =
-                remote()->transact(BnSurfaceComposer::GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES,
-                                   data, &reply);
-        if (error != NO_ERROR) {
-            return error;
-        }
-
-        uint32_t value = 0;
-        error = reply.readUint32(&value);
-        if (error != NO_ERROR) {
-            return error;
-        }
-        *outFormat = static_cast<ui::PixelFormat>(value);
-
-        error = reply.readUint32(&value);
-        if (error != NO_ERROR) {
-            return error;
-        }
-        *outDataspace = static_cast<ui::Dataspace>(value);
-
-        error = reply.readUint32(&value);
-        if (error != NO_ERROR) {
-            return error;
-        }
-        *outComponentMask = static_cast<uint8_t>(value);
-        return error;
-    }
-
-    status_t setDisplayContentSamplingEnabled(const sp<IBinder>& display, bool enable,
-                                              uint8_t componentMask, uint64_t maxFrames) override {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        data.writeStrongBinder(display);
-        data.writeBool(enable);
-        data.writeByte(static_cast<int8_t>(componentMask));
-        data.writeUint64(maxFrames);
-        status_t result =
-                remote()->transact(BnSurfaceComposer::SET_DISPLAY_CONTENT_SAMPLING_ENABLED, data,
-                                   &reply);
-        return result;
-    }
-
-    status_t getDisplayedContentSample(const sp<IBinder>& display, uint64_t maxFrames,
-                                       uint64_t timestamp,
-                                       DisplayedFrameStats* outStats) const override {
-        if (!outStats) return BAD_VALUE;
-
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        data.writeStrongBinder(display);
-        data.writeUint64(maxFrames);
-        data.writeUint64(timestamp);
-
-        status_t result =
-                remote()->transact(BnSurfaceComposer::GET_DISPLAYED_CONTENT_SAMPLE, data, &reply);
-
-        if (result != NO_ERROR) {
-            return result;
-        }
-
-        result = reply.readUint64(&outStats->numFrames);
-        if (result != NO_ERROR) {
-            return result;
-        }
-
-        result = reply.readUint64Vector(&outStats->component_0_sample);
-        if (result != NO_ERROR) {
-            return result;
-        }
-        result = reply.readUint64Vector(&outStats->component_1_sample);
-        if (result != NO_ERROR) {
-            return result;
-        }
-        result = reply.readUint64Vector(&outStats->component_2_sample);
-        if (result != NO_ERROR) {
-            return result;
-        }
-        result = reply.readUint64Vector(&outStats->component_3_sample);
-        return result;
-    }
-
-    status_t getProtectedContentSupport(bool* outSupported) const override {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        status_t error =
-                remote()->transact(BnSurfaceComposer::GET_PROTECTED_CONTENT_SUPPORT, data, &reply);
-        if (error != NO_ERROR) {
-            return error;
-        }
-        error = reply.readBool(outSupported);
-        return error;
-    }
-
-    status_t addRegionSamplingListener(const Rect& samplingArea, const sp<IBinder>& stopLayerHandle,
-                                       const sp<IRegionSamplingListener>& listener) override {
-        Parcel data, reply;
-        status_t error = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (error != NO_ERROR) {
-            ALOGE("addRegionSamplingListener: Failed to write interface token");
-            return error;
-        }
-        error = data.write(samplingArea);
-        if (error != NO_ERROR) {
-            ALOGE("addRegionSamplingListener: Failed to write sampling area");
-            return error;
-        }
-        error = data.writeStrongBinder(stopLayerHandle);
-        if (error != NO_ERROR) {
-            ALOGE("addRegionSamplingListener: Failed to write stop layer handle");
-            return error;
-        }
-        error = data.writeStrongBinder(IInterface::asBinder(listener));
-        if (error != NO_ERROR) {
-            ALOGE("addRegionSamplingListener: Failed to write listener");
-            return error;
-        }
-        error = remote()->transact(BnSurfaceComposer::ADD_REGION_SAMPLING_LISTENER, data, &reply);
-        if (error != NO_ERROR) {
-            ALOGE("addRegionSamplingListener: Failed to transact");
-        }
-        return error;
-    }
-
-    status_t removeRegionSamplingListener(const sp<IRegionSamplingListener>& listener) override {
-        Parcel data, reply;
-        status_t error = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (error != NO_ERROR) {
-            ALOGE("removeRegionSamplingListener: Failed to write interface token");
-            return error;
-        }
-        error = data.writeStrongBinder(IInterface::asBinder(listener));
-        if (error != NO_ERROR) {
-            ALOGE("removeRegionSamplingListener: Failed to write listener");
-            return error;
-        }
-        error = remote()->transact(BnSurfaceComposer::REMOVE_REGION_SAMPLING_LISTENER, data,
-                                   &reply);
-        if (error != NO_ERROR) {
-            ALOGE("removeRegionSamplingListener: Failed to transact");
-        }
-        return error;
-    }
-
-    virtual status_t addFpsListener(int32_t taskId, const sp<gui::IFpsListener>& listener) {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeInt32, taskId);
-        SAFE_PARCEL(data.writeStrongBinder, IInterface::asBinder(listener));
-        const status_t error =
-                remote()->transact(BnSurfaceComposer::ADD_FPS_LISTENER, data, &reply);
-        if (error != OK) {
-            ALOGE("addFpsListener: Failed to transact");
-        }
-        return error;
-    }
-
-    virtual status_t removeFpsListener(const sp<gui::IFpsListener>& listener) {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeStrongBinder, IInterface::asBinder(listener));
-
-        const status_t error =
-                remote()->transact(BnSurfaceComposer::REMOVE_FPS_LISTENER, data, &reply);
-        if (error != OK) {
-            ALOGE("removeFpsListener: Failed to transact");
-        }
-        return error;
-    }
-
-    virtual status_t addTunnelModeEnabledListener(
-            const sp<gui::ITunnelModeEnabledListener>& listener) {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeStrongBinder, IInterface::asBinder(listener));
-
-        const status_t error =
-                remote()->transact(BnSurfaceComposer::ADD_TUNNEL_MODE_ENABLED_LISTENER, data,
-                                   &reply);
-        if (error != NO_ERROR) {
-            ALOGE("addTunnelModeEnabledListener: Failed to transact");
-        }
-        return error;
-    }
-
-    virtual status_t removeTunnelModeEnabledListener(
-            const sp<gui::ITunnelModeEnabledListener>& listener) {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeStrongBinder, IInterface::asBinder(listener));
-
-        const status_t error =
-                remote()->transact(BnSurfaceComposer::REMOVE_TUNNEL_MODE_ENABLED_LISTENER, data,
-                                   &reply);
-        if (error != NO_ERROR) {
-            ALOGE("removeTunnelModeEnabledListener: Failed to transact");
-        }
-        return error;
-    }
-
-    status_t setDesiredDisplayModeSpecs(const sp<IBinder>& displayToken,
-                                        ui::DisplayModeId defaultMode, bool allowGroupSwitching,
-                                        float primaryRefreshRateMin, float primaryRefreshRateMax,
-                                        float appRequestRefreshRateMin,
-                                        float appRequestRefreshRateMax) override {
-        Parcel data, reply;
-        status_t result = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (result != NO_ERROR) {
-            ALOGE("setDesiredDisplayModeSpecs: failed to writeInterfaceToken: %d", result);
-            return result;
-        }
-        result = data.writeStrongBinder(displayToken);
-        if (result != NO_ERROR) {
-            ALOGE("setDesiredDisplayModeSpecs: failed to write display token: %d", result);
-            return result;
-        }
-        result = data.writeInt32(defaultMode);
-        if (result != NO_ERROR) {
-            ALOGE("setDesiredDisplayModeSpecs failed to write defaultMode: %d", result);
-            return result;
-        }
-        result = data.writeBool(allowGroupSwitching);
-        if (result != NO_ERROR) {
-            ALOGE("setDesiredDisplayModeSpecs failed to write allowGroupSwitching: %d", result);
-            return result;
-        }
-        result = data.writeFloat(primaryRefreshRateMin);
-        if (result != NO_ERROR) {
-            ALOGE("setDesiredDisplayModeSpecs failed to write primaryRefreshRateMin: %d", result);
-            return result;
-        }
-        result = data.writeFloat(primaryRefreshRateMax);
-        if (result != NO_ERROR) {
-            ALOGE("setDesiredDisplayModeSpecs failed to write primaryRefreshRateMax: %d", result);
-            return result;
-        }
-        result = data.writeFloat(appRequestRefreshRateMin);
-        if (result != NO_ERROR) {
-            ALOGE("setDesiredDisplayModeSpecs failed to write appRequestRefreshRateMin: %d",
-                  result);
-            return result;
-        }
-        result = data.writeFloat(appRequestRefreshRateMax);
-        if (result != NO_ERROR) {
-            ALOGE("setDesiredDisplayModeSpecs failed to write appRequestRefreshRateMax: %d",
-                  result);
-            return result;
-        }
-
-        result =
-                remote()->transact(BnSurfaceComposer::SET_DESIRED_DISPLAY_MODE_SPECS, data, &reply);
-        if (result != NO_ERROR) {
-            ALOGE("setDesiredDisplayModeSpecs failed to transact: %d", result);
-            return result;
-        }
-        return reply.readInt32();
-    }
-
-    status_t getDesiredDisplayModeSpecs(const sp<IBinder>& displayToken,
-                                        ui::DisplayModeId* outDefaultMode,
-                                        bool* outAllowGroupSwitching,
-                                        float* outPrimaryRefreshRateMin,
-                                        float* outPrimaryRefreshRateMax,
-                                        float* outAppRequestRefreshRateMin,
-                                        float* outAppRequestRefreshRateMax) override {
-        if (!outDefaultMode || !outAllowGroupSwitching || !outPrimaryRefreshRateMin ||
-            !outPrimaryRefreshRateMax || !outAppRequestRefreshRateMin ||
-            !outAppRequestRefreshRateMax) {
-            return BAD_VALUE;
-        }
-        Parcel data, reply;
-        status_t result = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (result != NO_ERROR) {
-            ALOGE("getDesiredDisplayModeSpecs failed to writeInterfaceToken: %d", result);
-            return result;
-        }
-        result = data.writeStrongBinder(displayToken);
-        if (result != NO_ERROR) {
-            ALOGE("getDesiredDisplayModeSpecs failed to writeStrongBinder: %d", result);
-            return result;
-        }
-        result =
-                remote()->transact(BnSurfaceComposer::GET_DESIRED_DISPLAY_MODE_SPECS, data, &reply);
-        if (result != NO_ERROR) {
-            ALOGE("getDesiredDisplayModeSpecs failed to transact: %d", result);
-            return result;
-        }
-
-        result = reply.readInt32(outDefaultMode);
-        if (result != NO_ERROR) {
-            ALOGE("getDesiredDisplayModeSpecs failed to read defaultMode: %d", result);
-            return result;
-        }
-        if (*outDefaultMode < 0) {
-            ALOGE("%s: defaultMode must be non-negative but it was %d", __func__, *outDefaultMode);
-            return BAD_VALUE;
-        }
-
-        result = reply.readBool(outAllowGroupSwitching);
-        if (result != NO_ERROR) {
-            ALOGE("getDesiredDisplayModeSpecs failed to read allowGroupSwitching: %d", result);
-            return result;
-        }
-        result = reply.readFloat(outPrimaryRefreshRateMin);
-        if (result != NO_ERROR) {
-            ALOGE("getDesiredDisplayModeSpecs failed to read primaryRefreshRateMin: %d", result);
-            return result;
-        }
-        result = reply.readFloat(outPrimaryRefreshRateMax);
-        if (result != NO_ERROR) {
-            ALOGE("getDesiredDisplayModeSpecs failed to read primaryRefreshRateMax: %d", result);
-            return result;
-        }
-        result = reply.readFloat(outAppRequestRefreshRateMin);
-        if (result != NO_ERROR) {
-            ALOGE("getDesiredDisplayModeSpecs failed to read appRequestRefreshRateMin: %d", result);
-            return result;
-        }
-        result = reply.readFloat(outAppRequestRefreshRateMax);
-        if (result != NO_ERROR) {
-            ALOGE("getDesiredDisplayModeSpecs failed to read appRequestRefreshRateMax: %d", result);
-            return result;
-        }
-        return reply.readInt32();
-    }
-
-    status_t setGlobalShadowSettings(const half4& ambientColor, const half4& spotColor,
-                                     float lightPosY, float lightPosZ, float lightRadius) override {
-        Parcel data, reply;
-        status_t error = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (error != NO_ERROR) {
-            ALOGE("setGlobalShadowSettings: failed to write interface token: %d", error);
-            return error;
-        }
-
-        std::vector<float> shadowConfig = {ambientColor.r, ambientColor.g, ambientColor.b,
-                                           ambientColor.a, spotColor.r,    spotColor.g,
-                                           spotColor.b,    spotColor.a,    lightPosY,
-                                           lightPosZ,      lightRadius};
-
-        error = data.writeFloatVector(shadowConfig);
-        if (error != NO_ERROR) {
-            ALOGE("setGlobalShadowSettings: failed to write shadowConfig: %d", error);
-            return error;
-        }
-
-        error = remote()->transact(BnSurfaceComposer::SET_GLOBAL_SHADOW_SETTINGS, data, &reply,
-                                   IBinder::FLAG_ONEWAY);
-        if (error != NO_ERROR) {
-            ALOGE("setGlobalShadowSettings: failed to transact: %d", error);
-            return error;
-        }
-        return NO_ERROR;
-    }
-
-    status_t getDisplayDecorationSupport(
-            const sp<IBinder>& displayToken,
-            std::optional<common::DisplayDecorationSupport>* outSupport) const override {
-        Parcel data, reply;
-        status_t error = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (error != NO_ERROR) {
-            ALOGE("getDisplayDecorationSupport: failed to write interface token: %d", error);
-            return error;
-        }
-        error = data.writeStrongBinder(displayToken);
-        if (error != NO_ERROR) {
-            ALOGE("getDisplayDecorationSupport: failed to write display token: %d", error);
-            return error;
-        }
-        error = remote()->transact(BnSurfaceComposer::GET_DISPLAY_DECORATION_SUPPORT, data, &reply);
-        if (error != NO_ERROR) {
-            ALOGE("getDisplayDecorationSupport: failed to transact: %d", error);
-            return error;
-        }
-        bool support;
-        error = reply.readBool(&support);
-        if (error != NO_ERROR) {
-            ALOGE("getDisplayDecorationSupport: failed to read support: %d", error);
-            return error;
-        }
-
-        if (support) {
-            int32_t format, alphaInterpretation;
-            error = reply.readInt32(&format);
-            if (error != NO_ERROR) {
-                ALOGE("getDisplayDecorationSupport: failed to read format: %d", error);
-                return error;
-            }
-            error = reply.readInt32(&alphaInterpretation);
-            if (error != NO_ERROR) {
-                ALOGE("getDisplayDecorationSupport: failed to read alphaInterpretation: %d", error);
-                return error;
-            }
-            outSupport->emplace();
-            outSupport->value().format = static_cast<common::PixelFormat>(format);
-            outSupport->value().alphaInterpretation =
-                    static_cast<common::AlphaInterpretation>(alphaInterpretation);
-        } else {
-            outSupport->reset();
-        }
-        return NO_ERROR;
-    }
-
-    status_t setFrameRate(const sp<IGraphicBufferProducer>& surface, float frameRate,
-                          int8_t compatibility, int8_t changeFrameRateStrategy) override {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeStrongBinder, IInterface::asBinder(surface));
-        SAFE_PARCEL(data.writeFloat, frameRate);
-        SAFE_PARCEL(data.writeByte, compatibility);
-        SAFE_PARCEL(data.writeByte, changeFrameRateStrategy);
-
-        status_t err = remote()->transact(BnSurfaceComposer::SET_FRAME_RATE, data, &reply);
-        if (err != NO_ERROR) {
-            ALOGE("setFrameRate: failed to transact: %s (%d)", strerror(-err), err);
-            return err;
-        }
-
-        return reply.readInt32();
-    }
-
-    status_t setFrameTimelineInfo(const sp<IGraphicBufferProducer>& surface,
-                                  const FrameTimelineInfo& frameTimelineInfo) override {
-        Parcel data, reply;
-        status_t err = data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        if (err != NO_ERROR) {
-            ALOGE("%s: failed writing interface token: %s (%d)", __func__, strerror(-err), -err);
-            return err;
-        }
-
-        err = data.writeStrongBinder(IInterface::asBinder(surface));
-        if (err != NO_ERROR) {
-            ALOGE("%s: failed writing strong binder: %s (%d)", __func__, strerror(-err), -err);
-            return err;
-        }
-
-        SAFE_PARCEL(frameTimelineInfo.write, data);
-
-        err = remote()->transact(BnSurfaceComposer::SET_FRAME_TIMELINE_INFO, data, &reply);
-        if (err != NO_ERROR) {
-            ALOGE("%s: failed to transact: %s (%d)", __func__, strerror(-err), err);
-            return err;
-        }
-
-        return reply.readInt32();
-    }
-
-    status_t addTransactionTraceListener(
-            const sp<gui::ITransactionTraceListener>& listener) override {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeStrongBinder, IInterface::asBinder(listener));
-
-        return remote()->transact(BnSurfaceComposer::ADD_TRANSACTION_TRACE_LISTENER, data, &reply);
-    }
-
-    /**
-     * Get priority of the RenderEngine in surface flinger.
-     */
-    int getGPUContextPriority() override {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        status_t err =
-                remote()->transact(BnSurfaceComposer::GET_GPU_CONTEXT_PRIORITY, data, &reply);
-        if (err != NO_ERROR) {
-            ALOGE("getGPUContextPriority failed to read data:  %s (%d)", strerror(-err), err);
-            return 0;
-        }
-        return reply.readInt32();
-    }
-
-    status_t getMaxAcquiredBufferCount(int* buffers) const override {
-        Parcel data, reply;
-        data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
-        status_t err =
-                remote()->transact(BnSurfaceComposer::GET_MAX_ACQUIRED_BUFFER_COUNT, data, &reply);
-        if (err != NO_ERROR) {
-            ALOGE("getMaxAcquiredBufferCount failed to read data:  %s (%d)", strerror(-err), err);
-            return err;
-        }
-
-        return reply.readInt32(buffers);
-    }
-
-    status_t addWindowInfosListener(
-            const sp<IWindowInfosListener>& windowInfosListener) const override {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeStrongBinder, IInterface::asBinder(windowInfosListener));
-        return remote()->transact(BnSurfaceComposer::ADD_WINDOW_INFOS_LISTENER, data, &reply);
-    }
-
-    status_t removeWindowInfosListener(
-            const sp<IWindowInfosListener>& windowInfosListener) const override {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeStrongBinder, IInterface::asBinder(windowInfosListener));
-        return remote()->transact(BnSurfaceComposer::REMOVE_WINDOW_INFOS_LISTENER, data, &reply);
-    }
-
-    status_t setOverrideFrameRate(uid_t uid, float frameRate) override {
-        Parcel data, reply;
-        SAFE_PARCEL(data.writeInterfaceToken, ISurfaceComposer::getInterfaceDescriptor());
-        SAFE_PARCEL(data.writeUint32, uid);
-        SAFE_PARCEL(data.writeFloat, frameRate);
-
-        status_t err = remote()->transact(BnSurfaceComposer::SET_OVERRIDE_FRAME_RATE, data, &reply);
-        if (err != NO_ERROR) {
-            ALOGE("setOverrideFrameRate: failed to transact %s (%d)", strerror(-err), err);
-            return err;
-        }
-
-        return NO_ERROR;
-    }
 };
 
 // Out-of-line virtual method definition to trigger vtable emission in this
@@ -1031,18 +127,12 @@
 status_t BnSurfaceComposer::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
-        case CREATE_CONNECTION: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IBinder> b = IInterface::asBinder(createConnection());
-            reply->writeStrongBinder(b);
-            return NO_ERROR;
-        }
+    switch (code) {
         case SET_TRANSACTION_STATE: {
             CHECK_INTERFACE(ISurfaceComposer, data, reply);
 
             FrameTimelineInfo frameTimelineInfo;
-            SAFE_PARCEL(frameTimelineInfo.read, data);
+            frameTimelineInfo.readFromParcel(&data);
 
             uint32_t count = 0;
             SAFE_PARCEL_READ_SIZE(data.readUint32, &count, data.dataSize());
@@ -1075,11 +165,14 @@
             SAFE_PARCEL(data.readInt64, &desiredPresentTime);
             SAFE_PARCEL(data.readBool, &isAutoTimestamp);
 
-            client_cache_t uncachedBuffer;
+            SAFE_PARCEL_READ_SIZE(data.readUint32, &count, data.dataSize());
+            std::vector<client_cache_t> uncacheBuffers(count);
             sp<IBinder> tmpBinder;
-            SAFE_PARCEL(data.readNullableStrongBinder, &tmpBinder);
-            uncachedBuffer.token = tmpBinder;
-            SAFE_PARCEL(data.readUint64, &uncachedBuffer.id);
+            for (size_t i = 0; i < count; i++) {
+                SAFE_PARCEL(data.readNullableStrongBinder, &tmpBinder);
+                uncacheBuffers[i].token = tmpBinder;
+                SAFE_PARCEL(data.readUint64, &uncacheBuffers[i].id);
+            }
 
             bool hasListenerCallbacks = false;
             SAFE_PARCEL(data.readBool, &hasListenerCallbacks);
@@ -1097,646 +190,16 @@
             uint64_t transactionId = -1;
             SAFE_PARCEL(data.readUint64, &transactionId);
 
+            SAFE_PARCEL_READ_SIZE(data.readUint32, &count, data.dataSize());
+            std::vector<uint64_t> mergedTransactions(count);
+            for (size_t i = 0; i < count; i++) {
+                SAFE_PARCEL(data.readUint64, &mergedTransactions[i]);
+            }
+
             return setTransactionState(frameTimelineInfo, state, displays, stateFlags, applyToken,
-                                       inputWindowCommands, desiredPresentTime, isAutoTimestamp,
-                                       uncachedBuffer, hasListenerCallbacks, listenerCallbacks,
-                                       transactionId);
-        }
-        case BOOT_FINISHED: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            bootFinished();
-            return NO_ERROR;
-        }
-        case AUTHENTICATE_SURFACE: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IGraphicBufferProducer> bufferProducer =
-                    interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
-            int32_t result = authenticateSurfaceTexture(bufferProducer) ? 1 : 0;
-            reply->writeInt32(result);
-            return NO_ERROR;
-        }
-        case GET_SUPPORTED_FRAME_TIMESTAMPS: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            std::vector<FrameEvent> supportedTimestamps;
-            status_t result = getSupportedFrameTimestamps(&supportedTimestamps);
-            status_t err = reply->writeInt32(result);
-            if (err != NO_ERROR) {
-                return err;
-            }
-            if (result != NO_ERROR) {
-                return result;
-            }
-
-            std::vector<int32_t> supported;
-            supported.reserve(supportedTimestamps.size());
-            for (FrameEvent s : supportedTimestamps) {
-                supported.push_back(static_cast<int32_t>(s));
-            }
-            return reply->writeInt32Vector(supported);
-        }
-        case CREATE_DISPLAY_EVENT_CONNECTION: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            auto vsyncSource = static_cast<ISurfaceComposer::VsyncSource>(data.readInt32());
-            EventRegistrationFlags eventRegistration =
-                    static_cast<EventRegistration>(data.readUint32());
-
-            sp<IDisplayEventConnection> connection(
-                    createDisplayEventConnection(vsyncSource, eventRegistration));
-            reply->writeStrongBinder(IInterface::asBinder(connection));
-            return NO_ERROR;
-        }
-        case GET_STATIC_DISPLAY_INFO: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            ui::StaticDisplayInfo info;
-            const sp<IBinder> display = data.readStrongBinder();
-            const status_t result = getStaticDisplayInfo(display, &info);
-            SAFE_PARCEL(reply->writeInt32, result);
-            if (result != NO_ERROR) return result;
-            SAFE_PARCEL(reply->write, info);
-            return NO_ERROR;
-        }
-        case GET_DYNAMIC_DISPLAY_INFO: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            ui::DynamicDisplayInfo info;
-            const sp<IBinder> display = data.readStrongBinder();
-            const status_t result = getDynamicDisplayInfo(display, &info);
-            SAFE_PARCEL(reply->writeInt32, result);
-            if (result != NO_ERROR) return result;
-            SAFE_PARCEL(reply->write, info);
-            return NO_ERROR;
-        }
-        case GET_DISPLAY_NATIVE_PRIMARIES: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            ui::DisplayPrimaries primaries;
-            sp<IBinder> display = nullptr;
-
-            status_t result = data.readStrongBinder(&display);
-            if (result != NO_ERROR) {
-                ALOGE("getDisplayNativePrimaries failed to readStrongBinder: %d", result);
-                return result;
-            }
-
-            result = getDisplayNativePrimaries(display, primaries);
-            reply->writeInt32(result);
-            if (result == NO_ERROR) {
-                memcpy(reply->writeInplace(sizeof(ui::DisplayPrimaries)), &primaries,
-                        sizeof(ui::DisplayPrimaries));
-            }
-
-            return NO_ERROR;
-        }
-        case SET_ACTIVE_COLOR_MODE: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IBinder> display = nullptr;
-            status_t result = data.readStrongBinder(&display);
-            if (result != NO_ERROR) {
-                ALOGE("getActiveColorMode failed to readStrongBinder: %d", result);
-                return result;
-            }
-            int32_t colorModeInt = 0;
-            result = data.readInt32(&colorModeInt);
-            if (result != NO_ERROR) {
-                ALOGE("setActiveColorMode failed to readInt32: %d", result);
-                return result;
-            }
-            result = setActiveColorMode(display,
-                    static_cast<ColorMode>(colorModeInt));
-            result = reply->writeInt32(result);
-            return result;
-        }
-        case SET_BOOT_DISPLAY_MODE: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IBinder> display = nullptr;
-            status_t result = data.readStrongBinder(&display);
-            if (result != NO_ERROR) {
-                ALOGE("setBootDisplayMode failed to readStrongBinder: %d", result);
-                return result;
-            }
-            ui::DisplayModeId displayModeId;
-            result = data.readInt32(&displayModeId);
-            if (result != NO_ERROR) {
-                ALOGE("setBootDisplayMode failed to readInt32: %d", result);
-                return result;
-            }
-            return setBootDisplayMode(display, displayModeId);
-        }
-        case CLEAR_ANIMATION_FRAME_STATS: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            status_t result = clearAnimationFrameStats();
-            reply->writeInt32(result);
-            return NO_ERROR;
-        }
-        case GET_ANIMATION_FRAME_STATS: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            FrameStats stats;
-            status_t result = getAnimationFrameStats(&stats);
-            reply->write(stats);
-            reply->writeInt32(result);
-            return NO_ERROR;
-        }
-        case ENABLE_VSYNC_INJECTIONS: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            bool enable = false;
-            status_t result = data.readBool(&enable);
-            if (result != NO_ERROR) {
-                ALOGE("enableVSyncInjections failed to readBool: %d", result);
-                return result;
-            }
-            return enableVSyncInjections(enable);
-        }
-        case INJECT_VSYNC: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            int64_t when = 0;
-            status_t result = data.readInt64(&when);
-            if (result != NO_ERROR) {
-                ALOGE("enableVSyncInjections failed to readInt64: %d", result);
-                return result;
-            }
-            return injectVSync(when);
-        }
-        case GET_LAYER_DEBUG_INFO: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            std::vector<LayerDebugInfo> outLayers;
-            status_t result = getLayerDebugInfo(&outLayers);
-            reply->writeInt32(result);
-            if (result == NO_ERROR)
-            {
-                result = reply->writeParcelableVector(outLayers);
-            }
-            return result;
-        }
-        case GET_COMPOSITION_PREFERENCE: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            ui::Dataspace defaultDataspace;
-            ui::PixelFormat defaultPixelFormat;
-            ui::Dataspace wideColorGamutDataspace;
-            ui::PixelFormat wideColorGamutPixelFormat;
-            status_t error =
-                    getCompositionPreference(&defaultDataspace, &defaultPixelFormat,
-                                             &wideColorGamutDataspace, &wideColorGamutPixelFormat);
-            reply->writeInt32(error);
-            if (error == NO_ERROR) {
-                reply->writeInt32(static_cast<int32_t>(defaultDataspace));
-                reply->writeInt32(static_cast<int32_t>(defaultPixelFormat));
-                reply->writeInt32(static_cast<int32_t>(wideColorGamutDataspace));
-                reply->writeInt32(static_cast<int32_t>(wideColorGamutPixelFormat));
-            }
-            return error;
-        }
-        case GET_COLOR_MANAGEMENT: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            bool result;
-            status_t error = getColorManagement(&result);
-            if (error == NO_ERROR) {
-                reply->writeBool(result);
-            }
-            return error;
-        }
-        case GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-
-            sp<IBinder> display = data.readStrongBinder();
-            ui::PixelFormat format;
-            ui::Dataspace dataspace;
-            uint8_t component = 0;
-            auto result =
-                    getDisplayedContentSamplingAttributes(display, &format, &dataspace, &component);
-            if (result == NO_ERROR) {
-                reply->writeUint32(static_cast<uint32_t>(format));
-                reply->writeUint32(static_cast<uint32_t>(dataspace));
-                reply->writeUint32(static_cast<uint32_t>(component));
-            }
-            return result;
-        }
-        case SET_DISPLAY_CONTENT_SAMPLING_ENABLED: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-
-            sp<IBinder> display = nullptr;
-            bool enable = false;
-            int8_t componentMask = 0;
-            uint64_t maxFrames = 0;
-            status_t result = data.readStrongBinder(&display);
-            if (result != NO_ERROR) {
-                ALOGE("setDisplayContentSamplingEnabled failure in reading Display token: %d",
-                      result);
-                return result;
-            }
-
-            result = data.readBool(&enable);
-            if (result != NO_ERROR) {
-                ALOGE("setDisplayContentSamplingEnabled failure in reading enable: %d", result);
-                return result;
-            }
-
-            result = data.readByte(static_cast<int8_t*>(&componentMask));
-            if (result != NO_ERROR) {
-                ALOGE("setDisplayContentSamplingEnabled failure in reading component mask: %d",
-                      result);
-                return result;
-            }
-
-            result = data.readUint64(&maxFrames);
-            if (result != NO_ERROR) {
-                ALOGE("setDisplayContentSamplingEnabled failure in reading max frames: %d", result);
-                return result;
-            }
-
-            return setDisplayContentSamplingEnabled(display, enable,
-                                                    static_cast<uint8_t>(componentMask), maxFrames);
-        }
-        case GET_DISPLAYED_CONTENT_SAMPLE: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-
-            sp<IBinder> display = data.readStrongBinder();
-            uint64_t maxFrames = 0;
-            uint64_t timestamp = 0;
-
-            status_t result = data.readUint64(&maxFrames);
-            if (result != NO_ERROR) {
-                ALOGE("getDisplayedContentSample failure in reading max frames: %d", result);
-                return result;
-            }
-
-            result = data.readUint64(&timestamp);
-            if (result != NO_ERROR) {
-                ALOGE("getDisplayedContentSample failure in reading timestamp: %d", result);
-                return result;
-            }
-
-            DisplayedFrameStats stats;
-            result = getDisplayedContentSample(display, maxFrames, timestamp, &stats);
-            if (result == NO_ERROR) {
-                reply->writeUint64(stats.numFrames);
-                reply->writeUint64Vector(stats.component_0_sample);
-                reply->writeUint64Vector(stats.component_1_sample);
-                reply->writeUint64Vector(stats.component_2_sample);
-                reply->writeUint64Vector(stats.component_3_sample);
-            }
-            return result;
-        }
-        case GET_PROTECTED_CONTENT_SUPPORT: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            bool result;
-            status_t error = getProtectedContentSupport(&result);
-            if (error == NO_ERROR) {
-                reply->writeBool(result);
-            }
-            return error;
-        }
-        case ADD_REGION_SAMPLING_LISTENER: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            Rect samplingArea;
-            status_t result = data.read(samplingArea);
-            if (result != NO_ERROR) {
-                ALOGE("addRegionSamplingListener: Failed to read sampling area");
-                return result;
-            }
-            sp<IBinder> stopLayerHandle;
-            result = data.readNullableStrongBinder(&stopLayerHandle);
-            if (result != NO_ERROR) {
-                ALOGE("addRegionSamplingListener: Failed to read stop layer handle");
-                return result;
-            }
-            sp<IRegionSamplingListener> listener;
-            result = data.readNullableStrongBinder(&listener);
-            if (result != NO_ERROR) {
-                ALOGE("addRegionSamplingListener: Failed to read listener");
-                return result;
-            }
-            return addRegionSamplingListener(samplingArea, stopLayerHandle, listener);
-        }
-        case REMOVE_REGION_SAMPLING_LISTENER: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IRegionSamplingListener> listener;
-            status_t result = data.readNullableStrongBinder(&listener);
-            if (result != NO_ERROR) {
-                ALOGE("removeRegionSamplingListener: Failed to read listener");
-                return result;
-            }
-            return removeRegionSamplingListener(listener);
-        }
-        case ADD_FPS_LISTENER: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            int32_t taskId;
-            status_t result = data.readInt32(&taskId);
-            if (result != NO_ERROR) {
-                ALOGE("addFpsListener: Failed to read layer handle");
-                return result;
-            }
-            sp<gui::IFpsListener> listener;
-            result = data.readNullableStrongBinder(&listener);
-            if (result != NO_ERROR) {
-                ALOGE("addFpsListener: Failed to read listener");
-                return result;
-            }
-            return addFpsListener(taskId, listener);
-        }
-        case REMOVE_FPS_LISTENER: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<gui::IFpsListener> listener;
-            status_t result = data.readNullableStrongBinder(&listener);
-            if (result != NO_ERROR) {
-                ALOGE("removeFpsListener: Failed to read listener");
-                return result;
-            }
-            return removeFpsListener(listener);
-        }
-        case ADD_TUNNEL_MODE_ENABLED_LISTENER: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<gui::ITunnelModeEnabledListener> listener;
-            status_t result = data.readNullableStrongBinder(&listener);
-            if (result != NO_ERROR) {
-                ALOGE("addTunnelModeEnabledListener: Failed to read listener");
-                return result;
-            }
-            return addTunnelModeEnabledListener(listener);
-        }
-        case REMOVE_TUNNEL_MODE_ENABLED_LISTENER: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<gui::ITunnelModeEnabledListener> listener;
-            status_t result = data.readNullableStrongBinder(&listener);
-            if (result != NO_ERROR) {
-                ALOGE("removeTunnelModeEnabledListener: Failed to read listener");
-                return result;
-            }
-            return removeTunnelModeEnabledListener(listener);
-        }
-        case SET_DESIRED_DISPLAY_MODE_SPECS: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IBinder> displayToken = data.readStrongBinder();
-            ui::DisplayModeId defaultMode;
-            status_t result = data.readInt32(&defaultMode);
-            if (result != NO_ERROR) {
-                ALOGE("setDesiredDisplayModeSpecs: failed to read defaultMode: %d", result);
-                return result;
-            }
-            if (defaultMode < 0) {
-                ALOGE("%s: defaultMode must be non-negative but it was %d", __func__, defaultMode);
-                return BAD_VALUE;
-            }
-            bool allowGroupSwitching;
-            result = data.readBool(&allowGroupSwitching);
-            if (result != NO_ERROR) {
-                ALOGE("setDesiredDisplayModeSpecs: failed to read allowGroupSwitching: %d", result);
-                return result;
-            }
-            float primaryRefreshRateMin;
-            result = data.readFloat(&primaryRefreshRateMin);
-            if (result != NO_ERROR) {
-                ALOGE("setDesiredDisplayModeSpecs: failed to read primaryRefreshRateMin: %d",
-                      result);
-                return result;
-            }
-            float primaryRefreshRateMax;
-            result = data.readFloat(&primaryRefreshRateMax);
-            if (result != NO_ERROR) {
-                ALOGE("setDesiredDisplayModeSpecs: failed to read primaryRefreshRateMax: %d",
-                      result);
-                return result;
-            }
-            float appRequestRefreshRateMin;
-            result = data.readFloat(&appRequestRefreshRateMin);
-            if (result != NO_ERROR) {
-                ALOGE("setDesiredDisplayModeSpecs: failed to read appRequestRefreshRateMin: %d",
-                      result);
-                return result;
-            }
-            float appRequestRefreshRateMax;
-            result = data.readFloat(&appRequestRefreshRateMax);
-            if (result != NO_ERROR) {
-                ALOGE("setDesiredDisplayModeSpecs: failed to read appRequestRefreshRateMax: %d",
-                      result);
-                return result;
-            }
-            result = setDesiredDisplayModeSpecs(displayToken, defaultMode, allowGroupSwitching,
-                                                primaryRefreshRateMin, primaryRefreshRateMax,
-                                                appRequestRefreshRateMin, appRequestRefreshRateMax);
-            if (result != NO_ERROR) {
-                ALOGE("setDesiredDisplayModeSpecs: failed to call setDesiredDisplayModeSpecs: "
-                      "%d",
-                      result);
-                return result;
-            }
-            reply->writeInt32(result);
-            return result;
-        }
-        case GET_DESIRED_DISPLAY_MODE_SPECS: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IBinder> displayToken = data.readStrongBinder();
-            ui::DisplayModeId defaultMode;
-            bool allowGroupSwitching;
-            float primaryRefreshRateMin;
-            float primaryRefreshRateMax;
-            float appRequestRefreshRateMin;
-            float appRequestRefreshRateMax;
-
-            status_t result =
-                    getDesiredDisplayModeSpecs(displayToken, &defaultMode, &allowGroupSwitching,
-                                               &primaryRefreshRateMin, &primaryRefreshRateMax,
-                                               &appRequestRefreshRateMin,
-                                               &appRequestRefreshRateMax);
-            if (result != NO_ERROR) {
-                ALOGE("getDesiredDisplayModeSpecs: failed to get getDesiredDisplayModeSpecs: "
-                      "%d",
-                      result);
-                return result;
-            }
-
-            result = reply->writeInt32(defaultMode);
-            if (result != NO_ERROR) {
-                ALOGE("getDesiredDisplayModeSpecs: failed to write defaultMode: %d", result);
-                return result;
-            }
-            result = reply->writeBool(allowGroupSwitching);
-            if (result != NO_ERROR) {
-                ALOGE("getDesiredDisplayModeSpecs: failed to write allowGroupSwitching: %d",
-                      result);
-                return result;
-            }
-            result = reply->writeFloat(primaryRefreshRateMin);
-            if (result != NO_ERROR) {
-                ALOGE("getDesiredDisplayModeSpecs: failed to write primaryRefreshRateMin: %d",
-                      result);
-                return result;
-            }
-            result = reply->writeFloat(primaryRefreshRateMax);
-            if (result != NO_ERROR) {
-                ALOGE("getDesiredDisplayModeSpecs: failed to write primaryRefreshRateMax: %d",
-                      result);
-                return result;
-            }
-            result = reply->writeFloat(appRequestRefreshRateMin);
-            if (result != NO_ERROR) {
-                ALOGE("getDesiredDisplayModeSpecs: failed to write appRequestRefreshRateMin: %d",
-                      result);
-                return result;
-            }
-            result = reply->writeFloat(appRequestRefreshRateMax);
-            if (result != NO_ERROR) {
-                ALOGE("getDesiredDisplayModeSpecs: failed to write appRequestRefreshRateMax: %d",
-                      result);
-                return result;
-            }
-            reply->writeInt32(result);
-            return result;
-        }
-        case SET_GLOBAL_SHADOW_SETTINGS: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-
-            std::vector<float> shadowConfig;
-            status_t error = data.readFloatVector(&shadowConfig);
-            if (error != NO_ERROR || shadowConfig.size() != 11) {
-                ALOGE("setGlobalShadowSettings: failed to read shadowConfig: %d", error);
-                return error;
-            }
-
-            half4 ambientColor = {shadowConfig[0], shadowConfig[1], shadowConfig[2],
-                                  shadowConfig[3]};
-            half4 spotColor = {shadowConfig[4], shadowConfig[5], shadowConfig[6], shadowConfig[7]};
-            float lightPosY = shadowConfig[8];
-            float lightPosZ = shadowConfig[9];
-            float lightRadius = shadowConfig[10];
-            return setGlobalShadowSettings(ambientColor, spotColor, lightPosY, lightPosZ,
-                                           lightRadius);
-        }
-        case GET_DISPLAY_DECORATION_SUPPORT: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IBinder> displayToken;
-            SAFE_PARCEL(data.readNullableStrongBinder, &displayToken);
-            std::optional<common::DisplayDecorationSupport> support;
-            auto error = getDisplayDecorationSupport(displayToken, &support);
-            if (error != NO_ERROR) {
-                ALOGE("getDisplayDecorationSupport failed with error %d", error);
-                return error;
-            }
-            reply->writeBool(support.has_value());
-            if (support) {
-                reply->writeInt32(static_cast<int32_t>(support.value().format));
-                reply->writeInt32(static_cast<int32_t>(support.value().alphaInterpretation));
-            }
-            return error;
-        }
-        case SET_FRAME_RATE: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IBinder> binder;
-            SAFE_PARCEL(data.readStrongBinder, &binder);
-
-            sp<IGraphicBufferProducer> surface = interface_cast<IGraphicBufferProducer>(binder);
-            if (!surface) {
-                ALOGE("setFrameRate: failed to cast to IGraphicBufferProducer");
-                return BAD_VALUE;
-            }
-            float frameRate;
-            SAFE_PARCEL(data.readFloat, &frameRate);
-
-            int8_t compatibility;
-            SAFE_PARCEL(data.readByte, &compatibility);
-
-            int8_t changeFrameRateStrategy;
-            SAFE_PARCEL(data.readByte, &changeFrameRateStrategy);
-
-            status_t result =
-                    setFrameRate(surface, frameRate, compatibility, changeFrameRateStrategy);
-            reply->writeInt32(result);
-            return NO_ERROR;
-        }
-        case SET_FRAME_TIMELINE_INFO: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IBinder> binder;
-            status_t err = data.readStrongBinder(&binder);
-            if (err != NO_ERROR) {
-                ALOGE("setFrameTimelineInfo: failed to read strong binder: %s (%d)", strerror(-err),
-                      -err);
-                return err;
-            }
-            sp<IGraphicBufferProducer> surface = interface_cast<IGraphicBufferProducer>(binder);
-            if (!surface) {
-                ALOGE("setFrameTimelineInfo: failed to cast to IGraphicBufferProducer: %s (%d)",
-                      strerror(-err), -err);
-                return err;
-            }
-
-            FrameTimelineInfo frameTimelineInfo;
-            SAFE_PARCEL(frameTimelineInfo.read, data);
-
-            status_t result = setFrameTimelineInfo(surface, frameTimelineInfo);
-            reply->writeInt32(result);
-            return NO_ERROR;
-        }
-        case ADD_TRANSACTION_TRACE_LISTENER: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<gui::ITransactionTraceListener> listener;
-            SAFE_PARCEL(data.readStrongBinder, &listener);
-
-            return addTransactionTraceListener(listener);
-        }
-        case GET_GPU_CONTEXT_PRIORITY: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            int priority = getGPUContextPriority();
-            SAFE_PARCEL(reply->writeInt32, priority);
-            return NO_ERROR;
-        }
-        case GET_MAX_ACQUIRED_BUFFER_COUNT: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            int buffers = 0;
-            int err = getMaxAcquiredBufferCount(&buffers);
-            if (err != NO_ERROR) {
-                return err;
-            }
-            SAFE_PARCEL(reply->writeInt32, buffers);
-            return NO_ERROR;
-        }
-        case OVERRIDE_HDR_TYPES: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IBinder> display = nullptr;
-            SAFE_PARCEL(data.readStrongBinder, &display);
-
-            std::vector<int32_t> hdrTypes;
-            SAFE_PARCEL(data.readInt32Vector, &hdrTypes);
-
-            std::vector<ui::Hdr> hdrTypesVector;
-            for (int i : hdrTypes) {
-                hdrTypesVector.push_back(static_cast<ui::Hdr>(i));
-            }
-            return overrideHdrTypes(display, hdrTypesVector);
-        }
-        case ON_PULL_ATOM: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            int32_t atomId = 0;
-            SAFE_PARCEL(data.readInt32, &atomId);
-
-            std::string pulledData;
-            bool success;
-            status_t err = onPullAtom(atomId, &pulledData, &success);
-            SAFE_PARCEL(reply->writeByteArray, pulledData.size(),
-                        reinterpret_cast<const uint8_t*>(pulledData.data()));
-            SAFE_PARCEL(reply->writeBool, success);
-            return err;
-        }
-        case ADD_WINDOW_INFOS_LISTENER: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IWindowInfosListener> listener;
-            SAFE_PARCEL(data.readStrongBinder, &listener);
-
-            return addWindowInfosListener(listener);
-        }
-        case REMOVE_WINDOW_INFOS_LISTENER: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-            sp<IWindowInfosListener> listener;
-            SAFE_PARCEL(data.readStrongBinder, &listener);
-
-            return removeWindowInfosListener(listener);
-        }
-        case SET_OVERRIDE_FRAME_RATE: {
-            CHECK_INTERFACE(ISurfaceComposer, data, reply);
-
-            uid_t uid;
-            SAFE_PARCEL(data.readUint32, &uid);
-
-            float frameRate;
-            SAFE_PARCEL(data.readFloat, &frameRate);
-
-            return setOverrideFrameRate(uid, frameRate);
+                                       std::move(inputWindowCommands), desiredPresentTime,
+                                       isAutoTimestamp, uncacheBuffers, hasListenerCallbacks,
+                                       listenerCallbacks, transactionId, mergedTransactions);
         }
         default: {
             return BBinder::onTransact(code, data, reply, flags);
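
Note: the counted reads added above imply a matching write order on the client side, which no longer lives in this file now that the hand-rolled BpSurfaceComposer methods are removed. The following is a minimal sketch of helpers that would produce that layout, not the actual client code; CacheEntry is a local stand-in for client_cache_t, and in the real parcel the two lists are separated by the listener-callback fields and the transaction id.

    #include <binder/IBinder.h>
    #include <binder/Parcel.h>
    #include <utils/Errors.h>
    #include <utils/StrongPointer.h>
    #include <vector>

    namespace example {

    struct CacheEntry {                       // stand-in for client_cache_t
        android::sp<android::IBinder> token;  // may be null; read with readNullableStrongBinder
        uint64_t id = 0;
    };

    // Count-prefixed uncache-buffer list, mirroring the first new loop in onTransact above.
    android::status_t writeUncacheBuffers(android::Parcel& data,
                                          const std::vector<CacheEntry>& buffers) {
        android::status_t err = data.writeUint32(static_cast<uint32_t>(buffers.size()));
        if (err != android::NO_ERROR) return err;
        for (const auto& entry : buffers) {
            if ((err = data.writeStrongBinder(entry.token)) != android::NO_ERROR) return err;
            if ((err = data.writeUint64(entry.id)) != android::NO_ERROR) return err;
        }
        return android::NO_ERROR;
    }

    // Count-prefixed merged-transaction-id list, mirroring the second new loop.
    android::status_t writeMergedTransactionIds(android::Parcel& data,
                                                const std::vector<uint64_t>& ids) {
        android::status_t err = data.writeUint32(static_cast<uint32_t>(ids.size()));
        if (err != android::NO_ERROR) return err;
        for (uint64_t id : ids) {
            if ((err = data.writeUint64(id)) != android::NO_ERROR) return err;
        }
        return android::NO_ERROR;
    }

    } // namespace example
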
diff --git a/libs/gui/ISurfaceComposerClient.cpp b/libs/gui/ISurfaceComposerClient.cpp
deleted file mode 100644
index 5e7a7ec..0000000
--- a/libs/gui/ISurfaceComposerClient.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// tag as surfaceflinger
-#define LOG_TAG "SurfaceFlinger"
-
-#include <gui/ISurfaceComposerClient.h>
-
-#include <gui/IGraphicBufferProducer.h>
-
-#include <binder/SafeInterface.h>
-
-#include <ui/FrameStats.h>
-
-namespace android {
-
-namespace { // Anonymous
-
-enum class Tag : uint32_t {
-    CREATE_SURFACE = IBinder::FIRST_CALL_TRANSACTION,
-    CREATE_WITH_SURFACE_PARENT,
-    CLEAR_LAYER_FRAME_STATS,
-    GET_LAYER_FRAME_STATS,
-    MIRROR_SURFACE,
-    LAST = MIRROR_SURFACE,
-};
-
-} // Anonymous namespace
-
-class BpSurfaceComposerClient : public SafeBpInterface<ISurfaceComposerClient> {
-public:
-    explicit BpSurfaceComposerClient(const sp<IBinder>& impl)
-          : SafeBpInterface<ISurfaceComposerClient>(impl, "BpSurfaceComposerClient") {}
-
-    ~BpSurfaceComposerClient() override;
-
-    status_t createSurface(const String8& name, uint32_t width, uint32_t height, PixelFormat format,
-                           uint32_t flags, const sp<IBinder>& parent, LayerMetadata metadata,
-                           sp<IBinder>* handle, sp<IGraphicBufferProducer>* gbp,
-                           int32_t* outLayerId, uint32_t* outTransformHint) override {
-        return callRemote<decltype(&ISurfaceComposerClient::createSurface)>(Tag::CREATE_SURFACE,
-                                                                            name, width, height,
-                                                                            format, flags, parent,
-                                                                            std::move(metadata),
-                                                                            handle, gbp, outLayerId,
-                                                                            outTransformHint);
-    }
-
-    status_t createWithSurfaceParent(const String8& name, uint32_t width, uint32_t height,
-                                     PixelFormat format, uint32_t flags,
-                                     const sp<IGraphicBufferProducer>& parent,
-                                     LayerMetadata metadata, sp<IBinder>* handle,
-                                     sp<IGraphicBufferProducer>* gbp, int32_t* outLayerId,
-                                     uint32_t* outTransformHint) override {
-        return callRemote<decltype(
-                &ISurfaceComposerClient::createWithSurfaceParent)>(Tag::CREATE_WITH_SURFACE_PARENT,
-                                                                   name, width, height, format,
-                                                                   flags, parent,
-                                                                   std::move(metadata), handle, gbp,
-                                                                   outLayerId, outTransformHint);
-    }
-
-    status_t clearLayerFrameStats(const sp<IBinder>& handle) const override {
-        return callRemote<decltype(
-                &ISurfaceComposerClient::clearLayerFrameStats)>(Tag::CLEAR_LAYER_FRAME_STATS,
-                                                                handle);
-    }
-
-    status_t getLayerFrameStats(const sp<IBinder>& handle, FrameStats* outStats) const override {
-        return callRemote<decltype(
-                &ISurfaceComposerClient::getLayerFrameStats)>(Tag::GET_LAYER_FRAME_STATS, handle,
-                                                              outStats);
-    }
-
-    status_t mirrorSurface(const sp<IBinder>& mirrorFromHandle, sp<IBinder>* outHandle,
-                           int32_t* outLayerId) override {
-        return callRemote<decltype(&ISurfaceComposerClient::mirrorSurface)>(Tag::MIRROR_SURFACE,
-                                                                            mirrorFromHandle,
-                                                                            outHandle, outLayerId);
-    }
-};
-
-// Out-of-line virtual method definition to trigger vtable emission in this
-// translation unit (see clang warning -Wweak-vtables)
-BpSurfaceComposerClient::~BpSurfaceComposerClient() {}
-
-IMPLEMENT_META_INTERFACE(SurfaceComposerClient, "android.ui.ISurfaceComposerClient");
-
-// ----------------------------------------------------------------------
-
-status_t BnSurfaceComposerClient::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
-                                             uint32_t flags) {
-    if (code < IBinder::FIRST_CALL_TRANSACTION || code > static_cast<uint32_t>(Tag::LAST)) {
-        return BBinder::onTransact(code, data, reply, flags);
-    }
-    auto tag = static_cast<Tag>(code);
-    switch (tag) {
-        case Tag::CREATE_SURFACE:
-            return callLocal(data, reply, &ISurfaceComposerClient::createSurface);
-        case Tag::CREATE_WITH_SURFACE_PARENT:
-            return callLocal(data, reply, &ISurfaceComposerClient::createWithSurfaceParent);
-        case Tag::CLEAR_LAYER_FRAME_STATS:
-            return callLocal(data, reply, &ISurfaceComposerClient::clearLayerFrameStats);
-        case Tag::GET_LAYER_FRAME_STATS:
-            return callLocal(data, reply, &ISurfaceComposerClient::getLayerFrameStats);
-        case Tag::MIRROR_SURFACE:
-            return callLocal(data, reply, &ISurfaceComposerClient::mirrorSurface);
-    }
-}
-
-} // namespace android
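
Note: the hand-written SafeInterface proxy and stub for ISurfaceComposerClient are deleted outright; the surviving callers use the AIDL-generated android::gui::ISurfaceComposerClient header included in LayerState.cpp below. For reference, the bounds-checked tag dispatch that the deleted BnSurfaceComposerClient::onTransact performed is sketched here as a self-contained stand-in with no binder types; all names and values are illustrative.

    #include <cstdint>

    namespace example {

    // Illustrative tag enum; the real one started at IBinder::FIRST_CALL_TRANSACTION.
    enum class Tag : uint32_t { CREATE_SURFACE = 1, MIRROR_SURFACE, LAST = MIRROR_SURFACE };

    enum Status : int32_t { OK = 0, UNKNOWN_TRANSACTION = -1 };

    // Reject out-of-range codes first (the real code forwarded them to
    // BBinder::onTransact), then switch on the strongly typed tag.
    Status dispatch(uint32_t code) {
        if (code < static_cast<uint32_t>(Tag::CREATE_SURFACE) ||
            code > static_cast<uint32_t>(Tag::LAST)) {
            return UNKNOWN_TRANSACTION;
        }
        switch (static_cast<Tag>(code)) {
            case Tag::CREATE_SURFACE:  // callLocal(data, reply, &Interface::createSurface) in the original
                return OK;
            case Tag::MIRROR_SURFACE:
                return OK;
        }
        return UNKNOWN_TRANSACTION;
    }

    } // namespace example
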
diff --git a/libs/gui/ITransactionCompletedListener.cpp b/libs/gui/ITransactionCompletedListener.cpp
index e4b8bad..ffe79a3 100644
--- a/libs/gui/ITransactionCompletedListener.cpp
+++ b/libs/gui/ITransactionCompletedListener.cpp
@@ -17,6 +17,9 @@
 #define LOG_TAG "ITransactionCompletedListener"
 //#define LOG_NDEBUG 0
 
+#include <cstdint>
+#include <optional>
+
 #include <gui/ISurfaceComposer.h>
 #include <gui/ITransactionCompletedListener.h>
 #include <gui/LayerState.h>
@@ -30,11 +33,18 @@
     ON_TRANSACTION_COMPLETED = IBinder::FIRST_CALL_TRANSACTION,
     ON_RELEASE_BUFFER,
     ON_TRANSACTION_QUEUE_STALLED,
-    LAST = ON_RELEASE_BUFFER,
+    ON_TRUSTED_PRESENTATION_CHANGED,
+    LAST = ON_TRUSTED_PRESENTATION_CHANGED,
 };
 
 } // Anonymous namespace
 
+namespace { // Anonymous
+
+constexpr int32_t kSerializedCallbackTypeOnCompleteWithJankData = 2;
+
+} // Anonymous namespace
+
 status_t FrameEventHistoryStats::writeToParcel(Parcel* output) const {
     status_t err = output->writeUint64(frameNumber);
     if (err != NO_ERROR) return err;
@@ -126,7 +136,12 @@
     } else {
         SAFE_PARCEL(output->writeBool, false);
     }
-    SAFE_PARCEL(output->writeUint32, transformHint);
+
+    SAFE_PARCEL(output->writeBool, transformHint.has_value());
+    if (transformHint.has_value()) {
+        output->writeUint32(transformHint.value());
+    }
+
     SAFE_PARCEL(output->writeUint32, currentMaxAcquiredBufferCount);
     SAFE_PARCEL(output->writeParcelable, eventStats);
     SAFE_PARCEL(output->writeInt32, static_cast<int32_t>(jankData.size()));
@@ -156,7 +171,16 @@
         previousReleaseFence = new Fence();
         SAFE_PARCEL(input->read, *previousReleaseFence);
     }
-    SAFE_PARCEL(input->readUint32, &transformHint);
+    bool hasTransformHint = false;
+    SAFE_PARCEL(input->readBool, &hasTransformHint);
+    if (hasTransformHint) {
+        uint32_t tempTransformHint;
+        SAFE_PARCEL(input->readUint32, &tempTransformHint);
+        transformHint = std::make_optional(tempTransformHint);
+    } else {
+        transformHint = std::nullopt;
+    }
+
     SAFE_PARCEL(input->readUint32, &currentMaxAcquiredBufferCount);
     SAFE_PARCEL(input->readParcelable, &eventStats);
 
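
Note: transformHint is now parceled as an optional value: a presence bool followed by the value only when present, as the write and read paths above show. Below is a minimal, self-contained helper pair under that assumption; the function names are illustrative and not part of the gui API.

    #include <binder/Parcel.h>
    #include <optional>
    #include <utils/Errors.h>

    namespace example {

    // Writes a presence flag, then the value only when present.
    android::status_t writeOptionalUint32(android::Parcel& out,
                                          const std::optional<uint32_t>& value) {
        android::status_t err = out.writeBool(value.has_value());
        if (err != android::NO_ERROR) return err;
        return value ? out.writeUint32(*value) : android::NO_ERROR;
    }

    // Mirrors the writer: reads the flag, then the value only if the flag was set.
    android::status_t readOptionalUint32(const android::Parcel& in,
                                         std::optional<uint32_t>* value) {
        bool present = false;
        android::status_t err = in.readBool(&present);
        if (err != android::NO_ERROR) return err;
        if (!present) {
            value->reset();
            return android::NO_ERROR;
        }
        uint32_t tmp = 0;
        err = in.readUint32(&tmp);
        if (err != android::NO_ERROR) return err;
        *value = tmp;
        return android::NO_ERROR;
    }

    } // namespace example
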
@@ -273,15 +297,22 @@
 
     void onReleaseBuffer(ReleaseCallbackId callbackId, sp<Fence> releaseFence,
                          uint32_t currentMaxAcquiredBufferCount) override {
-        callRemoteAsync<decltype(
-                &ITransactionCompletedListener::onReleaseBuffer)>(Tag::ON_RELEASE_BUFFER,
-                                                                  callbackId, releaseFence,
-                                                                  currentMaxAcquiredBufferCount);
+        callRemoteAsync<decltype(&ITransactionCompletedListener::
+                                         onReleaseBuffer)>(Tag::ON_RELEASE_BUFFER, callbackId,
+                                                           releaseFence,
+                                                           currentMaxAcquiredBufferCount);
     }
 
-    void onTransactionQueueStalled() override {
-        callRemoteAsync<decltype(&ITransactionCompletedListener::onTransactionQueueStalled)>(
-            Tag::ON_TRANSACTION_QUEUE_STALLED);
+    void onTransactionQueueStalled(const String8& reason) override {
+        callRemoteAsync<
+                decltype(&ITransactionCompletedListener::
+                                 onTransactionQueueStalled)>(Tag::ON_TRANSACTION_QUEUE_STALLED,
+                                                             reason);
+    }
+
+    void onTrustedPresentationChanged(int id, bool inTrustedPresentationState) override {
+        callRemoteAsync<decltype(&ITransactionCompletedListener::onTrustedPresentationChanged)>(
+                Tag::ON_TRUSTED_PRESENTATION_CHANGED, id, inTrustedPresentationState);
     }
 };
 
@@ -306,6 +337,9 @@
         case Tag::ON_TRANSACTION_QUEUE_STALLED:
             return callLocalAsync(data, reply,
                                   &ITransactionCompletedListener::onTransactionQueueStalled);
+        case Tag::ON_TRUSTED_PRESENTATION_CHANGED:
+            return callLocalAsync(data, reply,
+                                  &ITransactionCompletedListener::onTrustedPresentationChanged);
     }
 }
 
@@ -321,7 +355,11 @@
 
 status_t CallbackId::writeToParcel(Parcel* output) const {
     SAFE_PARCEL(output->writeInt64, id);
-    SAFE_PARCEL(output->writeInt32, static_cast<int32_t>(type));
+    if (type == Type::ON_COMPLETE && includeJankData) {
+        SAFE_PARCEL(output->writeInt32, kSerializedCallbackTypeOnCompleteWithJankData);
+    } else {
+        SAFE_PARCEL(output->writeInt32, static_cast<int32_t>(type));
+    }
     return NO_ERROR;
 }
 
@@ -329,7 +367,13 @@
     SAFE_PARCEL(input->readInt64, &id);
     int32_t typeAsInt;
     SAFE_PARCEL(input->readInt32, &typeAsInt);
-    type = static_cast<CallbackId::Type>(typeAsInt);
+    if (typeAsInt == kSerializedCallbackTypeOnCompleteWithJankData) {
+        type = Type::ON_COMPLETE;
+        includeJankData = true;
+    } else {
+        type = static_cast<CallbackId::Type>(typeAsInt);
+        includeJankData = false;
+    }
     return NO_ERROR;
 }
 
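
Note: CallbackId stays a single int32 on the wire; ON_COMPLETE with includeJankData set is folded into the dedicated sentinel value instead of adding a separate boolean field, and the reader maps the sentinel back. A tiny stand-alone illustration of that mapping follows (the enum below is a stand-in, not the real CallbackId::Type):

    #include <cstdint>
    #include <utility>

    namespace example {

    // Stand-in for CallbackId::Type; the real enum has its own enumerators and values.
    enum class Type : int32_t { ON_COMPLETE = 0, OTHER = 1 };
    constexpr int32_t kOnCompleteWithJankData = 2;  // mirrors the sentinel constant above

    int32_t encode(Type type, bool includeJankData) {
        return (type == Type::ON_COMPLETE && includeJankData)
                ? kOnCompleteWithJankData
                : static_cast<int32_t>(type);
    }

    std::pair<Type, bool> decode(int32_t wire) {
        if (wire == kOnCompleteWithJankData) {
            return {Type::ON_COMPLETE, /*includeJankData=*/true};
        }
        return {static_cast<Type>(wire), /*includeJankData=*/false};
    }

    } // namespace example
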
diff --git a/libs/gui/LayerDebugInfo.cpp b/libs/gui/LayerDebugInfo.cpp
index ea5fb29..15b2221 100644
--- a/libs/gui/LayerDebugInfo.cpp
+++ b/libs/gui/LayerDebugInfo.cpp
@@ -27,7 +27,7 @@
 
 #define RETURN_ON_ERROR(X) do {status_t res = (X); if (res != NO_ERROR) return res;} while(false)
 
-namespace android {
+namespace android::gui {
 
 status_t LayerDebugInfo::writeToParcel(Parcel* parcel) const {
     RETURN_ON_ERROR(parcel->writeCString(mName.c_str()));
@@ -149,4 +149,4 @@
     return result;
 }
 
-} // android
+} // namespace android::gui
diff --git a/libs/gui/LayerMetadata.cpp b/libs/gui/LayerMetadata.cpp
index 189d51a..4e12fd3 100644
--- a/libs/gui/LayerMetadata.cpp
+++ b/libs/gui/LayerMetadata.cpp
@@ -23,7 +23,7 @@
 
 using android::base::StringPrintf;
 
-namespace android {
+namespace android::gui {
 
 LayerMetadata::LayerMetadata() = default;
 
@@ -144,4 +144,4 @@
     }
 }
 
-} // namespace android
+} // namespace android::gui
diff --git a/libs/gui/LayerState.cpp b/libs/gui/LayerState.cpp
index 74e6ae6..2322b70 100644
--- a/libs/gui/LayerState.cpp
+++ b/libs/gui/LayerState.cpp
@@ -19,15 +19,36 @@
 #include <cinttypes>
 #include <cmath>
 
+#include <android/gui/ISurfaceComposerClient.h>
 #include <android/native_window.h>
 #include <binder/Parcel.h>
 #include <gui/IGraphicBufferProducer.h>
-#include <gui/ISurfaceComposerClient.h>
 #include <gui/LayerState.h>
+#include <gui/SurfaceControl.h>
 #include <private/gui/ParcelUtils.h>
 #include <system/window.h>
 #include <utils/Errors.h>
 
+#define CHECK_DIFF(DIFF_RESULT, CHANGE_FLAG, OTHER, FIELD)          \
+    {                                                               \
+        if ((OTHER.what & CHANGE_FLAG) && (FIELD != OTHER.FIELD)) { \
+            DIFF_RESULT |= CHANGE_FLAG;                             \
+        }                                                           \
+    }
+
+#define CHECK_DIFF2(DIFF_RESULT, CHANGE_FLAG, OTHER, FIELD1, FIELD2) \
+    {                                                                \
+        CHECK_DIFF(DIFF_RESULT, CHANGE_FLAG, OTHER, FIELD1)          \
+        CHECK_DIFF(DIFF_RESULT, CHANGE_FLAG, OTHER, FIELD2)          \
+    }
+
+#define CHECK_DIFF3(DIFF_RESULT, CHANGE_FLAG, OTHER, FIELD1, FIELD2, FIELD3) \
+    {                                                                        \
+        CHECK_DIFF(DIFF_RESULT, CHANGE_FLAG, OTHER, FIELD1)                  \
+        CHECK_DIFF(DIFF_RESULT, CHANGE_FLAG, OTHER, FIELD2)                  \
+        CHECK_DIFF(DIFF_RESULT, CHANGE_FLAG, OTHER, FIELD3)                  \
+    }
+
 namespace android {
 
 using gui::FocusRequest;
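
Note: the new CHECK_DIFF helpers OR a change flag into the result only when the other state both declares the flag in its what mask and differs in the named field; they are written to be expanded inside a member function of the state struct, where FIELD resolves against *this. A hypothetical usage sketch, assuming the macros above are in scope (the struct, fields, and flag values are illustrative, not layer_state_t):

    struct DemoState {
        uint64_t what = 0;
        float x = 0.f;
        float y = 0.f;
        int32_t z = 0;

        static constexpr uint64_t ePositionChanged = 1ull << 0;  // illustrative flag values
        static constexpr uint64_t eLayerChanged = 1ull << 1;

        // Returns the subset of change flags where `other` both set the flag
        // and carries a different value than *this.
        uint64_t diffFrom(const DemoState& other) const {
            uint64_t result = 0;
            CHECK_DIFF2(result, ePositionChanged, other, x, y);
            CHECK_DIFF(result, eLayerChanged, other, z);
            return result;
        }
    };
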
@@ -40,22 +61,20 @@
         x(0),
         y(0),
         z(0),
-        w(0),
-        h(0),
-        alpha(0),
         flags(0),
         mask(0),
         reserved(0),
         cornerRadius(0.0f),
         backgroundBlurRadius(0),
-        transform(0),
+        color(0),
+        bufferTransform(0),
         transformToDisplayInverse(false),
         crop(Rect::INVALID_RECT),
         dataspace(ui::Dataspace::UNKNOWN),
         surfaceDamageRegion(),
         api(-1),
         colorTransform(mat4()),
-        bgColorAlpha(0),
+        bgColor(0),
         bgColorDataspace(ui::Dataspace::UNKNOWN),
         colorSpaceAgnostic(false),
         shadowRadius(0.0f),
@@ -63,9 +82,11 @@
         frameRate(0.0f),
         frameRateCompatibility(ANATIVEWINDOW_FRAME_RATE_COMPATIBILITY_DEFAULT),
         changeFrameRateStrategy(ANATIVEWINDOW_CHANGE_FRAME_RATE_ONLY_IF_SEAMLESS),
+        defaultFrameRateCompatibility(ANATIVEWINDOW_FRAME_RATE_COMPATIBILITY_DEFAULT),
         fixedTransformHint(ui::Transform::ROT_INVALID),
         autoRefresh(false),
         isTrustedOverlay(false),
+        borderEnabled(false),
         bufferCrop(Rect::INVALID_RECT),
         destinationFrame(Rect::INVALID_RECT),
         dropInputMode(gui::DropInputMode::NONE) {
@@ -82,25 +103,27 @@
     SAFE_PARCEL(output.writeFloat, x);
     SAFE_PARCEL(output.writeFloat, y);
     SAFE_PARCEL(output.writeInt32, z);
-    SAFE_PARCEL(output.writeUint32, w);
-    SAFE_PARCEL(output.writeUint32, h);
     SAFE_PARCEL(output.writeUint32, layerStack.id);
-    SAFE_PARCEL(output.writeFloat, alpha);
     SAFE_PARCEL(output.writeUint32, flags);
     SAFE_PARCEL(output.writeUint32, mask);
     SAFE_PARCEL(matrix.write, output);
     SAFE_PARCEL(output.write, crop);
-    SAFE_PARCEL(SurfaceControl::writeNullableToParcel, output, reparentSurfaceControl);
     SAFE_PARCEL(SurfaceControl::writeNullableToParcel, output, relativeLayerSurfaceControl);
     SAFE_PARCEL(SurfaceControl::writeNullableToParcel, output, parentSurfaceControlForChild);
     SAFE_PARCEL(output.writeFloat, color.r);
     SAFE_PARCEL(output.writeFloat, color.g);
     SAFE_PARCEL(output.writeFloat, color.b);
+    SAFE_PARCEL(output.writeFloat, color.a);
     SAFE_PARCEL(windowInfoHandle->writeToParcel, &output);
     SAFE_PARCEL(output.write, transparentRegion);
-    SAFE_PARCEL(output.writeUint32, transform);
+    SAFE_PARCEL(output.writeUint32, bufferTransform);
     SAFE_PARCEL(output.writeBool, transformToDisplayInverse);
-
+    SAFE_PARCEL(output.writeBool, borderEnabled);
+    SAFE_PARCEL(output.writeFloat, borderWidth);
+    SAFE_PARCEL(output.writeFloat, borderColor.r);
+    SAFE_PARCEL(output.writeFloat, borderColor.g);
+    SAFE_PARCEL(output.writeFloat, borderColor.b);
+    SAFE_PARCEL(output.writeFloat, borderColor.a);
     SAFE_PARCEL(output.writeUint32, static_cast<uint32_t>(dataspace));
     SAFE_PARCEL(output.write, hdrMetadata);
     SAFE_PARCEL(output.write, surfaceDamageRegion);
@@ -117,7 +140,10 @@
     SAFE_PARCEL(output.writeFloat, cornerRadius);
     SAFE_PARCEL(output.writeUint32, backgroundBlurRadius);
     SAFE_PARCEL(output.writeParcelable, metadata);
-    SAFE_PARCEL(output.writeFloat, bgColorAlpha);
+    SAFE_PARCEL(output.writeFloat, bgColor.r);
+    SAFE_PARCEL(output.writeFloat, bgColor.g);
+    SAFE_PARCEL(output.writeFloat, bgColor.b);
+    SAFE_PARCEL(output.writeFloat, bgColor.a);
     SAFE_PARCEL(output.writeUint32, static_cast<uint32_t>(bgColorDataspace));
     SAFE_PARCEL(output.writeBool, colorSpaceAgnostic);
     SAFE_PARCEL(output.writeVectorSize, listeners);
@@ -131,6 +157,7 @@
     SAFE_PARCEL(output.writeFloat, frameRate);
     SAFE_PARCEL(output.writeByte, frameRateCompatibility);
     SAFE_PARCEL(output.writeByte, changeFrameRateStrategy);
+    SAFE_PARCEL(output.writeByte, defaultFrameRateCompatibility);
     SAFE_PARCEL(output.writeUint32, fixedTransformHint);
     SAFE_PARCEL(output.writeBool, autoRefresh);
     SAFE_PARCEL(output.writeBool, dimmingEnabled);
@@ -161,6 +188,11 @@
     if (hasBufferData) {
         SAFE_PARCEL(output.writeParcelable, *bufferData);
     }
+    SAFE_PARCEL(output.writeParcelable, trustedPresentationThresholds);
+    SAFE_PARCEL(output.writeParcelable, trustedPresentationListener);
+    SAFE_PARCEL(output.writeFloat, currentHdrSdrRatio);
+    SAFE_PARCEL(output.writeFloat, desiredHdrSdrRatio);
+    SAFE_PARCEL(output.writeInt32, static_cast<int32_t>(cachingHint))
     return NO_ERROR;
 }
 
@@ -172,10 +204,7 @@
     SAFE_PARCEL(input.readFloat, &x);
     SAFE_PARCEL(input.readFloat, &y);
     SAFE_PARCEL(input.readInt32, &z);
-    SAFE_PARCEL(input.readUint32, &w);
-    SAFE_PARCEL(input.readUint32, &h);
     SAFE_PARCEL(input.readUint32, &layerStack.id);
-    SAFE_PARCEL(input.readFloat, &alpha);
 
     SAFE_PARCEL(input.readUint32, &flags);
 
@@ -183,7 +212,6 @@
 
     SAFE_PARCEL(matrix.read, input);
     SAFE_PARCEL(input.read, crop);
-    SAFE_PARCEL(SurfaceControl::readNullableFromParcel, input, &reparentSurfaceControl);
 
     SAFE_PARCEL(SurfaceControl::readNullableFromParcel, input, &relativeLayerSurfaceControl);
     SAFE_PARCEL(SurfaceControl::readNullableFromParcel, input, &parentSurfaceControlForChild);
@@ -195,11 +223,25 @@
     color.g = tmpFloat;
     SAFE_PARCEL(input.readFloat, &tmpFloat);
     color.b = tmpFloat;
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    color.a = tmpFloat;
+
     SAFE_PARCEL(windowInfoHandle->readFromParcel, &input);
 
     SAFE_PARCEL(input.read, transparentRegion);
-    SAFE_PARCEL(input.readUint32, &transform);
+    SAFE_PARCEL(input.readUint32, &bufferTransform);
     SAFE_PARCEL(input.readBool, &transformToDisplayInverse);
+    SAFE_PARCEL(input.readBool, &borderEnabled);
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    borderWidth = tmpFloat;
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    borderColor.r = tmpFloat;
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    borderColor.g = tmpFloat;
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    borderColor.b = tmpFloat;
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    borderColor.a = tmpFloat;
 
     uint32_t tmpUint32 = 0;
     SAFE_PARCEL(input.readUint32, &tmpUint32);
@@ -220,7 +262,14 @@
     SAFE_PARCEL(input.readUint32, &backgroundBlurRadius);
     SAFE_PARCEL(input.readParcelable, &metadata);
 
-    SAFE_PARCEL(input.readFloat, &bgColorAlpha);
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    bgColor.r = tmpFloat;
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    bgColor.g = tmpFloat;
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    bgColor.b = tmpFloat;
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    bgColor.a = tmpFloat;
     SAFE_PARCEL(input.readUint32, &tmpUint32);
     bgColorDataspace = static_cast<ui::Dataspace>(tmpUint32);
     SAFE_PARCEL(input.readBool, &colorSpaceAgnostic);
@@ -240,6 +289,7 @@
     SAFE_PARCEL(input.readFloat, &frameRate);
     SAFE_PARCEL(input.readByte, &frameRateCompatibility);
     SAFE_PARCEL(input.readByte, &changeFrameRateStrategy);
+    SAFE_PARCEL(input.readByte, &defaultFrameRateCompatibility);
     SAFE_PARCEL(input.readUint32, &tmpUint32);
     fixedTransformHint = static_cast<ui::Transform::RotationFlags>(tmpUint32);
     SAFE_PARCEL(input.readBool, &autoRefresh);
@@ -280,6 +330,19 @@
     } else {
         bufferData = nullptr;
     }
+
+    SAFE_PARCEL(input.readParcelable, &trustedPresentationThresholds);
+    SAFE_PARCEL(input.readParcelable, &trustedPresentationListener);
+
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    currentHdrSdrRatio = tmpFloat;
+    SAFE_PARCEL(input.readFloat, &tmpFloat);
+    desiredHdrSdrRatio = tmpFloat;
+
+    int32_t tmpInt32;
+    SAFE_PARCEL(input.readInt32, &tmpInt32);
+    cachingHint = static_cast<gui::CachingHint>(tmpInt32);
+
     return NO_ERROR;
 }
 
@@ -458,14 +521,9 @@
         what &= ~eRelativeLayerChanged;
         z = other.z;
     }
-    if (other.what & eSizeChanged) {
-        what |= eSizeChanged;
-        w = other.w;
-        h = other.h;
-    }
     if (other.what & eAlphaChanged) {
         what |= eAlphaChanged;
-        alpha = other.alpha;
+        color.a = other.color.a;
     }
     if (other.what & eMatrixChanged) {
         what |= eMatrixChanged;
@@ -507,12 +565,9 @@
         what |= eReparent;
         parentSurfaceControlForChild = other.parentSurfaceControlForChild;
     }
-    if (other.what & eDestroySurface) {
-        what |= eDestroySurface;
-    }
-    if (other.what & eTransformChanged) {
-        what |= eTransformChanged;
-        transform = other.transform;
+    if (other.what & eBufferTransformChanged) {
+        what |= eBufferTransformChanged;
+        bufferTransform = other.bufferTransform;
     }
     if (other.what & eTransformToDisplayInverseChanged) {
         what |= eTransformToDisplayInverseChanged;
@@ -526,10 +581,24 @@
         what |= eBufferChanged;
         bufferData = other.bufferData;
     }
+    if (other.what & eTrustedPresentationInfoChanged) {
+        what |= eTrustedPresentationInfoChanged;
+        trustedPresentationListener = other.trustedPresentationListener;
+        trustedPresentationThresholds = other.trustedPresentationThresholds;
+    }
     if (other.what & eDataspaceChanged) {
         what |= eDataspaceChanged;
         dataspace = other.dataspace;
     }
+    if (other.what & eExtendedRangeBrightnessChanged) {
+        what |= eExtendedRangeBrightnessChanged;
+        desiredHdrSdrRatio = other.desiredHdrSdrRatio;
+        currentHdrSdrRatio = other.currentHdrSdrRatio;
+    }
+    if (other.what & eCachingHintChanged) {
+        what |= eCachingHintChanged;
+        cachingHint = other.cachingHint;
+    }
     if (other.what & eHdrMetadataChanged) {
         what |= eHdrMetadataChanged;
         hdrMetadata = other.hdrMetadata;
@@ -559,8 +628,7 @@
     }
     if (other.what & eBackgroundColorChanged) {
         what |= eBackgroundColorChanged;
-        color = other.color;
-        bgColorAlpha = other.bgColorAlpha;
+        bgColor = other.bgColor;
         bgColorDataspace = other.bgColorDataspace;
     }
     if (other.what & eMetadataChanged) {
@@ -571,6 +639,16 @@
         what |= eShadowRadiusChanged;
         shadowRadius = other.shadowRadius;
     }
+    if (other.what & eRenderBorderChanged) {
+        what |= eRenderBorderChanged;
+        borderEnabled = other.borderEnabled;
+        borderWidth = other.borderWidth;
+        borderColor = other.borderColor;
+    }
+    if (other.what & eDefaultFrameRateCompatibilityChanged) {
+        what |= eDefaultFrameRateCompatibilityChanged;
+        defaultFrameRateCompatibility = other.defaultFrameRateCompatibility;
+    }
     if (other.what & eFrameRateSelectionPriority) {
         what |= eFrameRateSelectionPriority;
         frameRateSelectionPriority = other.frameRateSelectionPriority;
@@ -614,7 +692,7 @@
     }
     if (other.what & eColorChanged) {
         what |= eColorChanged;
-        color = other.color;
+        color.rgb = other.color.rgb;
     }
     if (other.what & eColorSpaceAgnosticChanged) {
         what |= eColorSpaceAgnosticChanged;
@@ -624,6 +702,9 @@
         what |= eDimmingEnabledChanged;
         dimmingEnabled = other.dimmingEnabled;
     }
+    if (other.what & eFlushJankData) {
+        what |= eFlushJankData;
+    }
     if ((other.what & what) != other.what) {
         ALOGE("Unmerged SurfaceComposer Transaction properties. LayerState::merge needs updating? "
               "other.what=0x%" PRIX64 " what=0x%" PRIX64 " unmerged flags=0x%" PRIX64,
@@ -631,12 +712,83 @@
     }
 }
 
+uint64_t layer_state_t::diff(const layer_state_t& other) const {
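+    // Computes a bitmask of the state fields that are flagged in other.what and whose
+    // values differ from this state. Fields without a cheap equality check (blur
+    // regions, buffers, metadata, listener callbacks) are reported whenever flagged.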
+    uint64_t diff = 0;
+    CHECK_DIFF2(diff, ePositionChanged, other, x, y);
+    if (other.what & eLayerChanged) {
+        diff |= eLayerChanged;
+        diff &= ~eRelativeLayerChanged;
+    }
+    CHECK_DIFF(diff, eAlphaChanged, other, color.a);
+    CHECK_DIFF(diff, eMatrixChanged, other, matrix);
+    if (other.what & eTransparentRegionChanged &&
+        (!transparentRegion.hasSameRects(other.transparentRegion))) {
+        diff |= eTransparentRegionChanged;
+    }
+    if (other.what & eFlagsChanged) {
+        uint64_t changedFlags = (flags & other.mask) ^ (other.flags & other.mask);
+        if (changedFlags) diff |= eFlagsChanged;
+    }
+    CHECK_DIFF(diff, eLayerStackChanged, other, layerStack);
+    CHECK_DIFF(diff, eCornerRadiusChanged, other, cornerRadius);
+    CHECK_DIFF(diff, eBackgroundBlurRadiusChanged, other, backgroundBlurRadius);
+    if (other.what & eBlurRegionsChanged) diff |= eBlurRegionsChanged;
+    if (other.what & eRelativeLayerChanged) {
+        diff |= eRelativeLayerChanged;
+        diff &= ~eLayerChanged;
+    }
+    if (other.what & eReparent &&
+        !SurfaceControl::isSameSurface(parentSurfaceControlForChild,
+                                       other.parentSurfaceControlForChild)) {
+        diff |= eReparent;
+    }
+    CHECK_DIFF(diff, eBufferTransformChanged, other, bufferTransform);
+    CHECK_DIFF(diff, eTransformToDisplayInverseChanged, other, transformToDisplayInverse);
+    CHECK_DIFF(diff, eCropChanged, other, crop);
+    if (other.what & eBufferChanged) diff |= eBufferChanged;
+    CHECK_DIFF(diff, eDataspaceChanged, other, dataspace);
+    CHECK_DIFF2(diff, eExtendedRangeBrightnessChanged, other, currentHdrSdrRatio,
+                desiredHdrSdrRatio);
+    CHECK_DIFF(diff, eCachingHintChanged, other, cachingHint);
+    CHECK_DIFF(diff, eHdrMetadataChanged, other, hdrMetadata);
+    if (other.what & eSurfaceDamageRegionChanged &&
+        (!surfaceDamageRegion.hasSameRects(other.surfaceDamageRegion))) {
+        diff |= eSurfaceDamageRegionChanged;
+    }
+    CHECK_DIFF(diff, eApiChanged, other, api);
+    if (other.what & eSidebandStreamChanged) diff |= eSidebandStreamChanged;
+    CHECK_DIFF(diff, eColorTransformChanged, other, colorTransform);
+    if (other.what & eHasListenerCallbacksChanged) diff |= eHasListenerCallbacksChanged;
+    if (other.what & eInputInfoChanged) diff |= eInputInfoChanged;
+    CHECK_DIFF2(diff, eBackgroundColorChanged, other, bgColor, bgColorDataspace);
+    if (other.what & eMetadataChanged) diff |= eMetadataChanged;
+    CHECK_DIFF(diff, eShadowRadiusChanged, other, shadowRadius);
+    CHECK_DIFF3(diff, eRenderBorderChanged, other, borderEnabled, borderWidth, borderColor);
+    CHECK_DIFF(diff, eDefaultFrameRateCompatibilityChanged, other, defaultFrameRateCompatibility);
+    CHECK_DIFF(diff, eFrameRateSelectionPriority, other, frameRateSelectionPriority);
+    CHECK_DIFF3(diff, eFrameRateChanged, other, frameRate, frameRateCompatibility,
+                changeFrameRateStrategy);
+    CHECK_DIFF(diff, eFixedTransformHintChanged, other, fixedTransformHint);
+    CHECK_DIFF(diff, eAutoRefreshChanged, other, autoRefresh);
+    CHECK_DIFF(diff, eTrustedOverlayChanged, other, isTrustedOverlay);
+    CHECK_DIFF(diff, eStretchChanged, other, stretchEffect);
+    CHECK_DIFF(diff, eBufferCropChanged, other, bufferCrop);
+    CHECK_DIFF(diff, eDestinationFrameChanged, other, destinationFrame);
+    if (other.what & eProducerDisconnect) diff |= eProducerDisconnect;
+    CHECK_DIFF(diff, eDropInputModeChanged, other, dropInputMode);
+    CHECK_DIFF(diff, eColorChanged, other, color.rgb);
+    CHECK_DIFF(diff, eColorSpaceAgnosticChanged, other, colorSpaceAgnostic);
+    CHECK_DIFF(diff, eDimmingEnabledChanged, other, dimmingEnabled);
+    return diff;
+}
+
 bool layer_state_t::hasBufferChanges() const {
     return what & layer_state_t::eBufferChanged;
 }
 
 bool layer_state_t::hasValidBuffer() const {
-    return bufferData && (bufferData->buffer || bufferData->cachedBuffer.isValid());
+    return bufferData && (bufferData->hasBuffer() || bufferData->cachedBuffer.isValid());
 }
 
 status_t layer_state_t::matrix22_t::write(Parcel& output) const {
@@ -662,29 +814,44 @@
     changes |= !other.focusRequests.empty();
     focusRequests.insert(focusRequests.end(), std::make_move_iterator(other.focusRequests.begin()),
                          std::make_move_iterator(other.focusRequests.end()));
-    changes |= other.syncInputWindows && !syncInputWindows;
-    syncInputWindows |= other.syncInputWindows;
+    changes |= !other.windowInfosReportedListeners.empty();
+    windowInfosReportedListeners.insert(other.windowInfosReportedListeners.begin(),
+                                        other.windowInfosReportedListeners.end());
     return changes;
 }
 
 bool InputWindowCommands::empty() const {
-    return focusRequests.empty() && !syncInputWindows;
+    return focusRequests.empty() && windowInfosReportedListeners.empty();
 }
 
 void InputWindowCommands::clear() {
     focusRequests.clear();
-    syncInputWindows = false;
+    windowInfosReportedListeners.clear();
 }
 
 status_t InputWindowCommands::write(Parcel& output) const {
     SAFE_PARCEL(output.writeParcelableVector, focusRequests);
-    SAFE_PARCEL(output.writeBool, syncInputWindows);
+
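+    // The listener set is flattened as a count followed by one strong binder per
+    // listener; read() below consumes the same layout.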
+    SAFE_PARCEL(output.writeInt32, windowInfosReportedListeners.size());
+    for (const auto& listener : windowInfosReportedListeners) {
+        SAFE_PARCEL(output.writeStrongBinder, listener);
+    }
+
     return NO_ERROR;
 }
 
 status_t InputWindowCommands::read(const Parcel& input) {
     SAFE_PARCEL(input.readParcelableVector, &focusRequests);
-    SAFE_PARCEL(input.readBool, &syncInputWindows);
+
+    int listenerSize = 0;
+    SAFE_PARCEL_READ_SIZE(input.readInt32, &listenerSize, input.dataSize());
+    windowInfosReportedListeners.reserve(listenerSize);
+    for (int i = 0; i < listenerSize; i++) {
+        sp<gui::IWindowInfosReportedListener> listener;
+        SAFE_PARCEL(input.readStrongBinder, &listener);
+        windowInfosReportedListeners.insert(listener);
+    }
+
     return NO_ERROR;
 }
 
@@ -730,6 +897,11 @@
     SAFE_PARCEL(output->writeInt32, static_cast<int32_t>(dataspace));
     SAFE_PARCEL(output->writeBool, allowProtected);
     SAFE_PARCEL(output->writeBool, grayscale);
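+    // excludeHandles is now serialized by the base CaptureArgs (it previously lived in
+    // LayerCaptureArgs, below), as a count followed by one strong binder per handle.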
+    SAFE_PARCEL(output->writeInt32, excludeHandles.size());
+    for (auto& excludeHandle : excludeHandles) {
+        SAFE_PARCEL(output->writeStrongBinder, excludeHandle);
+    }
+    SAFE_PARCEL(output->writeBool, hintForSeamlessTransition);
     return NO_ERROR;
 }
 
@@ -746,6 +918,15 @@
     dataspace = static_cast<ui::Dataspace>(value);
     SAFE_PARCEL(input->readBool, &allowProtected);
     SAFE_PARCEL(input->readBool, &grayscale);
+    int32_t numExcludeHandles = 0;
+    SAFE_PARCEL_READ_SIZE(input->readInt32, &numExcludeHandles, input->dataSize());
+    excludeHandles.reserve(numExcludeHandles);
+    for (int i = 0; i < numExcludeHandles; i++) {
+        sp<IBinder> binder;
+        SAFE_PARCEL(input->readStrongBinder, &binder);
+        excludeHandles.emplace(binder);
+    }
+    SAFE_PARCEL(input->readBool, &hintForSeamlessTransition);
     return NO_ERROR;
 }
 
@@ -773,10 +954,6 @@
     SAFE_PARCEL(CaptureArgs::writeToParcel, output);
 
     SAFE_PARCEL(output->writeStrongBinder, layerHandle);
-    SAFE_PARCEL(output->writeInt32, excludeHandles.size());
-    for (auto el : excludeHandles) {
-        SAFE_PARCEL(output->writeStrongBinder, el);
-    }
     SAFE_PARCEL(output->writeBool, childrenOnly);
     return NO_ERROR;
 }
@@ -786,15 +963,6 @@
 
     SAFE_PARCEL(input->readStrongBinder, &layerHandle);
 
-    int32_t numExcludeHandles = 0;
-    SAFE_PARCEL_READ_SIZE(input->readInt32, &numExcludeHandles, input->dataSize());
-    excludeHandles.reserve(numExcludeHandles);
-    for (int i = 0; i < numExcludeHandles; i++) {
-        sp<IBinder> binder;
-        SAFE_PARCEL(input->readStrongBinder, &binder);
-        excludeHandles.emplace(binder);
-    }
-
     SAFE_PARCEL(input->readBool, &childrenOnly);
     return NO_ERROR;
 }
@@ -836,6 +1004,7 @@
     SAFE_PARCEL(output->writeUint64, cachedBuffer.id);
     SAFE_PARCEL(output->writeBool, hasBarrier);
     SAFE_PARCEL(output->writeUint64, barrierFrameNumber);
+    SAFE_PARCEL(output->writeUint32, producerId);
 
     return NO_ERROR;
 }
@@ -874,8 +1043,25 @@
 
     SAFE_PARCEL(input->readBool, &hasBarrier);
     SAFE_PARCEL(input->readUint64, &barrierFrameNumber);
+    SAFE_PARCEL(input->readUint32, &producerId);
 
     return NO_ERROR;
 }
 
+status_t TrustedPresentationListener::writeToParcel(Parcel* parcel) const {
+    SAFE_PARCEL(parcel->writeStrongBinder, callbackInterface);
+    SAFE_PARCEL(parcel->writeInt32, callbackId);
+    return NO_ERROR;
+}
+
+status_t TrustedPresentationListener::readFromParcel(const Parcel* parcel) {
+    sp<IBinder> tmpBinder = nullptr;
+    SAFE_PARCEL(parcel->readNullableStrongBinder, &tmpBinder);
+    if (tmpBinder) {
+        callbackInterface = checked_interface_cast<ITransactionCompletedListener>(tmpBinder);
+    }
+    SAFE_PARCEL(parcel->readInt32, &callbackId);
+    return NO_ERROR;
+}
+
 }; // namespace android
diff --git a/libs/gui/LayerStatePermissions.cpp b/libs/gui/LayerStatePermissions.cpp
new file mode 100644
index 0000000..28697ca
--- /dev/null
+++ b/libs/gui/LayerStatePermissions.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binder/IPCThreadState.h>
+#include <gui/LayerStatePermissions.h>
+#include <private/android_filesystem_config.h>
+#ifndef __ANDROID_VNDK__
+#include <binder/PermissionCache.h>
+#endif // __ANDROID_VNDK__
+#include <gui/LayerState.h>
+
+namespace android {
+std::unordered_map<std::string, int> LayerStatePermissions::mPermissionMap = {
+        // A caller with ACCESS_SURFACE_FLINGER is automatically granted
+        // ROTATE_SURFACE_FLINGER as well.
+        {"android.permission.ACCESS_SURFACE_FLINGER",
+         layer_state_t::Permission::ACCESS_SURFACE_FLINGER |
+                 layer_state_t::Permission::ROTATE_SURFACE_FLINGER},
+        {"android.permission.ROTATE_SURFACE_FLINGER",
+         layer_state_t::Permission::ROTATE_SURFACE_FLINGER},
+        {"android.permission.INTERNAL_SYSTEM_WINDOW",
+         layer_state_t::Permission::INTERNAL_SYSTEM_WINDOW},
+};
+
+static bool callingThreadHasPermission(const std::string& permission __attribute__((unused)),
+                                       int pid __attribute__((unused)),
+                                       int uid __attribute__((unused))) {
+#ifndef __ANDROID_VNDK__
+    return uid == AID_GRAPHICS || uid == AID_SYSTEM ||
+            PermissionCache::checkPermission(String16(permission.c_str()), pid, uid);
+#endif // __ANDROID_VNDK__
+    return false;
+}
+
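+// Returns the bitwise OR of every layer_state_t::Permission bit held by the given
+// pid/uid, according to mPermissionMap.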
+uint32_t LayerStatePermissions::getTransactionPermissions(int pid, int uid) {
+    uint32_t permissions = 0;
+    for (auto [permissionName, permissionVal] : mPermissionMap) {
+        if (callingThreadHasPermission(permissionName, pid, uid)) {
+            permissions |= permissionVal;
+        }
+    }
+
+    return permissions;
+}
+} // namespace android
diff --git a/libs/gui/ScreenCaptureResults.cpp b/libs/gui/ScreenCaptureResults.cpp
index fe38706..601a5f9 100644
--- a/libs/gui/ScreenCaptureResults.cpp
+++ b/libs/gui/ScreenCaptureResults.cpp
@@ -17,6 +17,7 @@
 #include <gui/ScreenCaptureResults.h>
 
 #include <private/gui/ParcelUtils.h>
+#include <ui/FenceResult.h>
 
 namespace android::gui {
 
@@ -28,17 +29,17 @@
         SAFE_PARCEL(parcel->writeBool, false);
     }
 
-    if (fence != Fence::NO_FENCE) {
+    if (fenceResult.ok() && fenceResult.value() != Fence::NO_FENCE) {
         SAFE_PARCEL(parcel->writeBool, true);
-        SAFE_PARCEL(parcel->write, *fence);
+        SAFE_PARCEL(parcel->write, *fenceResult.value());
     } else {
         SAFE_PARCEL(parcel->writeBool, false);
+        SAFE_PARCEL(parcel->writeInt32, fenceStatus(fenceResult));
     }
 
     SAFE_PARCEL(parcel->writeBool, capturedSecureLayers);
     SAFE_PARCEL(parcel->writeBool, capturedHdrLayers);
     SAFE_PARCEL(parcel->writeUint32, static_cast<uint32_t>(capturedDataspace));
-    SAFE_PARCEL(parcel->writeInt32, result);
     return NO_ERROR;
 }
 
@@ -53,8 +54,13 @@
     bool hasFence;
     SAFE_PARCEL(parcel->readBool, &hasFence);
     if (hasFence) {
-        fence = new Fence();
-        SAFE_PARCEL(parcel->read, *fence);
+        fenceResult = sp<Fence>::make();
+        SAFE_PARCEL(parcel->read, *fenceResult.value());
+    } else {
+        status_t status;
+        SAFE_PARCEL(parcel->readInt32, &status);
+        fenceResult = status == NO_ERROR ? FenceResult(Fence::NO_FENCE)
+                                         : FenceResult(base::unexpected(status));
     }
 
     SAFE_PARCEL(parcel->readBool, &capturedSecureLayers);
@@ -62,7 +68,6 @@
     uint32_t dataspace = 0;
     SAFE_PARCEL(parcel->readUint32, &dataspace);
     capturedDataspace = static_cast<ui::Dataspace>(dataspace);
-    SAFE_PARCEL(parcel->readInt32, &result);
     return NO_ERROR;
 }
 
diff --git a/libs/gui/Surface.cpp b/libs/gui/Surface.cpp
index 16edfd4..ed69100 100644
--- a/libs/gui/Surface.cpp
+++ b/libs/gui/Surface.cpp
@@ -30,15 +30,18 @@
 #include <android/gui/DisplayStatInfo.h>
 #include <android/native_window.h>
 
+#include <gui/FenceMonitor.h>
+#include <gui/TraceUtils.h>
 #include <utils/Log.h>
-#include <utils/Trace.h>
 #include <utils/NativeHandle.h>
+#include <utils/Trace.h>
 
 #include <ui/DynamicDisplayInfo.h>
 #include <ui/Fence.h>
 #include <ui/GraphicBuffer.h>
 #include <ui/Region.h>
 
+#include <gui/AidlStatusUtil.h>
 #include <gui/BufferItem.h>
 #include <gui/IProducerListener.h>
 
@@ -49,10 +52,17 @@
 
 namespace android {
 
+using gui::aidl_utils::statusTFromBinderStatus;
 using ui::Dataspace;
 
 namespace {
 
+enum {
+    // moved from nativewindow/include/system/window.h, to be removed
+    NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT = 28,
+    NATIVE_WINDOW_GET_HDR_SUPPORT = 29,
+};
+
 bool isInterceptorRegistrationOp(int op) {
     return op == NATIVE_WINDOW_SET_CANCEL_INTERCEPTOR ||
             op == NATIVE_WINDOW_SET_DEQUEUE_INTERCEPTOR ||
@@ -182,7 +192,7 @@
     gui::DisplayStatInfo stats;
     binder::Status status = composerServiceAIDL()->getDisplayStats(nullptr, &stats);
     if (!status.isOk()) {
-        return status.transactionError();
+        return statusTFromBinderStatus(status);
     }
 
     *outRefreshDuration = stats.vsyncPeriod;
@@ -345,33 +355,25 @@
     return NO_ERROR;
 }
 
+// Deprecated (b/242763577): to be removed; this method should not be used.
+// It only remains here to support the compiled VNDK. Surface support should
+// not be tied to the display, so return true, since most displays support
+// wide color.
 status_t Surface::getWideColorSupport(bool* supported) {
     ATRACE_CALL();
 
-    const sp<IBinder> display = ComposerServiceAIDL::getInstance().getInternalDisplayToken();
-    if (display == nullptr) {
-        return NAME_NOT_FOUND;
-    }
-
-    *supported = false;
-    binder::Status status = composerServiceAIDL()->isWideColorDisplay(display, supported);
-    return status.transactionError();
+    *supported = true;
+    return NO_ERROR;
 }
 
+// Deprecated (b/242763577): to be removed; this method should not be used.
+// It only remains here to support the compiled VNDK. Surface support should
+// not be tied to the display, so return true, since most displays support
+// HDR.
 status_t Surface::getHdrSupport(bool* supported) {
     ATRACE_CALL();
 
-    const sp<IBinder> display = ComposerServiceAIDL::getInstance().getInternalDisplayToken();
-    if (display == nullptr) {
-        return NAME_NOT_FOUND;
-    }
-
-    ui::DynamicDisplayInfo info;
-    if (status_t err = composerService()->getDynamicDisplayInfo(display, &info); err != NO_ERROR) {
-        return err;
-    }
-
-    *supported = !info.hdrCapabilities.getSupportedHdrTypes().empty();
+    *supported = true;
     return NO_ERROR;
 }
 
@@ -544,82 +546,6 @@
     return NO_ERROR;
 }
 
-class FenceMonitor {
-public:
-    explicit FenceMonitor(const char* name) : mName(name), mFencesQueued(0), mFencesSignaled(0) {
-        std::thread thread(&FenceMonitor::loop, this);
-        pthread_setname_np(thread.native_handle(), mName);
-        thread.detach();
-    }
-
-    void queueFence(const sp<Fence>& fence) {
-        char message[64];
-
-        std::lock_guard<std::mutex> lock(mMutex);
-        if (fence->getSignalTime() != Fence::SIGNAL_TIME_PENDING) {
-            snprintf(message, sizeof(message), "%s fence %u has signaled", mName, mFencesQueued);
-            ATRACE_NAME(message);
-            // Need an increment on both to make the trace number correct.
-            mFencesQueued++;
-            mFencesSignaled++;
-            return;
-        }
-        snprintf(message, sizeof(message), "Trace %s fence %u", mName, mFencesQueued);
-        ATRACE_NAME(message);
-
-        mQueue.push_back(fence);
-        mCondition.notify_one();
-        mFencesQueued++;
-        ATRACE_INT(mName, int32_t(mQueue.size()));
-    }
-
-private:
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wmissing-noreturn"
-    void loop() {
-        while (true) {
-            threadLoop();
-        }
-    }
-#pragma clang diagnostic pop
-
-    void threadLoop() {
-        sp<Fence> fence;
-        uint32_t fenceNum;
-        {
-            std::unique_lock<std::mutex> lock(mMutex);
-            while (mQueue.empty()) {
-                mCondition.wait(lock);
-            }
-            fence = mQueue[0];
-            fenceNum = mFencesSignaled;
-        }
-        {
-            char message[64];
-            snprintf(message, sizeof(message), "waiting for %s %u", mName, fenceNum);
-            ATRACE_NAME(message);
-
-            status_t result = fence->waitForever(message);
-            if (result != OK) {
-                ALOGE("Error waiting for fence: %d", result);
-            }
-        }
-        {
-            std::lock_guard<std::mutex> lock(mMutex);
-            mQueue.pop_front();
-            mFencesSignaled++;
-            ATRACE_INT(mName, int32_t(mQueue.size()));
-        }
-    }
-
-    const char* mName;
-    uint32_t mFencesQueued;
-    uint32_t mFencesSignaled;
-    std::deque<sp<Fence>> mQueue;
-    std::condition_variable mCondition;
-    std::mutex mMutex;
-};
-
 void Surface::getDequeueBufferInputLocked(
         IGraphicBufferProducer::DequeueBufferInput* dequeueInput) {
     LOG_ALWAYS_FATAL_IF(dequeueInput == nullptr, "input is null");
@@ -634,7 +560,7 @@
 }
 
 int Surface::dequeueBuffer(android_native_buffer_t** buffer, int* fenceFd) {
-    ATRACE_CALL();
+    ATRACE_FORMAT("dequeueBuffer - %s", getDebugName());
     ALOGV("Surface::dequeueBuffer");
 
     IGraphicBufferProducer::DequeueBufferInput dqInput;
@@ -693,7 +619,7 @@
     ALOGE_IF(fence == nullptr, "Surface::dequeueBuffer: received null Fence! buf=%d", buf);
 
     if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
-        static FenceMonitor hwcReleaseThread("HWC release");
+        static gui::FenceMonitor hwcReleaseThread("HWC release");
         hwcReleaseThread.queueFence(fence);
     }
 
@@ -892,7 +818,7 @@
         sp<GraphicBuffer>& gbuf(mSlots[slot].buffer);
 
         if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
-            static FenceMonitor hwcReleaseThread("HWC release");
+            static gui::FenceMonitor hwcReleaseThread("HWC release");
             hwcReleaseThread.queueFence(output.fence);
         }
 
@@ -1162,7 +1088,7 @@
     mQueueBufferCondition.broadcast();
 
     if (CC_UNLIKELY(atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS))) {
-        static FenceMonitor gpuCompletionThread("GPU completion");
+        static gui::FenceMonitor gpuCompletionThread("GPU completion");
         gpuCompletionThread.queueFence(fence);
     }
 }
@@ -1263,10 +1189,10 @@
     mQueriedSupportedTimestamps = true;
 
     std::vector<FrameEvent> supportedFrameTimestamps;
-    status_t err = composerService()->getSupportedFrameTimestamps(
-            &supportedFrameTimestamps);
+    binder::Status status =
+            composerServiceAIDL()->getSupportedFrameTimestamps(&supportedFrameTimestamps);
 
-    if (err != NO_ERROR) {
+    if (!status.isOk()) {
         return;
     }
 
@@ -1294,15 +1220,12 @@
                 if (err == NO_ERROR) {
                     return NO_ERROR;
                 }
-                sp<ISurfaceComposer> surfaceComposer = composerService();
+                sp<gui::ISurfaceComposer> surfaceComposer = composerServiceAIDL();
                 if (surfaceComposer == nullptr) {
                     return -EPERM; // likely permissions error
                 }
-                if (surfaceComposer->authenticateSurfaceTexture(mGraphicBufferProducer)) {
-                    *value = 1;
-                } else {
-                    *value = 0;
-                }
+                // ISurfaceComposer no longer supports authenticateSurfaceTexture
+                *value = 0;
                 return NO_ERROR;
             }
             case NATIVE_WINDOW_CONCRETE_TYPE:
@@ -1873,9 +1796,15 @@
     auto frameTimelineVsyncId = static_cast<int64_t>(va_arg(args, int64_t));
     auto inputEventId = static_cast<int32_t>(va_arg(args, int32_t));
     auto startTimeNanos = static_cast<int64_t>(va_arg(args, int64_t));
+    auto useForRefreshRateSelection = static_cast<bool>(va_arg(args, int32_t));
 
     ALOGV("Surface::%s", __func__);
-    return setFrameTimelineInfo(frameNumber, {frameTimelineVsyncId, inputEventId, startTimeNanos});
+    FrameTimelineInfo ftlInfo;
+    ftlInfo.vsyncId = frameTimelineVsyncId;
+    ftlInfo.inputEventId = inputEventId;
+    ftlInfo.startTimeNanos = startTimeNanos;
+    ftlInfo.useForRefreshRateSelection = useForRefreshRateSelection;
+    return setFrameTimelineInfo(frameNumber, ftlInfo);
 }
 
 bool Surface::transformToDisplayInverse() const {
@@ -2635,23 +2564,19 @@
     mSurfaceListener->onBuffersDiscarded(discardedBufs);
 }
 
-status_t Surface::setFrameRate(float frameRate, int8_t compatibility,
-                               int8_t changeFrameRateStrategy) {
-    ATRACE_CALL();
-    ALOGV("Surface::setFrameRate");
-
-    if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
-                           "Surface::setFrameRate")) {
-        return BAD_VALUE;
-    }
-
-    return composerService()->setFrameRate(mGraphicBufferProducer, frameRate, compatibility,
-                                           changeFrameRateStrategy);
+[[deprecated]] status_t Surface::setFrameRate(float /*frameRate*/, int8_t /*compatibility*/,
+                                              int8_t /*changeFrameRateStrategy*/) {
+    ALOGI("Surface::setFrameRate is deprecated; the setFrameRate hint is dropped because the "
+          "destination is not SurfaceFlinger");
+    // ISurfaceComposer no longer supports setFrameRate. Return NO_ERROR when the API is
+    // called to avoid crashing apps, since BAD_VALUE can trigger a fatal exception in apps.
+    return NO_ERROR;
 }
 
 status_t Surface::setFrameTimelineInfo(uint64_t /*frameNumber*/,
-                                       const FrameTimelineInfo& frameTimelineInfo) {
-    return composerService()->setFrameTimelineInfo(mGraphicBufferProducer, frameTimelineInfo);
+                                       const FrameTimelineInfo& /*frameTimelineInfo*/) {
+    // ISurfaceComposer no longer supports setFrameTimelineInfo
+    return BAD_VALUE;
 }
 
 sp<IBinder> Surface::getSurfaceControlHandle() const {
@@ -2664,4 +2589,12 @@
     mSurfaceControlHandle = nullptr;
 }
 
+const char* Surface::getDebugName() {
+    std::unique_lock lock{mNameMutex};
+    if (mName.empty()) {
+        mName = getConsumerName();
+    }
+    return mName.c_str();
+}
+
 }; // namespace android
diff --git a/libs/gui/SurfaceComposerClient.cpp b/libs/gui/SurfaceComposerClient.cpp
index 6d44f10..aff03e0 100644
--- a/libs/gui/SurfaceComposerClient.cpp
+++ b/libs/gui/SurfaceComposerClient.cpp
@@ -16,11 +16,17 @@
 
 #define LOG_TAG "SurfaceComposerClient"
 
+#include <semaphore.h>
 #include <stdint.h>
 #include <sys/types.h>
 
+#include <android/gui/BnWindowInfosReportedListener.h>
 #include <android/gui/DisplayState.h>
+#include <android/gui/ISurfaceComposerClient.h>
 #include <android/gui/IWindowInfosListener.h>
+#include <android/gui/TrustedPresentationThresholds.h>
+#include <android/os/IInputConstants.h>
+#include <gui/TraceUtils.h>
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <utils/SortedVector.h>
@@ -33,11 +39,11 @@
 
 #include <system/graphics.h>
 
+#include <gui/AidlStatusUtil.h>
 #include <gui/BufferItemConsumer.h>
 #include <gui/CpuConsumer.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/ISurfaceComposer.h>
-#include <gui/ISurfaceComposerClient.h>
 #include <gui/LayerState.h>
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
@@ -47,6 +53,8 @@
 #include <ui/DisplayState.h>
 #include <ui/DynamicDisplayInfo.h>
 
+#include <android-base/thread_annotations.h>
+#include <gui/LayerStatePermissions.h>
 #include <private/gui/ComposerService.h>
 #include <private/gui/ComposerServiceAIDL.h>
 
@@ -58,9 +66,11 @@
 using aidl::android::hardware::graphics::common::DisplayDecorationSupport;
 using gui::FocusRequest;
 using gui::IRegionSamplingListener;
+using gui::TrustedPresentationThresholds;
 using gui::WindowInfo;
 using gui::WindowInfoHandle;
 using gui::WindowInfosListener;
+using gui::aidl_utils::statusTFromBinderStatus;
 using ui::ColorMode;
 // ---------------------------------------------------------------------------
 
@@ -73,6 +83,8 @@
 int64_t generateId() {
     return (((int64_t)getpid()) << 32) | ++idCounter;
 }
+
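+// No-op completion callback, used when a synthetic ON_COMPLETE callback is registered
+// solely to request jank data for a surface control (see addSurfaceControlToCallbacks).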
+void emptyCallback(nsecs_t, const sp<Fence>&, const std::vector<SurfaceControlStats>&) {}
 } // namespace
 
 ComposerService::ComposerService()
@@ -111,7 +123,6 @@
     if (instance.mComposerService == nullptr) {
         if (ComposerService::getInstance().connectLocked()) {
             ALOGD("ComposerService reconnected");
-            WindowInfosListenerReporter::getInstance()->reconnect(instance.mComposerService);
         }
     }
     return instance.mComposerService;
@@ -159,6 +170,7 @@
     if (instance.mComposerService == nullptr) {
         if (ComposerServiceAIDL::getInstance().connectLocked()) {
             ALOGD("ComposerServiceAIDL reconnected");
+            WindowInfosListenerReporter::getInstance()->reconnect(instance.mComposerService);
         }
     }
     return instance.mComposerService;
@@ -240,6 +252,14 @@
                 surfaceControls,
         CallbackId::Type callbackType) {
     std::lock_guard<std::mutex> lock(mMutex);
+    return addCallbackFunctionLocked(callbackFunction, surfaceControls, callbackType);
+}
+
+CallbackId TransactionCompletedListener::addCallbackFunctionLocked(
+        const TransactionCompletedCallback& callbackFunction,
+        const std::unordered_set<sp<SurfaceControl>, SurfaceComposerClient::SCHash>&
+                surfaceControls,
+        CallbackId::Type callbackType) {
     startListeningLocked();
 
     CallbackId callbackId(getNextIdLocked(), callbackType);
@@ -248,6 +268,11 @@
 
     for (const auto& surfaceControl : surfaceControls) {
         callbackSurfaceControls[surfaceControl->getHandle()] = surfaceControl;
+
+        if (callbackType == CallbackId::Type::ON_COMPLETE &&
+            mJankListeners.count(surfaceControl->getLayerId()) != 0) {
+            callbackId.includeJankData = true;
+        }
     }
 
     return callbackId;
@@ -296,15 +321,26 @@
 }
 
 void TransactionCompletedListener::addSurfaceControlToCallbacks(
-        const sp<SurfaceControl>& surfaceControl,
-        const std::unordered_set<CallbackId, CallbackIdHash>& callbackIds) {
+        SurfaceComposerClient::CallbackInfo& callbackInfo,
+        const sp<SurfaceControl>& surfaceControl) {
     std::lock_guard<std::mutex> lock(mMutex);
 
-    for (auto callbackId : callbackIds) {
+    bool includingJankData = false;
+    for (auto callbackId : callbackInfo.callbackIds) {
         mCallbacks[callbackId].surfaceControls.emplace(std::piecewise_construct,
                                                        std::forward_as_tuple(
                                                                surfaceControl->getHandle()),
                                                        std::forward_as_tuple(surfaceControl));
+        includingJankData = includingJankData || callbackId.includeJankData;
+    }
+
+    // If no registered callback is requesting jank data, but there is a jank listener registered
+    // on the new surface control, add a synthetic callback that requests the jank data.
+    if (!includingJankData && mJankListeners.count(surfaceControl->getLayerId()) != 0) {
+        CallbackId callbackId =
+                addCallbackFunctionLocked(&emptyCallback, callbackInfo.surfaceControls,
+                                          CallbackId::Type::ON_COMPLETE);
+        callbackInfo.callbackIds.emplace(callbackId);
     }
 }
 
@@ -380,10 +416,11 @@
                                       surfaceStats.previousReleaseFence, surfaceStats.transformHint,
                                       surfaceStats.eventStats,
                                       surfaceStats.currentMaxAcquiredBufferCount);
-                if (callbacksMap[callbackId].surfaceControls[surfaceStats.surfaceControl]) {
+                if (callbacksMap[callbackId].surfaceControls[surfaceStats.surfaceControl] &&
+                    surfaceStats.transformHint.has_value()) {
                     callbacksMap[callbackId]
                             .surfaceControls[surfaceStats.surfaceControl]
-                            ->setTransformHint(surfaceStats.transformHint);
+                            ->setTransformHint(*surfaceStats.transformHint);
                 }
                 // If there is buffer id set, we look up any pending client release buffer callbacks
                 // and call them. This is a performance optimization when we have a transaction
@@ -449,23 +486,24 @@
     }
 }
 
-void TransactionCompletedListener::onTransactionQueueStalled() {
-      std::unordered_map<void*, std::function<void()>> callbackCopy;
-      {
-          std::scoped_lock<std::mutex> lock(mMutex);
-          callbackCopy = mQueueStallListeners;
-      }
-      for (auto const& it : callbackCopy) {
-          it.second();
-      }
+void TransactionCompletedListener::onTransactionQueueStalled(const String8& reason) {
+    std::unordered_map<void*, std::function<void(const std::string&)>> callbackCopy;
+    {
+        std::scoped_lock<std::mutex> lock(mMutex);
+        callbackCopy = mQueueStallListeners;
+    }
+    for (auto const& it : callbackCopy) {
+        it.second(reason.c_str());
+    }
 }
 
-void TransactionCompletedListener::addQueueStallListener(std::function<void()> stallListener,
-                                                         void* id) {
+void TransactionCompletedListener::addQueueStallListener(
+        std::function<void(const std::string&)> stallListener, void* id) {
     std::scoped_lock<std::mutex> lock(mMutex);
     mQueueStallListeners[id] = stallListener;
 }
-void TransactionCompletedListener::removeQueueStallListener(void *id) {
+
+void TransactionCompletedListener::removeQueueStallListener(void* id) {
     std::scoped_lock<std::mutex> lock(mMutex);
     mQueueStallListeners.erase(id);
 }
@@ -510,6 +548,45 @@
     }
 }
 
+SurfaceComposerClient::PresentationCallbackRAII::PresentationCallbackRAII(
+        TransactionCompletedListener* tcl, int id) {
+    mTcl = tcl;
+    mId = id;
+}
+
+SurfaceComposerClient::PresentationCallbackRAII::~PresentationCallbackRAII() {
+    mTcl->clearTrustedPresentationCallback(mId);
+}
+
+sp<SurfaceComposerClient::PresentationCallbackRAII>
+TransactionCompletedListener::addTrustedPresentationCallback(TrustedPresentationCallback tpc,
+                                                             int id, void* context) {
+    std::scoped_lock<std::mutex> lock(mMutex);
+    mTrustedPresentationCallbacks[id] =
+            std::tuple<TrustedPresentationCallback, void*>(tpc, context);
+    return new SurfaceComposerClient::PresentationCallbackRAII(this, id);
+}
+
+void TransactionCompletedListener::clearTrustedPresentationCallback(int id) {
+    std::scoped_lock<std::mutex> lock(mMutex);
+    mTrustedPresentationCallbacks.erase(id);
+}
+
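+// Looks up the trusted presentation callback registered under the given id and
+// invokes it outside of mMutex.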
+void TransactionCompletedListener::onTrustedPresentationChanged(int id,
+                                                                bool presentedWithinThresholds) {
+    TrustedPresentationCallback tpc;
+    void* context;
+    {
+        std::scoped_lock<std::mutex> lock(mMutex);
+        auto it = mTrustedPresentationCallbacks.find(id);
+        if (it == mTrustedPresentationCallbacks.end()) {
+            return;
+        }
+        std::tie(tpc, context) = it->second;
+    }
+    tpc(context, presentedWithinThresholds);
+}
+
 // ---------------------------------------------------------------------------
 
 void removeDeadBufferCallback(void* /*context*/, uint64_t graphicBufferId);
@@ -557,11 +634,13 @@
         return NO_ERROR;
     }
 
-    uint64_t cache(const sp<GraphicBuffer>& buffer) {
+    uint64_t cache(const sp<GraphicBuffer>& buffer,
+                   std::optional<client_cache_t>& outUncacheBuffer) {
         std::lock_guard<std::mutex> lock(mMutex);
 
         if (mBuffers.size() >= BUFFER_CACHE_MAX_SIZE) {
-            evictLeastRecentlyUsedBuffer();
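+            // The cache is full: evict the least-recently-used entry locally and report
+            // it via outUncacheBuffer so the caller can uncache it server-side.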
+            outUncacheBuffer = findLeastRecentlyUsedBuffer();
+            mBuffers.erase(outUncacheBuffer->id);
         }
 
         buffer->addDeathCallback(removeDeadBufferCallback, nullptr);
@@ -572,16 +651,13 @@
 
     void uncache(uint64_t cacheId) {
         std::lock_guard<std::mutex> lock(mMutex);
-        uncacheLocked(cacheId);
-    }
-
-    void uncacheLocked(uint64_t cacheId) REQUIRES(mMutex) {
-        mBuffers.erase(cacheId);
-        SurfaceComposerClient::doUncacheBufferTransaction(cacheId);
+        if (mBuffers.erase(cacheId)) {
+            SurfaceComposerClient::doUncacheBufferTransaction(cacheId);
+        }
     }
 
 private:
-    void evictLeastRecentlyUsedBuffer() REQUIRES(mMutex) {
+    client_cache_t findLeastRecentlyUsedBuffer() REQUIRES(mMutex) {
         auto itr = mBuffers.begin();
         uint64_t minCounter = itr->second;
         auto minBuffer = itr;
@@ -595,7 +671,8 @@
             }
             itr++;
         }
-        uncacheLocked(minBuffer->first);
+
+        return {.token = getToken(), .id = minBuffer->first};
     }
 
     uint64_t getCounter() REQUIRES(mMutex) {
@@ -625,12 +702,11 @@
 
 SurfaceComposerClient::Transaction::Transaction(const Transaction& other)
       : mId(other.mId),
-        mForceSynchronous(other.mForceSynchronous),
         mTransactionNestCount(other.mTransactionNestCount),
         mAnimation(other.mAnimation),
         mEarlyWakeupStart(other.mEarlyWakeupStart),
         mEarlyWakeupEnd(other.mEarlyWakeupEnd),
-        mContainsBuffer(other.mContainsBuffer),
+        mMayContainBuffer(other.mMayContainBuffer),
         mDesiredPresentTime(other.mDesiredPresentTime),
         mIsAutoTimestamp(other.mIsAutoTimestamp),
         mFrameTimelineInfo(other.mFrameTimelineInfo),
@@ -641,11 +717,16 @@
     mListenerCallbacks = other.mListenerCallbacks;
 }
 
-void SurfaceComposerClient::Transaction::sanitize() {
+void SurfaceComposerClient::Transaction::sanitize(int pid, int uid) {
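+    // Strip out any state changes and input window commands that the calling pid/uid
+    // is not permitted to make.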
+    uint32_t permissions = LayerStatePermissions::getTransactionPermissions(pid, uid);
     for (auto & [handle, composerState] : mComposerStates) {
-        composerState.state.sanitize(0 /* permissionMask */);
+        composerState.state.sanitize(permissions);
     }
-    mInputWindowCommands.clear();
+    if (!mInputWindowCommands.empty() &&
+        (permissions & layer_state_t::Permission::ACCESS_SURFACE_FLINGER) == 0) {
+        ALOGE("Only privileged callers are allowed to send input commands.");
+        mInputWindowCommands.clear();
+    }
 }
 
 std::unique_ptr<SurfaceComposerClient::Transaction>
@@ -659,16 +740,15 @@
 
 
 status_t SurfaceComposerClient::Transaction::readFromParcel(const Parcel* parcel) {
-    const uint32_t forceSynchronous = parcel->readUint32();
+    const uint64_t transactionId = parcel->readUint64();
     const uint32_t transactionNestCount = parcel->readUint32();
     const bool animation = parcel->readBool();
     const bool earlyWakeupStart = parcel->readBool();
     const bool earlyWakeupEnd = parcel->readBool();
-    const bool containsBuffer = parcel->readBool();
     const int64_t desiredPresentTime = parcel->readInt64();
     const bool isAutoTimestamp = parcel->readBool();
     FrameTimelineInfo frameTimelineInfo;
-    SAFE_PARCEL(frameTimelineInfo.read, *parcel);
+    frameTimelineInfo.readFromParcel(parcel);
 
     sp<IBinder> applyToken;
     parcel->readNullableStrongBinder(&applyToken);
@@ -735,13 +815,33 @@
     InputWindowCommands inputWindowCommands;
     inputWindowCommands.read(*parcel);
 
+    count = static_cast<size_t>(parcel->readUint32());
+    if (count > parcel->dataSize()) {
+        return BAD_VALUE;
+    }
+    std::vector<client_cache_t> uncacheBuffers(count);
+    for (size_t i = 0; i < count; i++) {
+        sp<IBinder> tmpBinder;
+        SAFE_PARCEL(parcel->readStrongBinder, &tmpBinder);
+        uncacheBuffers[i].token = tmpBinder;
+        SAFE_PARCEL(parcel->readUint64, &uncacheBuffers[i].id);
+    }
+
+    count = static_cast<size_t>(parcel->readUint32());
+    if (count > parcel->dataSize()) {
+        return BAD_VALUE;
+    }
+    std::vector<uint64_t> mergedTransactionIds(count);
+    for (size_t i = 0; i < count; i++) {
+        SAFE_PARCEL(parcel->readUint64, &mergedTransactionIds[i]);
+    }
+
     // Parsing was successful. Update the object.
-    mForceSynchronous = forceSynchronous;
+    mId = transactionId;
     mTransactionNestCount = transactionNestCount;
     mAnimation = animation;
     mEarlyWakeupStart = earlyWakeupStart;
     mEarlyWakeupEnd = earlyWakeupEnd;
-    mContainsBuffer = containsBuffer;
     mDesiredPresentTime = desiredPresentTime;
     mIsAutoTimestamp = isAutoTimestamp;
     mFrameTimelineInfo = frameTimelineInfo;
@@ -750,6 +850,8 @@
     mComposerStates = composerStates;
     mInputWindowCommands = inputWindowCommands;
     mApplyToken = applyToken;
+    mUncacheBuffers = std::move(uncacheBuffers);
+    mMergedTransactionIds = std::move(mergedTransactionIds);
     return NO_ERROR;
 }
 
@@ -767,15 +869,14 @@
 
     const_cast<SurfaceComposerClient::Transaction*>(this)->cacheBuffers();
 
-    parcel->writeUint32(mForceSynchronous);
+    parcel->writeUint64(mId);
     parcel->writeUint32(mTransactionNestCount);
     parcel->writeBool(mAnimation);
     parcel->writeBool(mEarlyWakeupStart);
     parcel->writeBool(mEarlyWakeupEnd);
-    parcel->writeBool(mContainsBuffer);
     parcel->writeInt64(mDesiredPresentTime);
     parcel->writeBool(mIsAutoTimestamp);
-    SAFE_PARCEL(mFrameTimelineInfo.write, *parcel);
+    mFrameTimelineInfo.writeToParcel(parcel);
     parcel->writeStrongBinder(mApplyToken);
     parcel->writeUint32(static_cast<uint32_t>(mDisplayStates.size()));
     for (auto const& displayState : mDisplayStates) {
@@ -802,11 +903,23 @@
     }
 
     mInputWindowCommands.write(*parcel);
+
+    SAFE_PARCEL(parcel->writeUint32, static_cast<uint32_t>(mUncacheBuffers.size()));
+    for (const client_cache_t& uncacheBuffer : mUncacheBuffers) {
+        SAFE_PARCEL(parcel->writeStrongBinder, uncacheBuffer.token.promote());
+        SAFE_PARCEL(parcel->writeUint64, uncacheBuffer.id);
+    }
+
+    SAFE_PARCEL(parcel->writeUint32, static_cast<uint32_t>(mMergedTransactionIds.size()));
+    for (auto mergedTransactionId : mMergedTransactionIds) {
+        SAFE_PARCEL(parcel->writeUint64, mergedTransactionId);
+    }
+
     return NO_ERROR;
 }
 
 void SurfaceComposerClient::Transaction::releaseBufferIfOverwriting(const layer_state_t& state) {
-    if (!(state.what & layer_state_t::eBufferChanged)) {
+    if (!(state.what & layer_state_t::eBufferChanged) || !state.bufferData->hasBuffer()) {
         return;
     }
 
@@ -826,6 +939,22 @@
 }
 
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::merge(Transaction&& other) {
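+    // Fold the other transaction's id and merge history into this transaction's
+    // bounded history, dropping the oldest entries so it never exceeds
+    // MAX_MERGE_HISTORY_LENGTH.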
+    while (mMergedTransactionIds.size() + other.mMergedTransactionIds.size() >
+                   MAX_MERGE_HISTORY_LENGTH - 1 &&
+           mMergedTransactionIds.size() > 0) {
+        mMergedTransactionIds.pop_back();
+    }
+    if (other.mMergedTransactionIds.size() == MAX_MERGE_HISTORY_LENGTH) {
+        mMergedTransactionIds.insert(mMergedTransactionIds.begin(),
+                                     other.mMergedTransactionIds.begin(),
+                                     other.mMergedTransactionIds.end() - 1);
+    } else if (other.mMergedTransactionIds.size() > 0u) {
+        mMergedTransactionIds.insert(mMergedTransactionIds.begin(),
+                                     other.mMergedTransactionIds.begin(),
+                                     other.mMergedTransactionIds.end());
+    }
+    mMergedTransactionIds.insert(mMergedTransactionIds.begin(), other.mId);
+
     for (auto const& [handle, composerState] : other.mComposerStates) {
         if (mComposerStates.count(handle) == 0) {
             mComposerStates[handle] = composerState;
@@ -864,19 +993,22 @@
         // register all surface controls for all callbackIds for this listener that is merging
         for (const auto& surfaceControl : currentProcessCallbackInfo.surfaceControls) {
             TransactionCompletedListener::getInstance()
-                    ->addSurfaceControlToCallbacks(surfaceControl,
-                                                   currentProcessCallbackInfo.callbackIds);
+                    ->addSurfaceControlToCallbacks(currentProcessCallbackInfo, surfaceControl);
         }
     }
 
+    for (const auto& cacheId : other.mUncacheBuffers) {
+        mUncacheBuffers.push_back(cacheId);
+    }
+
     mInputWindowCommands.merge(other.mInputWindowCommands);
 
-    mContainsBuffer |= other.mContainsBuffer;
+    mMayContainBuffer |= other.mMayContainBuffer;
     mEarlyWakeupStart = mEarlyWakeupStart || other.mEarlyWakeupStart;
     mEarlyWakeupEnd = mEarlyWakeupEnd || other.mEarlyWakeupEnd;
     mApplyToken = other.mApplyToken;
 
-    mFrameTimelineInfo.merge(other.mFrameTimelineInfo);
+    mergeFrameTimelineInfo(mFrameTimelineInfo, other.mFrameTimelineInfo);
 
     other.clear();
     return *this;
@@ -887,36 +1019,46 @@
     mDisplayStates.clear();
     mListenerCallbacks.clear();
     mInputWindowCommands.clear();
-    mContainsBuffer = false;
-    mForceSynchronous = 0;
+    mUncacheBuffers.clear();
+    mMayContainBuffer = false;
     mTransactionNestCount = 0;
     mAnimation = false;
     mEarlyWakeupStart = false;
     mEarlyWakeupEnd = false;
     mDesiredPresentTime = 0;
     mIsAutoTimestamp = true;
-    mFrameTimelineInfo.clear();
+    clearFrameTimelineInfo(mFrameTimelineInfo);
     mApplyToken = nullptr;
+    mMergedTransactionIds.clear();
 }
 
 uint64_t SurfaceComposerClient::Transaction::getId() {
     return mId;
 }
 
+std::vector<uint64_t> SurfaceComposerClient::Transaction::getMergedTransactionIds() {
+    return mMergedTransactionIds;
+}
+
 void SurfaceComposerClient::doUncacheBufferTransaction(uint64_t cacheId) {
     sp<ISurfaceComposer> sf(ComposerService::getComposerService());
 
     client_cache_t uncacheBuffer;
     uncacheBuffer.token = BufferCache::getInstance().getToken();
     uncacheBuffer.id = cacheId;
-
-    sp<IBinder> applyToken = IInterface::asBinder(TransactionCompletedListener::getIInstance());
-    sf->setTransactionState(FrameTimelineInfo{}, {}, {}, 0, applyToken, {}, systemTime(), true,
-                            uncacheBuffer, false, {}, generateId());
+    Vector<ComposerState> composerStates;
+    status_t status = sf->setTransactionState(FrameTimelineInfo{}, composerStates, {},
+                                              ISurfaceComposer::eOneWay,
+                                              Transaction::getDefaultApplyToken(), {}, systemTime(),
+                                              true, {uncacheBuffer}, false, {}, generateId(), {});
+    if (status != NO_ERROR) {
+        ALOGE_AND_TRACE("SurfaceComposerClient::doUncacheBufferTransaction - %s",
+                        strerror(-status));
+    }
 }
 
 void SurfaceComposerClient::Transaction::cacheBuffers() {
-    if (!mContainsBuffer) {
+    if (!mMayContainBuffer) {
         return;
     }
 
@@ -946,7 +1088,11 @@
             s->bufferData->buffer = nullptr;
         } else {
             // Cache-miss. Include the buffer and send the new cacheId.
-            cacheId = BufferCache::getInstance().cache(s->bufferData->buffer);
+            std::optional<client_cache_t> uncacheBuffer;
+            cacheId = BufferCache::getInstance().cache(s->bufferData->buffer, uncacheBuffer);
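+            // If caching evicted a least-recently-used entry, queue it to be uncached
+            // along with this transaction.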
+            if (uncacheBuffer) {
+                mUncacheBuffers.push_back(*uncacheBuffer);
+            }
         }
         s->bufferData->flags |= BufferData::BufferDataChange::cachedBufferChanged;
         s->bufferData->cachedBuffer.token = BufferCache::getInstance().getToken();
@@ -961,12 +1107,58 @@
     }
 }
 
+class SyncCallback {
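+    // Blocks apply() until the transaction-committed callback fires or a ~4 second
+    // timeout elapses, standing in for the removed synchronous (eSynchronous) path.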
+public:
+    static auto getCallback(std::shared_ptr<SyncCallback>& callbackContext) {
+        return [callbackContext](void* /* unused context */, nsecs_t /* latchTime */,
+                                 const sp<Fence>& /* presentFence */,
+                                 const std::vector<SurfaceControlStats>& /* stats */) {
+            if (!callbackContext) {
+                ALOGE("failed to get callback context for SyncCallback");
+                return;
+            }
+            LOG_ALWAYS_FATAL_IF(sem_post(&callbackContext->mSemaphore), "sem_post failed");
+        };
+    }
+    ~SyncCallback() {
+        if (mInitialized) {
+            LOG_ALWAYS_FATAL_IF(sem_destroy(&mSemaphore), "sem_destroy failed");
+        }
+    }
+    void init() {
+        LOG_ALWAYS_FATAL_IF(clock_gettime(CLOCK_MONOTONIC, &mTimeoutTimespec) == -1,
+                            "clock_gettime() failed in SyncCallback::init");
+        mTimeoutTimespec.tv_sec += 4;
+        LOG_ALWAYS_FATAL_IF(sem_init(&mSemaphore, 0, 0), "sem_init failed");
+        mInitialized = true;
+    }
+    void wait() {
+        int result = sem_clockwait(&mSemaphore, CLOCK_MONOTONIC, &mTimeoutTimespec);
+        if (result && errno != ETIMEDOUT && errno != EINTR) {
+            LOG_ALWAYS_FATAL("sem_clockwait failed(%d)", errno);
+        } else if (result && errno == ETIMEDOUT) {
+            ALOGW("Sync transaction timed out waiting for commit callback.");
+        }
+    }
+    void* getContext() { return static_cast<void*>(this); }
+
+private:
+    sem_t mSemaphore;
+    bool mInitialized = false;
+    timespec mTimeoutTimespec;
+};
+
 status_t SurfaceComposerClient::Transaction::apply(bool synchronous, bool oneWay) {
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
 
-    sp<ISurfaceComposer> sf(ComposerService::getComposerService());
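+    // For synchronous applies, register a committed callback up front and block on it
+    // after setTransactionState returns (see SyncCallback above).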
+    std::shared_ptr<SyncCallback> syncCallback = std::make_shared<SyncCallback>();
+    if (synchronous) {
+        syncCallback->init();
+        addTransactionCommittedCallback(SyncCallback::getCallback(syncCallback),
+                                        /*callbackContext=*/nullptr);
+    }
 
     bool hasListenerCallbacks = !mListenerCallbacks.empty();
     std::vector<ListenerCallbacks> listenerCallbacks;
@@ -1001,27 +1193,22 @@
     Vector<DisplayState> displayStates;
     uint32_t flags = 0;
 
-    mForceSynchronous |= synchronous;
-
-    for (auto const& kv : mComposerStates){
+    for (auto const& kv : mComposerStates) {
         composerStates.add(kv.second);
     }
 
     displayStates = std::move(mDisplayStates);
 
-    if (mForceSynchronous) {
-        flags |= ISurfaceComposer::eSynchronous;
-    }
     if (mAnimation) {
         flags |= ISurfaceComposer::eAnimation;
     }
     if (oneWay) {
-      if (mForceSynchronous) {
-          ALOGE("Transaction attempted to set synchronous and one way at the same time"
-                " this is an invalid request. Synchronous will win for safety");
-      } else {
-          flags |= ISurfaceComposer::eOneWay;
-      }
+        if (synchronous) {
+            ALOGE("Transaction attempted to set synchronous and one way at the same time;"
+                  " this is an invalid request. Synchronous will win for safety.");
+        } else {
+            flags |= ISurfaceComposer::eOneWay;
+        }
     }
 
     // If both mEarlyWakeupStart and mEarlyWakeupEnd are set
@@ -1033,31 +1220,58 @@
         flags |= ISurfaceComposer::eEarlyWakeupEnd;
     }
 
-    sp<IBinder> applyToken = mApplyToken
-            ? mApplyToken
-            : IInterface::asBinder(TransactionCompletedListener::getIInstance());
+    sp<IBinder> applyToken = mApplyToken ? mApplyToken : sApplyToken;
 
+    sp<ISurfaceComposer> sf(ComposerService::getComposerService());
     sf->setTransactionState(mFrameTimelineInfo, composerStates, displayStates, flags, applyToken,
                             mInputWindowCommands, mDesiredPresentTime, mIsAutoTimestamp,
-                            {} /*uncacheBuffer - only set in doUncacheBufferTransaction*/,
-                            hasListenerCallbacks, listenerCallbacks, mId);
+                            mUncacheBuffers, hasListenerCallbacks, listenerCallbacks, mId,
+                            mMergedTransactionIds);
     mId = generateId();
 
     // Clear the current states and flags
     clear();
 
+    if (synchronous) {
+        syncCallback->wait();
+    }
+
     mStatus = NO_ERROR;
     return NO_ERROR;
 }
 
+sp<IBinder> SurfaceComposerClient::Transaction::sApplyToken = new BBinder();
+
+sp<IBinder> SurfaceComposerClient::Transaction::getDefaultApplyToken() {
+    return sApplyToken;
+}
+
+void SurfaceComposerClient::Transaction::setDefaultApplyToken(sp<IBinder> applyToken) {
+    sApplyToken = applyToken;
+}
+
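+// Applies a one-way transaction that only sets eFlushJankData on the given layer,
+// asking SurfaceFlinger to flush any pending jank data callbacks for it.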
+status_t SurfaceComposerClient::Transaction::sendSurfaceFlushJankDataTransaction(
+        const sp<SurfaceControl>& sc) {
+    Transaction t;
+    layer_state_t* s = t.getLayerState(sc);
+    if (!s) {
+        return BAD_INDEX;
+    }
+
+    s->what |= layer_state_t::eFlushJankData;
+    t.registerSurfaceControlForCallback(sc);
+    return t.apply(/*sync=*/false, /* oneWay=*/true);
+}
 // ---------------------------------------------------------------------------
 
-sp<IBinder> SurfaceComposerClient::createDisplay(const String8& displayName, bool secure) {
+sp<IBinder> SurfaceComposerClient::createDisplay(const String8& displayName, bool secure,
+                                                 float requestedRefreshRate) {
     sp<IBinder> display = nullptr;
     binder::Status status =
             ComposerServiceAIDL::getComposerService()->createDisplay(std::string(
                                                                              displayName.c_str()),
-                                                                     secure, &display);
+                                                                     secure, requestedRefreshRate,
+                                                                     &display);
     return status.isOk() ? display : nullptr;
 }
 
@@ -1080,21 +1294,6 @@
     return physicalDisplayIds;
 }
 
-status_t SurfaceComposerClient::getPrimaryPhysicalDisplayId(PhysicalDisplayId* id) {
-    int64_t displayId;
-    binder::Status status =
-            ComposerServiceAIDL::getComposerService()->getPrimaryPhysicalDisplayId(&displayId);
-    if (status.isOk()) {
-        *id = *DisplayId::fromValue<PhysicalDisplayId>(static_cast<uint64_t>(displayId));
-    }
-    return status.transactionError();
-}
-
-std::optional<PhysicalDisplayId> SurfaceComposerClient::getInternalDisplayId() {
-    ComposerServiceAIDL& instance = ComposerServiceAIDL::getInstance();
-    return instance.getInternalDisplayId();
-}
-
 sp<IBinder> SurfaceComposerClient::getPhysicalDisplayToken(PhysicalDisplayId displayId) {
     sp<IBinder> display = nullptr;
     binder::Status status =
@@ -1103,11 +1302,6 @@
     return status.isOk() ? display : nullptr;
 }
 
-sp<IBinder> SurfaceComposerClient::getInternalDisplayToken() {
-    ComposerServiceAIDL& instance = ComposerServiceAIDL::getInstance();
-    return instance.getInternalDisplayToken();
-}
-
 void SurfaceComposerClient::Transaction::setAnimationTransaction() {
     mAnimation = true;
 }
@@ -1141,8 +1335,7 @@
     auto& callbackInfo = mListenerCallbacks[TransactionCompletedListener::getIInstance()];
     callbackInfo.surfaceControls.insert(sc);
 
-    TransactionCompletedListener::getInstance()
-            ->addSurfaceControlToCallbacks(sc, callbackInfo.callbackIds);
+    TransactionCompletedListener::getInstance()->addSurfaceControlToCallbacks(callbackInfo, sc);
 }
 
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setPosition(
@@ -1170,21 +1363,6 @@
     return setFlags(sc, layer_state_t::eLayerHidden, layer_state_t::eLayerHidden);
 }
 
-SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setSize(
-        const sp<SurfaceControl>& sc, uint32_t w, uint32_t h) {
-    layer_state_t* s = getLayerState(sc);
-    if (!s) {
-        mStatus = BAD_INDEX;
-        return *this;
-    }
-    s->what |= layer_state_t::eSizeChanged;
-    s->w = w;
-    s->h = h;
-
-    registerSurfaceControlForCallback(sc);
-    return *this;
-}
-
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setLayer(
         const sp<SurfaceControl>& sc, int32_t z) {
     layer_state_t* s = getLayerState(sc);
@@ -1227,7 +1405,9 @@
     if ((mask & layer_state_t::eLayerOpaque) || (mask & layer_state_t::eLayerHidden) ||
         (mask & layer_state_t::eLayerSecure) || (mask & layer_state_t::eLayerSkipScreenshot) ||
         (mask & layer_state_t::eEnableBackpressure) ||
-        (mask & layer_state_t::eLayerIsDisplayDecoration)) {
+        (mask & layer_state_t::eIgnoreDestinationFrame) ||
+        (mask & layer_state_t::eLayerIsDisplayDecoration) ||
+        (mask & layer_state_t::eLayerIsRefreshRateIndicator)) {
         s->what |= layer_state_t::eFlagsChanged;
     }
     s->flags &= ~mask;
@@ -1278,7 +1458,7 @@
         ALOGE("SurfaceComposerClient::Transaction::setAlpha: invalid alpha %f, clamping", alpha);
     }
     s->what |= layer_state_t::eAlphaChanged;
-    s->alpha = std::clamp(alpha, 0.f, 1.f);
+    s->color.a = std::clamp(alpha, 0.f, 1.f);
 
     registerSurfaceControlForCallback(sc);
     return *this;
@@ -1409,7 +1589,7 @@
         return *this;
     }
     s->what |= layer_state_t::eColorChanged;
-    s->color = color;
+    s->color.rgb = color;
 
     registerSurfaceControlForCallback(sc);
     return *this;
@@ -1424,8 +1604,8 @@
     }
 
     s->what |= layer_state_t::eBackgroundColorChanged;
-    s->color = color;
-    s->bgColorAlpha = alpha;
+    s->bgColor.rgb = color;
+    s->bgColor.a = alpha;
     s->bgColorDataspace = dataspace;
 
     registerSurfaceControlForCallback(sc);
@@ -1439,8 +1619,8 @@
         mStatus = BAD_INDEX;
         return *this;
     }
-    s->what |= layer_state_t::eTransformChanged;
-    s->transform = transform;
+    s->what |= layer_state_t::eBufferTransformChanged;
+    s->bufferTransform = transform;
 
     registerSurfaceControlForCallback(sc);
     return *this;
@@ -1478,7 +1658,6 @@
     s->what &= ~layer_state_t::eBufferChanged;
     s->bufferData = nullptr;
 
-    mContainsBuffer = false;
     return bufferData;
 }
 
@@ -1497,7 +1676,7 @@
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setBuffer(
         const sp<SurfaceControl>& sc, const sp<GraphicBuffer>& buffer,
         const std::optional<sp<Fence>>& fence, const std::optional<uint64_t>& optFrameNumber,
-        ReleaseBufferCallback callback) {
+        uint32_t producerId, ReleaseBufferCallback callback) {
     layer_state_t* s = getLayerState(sc);
     if (!s) {
         mStatus = BAD_INDEX;
@@ -1506,28 +1685,25 @@
 
     releaseBufferIfOverwriting(*s);
 
-    if (buffer == nullptr) {
-        s->what &= ~layer_state_t::eBufferChanged;
-        s->bufferData = nullptr;
-        mContainsBuffer = false;
-        return *this;
-    }
-
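+    // Note: a null buffer is no longer dropped from the transaction; it is sent as an explicit
+    // buffer change carrying an empty BufferData. Use unsetBuffer() to remove a previously
+    // staged buffer from this transaction instead.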
     std::shared_ptr<BufferData> bufferData = std::make_shared<BufferData>();
     bufferData->buffer = buffer;
-    uint64_t frameNumber = sc->resolveFrameNumber(optFrameNumber);
-    bufferData->frameNumber = frameNumber;
-    bufferData->flags |= BufferData::BufferDataChange::frameNumberChanged;
-    if (fence) {
-        bufferData->acquireFence = *fence;
-        bufferData->flags |= BufferData::BufferDataChange::fenceChanged;
+    if (buffer) {
+        uint64_t frameNumber = sc->resolveFrameNumber(optFrameNumber);
+        bufferData->frameNumber = frameNumber;
+        bufferData->producerId = producerId;
+        bufferData->flags |= BufferData::BufferDataChange::frameNumberChanged;
+        if (fence) {
+            bufferData->acquireFence = *fence;
+            bufferData->flags |= BufferData::BufferDataChange::fenceChanged;
+        }
+        bufferData->releaseBufferEndpoint =
+                IInterface::asBinder(TransactionCompletedListener::getIInstance());
+        setReleaseBufferCallback(bufferData.get(), callback);
     }
-    bufferData->releaseBufferEndpoint =
-            IInterface::asBinder(TransactionCompletedListener::getIInstance());
+
     if (mIsAutoTimestamp) {
         mDesiredPresentTime = systemTime();
     }
-    setReleaseBufferCallback(bufferData.get(), callback);
     s->what |= layer_state_t::eBufferChanged;
     s->bufferData = std::move(bufferData);
     registerSurfaceControlForCallback(sc);
@@ -1544,7 +1720,26 @@
                                        const std::vector<SurfaceControlStats>&) {},
                                     nullptr);
 
-    mContainsBuffer = true;
+    mMayContainBuffer = true;
+    return *this;
+}
+
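+// Removes a buffer previously staged on this transaction for the given SurfaceControl. Any
+// pending release callback registered for that buffer is fired via releaseBufferIfOverwriting().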
+SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::unsetBuffer(
+        const sp<SurfaceControl>& sc) {
+    layer_state_t* s = getLayerState(sc);
+    if (!s) {
+        mStatus = BAD_INDEX;
+        return *this;
+    }
+
+    if (!(s->what & layer_state_t::eBufferChanged)) {
+        return *this;
+    }
+
+    releaseBufferIfOverwriting(*s);
+
+    s->what &= ~layer_state_t::eBufferChanged;
+    s->bufferData = nullptr;
     return *this;
 }
 
@@ -1579,6 +1774,35 @@
     return *this;
 }
 
+SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setExtendedRangeBrightness(
+        const sp<SurfaceControl>& sc, float currentBufferRatio, float desiredRatio) {
+    layer_state_t* s = getLayerState(sc);
+    if (!s) {
+        mStatus = BAD_INDEX;
+        return *this;
+    }
+    s->what |= layer_state_t::eExtendedRangeBrightnessChanged;
+    s->currentHdrSdrRatio = currentBufferRatio;
+    s->desiredHdrSdrRatio = desiredRatio;
+
+    registerSurfaceControlForCallback(sc);
+    return *this;
+}
+
+SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setCachingHint(
+        const sp<SurfaceControl>& sc, gui::CachingHint cachingHint) {
+    layer_state_t* s = getLayerState(sc);
+    if (!s) {
+        mStatus = BAD_INDEX;
+        return *this;
+    }
+    s->what |= layer_state_t::eCachingHintChanged;
+    s->cachingHint = cachingHint;
+
+    registerSurfaceControlForCallback(sc);
+    return *this;
+}
+
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setHdrMetadata(
         const sp<SurfaceControl>& sc, const HdrMetadata& hdrMetadata) {
     layer_state_t* s = getLayerState(sc);
@@ -1732,8 +1956,10 @@
     return *this;
 }
 
-SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::syncInputWindows() {
-    mInputWindowCommands.syncInputWindows = true;
+SurfaceComposerClient::Transaction&
+SurfaceComposerClient::Transaction::addWindowInfosReportedListener(
+        sp<gui::IWindowInfosReportedListener> windowInfosReportedListener) {
+    mInputWindowCommands.windowInfosReportedListeners.insert(windowInfosReportedListener);
     return *this;
 }
 
@@ -1840,6 +2066,19 @@
     return *this;
 }
 
+SurfaceComposerClient::Transaction&
+SurfaceComposerClient::Transaction::setDefaultFrameRateCompatibility(const sp<SurfaceControl>& sc,
+                                                                     int8_t compatibility) {
+    layer_state_t* s = getLayerState(sc);
+    if (!s) {
+        mStatus = BAD_INDEX;
+        return *this;
+    }
+    s->what |= layer_state_t::eDefaultFrameRateCompatibilityChanged;
+    s->defaultFrameRateCompatibility = compatibility;
+    return *this;
+}
+
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setFixedTransformHint(
         const sp<SurfaceControl>& sc, int32_t fixedTransformHint) {
     layer_state_t* s = getLayerState(sc);
@@ -1858,7 +2097,7 @@
 
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setFrameTimelineInfo(
         const FrameTimelineInfo& frameTimelineInfo) {
-    mFrameTimelineInfo.merge(frameTimelineInfo);
+    mergeFrameTimelineInfo(mFrameTimelineInfo, frameTimelineInfo);
     return *this;
 }
 
@@ -1952,6 +2191,23 @@
     return *this;
 }
 
+SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::enableBorder(
+        const sp<SurfaceControl>& sc, bool shouldEnable, float width, const half4& color) {
+    layer_state_t* s = getLayerState(sc);
+    if (!s) {
+        mStatus = BAD_INDEX;
+        return *this;
+    }
+
+    s->what |= layer_state_t::eRenderBorderChanged;
+    s->borderEnabled = shouldEnable;
+    s->borderWidth = width;
+    s->borderColor = color;
+
+    registerSurfaceControlForCallback(sc);
+    return *this;
+}
+
 // ---------------------------------------------------------------------------
 
 DisplayState& SurfaceComposerClient::Transaction::getDisplayState(const sp<IBinder>& token) {
@@ -2007,7 +2263,6 @@
     s.layerStackSpaceRect = layerStackRect;
     s.orientedDisplaySpaceRect = displayRect;
     s.what |= DisplayState::eDisplayProjectionChanged;
-    mForceSynchronous = true; // TODO: do we actually still need this?
 }
 
 void SurfaceComposerClient::Transaction::setDisplaySize(const sp<IBinder>& token, uint32_t width, uint32_t height) {
@@ -2017,6 +2272,73 @@
     s.what |= DisplayState::eDisplaySizeChanged;
 }
 
+// copied from FrameTimelineInfo::merge()
+void SurfaceComposerClient::Transaction::mergeFrameTimelineInfo(FrameTimelineInfo& t,
+                                                                const FrameTimelineInfo& other) {
+    // When merging vsync ids we keep the newer (larger) valid one
+    if (t.vsyncId != FrameTimelineInfo::INVALID_VSYNC_ID &&
+        other.vsyncId != FrameTimelineInfo::INVALID_VSYNC_ID) {
+        if (other.vsyncId > t.vsyncId) {
+            t.vsyncId = other.vsyncId;
+            t.inputEventId = other.inputEventId;
+            t.startTimeNanos = other.startTimeNanos;
+            t.useForRefreshRateSelection = other.useForRefreshRateSelection;
+        }
+    } else if (t.vsyncId == FrameTimelineInfo::INVALID_VSYNC_ID) {
+        t.vsyncId = other.vsyncId;
+        t.inputEventId = other.inputEventId;
+        t.startTimeNanos = other.startTimeNanos;
+        t.useForRefreshRateSelection = other.useForRefreshRateSelection;
+    }
+}
+
+// copied from FrameTimelineInfo::clear()
+void SurfaceComposerClient::Transaction::clearFrameTimelineInfo(FrameTimelineInfo& t) {
+    t.vsyncId = FrameTimelineInfo::INVALID_VSYNC_ID;
+    t.inputEventId = os::IInputConstants::INVALID_INPUT_EVENT_ID;
+    t.startTimeNanos = 0;
+    t.useForRefreshRateSelection = false;
+}
+
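+// The trusted-presentation callback itself is stored in the process-local
+// TransactionCompletedListener, keyed by layer id; the transaction merely points SurfaceFlinger
+// at that listener so presentation-state changes are reported back to it.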
+SurfaceComposerClient::Transaction&
+SurfaceComposerClient::Transaction::setTrustedPresentationCallback(
+        const sp<SurfaceControl>& sc, TrustedPresentationCallback cb,
+        const TrustedPresentationThresholds& thresholds, void* context,
+        sp<SurfaceComposerClient::PresentationCallbackRAII>& outCallbackRef) {
+    auto listener = TransactionCompletedListener::getInstance();
+    outCallbackRef = listener->addTrustedPresentationCallback(cb, sc->getLayerId(), context);
+
+    layer_state_t* s = getLayerState(sc);
+    if (!s) {
+        mStatus = BAD_INDEX;
+        return *this;
+    }
+    s->what |= layer_state_t::eTrustedPresentationInfoChanged;
+    s->trustedPresentationThresholds = thresholds;
+    s->trustedPresentationListener.callbackInterface = TransactionCompletedListener::getIInstance();
+    s->trustedPresentationListener.callbackId = sc->getLayerId();
+
+    return *this;
+}
+
+SurfaceComposerClient::Transaction&
+SurfaceComposerClient::Transaction::clearTrustedPresentationCallback(const sp<SurfaceControl>& sc) {
+    auto listener = TransactionCompletedListener::getInstance();
+    listener->clearTrustedPresentationCallback(sc->getLayerId());
+
+    layer_state_t* s = getLayerState(sc);
+    if (!s) {
+        mStatus = BAD_INDEX;
+        return *this;
+    }
+    s->what |= layer_state_t::eTrustedPresentationInfoChanged;
+    s->trustedPresentationThresholds = TrustedPresentationThresholds();
+    s->trustedPresentationListener.callbackInterface = nullptr;
+    s->trustedPresentationListener.callbackId = -1;
+
+    return *this;
+}
+
 // ---------------------------------------------------------------------------
 
 SurfaceComposerClient::SurfaceComposerClient() : mStatus(NO_INIT) {}
@@ -2025,11 +2347,11 @@
       : mStatus(NO_ERROR), mClient(client) {}
 
 void SurfaceComposerClient::onFirstRef() {
-    sp<ISurfaceComposer> sf(ComposerService::getComposerService());
+    sp<gui::ISurfaceComposer> sf(ComposerServiceAIDL::getComposerService());
     if (sf != nullptr && mStatus == NO_INIT) {
         sp<ISurfaceComposerClient> conn;
-        conn = sf->createConnection();
-        if (conn != nullptr) {
+        binder::Status status = sf->createConnection(&conn);
+        if (status.isOk() && conn != nullptr) {
             mClient = conn;
             mStatus = NO_ERROR;
         }
@@ -2066,8 +2388,14 @@
     mStatus = NO_INIT;
 }
 
+status_t SurfaceComposerClient::bootFinished() {
+    sp<gui::ISurfaceComposer> sf(ComposerServiceAIDL::getComposerService());
+    binder::Status status = sf->bootFinished();
+    return statusTFromBinderStatus(status);
+}
+
 sp<SurfaceControl> SurfaceComposerClient::createSurface(const String8& name, uint32_t w, uint32_t h,
-                                                        PixelFormat format, uint32_t flags,
+                                                        PixelFormat format, int32_t flags,
                                                         const sp<IBinder>& parentHandle,
                                                         LayerMetadata metadata,
                                                         uint32_t* outTransformHint) {
@@ -2077,38 +2405,13 @@
     return s;
 }
 
-sp<SurfaceControl> SurfaceComposerClient::createWithSurfaceParent(const String8& name, uint32_t w,
-                                                                  uint32_t h, PixelFormat format,
-                                                                  uint32_t flags, Surface* parent,
-                                                                  LayerMetadata metadata,
-                                                                  uint32_t* outTransformHint) {
-    sp<SurfaceControl> sur;
-    status_t err = mStatus;
-
-    if (mStatus == NO_ERROR) {
-        sp<IBinder> handle;
-        sp<IGraphicBufferProducer> parentGbp = parent->getIGraphicBufferProducer();
-        sp<IGraphicBufferProducer> gbp;
-
-        uint32_t transformHint = 0;
-        int32_t id = -1;
-        err = mClient->createWithSurfaceParent(name, w, h, format, flags, parentGbp,
-                                               std::move(metadata), &handle, &gbp, &id,
-                                               &transformHint);
-        if (outTransformHint) {
-            *outTransformHint = transformHint;
-        }
-        ALOGE_IF(err, "SurfaceComposerClient::createWithSurfaceParent error %s", strerror(-err));
-        if (err == NO_ERROR) {
-            return new SurfaceControl(this, handle, gbp, id, transformHint);
-        }
-    }
-    return nullptr;
+static std::string toString(const String16& string) {
+    return std::string(String8(string).c_str());
 }
 
 status_t SurfaceComposerClient::createSurfaceChecked(const String8& name, uint32_t w, uint32_t h,
                                                      PixelFormat format,
-                                                     sp<SurfaceControl>* outSurface, uint32_t flags,
+                                                     sp<SurfaceControl>* outSurface, int32_t flags,
                                                      const sp<IBinder>& parentHandle,
                                                      LayerMetadata metadata,
                                                      uint32_t* outTransformHint) {
@@ -2116,21 +2419,18 @@
     status_t err = mStatus;
 
     if (mStatus == NO_ERROR) {
-        sp<IBinder> handle;
-        sp<IGraphicBufferProducer> gbp;
-
-        uint32_t transformHint = 0;
-        int32_t id = -1;
-        err = mClient->createSurface(name, w, h, format, flags, parentHandle, std::move(metadata),
-                                     &handle, &gbp, &id, &transformHint);
-
+        gui::CreateSurfaceResult result;
+        binder::Status status = mClient->createSurface(std::string(name.string()), flags,
+                                                       parentHandle, std::move(metadata), &result);
+        err = statusTFromBinderStatus(status);
         if (outTransformHint) {
-            *outTransformHint = transformHint;
+            *outTransformHint = result.transformHint;
         }
         ALOGE_IF(err, "SurfaceComposerClient::createSurface error %s", strerror(-err));
         if (err == NO_ERROR) {
-            *outSurface =
-                    new SurfaceControl(this, handle, gbp, id, w, h, format, transformHint, flags);
+            *outSurface = new SurfaceControl(this, result.handle, result.layerId,
+                                             toString(result.layerName), w, h, format,
+                                             result.transformHint, flags);
         }
     }
     return err;
@@ -2141,12 +2441,22 @@
         return nullptr;
     }
 
-    sp<IBinder> handle;
     sp<IBinder> mirrorFromHandle = mirrorFromSurface->getHandle();
-    int32_t layer_id = -1;
-    status_t err = mClient->mirrorSurface(mirrorFromHandle, &handle, &layer_id);
+    gui::CreateSurfaceResult result;
+    const binder::Status status = mClient->mirrorSurface(mirrorFromHandle, &result);
+    const status_t err = statusTFromBinderStatus(status);
     if (err == NO_ERROR) {
-        return new SurfaceControl(this, handle, nullptr, layer_id, true /* owned */);
+        return new SurfaceControl(this, result.handle, result.layerId, toString(result.layerName));
+    }
+    return nullptr;
+}
+
+sp<SurfaceControl> SurfaceComposerClient::mirrorDisplay(DisplayId displayId) {
+    gui::CreateSurfaceResult result;
+    const binder::Status status = mClient->mirrorDisplay(displayId.value, &result);
+    const status_t err = statusTFromBinderStatus(status);
+    if (err == NO_ERROR) {
+        return new SurfaceControl(this, result.handle, result.layerId, toString(result.layerName));
     }
     return nullptr;
 }
@@ -2155,7 +2465,8 @@
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
-    return mClient->clearLayerFrameStats(token);
+    const binder::Status status = mClient->clearLayerFrameStats(token);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::getLayerFrameStats(const sp<IBinder>& token,
@@ -2163,21 +2474,28 @@
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
-    return mClient->getLayerFrameStats(token, outStats);
+    gui::FrameStats stats;
+    const binder::Status status = mClient->getLayerFrameStats(token, &stats);
+    if (status.isOk()) {
+        outStats->refreshPeriodNano = stats.refreshPeriodNano;
+        outStats->desiredPresentTimesNano.setCapacity(stats.desiredPresentTimesNano.size());
+        for (const auto& t : stats.desiredPresentTimesNano) {
+            outStats->desiredPresentTimesNano.add(t);
+        }
+        outStats->actualPresentTimesNano.setCapacity(stats.actualPresentTimesNano.size());
+        for (const auto& t : stats.actualPresentTimesNano) {
+            outStats->actualPresentTimesNano.add(t);
+        }
+        outStats->frameReadyTimesNano.setCapacity(stats.frameReadyTimesNano.size());
+        for (const auto& t : stats.frameReadyTimesNano) {
+            outStats->frameReadyTimesNano.add(t);
+        }
+    }
+    return statusTFromBinderStatus(status);
 }
 
 // ----------------------------------------------------------------------------
 
-status_t SurfaceComposerClient::enableVSyncInjections(bool enable) {
-    sp<ISurfaceComposer> sf(ComposerService::getComposerService());
-    return sf->enableVSyncInjections(enable);
-}
-
-status_t SurfaceComposerClient::injectVSync(nsecs_t when) {
-    sp<ISurfaceComposer> sf(ComposerService::getComposerService());
-    return sf->injectVSync(when);
-}
-
 status_t SurfaceComposerClient::getDisplayState(const sp<IBinder>& display,
                                                 ui::DisplayState* state) {
     gui::DisplayState ds;
@@ -2189,23 +2507,135 @@
         state->layerStackSpaceRect =
                 ui::Size(ds.layerStackSpaceRect.width, ds.layerStackSpaceRect.height);
     }
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
 }
 
-status_t SurfaceComposerClient::getStaticDisplayInfo(const sp<IBinder>& display,
-                                                     ui::StaticDisplayInfo* info) {
-    return ComposerService::getComposerService()->getStaticDisplayInfo(display, info);
+status_t SurfaceComposerClient::getStaticDisplayInfo(int64_t displayId,
+                                                     ui::StaticDisplayInfo* outInfo) {
+    using Tag = android::gui::DeviceProductInfo::ManufactureOrModelDate::Tag;
+    gui::StaticDisplayInfo ginfo;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getStaticDisplayInfo(displayId, &ginfo);
+    if (status.isOk()) {
+        // convert gui::StaticDisplayInfo to ui::StaticDisplayInfo
+        outInfo->connectionType = static_cast<ui::DisplayConnectionType>(ginfo.connectionType);
+        outInfo->density = ginfo.density;
+        outInfo->secure = ginfo.secure;
+        outInfo->installOrientation = static_cast<ui::Rotation>(ginfo.installOrientation);
+
+        // deviceProductInfo is nullable; only convert it when the display reported one.
+        std::optional<gui::DeviceProductInfo> dpi = ginfo.deviceProductInfo;
+        if (dpi.has_value()) {
+            DeviceProductInfo info;
+            gui::DeviceProductInfo::ManufactureOrModelDate& date = dpi->manufactureOrModelDate;
+            info.name = dpi->name;
+            if (dpi->manufacturerPnpId.size() > 0) {
+                // copied from PnpId = std::array<char, 4> in ui/DeviceProductInfo.h
+                constexpr int kMaxPnpIdSize = 4;
+                size_t count = std::min<size_t>(kMaxPnpIdSize, dpi->manufacturerPnpId.size());
+                std::copy_n(dpi->manufacturerPnpId.begin(), count, info.manufacturerPnpId.begin());
+            }
+            if (dpi->relativeAddress.size() > 0) {
+                std::copy(dpi->relativeAddress.begin(), dpi->relativeAddress.end(),
+                          std::back_inserter(info.relativeAddress));
+            }
+            info.productId = dpi->productId;
+            if (date.getTag() == Tag::modelYear) {
+                DeviceProductInfo::ModelYear modelYear;
+                modelYear.year = static_cast<uint32_t>(date.get<Tag::modelYear>().year);
+                info.manufactureOrModelDate = modelYear;
+            } else if (date.getTag() == Tag::manufactureYear) {
+                DeviceProductInfo::ManufactureYear manufactureYear;
+                manufactureYear.year = date.get<Tag::manufactureYear>().modelYear.year;
+                info.manufactureOrModelDate = manufactureYear;
+            } else if (date.getTag() == Tag::manufactureWeekAndYear) {
+                DeviceProductInfo::ManufactureWeekAndYear weekAndYear;
+                weekAndYear.year =
+                        date.get<Tag::manufactureWeekAndYear>().manufactureYear.modelYear.year;
+                weekAndYear.week = date.get<Tag::manufactureWeekAndYear>().week;
+                info.manufactureOrModelDate = weekAndYear;
+            }
+
+            outInfo->deviceProductInfo = info;
+        }
+    }
+    return statusTFromBinderStatus(status);
 }
 
-status_t SurfaceComposerClient::getDynamicDisplayInfo(const sp<IBinder>& display,
-                                                      ui::DynamicDisplayInfo* info) {
-    return ComposerService::getComposerService()->getDynamicDisplayInfo(display, info);
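+// Shared helper for the id- and token-based getters below: copies an AIDL
+// gui::DynamicDisplayInfo into the legacy ui::DynamicDisplayInfo out-parameter.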
+void SurfaceComposerClient::getDynamicDisplayInfoInternal(gui::DynamicDisplayInfo& ginfo,
+                                                          ui::DynamicDisplayInfo*& outInfo) {
+    // convert gui::DynamicDisplayInfo to ui::DynamicDisplayInfo
+    outInfo->supportedDisplayModes.clear();
+    outInfo->supportedDisplayModes.reserve(ginfo.supportedDisplayModes.size());
+    for (const auto& mode : ginfo.supportedDisplayModes) {
+        ui::DisplayMode outMode;
+        outMode.id = mode.id;
+        outMode.resolution.width = mode.resolution.width;
+        outMode.resolution.height = mode.resolution.height;
+        outMode.xDpi = mode.xDpi;
+        outMode.yDpi = mode.yDpi;
+        outMode.refreshRate = mode.refreshRate;
+        outMode.appVsyncOffset = mode.appVsyncOffset;
+        outMode.sfVsyncOffset = mode.sfVsyncOffset;
+        outMode.presentationDeadline = mode.presentationDeadline;
+        outMode.group = mode.group;
+        std::transform(mode.supportedHdrTypes.begin(), mode.supportedHdrTypes.end(),
+                       std::back_inserter(outMode.supportedHdrTypes),
+                       [](const int32_t& value) { return static_cast<ui::Hdr>(value); });
+        outInfo->supportedDisplayModes.push_back(outMode);
+    }
+
+    outInfo->activeDisplayModeId = ginfo.activeDisplayModeId;
+    outInfo->renderFrameRate = ginfo.renderFrameRate;
+
+    outInfo->supportedColorModes.clear();
+    outInfo->supportedColorModes.reserve(ginfo.supportedColorModes.size());
+    for (const auto& cmode : ginfo.supportedColorModes) {
+        outInfo->supportedColorModes.push_back(static_cast<ui::ColorMode>(cmode));
+    }
+
+    outInfo->activeColorMode = static_cast<ui::ColorMode>(ginfo.activeColorMode);
+
+    std::vector<ui::Hdr> types;
+    types.reserve(ginfo.hdrCapabilities.supportedHdrTypes.size());
+    for (const auto& hdr : ginfo.hdrCapabilities.supportedHdrTypes) {
+        types.push_back(static_cast<ui::Hdr>(hdr));
+    }
+    outInfo->hdrCapabilities = HdrCapabilities(types, ginfo.hdrCapabilities.maxLuminance,
+                                               ginfo.hdrCapabilities.maxAverageLuminance,
+                                               ginfo.hdrCapabilities.minLuminance);
+
+    outInfo->autoLowLatencyModeSupported = ginfo.autoLowLatencyModeSupported;
+    outInfo->gameContentTypeSupported = ginfo.gameContentTypeSupported;
+    outInfo->preferredBootDisplayMode = ginfo.preferredBootDisplayMode;
+}
+
+status_t SurfaceComposerClient::getDynamicDisplayInfoFromId(int64_t displayId,
+                                                            ui::DynamicDisplayInfo* outInfo) {
+    gui::DynamicDisplayInfo ginfo;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getDynamicDisplayInfoFromId(displayId,
+                                                                                   &ginfo);
+    if (status.isOk()) {
+        getDynamicDisplayInfoInternal(ginfo, outInfo);
+    }
+    return statusTFromBinderStatus(status);
+}
+
+status_t SurfaceComposerClient::getDynamicDisplayInfoFromToken(const sp<IBinder>& display,
+                                                               ui::DynamicDisplayInfo* outInfo) {
+    gui::DynamicDisplayInfo ginfo;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getDynamicDisplayInfoFromToken(display,
+                                                                                      &ginfo);
+    if (status.isOk()) {
+        getDynamicDisplayInfoInternal(ginfo, outInfo);
+    }
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::getActiveDisplayMode(const sp<IBinder>& display,
                                                      ui::DisplayMode* mode) {
     ui::DynamicDisplayInfo info;
-    status_t result = getDynamicDisplayInfo(display, &info);
+
+    status_t result = getDynamicDisplayInfoFromToken(display, &info);
     if (result != NO_ERROR) {
         return result;
     }
@@ -2219,58 +2649,109 @@
     return NAME_NOT_FOUND;
 }
 
-status_t SurfaceComposerClient::setDesiredDisplayModeSpecs(
-        const sp<IBinder>& displayToken, ui::DisplayModeId defaultMode, bool allowGroupSwitching,
-        float primaryRefreshRateMin, float primaryRefreshRateMax, float appRequestRefreshRateMin,
-        float appRequestRefreshRateMax) {
-    return ComposerService::getComposerService()
-            ->setDesiredDisplayModeSpecs(displayToken, defaultMode, allowGroupSwitching,
-                                         primaryRefreshRateMin, primaryRefreshRateMax,
-                                         appRequestRefreshRateMin, appRequestRefreshRateMax);
+status_t SurfaceComposerClient::setDesiredDisplayModeSpecs(const sp<IBinder>& displayToken,
+                                                           const gui::DisplayModeSpecs& specs) {
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->setDesiredDisplayModeSpecs(displayToken,
+                                                                                  specs);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::getDesiredDisplayModeSpecs(const sp<IBinder>& displayToken,
-                                                           ui::DisplayModeId* outDefaultMode,
-                                                           bool* outAllowGroupSwitching,
-                                                           float* outPrimaryRefreshRateMin,
-                                                           float* outPrimaryRefreshRateMax,
-                                                           float* outAppRequestRefreshRateMin,
-                                                           float* outAppRequestRefreshRateMax) {
-    return ComposerService::getComposerService()
-            ->getDesiredDisplayModeSpecs(displayToken, outDefaultMode, outAllowGroupSwitching,
-                                         outPrimaryRefreshRateMin, outPrimaryRefreshRateMax,
-                                         outAppRequestRefreshRateMin, outAppRequestRefreshRateMax);
+                                                           gui::DisplayModeSpecs* outSpecs) {
+    if (!outSpecs) {
+        return BAD_VALUE;
+    }
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getDesiredDisplayModeSpecs(displayToken,
+                                                                                  outSpecs);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::getDisplayNativePrimaries(const sp<IBinder>& display,
         ui::DisplayPrimaries& outPrimaries) {
-    return ComposerService::getComposerService()->getDisplayNativePrimaries(display, outPrimaries);
+    gui::DisplayPrimaries primaries;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getDisplayNativePrimaries(display,
+                                                                                 &primaries);
+    if (status.isOk()) {
+        outPrimaries.red.X = primaries.red.X;
+        outPrimaries.red.Y = primaries.red.Y;
+        outPrimaries.red.Z = primaries.red.Z;
+
+        outPrimaries.green.X = primaries.green.X;
+        outPrimaries.green.Y = primaries.green.Y;
+        outPrimaries.green.Z = primaries.green.Z;
+
+        outPrimaries.blue.X = primaries.blue.X;
+        outPrimaries.blue.Y = primaries.blue.Y;
+        outPrimaries.blue.Z = primaries.blue.Z;
+
+        outPrimaries.white.X = primaries.white.X;
+        outPrimaries.white.Y = primaries.white.Y;
+        outPrimaries.white.Z = primaries.white.Z;
+    }
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::setActiveColorMode(const sp<IBinder>& display,
         ColorMode colorMode) {
-    return ComposerService::getComposerService()->setActiveColorMode(display, colorMode);
+    binder::Status status = ComposerServiceAIDL::getComposerService()
+                                    ->setActiveColorMode(display, static_cast<int>(colorMode));
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::getBootDisplayModeSupport(bool* support) {
     binder::Status status =
             ComposerServiceAIDL::getComposerService()->getBootDisplayModeSupport(support);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
+}
+
+status_t SurfaceComposerClient::getOverlaySupport(gui::OverlayProperties* outProperties) {
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getOverlaySupport(outProperties);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::setBootDisplayMode(const sp<IBinder>& display,
                                                    ui::DisplayModeId displayModeId) {
-    return ComposerService::getComposerService()->setBootDisplayMode(display, displayModeId);
+    binder::Status status = ComposerServiceAIDL::getComposerService()
+                                    ->setBootDisplayMode(display, static_cast<int>(displayModeId));
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::clearBootDisplayMode(const sp<IBinder>& display) {
     binder::Status status =
             ComposerServiceAIDL::getComposerService()->clearBootDisplayMode(display);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
+}
+
+status_t SurfaceComposerClient::getHdrConversionCapabilities(
+        std::vector<gui::HdrConversionCapability>* hdrConversionCapabilities) {
+    binder::Status status = ComposerServiceAIDL::getComposerService()->getHdrConversionCapabilities(
+            hdrConversionCapabilities);
+    return statusTFromBinderStatus(status);
+}
+
+status_t SurfaceComposerClient::setHdrConversionStrategy(
+        gui::HdrConversionStrategy hdrConversionStrategy, ui::Hdr* outPreferredHdrOutputType) {
+    int hdrType;
+    binder::Status status = ComposerServiceAIDL::getComposerService()
+                                    ->setHdrConversionStrategy(hdrConversionStrategy, &hdrType);
+    if (status.isOk()) {
+        *outPreferredHdrOutputType = static_cast<ui::Hdr>(hdrType);
+    }
+    return statusTFromBinderStatus(status);
+}
+
+status_t SurfaceComposerClient::getHdrOutputConversionSupport(bool* isSupported) {
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getHdrOutputConversionSupport(isSupported);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::setOverrideFrameRate(uid_t uid, float frameRate) {
-    return ComposerService::getComposerService()->setOverrideFrameRate(uid, frameRate);
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->setOverrideFrameRate(uid, frameRate);
+    return statusTFromBinderStatus(status);
 }
 
 void SurfaceComposerClient::setAutoLowLatencyMode(const sp<IBinder>& display, bool on) {
@@ -2289,57 +2770,137 @@
 status_t SurfaceComposerClient::getCompositionPreference(
         ui::Dataspace* defaultDataspace, ui::PixelFormat* defaultPixelFormat,
         ui::Dataspace* wideColorGamutDataspace, ui::PixelFormat* wideColorGamutPixelFormat) {
-    return ComposerService::getComposerService()
-            ->getCompositionPreference(defaultDataspace, defaultPixelFormat,
-                                       wideColorGamutDataspace, wideColorGamutPixelFormat);
+    gui::CompositionPreference pref;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getCompositionPreference(&pref);
+    if (status.isOk()) {
+        *defaultDataspace = static_cast<ui::Dataspace>(pref.defaultDataspace);
+        *defaultPixelFormat = static_cast<ui::PixelFormat>(pref.defaultPixelFormat);
+        *wideColorGamutDataspace = static_cast<ui::Dataspace>(pref.wideColorGamutDataspace);
+        *wideColorGamutPixelFormat = static_cast<ui::PixelFormat>(pref.wideColorGamutPixelFormat);
+    }
+    return statusTFromBinderStatus(status);
 }
 
 bool SurfaceComposerClient::getProtectedContentSupport() {
     bool supported = false;
-    ComposerService::getComposerService()->getProtectedContentSupport(&supported);
+    ComposerServiceAIDL::getComposerService()->getProtectedContentSupport(&supported);
     return supported;
 }
 
 status_t SurfaceComposerClient::clearAnimationFrameStats() {
-    return ComposerService::getComposerService()->clearAnimationFrameStats();
+    binder::Status status = ComposerServiceAIDL::getComposerService()->clearAnimationFrameStats();
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::getAnimationFrameStats(FrameStats* outStats) {
-    return ComposerService::getComposerService()->getAnimationFrameStats(outStats);
+    gui::FrameStats stats;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getAnimationFrameStats(&stats);
+    if (status.isOk()) {
+        outStats->refreshPeriodNano = stats.refreshPeriodNano;
+        outStats->desiredPresentTimesNano.setCapacity(stats.desiredPresentTimesNano.size());
+        for (const auto& t : stats.desiredPresentTimesNano) {
+            outStats->desiredPresentTimesNano.add(t);
+        }
+        outStats->actualPresentTimesNano.setCapacity(stats.actualPresentTimesNano.size());
+        for (const auto& t : stats.actualPresentTimesNano) {
+            outStats->actualPresentTimesNano.add(t);
+        }
+        outStats->frameReadyTimesNano.setCapacity(stats.frameReadyTimesNano.size());
+        for (const auto& t : stats.frameReadyTimesNano) {
+            outStats->frameReadyTimesNano.add(t);
+        }
+    }
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::overrideHdrTypes(const sp<IBinder>& display,
                                                  const std::vector<ui::Hdr>& hdrTypes) {
-    return ComposerService::getComposerService()->overrideHdrTypes(display, hdrTypes);
+    std::vector<int32_t> hdrTypesVector;
+    hdrTypesVector.reserve(hdrTypes.size());
+    for (auto t : hdrTypes) {
+        hdrTypesVector.push_back(static_cast<int32_t>(t));
+    }
+
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->overrideHdrTypes(display, hdrTypesVector);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::onPullAtom(const int32_t atomId, std::string* outData,
                                            bool* success) {
-    return ComposerService::getComposerService()->onPullAtom(atomId, outData, success);
+    gui::PullAtomData pad;
+    binder::Status status = ComposerServiceAIDL::getComposerService()->onPullAtom(atomId, &pad);
+    if (status.isOk()) {
+        outData->assign(pad.data.begin(), pad.data.end());
+        *success = pad.success;
+    }
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::getDisplayedContentSamplingAttributes(const sp<IBinder>& display,
                                                                       ui::PixelFormat* outFormat,
                                                                       ui::Dataspace* outDataspace,
                                                                       uint8_t* outComponentMask) {
-    return ComposerService::getComposerService()
-            ->getDisplayedContentSamplingAttributes(display, outFormat, outDataspace,
-                                                    outComponentMask);
+    if (!outFormat || !outDataspace || !outComponentMask) {
+        return BAD_VALUE;
+    }
+
+    gui::ContentSamplingAttributes attrs;
+    binder::Status status = ComposerServiceAIDL::getComposerService()
+                                    ->getDisplayedContentSamplingAttributes(display, &attrs);
+    if (status.isOk()) {
+        *outFormat = static_cast<ui::PixelFormat>(attrs.format);
+        *outDataspace = static_cast<ui::Dataspace>(attrs.dataspace);
+        *outComponentMask = static_cast<uint8_t>(attrs.componentMask);
+    }
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::setDisplayContentSamplingEnabled(const sp<IBinder>& display,
                                                                  bool enable, uint8_t componentMask,
                                                                  uint64_t maxFrames) {
-    return ComposerService::getComposerService()->setDisplayContentSamplingEnabled(display, enable,
-                                                                                   componentMask,
-                                                                                   maxFrames);
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()
+                    ->setDisplayContentSamplingEnabled(display, enable,
+                                                       static_cast<int8_t>(componentMask),
+                                                       static_cast<int64_t>(maxFrames));
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::getDisplayedContentSample(const sp<IBinder>& display,
                                                           uint64_t maxFrames, uint64_t timestamp,
                                                           DisplayedFrameStats* outStats) {
-    return ComposerService::getComposerService()->getDisplayedContentSample(display, maxFrames,
-                                                                            timestamp, outStats);
+    if (!outStats) {
+        return BAD_VALUE;
+    }
+
+    gui::DisplayedFrameStats stats;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getDisplayedContentSample(display, maxFrames,
+                                                                                 timestamp, &stats);
+    if (status.isOk()) {
+        // convert gui::DisplayedFrameStats to ui::DisplayedFrameStats
+        outStats->numFrames = static_cast<uint64_t>(stats.numFrames);
+        outStats->component_0_sample.reserve(stats.component_0_sample.size());
+        for (const auto& s : stats.component_0_sample) {
+            outStats->component_0_sample.push_back(static_cast<uint64_t>(s));
+        }
+        outStats->component_1_sample.reserve(stats.component_1_sample.size());
+        for (const auto& s : stats.component_1_sample) {
+            outStats->component_1_sample.push_back(static_cast<uint64_t>(s));
+        }
+        outStats->component_2_sample.reserve(stats.component_2_sample.size());
+        for (const auto& s : stats.component_2_sample) {
+            outStats->component_2_sample.push_back(static_cast<uint64_t>(s));
+        }
+        outStats->component_3_sample.reserve(stats.component_3_sample.size());
+        for (const auto& s : stats.component_3_sample) {
+            outStats->component_3_sample.push_back(static_cast<uint64_t>(s));
+        }
+    }
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::isWideColorDisplay(const sp<IBinder>& display,
@@ -2347,39 +2908,55 @@
     binder::Status status =
             ComposerServiceAIDL::getComposerService()->isWideColorDisplay(display,
                                                                           outIsWideColorDisplay);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::addRegionSamplingListener(
         const Rect& samplingArea, const sp<IBinder>& stopLayerHandle,
         const sp<IRegionSamplingListener>& listener) {
-    return ComposerService::getComposerService()->addRegionSamplingListener(samplingArea,
-                                                                            stopLayerHandle,
-                                                                            listener);
+    gui::ARect rect;
+    rect.left = samplingArea.left;
+    rect.top = samplingArea.top;
+    rect.right = samplingArea.right;
+    rect.bottom = samplingArea.bottom;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->addRegionSamplingListener(rect,
+                                                                                 stopLayerHandle,
+                                                                                 listener);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::removeRegionSamplingListener(
         const sp<IRegionSamplingListener>& listener) {
-    return ComposerService::getComposerService()->removeRegionSamplingListener(listener);
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->removeRegionSamplingListener(listener);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::addFpsListener(int32_t taskId,
                                                const sp<gui::IFpsListener>& listener) {
-    return ComposerService::getComposerService()->addFpsListener(taskId, listener);
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->addFpsListener(taskId, listener);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::removeFpsListener(const sp<gui::IFpsListener>& listener) {
-    return ComposerService::getComposerService()->removeFpsListener(listener);
+    binder::Status status = ComposerServiceAIDL::getComposerService()->removeFpsListener(listener);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::addTunnelModeEnabledListener(
         const sp<gui::ITunnelModeEnabledListener>& listener) {
-    return ComposerService::getComposerService()->addTunnelModeEnabledListener(listener);
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->addTunnelModeEnabledListener(listener);
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::removeTunnelModeEnabledListener(
         const sp<gui::ITunnelModeEnabledListener>& listener) {
-    return ComposerService::getComposerService()->removeTunnelModeEnabledListener(listener);
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->removeTunnelModeEnabledListener(listener);
+    return statusTFromBinderStatus(status);
 }
 
 bool SurfaceComposerClient::getDisplayBrightnessSupport(const sp<IBinder>& displayToken) {
@@ -2395,7 +2972,7 @@
     binder::Status status =
             ComposerServiceAIDL::getComposerService()->setDisplayBrightness(displayToken,
                                                                             brightness);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::addHdrLayerInfoListener(
@@ -2403,7 +2980,7 @@
     binder::Status status =
             ComposerServiceAIDL::getComposerService()->addHdrLayerInfoListener(displayToken,
                                                                                listener);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::removeHdrLayerInfoListener(
@@ -2411,45 +2988,79 @@
     binder::Status status =
             ComposerServiceAIDL::getComposerService()->removeHdrLayerInfoListener(displayToken,
                                                                                   listener);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::notifyPowerBoost(int32_t boostId) {
     binder::Status status = ComposerServiceAIDL::getComposerService()->notifyPowerBoost(boostId);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
 }
 
 status_t SurfaceComposerClient::setGlobalShadowSettings(const half4& ambientColor,
                                                         const half4& spotColor, float lightPosY,
                                                         float lightPosZ, float lightRadius) {
-    return ComposerService::getComposerService()->setGlobalShadowSettings(ambientColor, spotColor,
-                                                                          lightPosY, lightPosZ,
-                                                                          lightRadius);
+    gui::Color ambientColorG, spotColorG;
+    ambientColorG.r = ambientColor.r;
+    ambientColorG.g = ambientColor.g;
+    ambientColorG.b = ambientColor.b;
+    ambientColorG.a = ambientColor.a;
+    spotColorG.r = spotColor.r;
+    spotColorG.g = spotColor.g;
+    spotColorG.b = spotColor.b;
+    spotColorG.a = spotColor.a;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->setGlobalShadowSettings(ambientColorG,
+                                                                               spotColorG,
+                                                                               lightPosY, lightPosZ,
+                                                                               lightRadius);
+    return statusTFromBinderStatus(status);
 }
 
 std::optional<DisplayDecorationSupport> SurfaceComposerClient::getDisplayDecorationSupport(
         const sp<IBinder>& displayToken) {
+    std::optional<gui::DisplayDecorationSupport> gsupport;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getDisplayDecorationSupport(displayToken,
+                                                                                   &gsupport);
     std::optional<DisplayDecorationSupport> support;
-    ComposerService::getComposerService()->getDisplayDecorationSupport(displayToken, &support);
+    if (status.isOk() && gsupport.has_value()) {
+        support.emplace(DisplayDecorationSupport{
+                .format = static_cast<aidl::android::hardware::graphics::common::PixelFormat>(
+                        gsupport->format),
+                .alphaInterpretation =
+                        static_cast<aidl::android::hardware::graphics::common::AlphaInterpretation>(
+                                gsupport->alphaInterpretation)});
+    }
     return support;
 }
 
-int SurfaceComposerClient::getGPUContextPriority() {
-    return ComposerService::getComposerService()->getGPUContextPriority();
+int SurfaceComposerClient::getGpuContextPriority() {
+    int priority;
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getGpuContextPriority(&priority);
+    if (!status.isOk()) {
+        status_t err = statusTFromBinderStatus(status);
+        ALOGE("getGpuContextPriority failed to read data: %s (%d)", strerror(-err), err);
+        return 0;
+    }
+    return priority;
 }
 
 status_t SurfaceComposerClient::addWindowInfosListener(
         const sp<WindowInfosListener>& windowInfosListener,
         std::pair<std::vector<gui::WindowInfo>, std::vector<gui::DisplayInfo>>* outInitialInfo) {
     return WindowInfosListenerReporter::getInstance()
-            ->addWindowInfosListener(windowInfosListener, ComposerService::getComposerService(),
+            ->addWindowInfosListener(windowInfosListener, ComposerServiceAIDL::getComposerService(),
                                      outInitialInfo);
 }
 
 status_t SurfaceComposerClient::removeWindowInfosListener(
         const sp<WindowInfosListener>& windowInfosListener) {
     return WindowInfosListenerReporter::getInstance()
-            ->removeWindowInfosListener(windowInfosListener, ComposerService::getComposerService());
+            ->removeWindowInfosListener(windowInfosListener,
+                                        ComposerServiceAIDL::getComposerService());
 }
 
 // ----------------------------------------------------------------------------
@@ -2460,7 +3071,7 @@
     if (s == nullptr) return NO_INIT;
 
     binder::Status status = s->captureDisplay(captureArgs, captureListener);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
 }
 
 status_t ScreenshotClient::captureDisplay(DisplayId displayId,
@@ -2469,7 +3080,7 @@
     if (s == nullptr) return NO_INIT;
 
     binder::Status status = s->captureDisplayById(displayId.value, captureListener);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
 }
 
 status_t ScreenshotClient::captureLayers(const LayerCaptureArgs& captureArgs,
@@ -2478,7 +3089,7 @@
     if (s == nullptr) return NO_INIT;
 
     binder::Status status = s->captureLayers(captureArgs, captureListener);
-    return status.transactionError();
+    return statusTFromBinderStatus(status);
 }
 
 // ---------------------------------------------------------------------------------
@@ -2501,6 +3112,7 @@
     while (true) {
         {
             std::unique_lock<std::mutex> lock(mMutex);
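+            // Tells Clang thread-safety analysis that mMutex is held in this scope.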
+            base::ScopedLockAssertion assumeLocked(mMutex);
             callbackInfos = std::move(mCallbackInfos);
             mCallbackInfos = {};
         }
@@ -2513,6 +3125,7 @@
 
         {
             std::unique_lock<std::mutex> lock(mMutex);
+            base::ScopedLockAssertion assumeLocked(mMutex);
             if (mCallbackInfos.size() == 0) {
                 mReleaseCallbackPending.wait(lock);
             }
diff --git a/libs/gui/SurfaceControl.cpp b/libs/gui/SurfaceControl.cpp
index 654fb33..c5f9c38 100644
--- a/libs/gui/SurfaceControl.cpp
+++ b/libs/gui/SurfaceControl.cpp
@@ -26,6 +26,7 @@
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/Log.h>
+#include <utils/Looper.h>
 #include <utils/threads.h>
 
 #include <binder/IPCThreadState.h>
@@ -34,8 +35,9 @@
 #include <ui/Rect.h>
 #include <ui/StaticDisplayInfo.h>
 
-#include <gui/BufferQueueCore.h>
 #include <gui/BLASTBufferQueue.h>
+#include <gui/BufferQueueCore.h>
+#include <gui/Choreographer.h>
 #include <gui/ISurfaceComposer.h>
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
@@ -49,13 +51,12 @@
 // ============================================================================
 
 SurfaceControl::SurfaceControl(const sp<SurfaceComposerClient>& client, const sp<IBinder>& handle,
-                               const sp<IGraphicBufferProducer>& gbp, int32_t layerId,
-                               uint32_t w, uint32_t h, PixelFormat format, uint32_t transform,
-                               uint32_t flags)
+                               int32_t layerId, const std::string& name, uint32_t w, uint32_t h,
+                               PixelFormat format, uint32_t transform, uint32_t flags)
       : mClient(client),
         mHandle(handle),
-        mGraphicBufferProducer(gbp),
         mLayerId(layerId),
+        mName(name),
         mTransformHint(transform),
         mWidth(w),
         mHeight(h),
@@ -65,9 +66,9 @@
 SurfaceControl::SurfaceControl(const sp<SurfaceControl>& other) {
     mClient = other->mClient;
     mHandle = other->mHandle;
-    mGraphicBufferProducer = other->mGraphicBufferProducer;
     mTransformHint = other->mTransformHint;
     mLayerId = other->mLayerId;
+    mName = other->mName;
     mWidth = other->mWidth;
     mHeight = other->mHeight;
     mFormat = other->mFormat;
@@ -165,11 +166,11 @@
 
 void SurfaceControl::updateDefaultBufferSize(uint32_t width, uint32_t height) {
     Mutex::Autolock _l(mLock);
-    mWidth = width; mHeight = height;
+    mWidth = width;
+    mHeight = height;
     if (mBbq) {
         mBbq->update(mBbqChild, width, height, mFormat);
     }
-
 }
 
 sp<IBinder> SurfaceControl::getLayerStateHandle() const
@@ -188,6 +189,28 @@
     return mLayerId;
 }
 
+const std::string& SurfaceControl::getName() const {
+    return mName;
+}
+
+std::shared_ptr<Choreographer> SurfaceControl::getChoreographer() {
+    if (mChoreographer) {
+        return mChoreographer;
+    }
+    sp<Looper> looper = Looper::getForThread();
+    if (!looper.get()) {
+        ALOGE("%s: No looper prepared for thread", __func__);
+        return nullptr;
+    }
+    mChoreographer = std::make_shared<Choreographer>(looper, getHandle());
+    status_t result = mChoreographer->initialize();
+    if (result != OK) {
+        ALOGE("Failed to initialize choreographer");
+        mChoreographer = nullptr;
+    }
+    return mChoreographer;
+}
+
 sp<IGraphicBufferProducer> SurfaceControl::getIGraphicBufferProducer()
 {
     getSurface();
@@ -215,6 +238,7 @@
     SAFE_PARCEL(parcel.writeStrongBinder, ISurfaceComposerClient::asBinder(mClient->getClient()));
     SAFE_PARCEL(parcel.writeStrongBinder, mHandle);
     SAFE_PARCEL(parcel.writeInt32, mLayerId);
+    SAFE_PARCEL(parcel.writeUtf8AsUtf16, mName);
     SAFE_PARCEL(parcel.writeUint32, mTransformHint);
     SAFE_PARCEL(parcel.writeUint32, mWidth);
     SAFE_PARCEL(parcel.writeUint32, mHeight);
@@ -228,6 +252,7 @@
     sp<IBinder> client;
     sp<IBinder> handle;
     int32_t layerId;
+    std::string layerName;
     uint32_t transformHint;
     uint32_t width;
     uint32_t height;
@@ -236,18 +261,17 @@
     SAFE_PARCEL(parcel.readStrongBinder, &client);
     SAFE_PARCEL(parcel.readStrongBinder, &handle);
     SAFE_PARCEL(parcel.readInt32, &layerId);
+    SAFE_PARCEL(parcel.readUtf8FromUtf16, &layerName);
     SAFE_PARCEL(parcel.readUint32, &transformHint);
     SAFE_PARCEL(parcel.readUint32, &width);
     SAFE_PARCEL(parcel.readUint32, &height);
     SAFE_PARCEL(parcel.readUint32, &format);
 
     // We aren't the original owner of the surface.
-    *outSurfaceControl =
-            new SurfaceControl(new SurfaceComposerClient(
-                                       interface_cast<ISurfaceComposerClient>(client)),
-                               handle.get(), nullptr, layerId,
-                               width, height, format,
-                               transformHint);
+    *outSurfaceControl = new SurfaceControl(new SurfaceComposerClient(
+                                                    interface_cast<ISurfaceComposerClient>(client)),
+                                            handle.get(), layerId, layerName, width, height, format,
+                                            transformHint);
 
     return NO_ERROR;
 }
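SurfaceControl::getChoreographer() above lazily creates a Choreographer bound to the calling thread's Looper, so the caller must have a Looper prepared on that thread first. A hedged caller sketch under that assumption (frame-callback registration is omitted, since the Choreographer callback API is outside this diff):

    #include <gui/SurfaceControl.h>
    #include <utils/Looper.h>

    void attachChoreographer(const android::sp<android::SurfaceControl>& sc) {
        // getChoreographer() fails with "No looper prepared for thread" otherwise.
        if (android::Looper::getForThread() == nullptr) {
            android::Looper::prepare(0 /* opts */);
        }
        auto choreographer = sc->getChoreographer();
        if (choreographer == nullptr) {
            // Initialization failed; see SurfaceControl::getChoreographer above.
            return;
        }
        // Frame callbacks would be registered here.
    }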
diff --git a/libs/gui/SyncFeatures.cpp b/libs/gui/SyncFeatures.cpp
index 1a8fc1a..2d863c2 100644
--- a/libs/gui/SyncFeatures.cpp
+++ b/libs/gui/SyncFeatures.cpp
@@ -36,8 +36,12 @@
         mHasFenceSync(false),
         mHasWaitSync(false) {
     EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
-    // This can only be called after EGL has been initialized; otherwise the
-    // check below will abort.
+    // eglQueryString can only be called after EGL has been initialized;
+    // otherwise the check below will abort.  If RenderEngine is using SkiaVk,
+    // EGL will not have been initialized.  There's no problem with initializing
+    // it again here (it is ref counted), and then terminating it later.
+    EGLBoolean initialized = eglInitialize(dpy, nullptr, nullptr);
+    LOG_ALWAYS_FATAL_IF(!initialized, "eglInitialize failed");
     const char* exts = eglQueryString(dpy, EGL_EXTENSIONS);
     LOG_ALWAYS_FATAL_IF(exts == nullptr, "eglQueryString failed");
     if (strstr(exts, "EGL_ANDROID_native_fence_sync")) {
@@ -63,6 +67,8 @@
         mString.append(" EGL_KHR_wait_sync");
     }
     mString.append("]");
+    // Terminate EGL to match the eglInitialize above
+    eglTerminate(dpy);
 }
 
 bool SyncFeatures::useNativeFenceSync() const {
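The SyncFeatures change above brackets the extension query with eglInitialize/eglTerminate because EGL initialization is reference counted and, when RenderEngine uses SkiaVk, nothing else has initialized EGL yet. An illustrative standalone version of the same query pattern (not the libgui code):

    #include <cstring>
    #include <EGL/egl.h>

    // Returns true if the default EGL display advertises the given extension.
    // eglInitialize/eglTerminate are reference counted, so this is safe even
    // if another component has already initialized the display.
    bool hasEglExtension(const char* name) {
        EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
        if (eglInitialize(dpy, nullptr, nullptr) != EGL_TRUE) {
            return false;
        }
        const char* exts = eglQueryString(dpy, EGL_EXTENSIONS);
        const bool found = exts != nullptr && strstr(exts, name) != nullptr;
        eglTerminate(dpy);  // balance the eglInitialize above
        return found;
    }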
diff --git a/libs/gui/TEST_MAPPING b/libs/gui/TEST_MAPPING
index 1c43530..9415035 100644
--- a/libs/gui/TEST_MAPPING
+++ b/libs/gui/TEST_MAPPING
@@ -3,5 +3,11 @@
     {
       "path": "frameworks/native/libs/nativewindow"
     }
+  ],
+  "postsubmit": [
+    {
+      // TODO(257123981): move this to presubmit after dealing with existing breakages.
+      "name": "libgui_test"
+    }
   ]
 }
diff --git a/libs/gui/TransactionTracing.cpp b/libs/gui/TransactionTracing.cpp
deleted file mode 100644
index eedc3df..0000000
--- a/libs/gui/TransactionTracing.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "gui/TransactionTracing.h"
-#include "gui/ISurfaceComposer.h"
-
-#include <private/gui/ComposerService.h>
-
-namespace android {
-
-sp<TransactionTraceListener> TransactionTraceListener::sInstance = nullptr;
-std::mutex TransactionTraceListener::sMutex;
-
-TransactionTraceListener::TransactionTraceListener() {}
-
-sp<TransactionTraceListener> TransactionTraceListener::getInstance() {
-    const std::lock_guard<std::mutex> lock(sMutex);
-
-    if (sInstance == nullptr) {
-        sInstance = new TransactionTraceListener;
-
-        sp<ISurfaceComposer> sf(ComposerService::getComposerService());
-        sf->addTransactionTraceListener(sInstance);
-    }
-
-    return sInstance;
-}
-
-binder::Status TransactionTraceListener::onToggled(bool enabled) {
-    ALOGD("TransactionTraceListener: onToggled listener called");
-    mTracingEnabled = enabled;
-
-    return binder::Status::ok();
-}
-
-bool TransactionTraceListener::isTracingEnabled() {
-    return mTracingEnabled;
-}
-
-} // namespace android
\ No newline at end of file
diff --git a/libs/gui/VsyncEventData.cpp b/libs/gui/VsyncEventData.cpp
index 23f0921..8e00c2f 100644
--- a/libs/gui/VsyncEventData.cpp
+++ b/libs/gui/VsyncEventData.cpp
@@ -23,6 +23,9 @@
 
 namespace android::gui {
 
+static_assert(VsyncEventData::kFrameTimelinesCapacity == 7,
+              "Must update value in DisplayEventReceiver.java#FRAME_TIMELINES_CAPACITY (and here)");
+
 int64_t VsyncEventData::preferredVsyncId() const {
     return frameTimelines[preferredFrameTimelineIndex].vsyncId;
 }
@@ -43,11 +46,15 @@
 
     SAFE_PARCEL(parcel->readInt64, &vsync.frameInterval);
 
-    uint64_t uintPreferredFrameTimelineIndex;
-    SAFE_PARCEL(parcel->readUint64, &uintPreferredFrameTimelineIndex);
+    uint32_t uintPreferredFrameTimelineIndex;
+    SAFE_PARCEL(parcel->readUint32, &uintPreferredFrameTimelineIndex);
     vsync.preferredFrameTimelineIndex = static_cast<size_t>(uintPreferredFrameTimelineIndex);
 
-    for (int i = 0; i < VsyncEventData::kFrameTimelinesLength; i++) {
+    uint32_t uintFrameTimelinesLength;
+    SAFE_PARCEL(parcel->readUint32, &uintFrameTimelinesLength);
+    vsync.frameTimelinesLength = static_cast<size_t>(uintFrameTimelinesLength);
+
+    for (size_t i = 0; i < vsync.frameTimelinesLength; i++) {
         SAFE_PARCEL(parcel->readInt64, &vsync.frameTimelines[i].vsyncId);
         SAFE_PARCEL(parcel->readInt64, &vsync.frameTimelines[i].deadlineTimestamp);
         SAFE_PARCEL(parcel->readInt64, &vsync.frameTimelines[i].expectedPresentationTime);
@@ -57,8 +64,9 @@
 }
 status_t ParcelableVsyncEventData::writeToParcel(Parcel* parcel) const {
     SAFE_PARCEL(parcel->writeInt64, vsync.frameInterval);
-    SAFE_PARCEL(parcel->writeUint64, vsync.preferredFrameTimelineIndex);
-    for (int i = 0; i < VsyncEventData::kFrameTimelinesLength; i++) {
+    SAFE_PARCEL(parcel->writeUint32, vsync.preferredFrameTimelineIndex);
+    SAFE_PARCEL(parcel->writeUint32, vsync.frameTimelinesLength);
+    for (size_t i = 0; i < vsync.frameTimelinesLength; i++) {
         SAFE_PARCEL(parcel->writeInt64, vsync.frameTimelines[i].vsyncId);
         SAFE_PARCEL(parcel->writeInt64, vsync.frameTimelines[i].deadlineTimestamp);
         SAFE_PARCEL(parcel->writeInt64, vsync.frameTimelines[i].expectedPresentationTime);
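The VsyncEventData parcel format above becomes variable length: the preferred index and the number of valid timelines are written as uint32, and only frameTimelinesLength entries are serialized rather than the fixed capacity. A small consumer sketch under that assumption (field names taken from the hunk; the bounds check is defensive and not part of the original code):

    #include <gui/VsyncEventData.h>

    // Pick the vsync id of the preferred frame timeline, iterating only over
    // the entries that were actually parceled (frameTimelinesLength).
    int64_t preferredVsyncIdOf(const android::gui::VsyncEventData& vsync) {
        size_t index = vsync.preferredFrameTimelineIndex;
        if (index >= vsync.frameTimelinesLength) {
            index = 0;  // defensive fallback for this sketch only
        }
        return vsync.frameTimelines[index].vsyncId;
    }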
diff --git a/libs/gui/WindowInfo.cpp b/libs/gui/WindowInfo.cpp
index 4e966d1..6df9ff1 100644
--- a/libs/gui/WindowInfo.cpp
+++ b/libs/gui/WindowInfo.cpp
@@ -76,7 +76,7 @@
             info.inputConfig == inputConfig && info.displayId == displayId &&
             info.replaceTouchableRegionWithCrop == replaceTouchableRegionWithCrop &&
             info.applicationInfo == applicationInfo && info.layoutParamsType == layoutParamsType &&
-            info.layoutParamsFlags == layoutParamsFlags && info.isClone == isClone;
+            info.layoutParamsFlags == layoutParamsFlags;
 }
 
 status_t WindowInfo::writeToParcel(android::Parcel* parcel) const {
@@ -124,8 +124,8 @@
         parcel->write(touchableRegion) ?:
         parcel->writeBool(replaceTouchableRegionWithCrop) ?:
         parcel->writeStrongBinder(touchableRegionCropHandle.promote()) ?:
-        parcel->writeStrongBinder(windowToken) ?:
-        parcel->writeBool(isClone);
+        parcel->writeStrongBinder(windowToken) ?:
+        parcel->writeStrongBinder(focusTransferTarget);
     // clang-format on
     return status;
 }
@@ -177,7 +177,8 @@
         parcel->readBool(&replaceTouchableRegionWithCrop) ?:
         parcel->readNullableStrongBinder(&touchableRegionCropHandleSp) ?:
         parcel->readNullableStrongBinder(&windowToken) ?:
-        parcel->readBool(&isClone);
+        parcel->readNullableStrongBinder(&focusTransferTarget);
+
     // clang-format on
 
     if (status != OK) {
diff --git a/libs/gui/WindowInfosListenerReporter.cpp b/libs/gui/WindowInfosListenerReporter.cpp
index cfc7dbc..76e7b6e 100644
--- a/libs/gui/WindowInfosListenerReporter.cpp
+++ b/libs/gui/WindowInfosListenerReporter.cpp
@@ -14,8 +14,10 @@
  * limitations under the License.
  */
 
-#include <gui/ISurfaceComposer.h>
+#include <android/gui/ISurfaceComposer.h>
+#include <gui/AidlStatusUtil.h>
 #include <gui/WindowInfosListenerReporter.h>
+#include "gui/WindowInfosUpdate.h"
 
 namespace android {
 
@@ -23,6 +25,7 @@
 using gui::IWindowInfosReportedListener;
 using gui::WindowInfo;
 using gui::WindowInfosListener;
+using gui::aidl_utils::statusTFromBinderStatus;
 
 sp<WindowInfosListenerReporter> WindowInfosListenerReporter::getInstance() {
     static sp<WindowInfosListenerReporter> sInstance = new WindowInfosListenerReporter;
@@ -31,13 +34,14 @@
 
 status_t WindowInfosListenerReporter::addWindowInfosListener(
         const sp<WindowInfosListener>& windowInfosListener,
-        const sp<ISurfaceComposer>& surfaceComposer,
+        const sp<gui::ISurfaceComposer>& surfaceComposer,
         std::pair<std::vector<gui::WindowInfo>, std::vector<gui::DisplayInfo>>* outInitialInfo) {
     status_t status = OK;
     {
         std::scoped_lock lock(mListenersMutex);
         if (mWindowInfosListeners.empty()) {
-            status = surfaceComposer->addWindowInfosListener(this);
+            binder::Status s = surfaceComposer->addWindowInfosListener(this);
+            status = statusTFromBinderStatus(s);
         }
 
         if (status == OK) {
@@ -55,12 +59,17 @@
 
 status_t WindowInfosListenerReporter::removeWindowInfosListener(
         const sp<WindowInfosListener>& windowInfosListener,
-        const sp<ISurfaceComposer>& surfaceComposer) {
+        const sp<gui::ISurfaceComposer>& surfaceComposer) {
     status_t status = OK;
     {
         std::scoped_lock lock(mListenersMutex);
+        if (mWindowInfosListeners.find(windowInfosListener) == mWindowInfosListeners.end()) {
+            return status;
+        }
+
         if (mWindowInfosListeners.size() == 1) {
-            status = surfaceComposer->removeWindowInfosListener(this);
+            binder::Status s = surfaceComposer->removeWindowInfosListener(this);
+            status = statusTFromBinderStatus(s);
             // Clear the last stored state since we're disabling updates and don't want to hold
             // stale values
             mLastWindowInfos.clear();
@@ -76,9 +85,10 @@
 }
 
 binder::Status WindowInfosListenerReporter::onWindowInfosChanged(
-        const std::vector<WindowInfo>& windowInfos, const std::vector<DisplayInfo>& displayInfos,
+        const gui::WindowInfosUpdate& update,
         const sp<IWindowInfosReportedListener>& windowInfosReportedListener) {
-    std::unordered_set<sp<WindowInfosListener>, SpHash<WindowInfosListener>> windowInfosListeners;
+    std::unordered_set<sp<WindowInfosListener>, gui::SpHash<WindowInfosListener>>
+            windowInfosListeners;
 
     {
         std::scoped_lock lock(mListenersMutex);
@@ -86,12 +96,12 @@
             windowInfosListeners.insert(listener);
         }
 
-        mLastWindowInfos = windowInfos;
-        mLastDisplayInfos = displayInfos;
+        mLastWindowInfos = update.windowInfos;
+        mLastDisplayInfos = update.displayInfos;
     }
 
     for (auto listener : windowInfosListeners) {
-        listener->onWindowInfosChanged(windowInfos, displayInfos);
+        listener->onWindowInfosChanged(update);
     }
 
     if (windowInfosReportedListener) {
@@ -101,7 +111,7 @@
     return binder::Status::ok();
 }
 
-void WindowInfosListenerReporter::reconnect(const sp<ISurfaceComposer>& composerService) {
+void WindowInfosListenerReporter::reconnect(const sp<gui::ISurfaceComposer>& composerService) {
     std::scoped_lock lock(mListenersMutex);
     if (!mWindowInfosListeners.empty()) {
         composerService->addWindowInfosListener(this);
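With this change the reporter hands listeners a single gui::WindowInfosUpdate instead of separate window and display vectors. A hedged listener sketch under that assumption (the override signature is inferred from the calls above; registration would go through SurfaceComposerClient::addWindowInfosListener as in the first hunk of this file set):

    #define LOG_TAG "WindowInfosSketch"
    #include <inttypes.h>
    #include <gui/WindowInfosListener.h>
    #include <gui/WindowInfosUpdate.h>
    #include <log/log.h>

    // Field names (windowInfos, displayInfos, vsyncId) come from
    // libs/gui/WindowInfosUpdate.cpp below.
    struct LoggingWindowInfosListener : public android::gui::WindowInfosListener {
        void onWindowInfosChanged(const android::gui::WindowInfosUpdate& update) override {
            ALOGD("update: %zu windows, %zu displays, vsyncId=%" PRId64,
                  update.windowInfos.size(), update.displayInfos.size(), update.vsyncId);
        }
    };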
diff --git a/libs/gui/WindowInfosUpdate.cpp b/libs/gui/WindowInfosUpdate.cpp
new file mode 100644
index 0000000..38ae5ef
--- /dev/null
+++ b/libs/gui/WindowInfosUpdate.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gui/WindowInfosUpdate.h>
+#include <private/gui/ParcelUtils.h>
+
+namespace android::gui {
+
+status_t WindowInfosUpdate::readFromParcel(const android::Parcel* parcel) {
+    if (parcel == nullptr) {
+        ALOGE("%s: Null parcel", __func__);
+        return BAD_VALUE;
+    }
+
+    uint32_t size;
+
+    SAFE_PARCEL(parcel->readUint32, &size);
+    windowInfos.reserve(size);
+    for (uint32_t i = 0; i < size; i++) {
+        windowInfos.push_back({});
+        SAFE_PARCEL(windowInfos.back().readFromParcel, parcel);
+    }
+
+    SAFE_PARCEL(parcel->readUint32, &size);
+    displayInfos.reserve(size);
+    for (uint32_t i = 0; i < size; i++) {
+        displayInfos.push_back({});
+        SAFE_PARCEL(displayInfos.back().readFromParcel, parcel);
+    }
+
+    SAFE_PARCEL(parcel->readInt64, &vsyncId);
+    SAFE_PARCEL(parcel->readInt64, &timestamp);
+
+    return OK;
+}
+
+status_t WindowInfosUpdate::writeToParcel(android::Parcel* parcel) const {
+    if (parcel == nullptr) {
+        ALOGE("%s: Null parcel", __func__);
+        return BAD_VALUE;
+    }
+
+    SAFE_PARCEL(parcel->writeUint32, static_cast<uint32_t>(windowInfos.size()));
+    for (auto& windowInfo : windowInfos) {
+        SAFE_PARCEL(windowInfo.writeToParcel, parcel);
+    }
+
+    SAFE_PARCEL(parcel->writeUint32, static_cast<uint32_t>(displayInfos.size()));
+    for (auto& displayInfo : displayInfos) {
+        SAFE_PARCEL(displayInfo.writeToParcel, parcel);
+    }
+
+    SAFE_PARCEL(parcel->writeInt64, vsyncId);
+    SAFE_PARCEL(parcel->writeInt64, timestamp);
+
+    return OK;
+}
+
+} // namespace android::gui
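A quick way to sanity-check a new parcelable like this is a write/read round trip. The sketch below assumes WindowInfosUpdate is default constructible and compares only sizes and the scalar fields; it is a test-style illustration, not part of the change.

    #include <binder/Parcel.h>
    #include <gui/WindowInfosUpdate.h>

    bool roundTripsCleanly(const android::gui::WindowInfosUpdate& in) {
        android::Parcel parcel;
        if (in.writeToParcel(&parcel) != android::OK) return false;
        parcel.setDataPosition(0);  // rewind before reading back
        android::gui::WindowInfosUpdate out;
        if (out.readFromParcel(&parcel) != android::OK) return false;
        return out.windowInfos.size() == in.windowInfos.size() &&
               out.displayInfos.size() == in.displayInfos.size() &&
               out.vsyncId == in.vsyncId && out.timestamp == in.timestamp;
    }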
diff --git a/libs/gui/aidl/android/gui/Rect.aidl b/libs/gui/aidl/android/gui/ARect.aidl
similarity index 98%
rename from libs/gui/aidl/android/gui/Rect.aidl
rename to libs/gui/aidl/android/gui/ARect.aidl
index 1b13761..5785907 100644
--- a/libs/gui/aidl/android/gui/Rect.aidl
+++ b/libs/gui/aidl/android/gui/ARect.aidl
@@ -20,7 +20,7 @@
 // TODO(b/221473398):
 // use hardware/interfaces/graphics/common/aidl/android/hardware/graphics/common/Rect.aidl
 /** @hide */
-parcelable Rect {
+parcelable ARect {
     /// Minimum X coordinate of the rectangle.
     int left;
 
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/CachingHint.aidl
similarity index 60%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/CachingHint.aidl
index 6929a6c..b35c795 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/CachingHint.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,17 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
+/*
+ * Hint for configuring caching behavior for a layer
  * @hide
  */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+@Backing(type="int")
+enum CachingHint {
+    // Caching is disabled. A layer may explicitly disable caching to
+    // improve image quality for some scenes.
+    Disabled = 0,
+    // Caching is enabled. A layer is cacheable by default.
+    Enabled = 1
 }
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/Color.aidl
similarity index 64%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/Color.aidl
index 6929a6c..12af066 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/Color.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,12 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+/** @hide */
+parcelable Color {
+    float r;
+    float g;
+    float b;
+    float a;
 }
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/CompositionPreference.aidl
similarity index 63%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/CompositionPreference.aidl
index 6929a6c..b615824 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/CompositionPreference.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,12 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+/** @hide */
+parcelable CompositionPreference {
+    int /*ui::Dataspace*/   defaultDataspace;
+    int /*ui::PixelFormat*/ defaultPixelFormat;
+    int /*ui::Dataspace*/   wideColorGamutDataspace;
+    int /*ui::PixelFormat*/ wideColorGamutPixelFormat;
 }
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/ContentSamplingAttributes.aidl
similarity index 64%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/ContentSamplingAttributes.aidl
index 6929a6c..5d913b1 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/ContentSamplingAttributes.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,11 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+/** @hide */
+parcelable ContentSamplingAttributes {
+    int /*ui::PixelFormat*/ format;
+    int /*ui::Dataspace*/ dataspace;
+    byte componentMask;
 }
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/CreateSurfaceResult.aidl
similarity index 64%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/CreateSurfaceResult.aidl
index 6929a6c..eea12dc 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/CreateSurfaceResult.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,12 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+/** @hide */
+parcelable CreateSurfaceResult {
+    IBinder handle;
+    int layerId;
+    String layerName;
+    int transformHint;
 }
diff --git a/libs/gui/aidl/android/gui/DeviceProductInfo.aidl b/libs/gui/aidl/android/gui/DeviceProductInfo.aidl
new file mode 100644
index 0000000..98404cf
--- /dev/null
+++ b/libs/gui/aidl/android/gui/DeviceProductInfo.aidl
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.gui;
+
+// Product-specific information about the display or the directly connected device on the
+// display chain. For example, if the display is transitively connected, this field may contain
+// product information about the intermediate device.
+
+/** @hide */
+parcelable DeviceProductInfo {
+    parcelable ModelYear {
+        int year;
+    }
+
+    parcelable ManufactureYear {
+        ModelYear modelYear;
+    }
+
+    parcelable ManufactureWeekAndYear {
+        ManufactureYear manufactureYear;
+
+        // 1-based week number. Week numbering may not be consistent between manufacturers.
+        int week;
+    }
+
+    union ManufactureOrModelDate {
+        ModelYear modelYear;
+        ManufactureYear manufactureYear;
+        ManufactureWeekAndYear manufactureWeekAndYear;
+    }
+
+    // Display name.
+    @utf8InCpp String name;
+
+    // NULL-terminated Manufacturer plug and play ID.
+    byte[] manufacturerPnpId;
+
+    // Manufacturer product ID.
+    @utf8InCpp String productId;
+
+    ManufactureOrModelDate manufactureOrModelDate;
+
+    byte[] relativeAddress;
+}
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/DisplayConnectionType.aidl
similarity index 64%
rename from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
rename to libs/gui/aidl/android/gui/DisplayConnectionType.aidl
index 6929a6c..72c4ede 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/DisplayConnectionType.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,11 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+/** @hide */
+@Backing(type="int")
+enum DisplayConnectionType {
+   Internal = 0,
+   External = 1
 }
diff --git a/libs/input/android/os/BlockUntrustedTouchesMode.aidl b/libs/gui/aidl/android/gui/DisplayDecorationSupport.aidl
similarity index 60%
rename from libs/input/android/os/BlockUntrustedTouchesMode.aidl
rename to libs/gui/aidl/android/gui/DisplayDecorationSupport.aidl
index 9504e99..0230496 100644
--- a/libs/input/android/os/BlockUntrustedTouchesMode.aidl
+++ b/libs/gui/aidl/android/gui/DisplayDecorationSupport.aidl
@@ -1,5 +1,5 @@
 /**
- * Copyright (c) 2020, The Android Open Source Project
+ * Copyright (c) 2022, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,22 +14,12 @@
  * limitations under the License.
  */
 
-package android.os;
+package android.gui;
 
-
-/**
-  * Block untrusted touches feature mode.
-  *
-  * @hide
-  */
-@Backing(type="int")
-enum BlockUntrustedTouchesMode {
-    /** Feature is off. */
-    DISABLED,
-
-    /** Untrusted touches are flagged but not blocked. */
-    PERMISSIVE,
-
-    /** Untrusted touches are blocked. */
-    BLOCK
+// TODO(b/222607970):
+// remove this aidl and use android.hardware.graphics.common.DisplayDecorationSupport
+/** @hide */
+parcelable DisplayDecorationSupport {
+    int format;
+    int alphaInterpretation;
 }
diff --git a/libs/gui/aidl/android/gui/Rect.aidl b/libs/gui/aidl/android/gui/DisplayMode.aidl
similarity index 60%
copy from libs/gui/aidl/android/gui/Rect.aidl
copy to libs/gui/aidl/android/gui/DisplayMode.aidl
index 1b13761..ce30426 100644
--- a/libs/gui/aidl/android/gui/Rect.aidl
+++ b/libs/gui/aidl/android/gui/DisplayMode.aidl
@@ -16,20 +16,22 @@
 
 package android.gui;
 
-// copied from libs/arect/include/android/rect.h
-// TODO(b/221473398):
-// use hardware/interfaces/graphics/common/aidl/android/hardware/graphics/common/Rect.aidl
+import android.gui.Size;
+
+// Mode supported by physical display.
+// Make sure to sync with libui DisplayMode.h
+
 /** @hide */
-parcelable Rect {
-    /// Minimum X coordinate of the rectangle.
-    int left;
+parcelable DisplayMode {
+    int id;
+    Size resolution;
+    float xDpi = 0.0f;
+    float yDpi = 0.0f;
+    int[] supportedHdrTypes;
 
-    /// Minimum Y coordinate of the rectangle.
-    int top;
-
-    /// Maximum X coordinate of the rectangle.
-    int right;
-
-    /// Maximum Y coordinate of the rectangle.
-    int bottom;
+    float refreshRate = 0.0f;
+    long appVsyncOffset = 0;
+    long sfVsyncOffset = 0;
+    long presentationDeadline = 0;
+    int group = -1;
 }
diff --git a/libs/gui/aidl/android/gui/DisplayModeSpecs.aidl b/libs/gui/aidl/android/gui/DisplayModeSpecs.aidl
new file mode 100644
index 0000000..af138c7
--- /dev/null
+++ b/libs/gui/aidl/android/gui/DisplayModeSpecs.aidl
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.gui;
+
+/** @hide */
+parcelable DisplayModeSpecs {
+    /**
+     * Defines the refresh rates ranges that should be used by SF.
+     */
+    parcelable RefreshRateRanges {
+        /**
+         * Defines a range of refresh rates.
+         */
+        parcelable RefreshRateRange {
+            float min;
+            float max;
+        }
+
+        /**
+         *  The range of refresh rates that the display should run at.
+         */
+        RefreshRateRange physical;
+
+        /**
+         *  The range of refresh rates that apps should render at.
+         */
+        RefreshRateRange render;
+    }
+
+    /**
+     * Base mode ID. This is what the system defaults to for all other settings, or
+     * if the refresh rate range is not available.
+     */
+    int defaultMode;
+
+    /**
+     * If true this will allow switching between modes in different display configuration
+     * groups. This way the user may see visual interruptions when the display mode changes.
+     */
+
+    boolean allowGroupSwitching;
+
+    /**
+     * The primary physical and render refresh rate ranges represent DisplayManager's general
+     * guidance on the display modes SurfaceFlinger will consider when switching refresh
+     * rates and scheduling the frame rate. Unless SurfaceFlinger has a specific reason to do
+     * otherwise, it will stay within this range.
+     */
+    RefreshRateRanges primaryRanges;
+
+    /**
+     * The app request physical and render refresh rate ranges allow SurfaceFlinger to consider
+     * more display modes when switching refresh rates. Although SurfaceFlinger will
+     * generally stay within the primary range, specific considerations, such as layer frame
+     * rate settings specified via the setFrameRate() API, may cause SurfaceFlinger to go
+     * outside the primary range. SurfaceFlinger never goes outside the app request range.
+     * The app request range will be greater than or equal to the primary refresh rate range,
+     * never smaller.
+     */
+    RefreshRateRanges appRequestRanges;
+}
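DisplayModeSpecs nests two refresh-rate ranges, each with physical and render bounds, where the app-request ranges must contain the primary ranges. A hedged sketch of filling the generated C++ parcelable, assuming the standard AIDL C++ backend naming (android::gui::DisplayModeSpecs with nested RefreshRateRanges::RefreshRateRange) and header path:

    #include <android/gui/DisplayModeSpecs.h>  // generated header; path assumed

    android::gui::DisplayModeSpecs makeSpecs(int32_t defaultModeId) {
        using Specs = android::gui::DisplayModeSpecs;
        Specs specs;
        specs.defaultMode = defaultModeId;
        specs.allowGroupSwitching = false;

        Specs::RefreshRateRanges::RefreshRateRange range;
        range.min = 60.0f;
        range.max = 120.0f;
        specs.primaryRanges.physical = range;
        specs.primaryRanges.render = range;
        specs.appRequestRanges.physical = range;

        // The app-request render range is allowed to be wider than the primary one.
        range.min = 30.0f;
        specs.appRequestRanges.render = range;
        return specs;
    }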
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/DisplayPrimaries.aidl
similarity index 64%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/DisplayPrimaries.aidl
index 6929a6c..dbf668c 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/DisplayPrimaries.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,20 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
+// copied from libui ConfigStoreTypes.h
 
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+/** @hide */
+parcelable DisplayPrimaries {
+    parcelable CieXyz {
+        float X;
+        float Y;
+        float Z;
+    }
+
+    CieXyz red;
+    CieXyz green;
+    CieXyz blue;
+    CieXyz white;
 }
diff --git a/libs/gui/aidl/android/gui/DisplayedFrameStats.aidl b/libs/gui/aidl/android/gui/DisplayedFrameStats.aidl
new file mode 100644
index 0000000..f4b6dad
--- /dev/null
+++ b/libs/gui/aidl/android/gui/DisplayedFrameStats.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.gui;
+
+/** @hide */
+parcelable DisplayedFrameStats {
+    /* The number of frames represented by this sample. */
+    long numFrames = 0;
+
+    /* A histogram counting how many times a pixel of a given value was displayed onscreen for
+     * FORMAT_COMPONENT_0. The buckets of the histogram are evenly weighted, the number of buckets
+     * is device specific. E.g., for RGBA_8888, if component_0_sample is {10, 6, 4, 1} this means
+     * that 10 red pixels were displayed onscreen in range 0x00->0x3F, 6 red pixels
+     * were displayed onscreen in range 0x40->0x7F, etc.
+     */
+    long[] component_0_sample;
+
+    /* The same sample definition as component_0_sample, but for FORMAT_COMPONENT_1. */
+    long[] component_1_sample;
+
+    /* The same sample definition as component_0_sample, but for FORMAT_COMPONENT_2. */
+    long[] component_2_sample;
+
+    /* The same sample definition as component_0_sample, but for FORMAT_COMPONENT_3. */
+    long[] component_3_sample;
+}
diff --git a/libs/gui/aidl/android/gui/DynamicDisplayInfo.aidl b/libs/gui/aidl/android/gui/DynamicDisplayInfo.aidl
new file mode 100644
index 0000000..3114929
--- /dev/null
+++ b/libs/gui/aidl/android/gui/DynamicDisplayInfo.aidl
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.gui;
+
+import android.gui.DisplayMode;
+import android.gui.HdrCapabilities;
+
+// Information about a physical display which may change on hotplug reconnect.
+// Make sure to sync with libui DynamicDisplayInfo.h
+
+/** @hide */
+parcelable DynamicDisplayInfo {
+    List<DisplayMode> supportedDisplayModes;
+
+    int activeDisplayModeId;
+    float renderFrameRate;
+
+    int[] supportedColorModes;
+    int activeColorMode;
+    HdrCapabilities hdrCapabilities;
+
+    // True if the display reports support for HDMI 2.1 Auto Low Latency Mode.
+    // For more information, see the HDMI 2.1 specification.
+    boolean autoLowLatencyModeSupported;
+
+    // True if the display reports support for Game Content Type.
+    // For more information, see the HDMI 1.4 specification.
+    boolean gameContentTypeSupported;
+
+    // The boot display mode preferred by the implementation.
+    int preferredBootDisplayMode;
+}
diff --git a/libs/gui/aidl/android/gui/Rect.aidl b/libs/gui/aidl/android/gui/FrameEvent.aidl
similarity index 60%
copy from libs/gui/aidl/android/gui/Rect.aidl
copy to libs/gui/aidl/android/gui/FrameEvent.aidl
index 1b13761..aaabdb5 100644
--- a/libs/gui/aidl/android/gui/Rect.aidl
+++ b/libs/gui/aidl/android/gui/FrameEvent.aidl
@@ -16,20 +16,20 @@
 
 package android.gui;
 
-// copied from libs/arect/include/android/rect.h
-// TODO(b/221473398):
-// use hardware/interfaces/graphics/common/aidl/android/hardware/graphics/common/Rect.aidl
+// Identifiers for all the events that may be recorded or reported.
+
 /** @hide */
-parcelable Rect {
-    /// Minimum X coordinate of the rectangle.
-    int left;
-
-    /// Minimum Y coordinate of the rectangle.
-    int top;
-
-    /// Maximum X coordinate of the rectangle.
-    int right;
-
-    /// Maximum Y coordinate of the rectangle.
-    int bottom;
+@Backing(type="int")
+enum FrameEvent {
+    POSTED = 0,
+    REQUESTED_PRESENT = 1,
+    LATCH = 2,
+    ACQUIRE = 3,
+    FIRST_REFRESH_START = 4,
+    LAST_REFRESH_START = 5,
+    GPU_COMPOSITION_DONE = 6,
+    DISPLAY_PRESENT = 7,
+    DEQUEUE_READY = 8,
+    RELEASE = 9,
+    EVENT_COUNT = 10 // Not an actual event.
 }
diff --git a/libs/gui/aidl/android/gui/FrameStats.aidl b/libs/gui/aidl/android/gui/FrameStats.aidl
new file mode 100644
index 0000000..a145e74
--- /dev/null
+++ b/libs/gui/aidl/android/gui/FrameStats.aidl
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.gui;
+
+// Make sure to sync with libui FrameStats.h
+
+/** @hide */
+parcelable FrameStats {
+    /*
+     * Approximate refresh time, in nanoseconds.
+     */
+    long refreshPeriodNano;
+
+    /*
+     * The times in nanoseconds for when the frame contents were posted by the producer (e.g.
+     * the application). They are either explicitly set or defaulted to the time when
+     * Surface::queueBuffer() was called.
+     */
+    long[] desiredPresentTimesNano;
+
+    /*
+     * The times in nanoseconds for when the frame contents were presented on the screen.
+     */
+    long[] actualPresentTimesNano;
+
+    /*
+     * The times in nanoseconds for when the frame contents were ready to be presented. Note that
+     * a frame can be posted while its contents are still being rendered asynchronously in GL. In
+     * such a case these are the times when the frame contents were completely rendered (i.e. their
+     * fences signaled).
+     */
+    long[] frameReadyTimesNano;
+}
diff --git a/libs/gui/include/gui/FrameTimelineInfo.h b/libs/gui/aidl/android/gui/FrameTimelineInfo.aidl
similarity index 69%
rename from libs/gui/include/gui/FrameTimelineInfo.h
rename to libs/gui/aidl/android/gui/FrameTimelineInfo.aidl
index 255ce56..6a86c6a 100644
--- a/libs/gui/include/gui/FrameTimelineInfo.h
+++ b/libs/gui/aidl/android/gui/FrameTimelineInfo.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright 2021 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,36 +14,27 @@
  * limitations under the License.
  */
 
-#pragma once
+package android.gui;
 
-#include <stdint.h>
-
-#include <binder/Parcel.h>
-
-namespace android {
-
-struct FrameTimelineInfo {
+/** @hide */
+parcelable FrameTimelineInfo {
     // Needs to be in sync with android.graphics.FrameInfo.INVALID_VSYNC_ID in java
-    static constexpr int64_t INVALID_VSYNC_ID = -1;
+    const long INVALID_VSYNC_ID = -1;
 
     // The vsync id that was used to start the transaction
-    int64_t vsyncId = INVALID_VSYNC_ID;
+    long vsyncId = INVALID_VSYNC_ID;
 
     // The id of the input event that caused this buffer
     // Default is android::os::IInputConstants::INVALID_INPUT_EVENT_ID = 0
     // We copy the value of the input event ID instead of including the header, because libgui
     // header libraries containing FrameTimelineInfo must be available to vendors, but libinput is
     // not directly vendor available.
-    int32_t inputEventId = 0;
+    int inputEventId = 0;
 
     // The current time in nanoseconds the application started to render the frame.
-    int64_t startTimeNanos = 0;
+    long startTimeNanos = 0;
 
-    status_t write(Parcel& output) const;
-    status_t read(const Parcel& input);
-
-    void merge(const FrameTimelineInfo& other);
-    void clear();
-};
-
-} // namespace android
+    // Whether this vsyncId should be used to heuristically select the display refresh rate
+    // TODO(b/281695725): Clean this up once TextureView use setFrameRate API
+    boolean useForRefreshRateSelection = false;
+}
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/HdrCapabilities.aidl
similarity index 64%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/HdrCapabilities.aidl
index 6929a6c..9d06da9 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/HdrCapabilities.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,14 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
+// Make sure to sync with libui HdrCapabilities.h
 
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+/** @hide */
+parcelable HdrCapabilities {
+    int[] supportedHdrTypes;
+    float maxLuminance;
+    float maxAverageLuminance;
+    float minLuminance;
 }
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/HdrConversionCapability.aidl
similarity index 63%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/HdrConversionCapability.aidl
index 6929a6c..1bcfd38 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/HdrConversionCapability.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,12 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
-}
+// TODO(b/265277221): use android.hardware.graphics.common.HdrConversionCapability.aidl
+/** @hide */
+parcelable HdrConversionCapability {
+    int sourceType;
+    int outputType;
+    boolean addsLatency;
+}
\ No newline at end of file
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/HdrConversionStrategy.aidl
similarity index 64%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/HdrConversionStrategy.aidl
index 6929a6c..1be74b4 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/HdrConversionStrategy.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,12 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+// TODO(b/265277221): use android.hardware.graphics.common.HdrConversionStrategy.aidl
+/** @hide */
+union HdrConversionStrategy {
+    boolean passthrough = true;
+    int[] autoAllowedHdrTypes;
+    int forceHdrConversion;
 }
diff --git a/libs/gui/aidl/android/gui/IHdrConversionConstants.aidl b/libs/gui/aidl/android/gui/IHdrConversionConstants.aidl
new file mode 100644
index 0000000..7697f29
--- /dev/null
+++ b/libs/gui/aidl/android/gui/IHdrConversionConstants.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.gui;
+
+/** @hide */
+interface IHdrConversionConstants
+{
+    /** HDR Conversion Mode when there is no conversion being done */
+    const int HdrConversionModePassthrough = 1;
+
+    /** HDR Conversion Mode when HDR conversion is decided by the system or implementation */
+    const int HdrConversionModeAuto = 2;
+
+    /** HDR Conversion Mode when the output HDR type is selected by the user or framework */
+    const int HdrConversionModeForce = 3;
+}
\ No newline at end of file
diff --git a/libs/gui/aidl/android/gui/IHdrLayerInfoListener.aidl b/libs/gui/aidl/android/gui/IHdrLayerInfoListener.aidl
index fc809c4..e8c36ee 100644
--- a/libs/gui/aidl/android/gui/IHdrLayerInfoListener.aidl
+++ b/libs/gui/aidl/android/gui/IHdrLayerInfoListener.aidl
@@ -19,7 +19,9 @@
 /** @hide */
 oneway interface IHdrLayerInfoListener {
     // Callback with the total number of HDR layers, the dimensions of the largest layer,
-    // and a placeholder flags
+    // placeholder flags, and the max desired HDR/SDR ratio. The max desired HDR/SDR
+    // ratio may be positive infinity to indicate an unbounded ratio.
     // TODO (b/182312559): Define the flags (likely need an indicator that a UDFPS layer is present)
-    void onHdrLayerInfoChanged(int numberOfHdrLayers, int maxW, int maxH, int flags);
+    void onHdrLayerInfoChanged(int numberOfHdrLayers, int maxW, int maxH,
+            int flags, float maxDesiredHdrSdrRatio);
 }
\ No newline at end of file
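The listener above now also receives the max desired HDR/SDR ratio, which may be positive infinity when unbounded. A hedged native implementation sketch using the generated Bn class (class name and header path assumed from the standard AIDL C++ backend):

    #define LOG_TAG "HdrLayerInfoSketch"
    #include <cmath>
    #include <string>
    #include <android/gui/BnHdrLayerInfoListener.h>  // generated header; path assumed
    #include <log/log.h>

    class HdrLayerInfoLogger : public android::gui::BnHdrLayerInfoListener {
    public:
        android::binder::Status onHdrLayerInfoChanged(int32_t numberOfHdrLayers, int32_t maxW,
                                                      int32_t maxH, int32_t flags,
                                                      float maxDesiredHdrSdrRatio) override {
            // Positive infinity means the desired ratio is unbounded (see the AIDL above).
            const bool unbounded = std::isinf(maxDesiredHdrSdrRatio);
            ALOGD("HDR layers=%d largest=%dx%d flags=%d ratio=%s", numberOfHdrLayers, maxW,
                  maxH, flags,
                  unbounded ? "unbounded" : std::to_string(maxDesiredHdrSdrRatio).c_str());
            return android::binder::Status::ok();
        }
    };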
diff --git a/libs/gui/aidl/android/gui/ISurfaceComposer.aidl b/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
index b31b37b..ec3266c 100644
--- a/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
+++ b/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
@@ -16,39 +16,115 @@
 
 package android.gui;
 
-import android.gui.DisplayCaptureArgs;
+import android.gui.Color;
+import android.gui.CompositionPreference;
+import android.gui.ContentSamplingAttributes;
 import android.gui.DisplayBrightness;
+import android.gui.DisplayCaptureArgs;
+import android.gui.DisplayDecorationSupport;
+import android.gui.DisplayedFrameStats;
+import android.gui.DisplayModeSpecs;
+import android.gui.DisplayPrimaries;
 import android.gui.DisplayState;
 import android.gui.DisplayStatInfo;
+import android.gui.DynamicDisplayInfo;
+import android.gui.FrameEvent;
+import android.gui.FrameStats;
+import android.gui.HdrConversionCapability;
+import android.gui.HdrConversionStrategy;
+import android.gui.IDisplayEventConnection;
+import android.gui.IFpsListener;
 import android.gui.IHdrLayerInfoListener;
-import android.gui.LayerCaptureArgs;
+import android.gui.IRegionSamplingListener;
 import android.gui.IScreenCaptureListener;
+import android.gui.ISurfaceComposerClient;
+import android.gui.ITunnelModeEnabledListener;
+import android.gui.IWindowInfosListener;
+import android.gui.LayerCaptureArgs;
+import android.gui.LayerDebugInfo;
+import android.gui.OverlayProperties;
+import android.gui.PullAtomData;
+import android.gui.ARect;
+import android.gui.StaticDisplayInfo;
 
 /** @hide */
 interface ISurfaceComposer {
 
-    /* create a virtual display
+    enum VsyncSource {
+        eVsyncSourceApp = 0,
+        eVsyncSourceSurfaceFlinger = 1
+    }
+
+    enum EventRegistration {
+        modeChanged = 1 << 0,
+        frameRateOverride = 1 << 1,
+    }
+
+    /**
+     * Signal that we're done booting.
+     * Requires ACCESS_SURFACE_FLINGER permission
+     */
+    void bootFinished();
+
+    /**
+     * Create a display event connection
+     *
+     * layerHandle
+     *     Optional binder handle representing a Layer in SF to associate the new
+     *     DisplayEventConnection with. This handle can be found inside a surface control after
+     *     surface creation, see ISurfaceComposerClient::createSurface. Set to null if no layer
+     *     association should be made.
+     */
+    @nullable IDisplayEventConnection createDisplayEventConnection(VsyncSource vsyncSource,
+            EventRegistration eventRegistration, @nullable IBinder layerHandle);
+
+    /**
+     * Create a connection with SurfaceFlinger.
+     */
+    @nullable ISurfaceComposerClient createConnection();
+
+    /**
+     * Create a virtual display
+     *
+     * displayName
+     *     The name of the virtual display
+     * secure
+     *     Whether this virtual display is secure
+     * requestedRefreshRate
+     *     The refresh rate, frames per second, to request on the virtual display.
+     *     This is just a request, the actual rate may be adjusted to align well
+     *     with physical displays running concurrently. If 0 is specified, the
+     *     virtual display is refreshed at the physical display refresh rate.
+     *
      * requires ACCESS_SURFACE_FLINGER permission.
      */
-    @nullable IBinder createDisplay(@utf8InCpp String displayName, boolean secure);
+    @nullable IBinder createDisplay(@utf8InCpp String displayName, boolean secure,
+            float requestedRefreshRate);
 
-    /* destroy a virtual display
+    /**
+     * Destroy a virtual display
      * requires ACCESS_SURFACE_FLINGER permission.
      */
     void destroyDisplay(IBinder display);
 
-    /* get stable IDs for connected physical displays.
+    /**
+     * Get stable IDs for connected physical displays.
      */
     long[] getPhysicalDisplayIds();
 
-    long getPrimaryPhysicalDisplayId();
-
-    /* get token for a physical display given its stable ID obtained via getPhysicalDisplayIds or a
-     * DisplayEventReceiver hotplug event.
+    /**
+     * Get token for a physical display given its stable ID obtained via getPhysicalDisplayIds or
+     * a DisplayEventReceiver hotplug event.
      */
     @nullable IBinder getPhysicalDisplayToken(long displayId);
 
-    /* set display power mode. depending on the mode, it can either trigger
+    /**
+     * Returns the frame timestamps supported by SurfaceFlinger.
+     */
+    FrameEvent[] getSupportedFrameTimestamps();
+
+    /**
+     * Set display power mode. Depending on the mode, it can either trigger
      * screen on, off or low power mode and wait for it to complete.
      * requires ACCESS_SURFACE_FLINGER permission.
      */
@@ -60,12 +136,33 @@
      * video frames */
     DisplayStatInfo getDisplayStats(@nullable IBinder display);
 
-     /**
+    /**
      * Get transactional state of given display.
      */
     DisplayState getDisplayState(IBinder display);
 
     /**
+     * Gets immutable information about given physical display.
+     */
+    StaticDisplayInfo getStaticDisplayInfo(long displayId);
+
+    /**
+     * Gets dynamic information about given physical display.
+     */
+    DynamicDisplayInfo getDynamicDisplayInfoFromId(long displayId);
+
+    DynamicDisplayInfo getDynamicDisplayInfoFromToken(IBinder display);
+
+    DisplayPrimaries getDisplayNativePrimaries(IBinder display);
+
+    void setActiveColorMode(IBinder display, int colorMode);
+
+    /**
+     * Sets the user-preferred display mode that a device should boot in.
+     */
+    void setBootDisplayMode(IBinder display, int displayModeId);
+
+    /**
      * Clears the user-preferred display mode. The device should now boot in system preferred
      * display mode.
      */
@@ -85,6 +182,28 @@
     boolean getBootDisplayModeSupport();
 
     /**
+     * Gets the HDR conversion capabilities of the device. The conversion capability defines whether
+     * conversion from sourceType to outputType is possible (with or without latency).
+     *
+     * Requires the ACCESS_SURFACE_FLINGER permission.
+     */
+     List<HdrConversionCapability> getHdrConversionCapabilities();
+
+     /**
+      * Sets the HDR conversion strategy of the device.
+      * Returns the preferred HDR output type of the device, in case when HdrConversionStrategy has
+      * autoAllowedHdrTypes set. Returns Hdr::INVALID in other cases.
+      *
+      * Requires the ACCESS_SURFACE_FLINGER permission.
+      */
+     int setHdrConversionStrategy(in HdrConversionStrategy hdrConversionStrategy);
+
+     /**
+      * Gets whether HDR output conversion operations are supported on the device.
+      */
+     boolean getHdrOutputConversionSupport();
+
+    /**
      * Switches Auto Low Latency Mode on/off on the connected display, if it is
      * available. This should only be called if the display supports Auto Low
      * Latency Mode as reported in #getDynamicDisplayInfo.
@@ -110,7 +229,13 @@
      * match the size of the output buffer.
      */
     void captureDisplay(in DisplayCaptureArgs args, IScreenCaptureListener listener);
+
+    /**
+     * Capture the specified screen. This requires the READ_FRAME_BUFFER
+     * permission.
+     */
     void captureDisplayById(long displayId, IScreenCaptureListener listener);
+
     /**
      * Capture a subtree of the layer hierarchy, potentially ignoring the root node.
      * This requires READ_FRAME_BUFFER permission. This function will fail if there
@@ -118,13 +243,143 @@
      */
     void captureLayers(in LayerCaptureArgs args, IScreenCaptureListener listener);
 
-    /*
+    /**
+     * Clears the frame statistics for animations.
+     *
+     * Requires the ACCESS_SURFACE_FLINGER permission.
+     */
+    void clearAnimationFrameStats();
+
+    /**
+     * Gets the frame statistics for animations.
+     *
+     * Requires the ACCESS_SURFACE_FLINGER permission.
+     */
+    FrameStats getAnimationFrameStats();
+
+    /**
+     * Overrides the supported HDR modes for the given display device.
+     *
+     * Requires the ACCESS_SURFACE_FLINGER permission.
+     */
+    void overrideHdrTypes(IBinder display, in int[] hdrTypes);
+
+    /**
+     * Pulls surfaceflinger atoms global stats and layer stats to pipe to statsd.
+     *
+     * Requires the calling uid be from system server.
+     */
+    PullAtomData onPullAtom(int atomId);
+
+    /**
+     * Gets the list of active layers in Z order for debugging purposes.
+     *
+     * Requires the ACCESS_SURFACE_FLINGER permission.
+     */
+    List<LayerDebugInfo> getLayerDebugInfo();
+
+    boolean getColorManagement();
+
+    /**
+     * Gets the composition preference of the default data space and default pixel format,
+     * as well as the wide color gamut data space and wide color gamut pixel format.
+     * If the wide color gamut data space is V0_SRGB, then it implies that the platform
+     * has no wide color gamut support.
+     */
+    CompositionPreference getCompositionPreference();
+
+    /**
+     * Requires the ACCESS_SURFACE_FLINGER permission.
+     */
+    ContentSamplingAttributes getDisplayedContentSamplingAttributes(IBinder display);
+
+    /**
+     * Turns on the color sampling engine on the display.
+     *
+     * Requires the ACCESS_SURFACE_FLINGER permission.
+     */
+    void setDisplayContentSamplingEnabled(IBinder display, boolean enable, byte componentMask, long maxFrames);
+
+    /**
+     * Returns statistics on the color profile of the last frame displayed for a given display.
+     *
+     * Requires the ACCESS_SURFACE_FLINGER permission.
+     */
+    DisplayedFrameStats getDisplayedContentSample(IBinder display, long maxFrames, long timestamp);
+
+    /**
+     * Gets whether SurfaceFlinger can support protected content in GPU composition.
+     */
+    boolean getProtectedContentSupport();
+
+    /**
      * Queries whether the given display is a wide color display.
      * Requires the ACCESS_SURFACE_FLINGER permission.
      */
     boolean isWideColorDisplay(IBinder token);
 
-    /*
+    /**
+     * Registers a listener to stream median luma updates from SurfaceFlinger.
+     *
+     * The sampling area is bounded by both samplingArea and the given stopLayerHandle
+     * (i.e., only layers behind the stop layer will be captured and sampled).
+     *
+     * Multiple listeners may be registered, so long as each registration uses an independent
+     * listener instance.
+     * If multiple listeners are provided, the effective sampling region for each listener will
+     * be bounded by whichever stop layer has a lower Z value.
+     *
+     * Requires the same permissions as captureLayers and captureScreen.
+     */
+    void addRegionSamplingListener(in ARect samplingArea, @nullable IBinder stopLayerHandle, IRegionSamplingListener listener);
+
+    /**
+     * Removes a listener that was streaming median luma updates from SurfaceFlinger.
+     */
+    void removeRegionSamplingListener(IRegionSamplingListener listener);
+
+    /**
+     * Registers a listener that streams fps updates from SurfaceFlinger.
+     *
+     * The listener will stream fps updates for the layer tree rooted at the layer denoted by the
+     * task ID, i.e., the layer must have the task ID as part of its layer metadata with key
+     * METADATA_TASK_ID. If there is no such layer, then no fps is expected to be reported.
+     *
+     * Multiple listeners may be supported.
+     *
+     * Requires the READ_FRAME_BUFFER permission.
+     */
+    void addFpsListener(int taskId, IFpsListener listener);
+
+    /**
+     * Removes a listener that was streaming fps updates from SurfaceFlinger.
+     */
+    void removeFpsListener(IFpsListener listener);
+
+    /**
+     * Registers a listener to receive tunnel mode enabled updates from SurfaceFlinger.
+     *
+     * Requires ACCESS_SURFACE_FLINGER permission.
+     */
+    void addTunnelModeEnabledListener(ITunnelModeEnabledListener listener);
+
+    /**
+     * Removes a listener that was receiving tunnel mode enabled updates from SurfaceFlinger.
+     *
+     * Requires ACCESS_SURFACE_FLINGER permission.
+     */
+    void removeTunnelModeEnabledListener(ITunnelModeEnabledListener listener);
+
+    /**
+     * Sets the refresh rate boundaries for the display.
+     *
+     * @see DisplayModeSpecs.aidl for details.
+     */
+    void setDesiredDisplayModeSpecs(IBinder displayToken, in DisplayModeSpecs specs);
+
+    DisplayModeSpecs getDesiredDisplayModeSpecs(IBinder displayToken);
+
+    /**
      * Gets whether brightness operations are supported on a display.
      *
      * displayToken
@@ -138,7 +393,7 @@
      */
     boolean getDisplayBrightnessSupport(IBinder displayToken);
 
-    /*
+    /**
      * Sets the brightness of a display.
      *
      * displayToken
@@ -153,7 +408,7 @@
      */
     void setDisplayBrightness(IBinder displayToken, in DisplayBrightness brightness);
 
-    /*
+    /**
      * Adds a listener that receives HDR layer information. This is used in combination
      * with setDisplayBrightness to adjust the display brightness depending on factors such
      * as whether or not HDR is in use.
@@ -162,7 +417,7 @@
      */
     void addHdrLayerInfoListener(IBinder displayToken, IHdrLayerInfoListener listener);
 
-    /*
+    /**
      * Removes a listener that was added with addHdrLayerInfoListener.
      *
      * Returns NO_ERROR upon success, NAME_NOT_FOUND if the display is invalid, and BAD_VALUE if
@@ -171,7 +426,7 @@
      */
     void removeHdrLayerInfoListener(IBinder displayToken, IHdrLayerInfoListener listener);
 
-    /*
+    /**
      * Sends a power boost to the composer. This function is asynchronous.
      *
      * boostId
@@ -179,5 +434,75 @@
      *
      * Returns NO_ERROR upon success.
      */
-    void notifyPowerBoost(int boostId);
+    oneway void notifyPowerBoost(int boostId);
+
+    /*
+     * Sets the global configuration for all the shadows drawn by SurfaceFlinger. Shadow follows
+     * material design guidelines.
+     *
+     * ambientColor
+     *      Color of the ambient shadow. The alpha is premultiplied.
+     *
+     * spotColor
+     *      Color of the spot shadow. The alpha is premultiplied. The position of the spot shadow
+     *      depends on the light position.
+     *
+     * lightPosY/lightPosZ
+     *      Position of the light used to cast the spot shadow. The X value is always the display
+     *      width / 2.
+     *
+     * lightRadius
+     *      Radius of the light casting the shadow.
+     */
+    oneway void setGlobalShadowSettings(in Color ambientColor, in Color spotColor, float lightPosY, float lightPosZ, float lightRadius);
+
+    /**
+     * Gets whether a display supports DISPLAY_DECORATION layers.
+     *
+     * displayToken
+     *      The token of the display.
+     * outSupport
+     *      An output parameter for whether/how the display supports
+     *      DISPLAY_DECORATION layers.
+     *
+     * Returns NO_ERROR upon success. Otherwise,
+     *      NAME_NOT_FOUND if the display is invalid, or
+     *      BAD_VALUE      if the output parameter is invalid.
+     */
+    @nullable DisplayDecorationSupport getDisplayDecorationSupport(IBinder displayToken);
+
+    /**
+     * Sets the override frame rate for a specified uid, on behalf of GameManagerService.
+     * The frame rate and uid are passed to SurfaceFlinger to update the override mapping
+     * in the scheduler.
+     */
+    void setOverrideFrameRate(int uid, float frameRate);
+
+    /**
+     * Gets the priority of the RenderEngine in SurfaceFlinger.
+     */
+    int getGpuContextPriority();
+
+    /**
+     * Gets the number of buffers SurfaceFlinger would need to acquire. This number
+     * is propagated to the client via MIN_UNDEQUEUED_BUFFERS so that the
+     * client can allocate enough buffers to match SF expectations of the
+     * pipeline depth. SurfaceFlinger will make sure that it gives the app at
+     * least the time configured as the 'appDuration' before trying to latch
+     * the buffer.
+     *
+     * The total number of buffers needed for a given configuration is basically the
+     * number of vsyncs a single buffer is held for across the stack. For the default
+     * configuration a buffer is held ~1 vsync by the app, ~1 vsync by SurfaceFlinger
+     * and 1 vsync by the display. The extra buffers are calculated as the
+     * number of additional buffers on top of the 2 buffers already present
+     * in MIN_UNDEQUEUED_BUFFERS.
+     */
+    int getMaxAcquiredBufferCount();
+
+    void addWindowInfosListener(IWindowInfosListener windowInfosListener);
+
+    void removeWindowInfosListener(IWindowInfosListener windowInfosListener);
+
+    OverlayProperties getOverlaySupport();
 }
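
The getMaxAcquiredBufferCount() documentation above describes the pipeline-depth arithmetic only in prose. Below is a minimal sketch of that arithmetic under the default configuration the comment describes; the helper name, signature, and default arguments are hypothetical and not part of the interface.

```
// Illustrative only: a hypothetical helper mirroring the arithmetic in the
// getMaxAcquiredBufferCount() documentation above. The default arguments are the
// per-stage vsync counts described there (~1 vsync app, ~1 vsync SurfaceFlinger,
// 1 vsync display).
#include <cstdint>

int32_t extraBuffersForPipeline(int32_t appVsyncs = 1, int32_t surfaceFlingerVsyncs = 1,
                                int32_t displayVsyncs = 1) {
    // Total buffers needed: the number of vsyncs a single buffer is held for across the stack.
    const int32_t totalBuffers = appVsyncs + surfaceFlingerVsyncs + displayVsyncs;
    // MIN_UNDEQUEUED_BUFFERS already accounts for 2 buffers; the remainder is the "extra" count.
    constexpr int32_t kBuffersInMinUndequeued = 2;
    return totalBuffers - kBuffersInMinUndequeued;
}
```
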
diff --git a/libs/gui/aidl/android/gui/ISurfaceComposerClient.aidl b/libs/gui/aidl/android/gui/ISurfaceComposerClient.aidl
new file mode 100644
index 0000000..68781ce
--- /dev/null
+++ b/libs/gui/aidl/android/gui/ISurfaceComposerClient.aidl
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.gui;
+
+import android.gui.CreateSurfaceResult;
+import android.gui.FrameStats;
+import android.gui.LayerMetadata;
+
+/** @hide */
+interface ISurfaceComposerClient {
+
+    // flags for createSurface()
+    // (keep in sync with SurfaceControl.java)
+    const int eHidden = 0x00000004;
+    const int eDestroyBackbuffer = 0x00000020;
+    const int eSkipScreenshot = 0x00000040;
+    const int eSecure = 0x00000080;
+    const int eNonPremultiplied = 0x00000100;
+    const int eOpaque = 0x00000400;
+    const int eProtectedByApp = 0x00000800;
+    const int eProtectedByDRM = 0x00001000;
+    const int eCursorWindow = 0x00002000;
+    const int eNoColorFill = 0x00004000;
+
+    const int eFXSurfaceBufferQueue = 0x00000000;
+    const int eFXSurfaceEffect = 0x00020000;
+    const int eFXSurfaceBufferState = 0x00040000;
+    const int eFXSurfaceContainer = 0x00080000;
+    const int eFXSurfaceMask = 0x000F0000;
+
+    /**
+     * Requires ACCESS_SURFACE_FLINGER permission
+     */
+    CreateSurfaceResult createSurface(@utf8InCpp String name, int flags, @nullable IBinder parent, in LayerMetadata metadata);
+
+    /**
+     * Requires ACCESS_SURFACE_FLINGER permission
+     */
+    void clearLayerFrameStats(IBinder handle);
+
+    /**
+     * Requires ACCESS_SURFACE_FLINGER permission
+     */
+    FrameStats getLayerFrameStats(IBinder handle);
+
+    CreateSurfaceResult mirrorSurface(IBinder mirrorFromHandle);
+
+    CreateSurfaceResult mirrorDisplay(long displayId);
+}
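
The e* constants in ISurfaceComposerClient above are plain bit flags that are OR-ed into the single `flags` argument of createSurface(). A minimal sketch of composing them is shown below; the constant values are copied from the AIDL above, while the function and variable names are illustrative.

```
// Illustrative only: mirrors flag constants declared in ISurfaceComposerClient.aidl
// above and shows how they combine as a bitmask.
#include <cstdint>

namespace {
constexpr int32_t eHidden = 0x00000004;
constexpr int32_t eSecure = 0x00000080;
constexpr int32_t eFXSurfaceBufferState = 0x00040000;
} // namespace

int32_t exampleCreateSurfaceFlags() {
    // A hidden, secure, buffer-state layer; any combination of the e* constants
    // is passed through the single `flags` argument of createSurface().
    return eHidden | eSecure | eFXSurfaceBufferState;
}
```
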
diff --git a/libs/gui/aidl/android/gui/ITransactionTraceListener.aidl b/libs/gui/aidl/android/gui/ITransactionTraceListener.aidl
deleted file mode 100644
index 5cd12fd..0000000
--- a/libs/gui/aidl/android/gui/ITransactionTraceListener.aidl
+++ /dev/null
@@ -1,6 +0,0 @@
-package android.gui;
-
-/** @hide */
-interface ITransactionTraceListener {
-   void onToggled(boolean enabled);
-}
\ No newline at end of file
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/LayerDebugInfo.aidl
similarity index 63%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/LayerDebugInfo.aidl
index 6929a6c..faca980 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/LayerDebugInfo.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,6 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
-}
+parcelable LayerDebugInfo cpp_header "gui/LayerDebugInfo.h";
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/LayerMetadata.aidl
similarity index 63%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/LayerMetadata.aidl
index 6929a6c..1368ac5 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/LayerMetadata.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,6 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
-}
+parcelable LayerMetadata cpp_header "gui/LayerMetadata.h";
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/OverlayProperties.aidl
similarity index 61%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/OverlayProperties.aidl
index 6929a6c..5fb1a83 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/OverlayProperties.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,17 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
+/** @hide */
+parcelable OverlayProperties {
+    parcelable SupportedBufferCombinations {
+        int[] pixelFormats;
+        int[] standards;
+        int[] transfers;
+        int[] ranges;
+    }
+    SupportedBufferCombinations[] combinations;
 
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+    boolean supportMixedColorSpaces;
 }
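
OverlayProperties above groups overlay capabilities into per-combination arrays. Below is a minimal sketch of walking that structure, assuming the standard AIDL C++ backend mapping (`int[]` to `std::vector<int32_t>`, nested parcelable to nested struct) and an assumed generated header path.

```
// Illustrative only: iterates the OverlayProperties parcelable defined above.
#include <cstdio>

#include <android/gui/OverlayProperties.h>  // header path assumed from the android.gui package

void dumpOverlaySupport(const android::gui::OverlayProperties& props) {
    for (const auto& combo : props.combinations) {
        // Each combination lists pixel formats, standards, transfers and ranges
        // that can be composed together as an overlay.
        std::printf("combination: %zu formats, %zu standards, %zu transfers, %zu ranges\n",
                    combo.pixelFormats.size(), combo.standards.size(), combo.transfers.size(),
                    combo.ranges.size());
    }
    std::printf("mixed color spaces supported: %s\n",
                props.supportMixedColorSpaces ? "yes" : "no");
}
```
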
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/PullAtomData.aidl
similarity index 64%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/PullAtomData.aidl
index 6929a6c..c307cef 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/PullAtomData.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,10 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
-
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+/** @hide */
+parcelable PullAtomData {
+    byte[] data;
+    boolean success;
 }
diff --git a/libs/gui/aidl/android/gui/Rect.aidl b/libs/gui/aidl/android/gui/StaticDisplayInfo.aidl
similarity index 60%
copy from libs/gui/aidl/android/gui/Rect.aidl
copy to libs/gui/aidl/android/gui/StaticDisplayInfo.aidl
index 1b13761..0ccda56 100644
--- a/libs/gui/aidl/android/gui/Rect.aidl
+++ b/libs/gui/aidl/android/gui/StaticDisplayInfo.aidl
@@ -16,20 +16,15 @@
 
 package android.gui;
 
-// copied from libs/arect/include/android/rect.h
-// TODO(b/221473398):
-// use hardware/interfaces/graphics/common/aidl/android/hardware/graphics/common/Rect.aidl
+import android.gui.DisplayConnectionType;
+import android.gui.DeviceProductInfo;
+import android.gui.Rotation;
+
 /** @hide */
-parcelable Rect {
-    /// Minimum X coordinate of the rectangle.
-    int left;
-
-    /// Minimum Y coordinate of the rectangle.
-    int top;
-
-    /// Maximum X coordinate of the rectangle.
-    int right;
-
-    /// Maximum Y coordinate of the rectangle.
-    int bottom;
+parcelable StaticDisplayInfo {
+    DisplayConnectionType connectionType = DisplayConnectionType.Internal;
+    float density;
+    boolean secure;
+    @nullable DeviceProductInfo deviceProductInfo;
+    Rotation installOrientation = Rotation.Rotation0;
 }
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/aidl/android/gui/TrustedPresentationThresholds.aidl
similarity index 64%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/aidl/android/gui/TrustedPresentationThresholds.aidl
index 6929a6c..1eea5b4 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/aidl/android/gui/TrustedPresentationThresholds.aidl
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright 2022 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,11 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+package android.gui;
 
-import android.content.pm.PackageChangeEvent;
+parcelable TrustedPresentationThresholds {
+    float minAlpha = -1.0f;
+    float minFractionRendered = -1.0f;
 
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
+    int stabilityRequirementMs = 0;
 }
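
TrustedPresentationThresholds above is a plain parcelable whose fields default to "unset" values (-1.0f, -1.0f, 0). A minimal sketch of overriding those defaults on the C++ side follows, assuming the standard AIDL C++ backend mapping and an assumed generated header path; the numbers are arbitrary examples, not recommendations.

```
// Illustrative only: fills in the fields declared in TrustedPresentationThresholds.aidl above.
#include <android/gui/TrustedPresentationThresholds.h>  // header path assumed from the package

android::gui::TrustedPresentationThresholds exampleThresholds() {
    android::gui::TrustedPresentationThresholds thresholds;
    thresholds.minAlpha = 0.8f;              // overrides the -1.0f default
    thresholds.minFractionRendered = 0.9f;   // overrides the -1.0f default
    thresholds.stabilityRequirementMs = 500; // overrides the 0 default
    return thresholds;
}
```
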
diff --git a/libs/gui/android/gui/FocusRequest.aidl b/libs/gui/android/gui/FocusRequest.aidl
index b13c600..62d1b68 100644
--- a/libs/gui/android/gui/FocusRequest.aidl
+++ b/libs/gui/android/gui/FocusRequest.aidl
@@ -24,15 +24,6 @@
     @nullable IBinder token;
     @utf8InCpp String windowName;
     /**
-     * The token that the caller expects currently to be focused. If the
-     * specified token does not match the currently focused window, this request will be dropped.
-     * If the specified focused token matches the currently focused window, the call will succeed.
-     * Set this to "null" if this call should succeed no matter what the currently focused token
-     * is.
-     */
-    @nullable IBinder focusedToken;
-    @utf8InCpp String focusedWindowName;
-    /**
      * SYSTEM_TIME_MONOTONIC timestamp in nanos set by the client (wm) when requesting the focus
      * change. This determines which request gets precedence if there is a focus change request
      * from another source such as pointer down.
diff --git a/libs/gui/android/gui/IWindowInfosListener.aidl b/libs/gui/android/gui/IWindowInfosListener.aidl
index a5b2762..400229d 100644
--- a/libs/gui/android/gui/IWindowInfosListener.aidl
+++ b/libs/gui/android/gui/IWindowInfosListener.aidl
@@ -16,12 +16,11 @@
 
 package android.gui;
 
-import android.gui.DisplayInfo;
 import android.gui.IWindowInfosReportedListener;
-import android.gui.WindowInfo;
+import android.gui.WindowInfosUpdate;
 
 /** @hide */
-oneway interface IWindowInfosListener
-{
-    void onWindowInfosChanged(in WindowInfo[] windowInfos, in DisplayInfo[] displayInfos, in @nullable IWindowInfosReportedListener windowInfosReportedListener);
+oneway interface IWindowInfosListener {
+    void onWindowInfosChanged(
+        in WindowInfosUpdate update, in @nullable IWindowInfosReportedListener windowInfosReportedListener);
 }
diff --git a/libs/gui/android/gui/WindowInfosUpdate.aidl b/libs/gui/android/gui/WindowInfosUpdate.aidl
new file mode 100644
index 0000000..0c6109d
--- /dev/null
+++ b/libs/gui/android/gui/WindowInfosUpdate.aidl
@@ -0,0 +1,22 @@
+/*
+** Copyright 2023, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+package android.gui;
+
+import android.gui.DisplayInfo;
+import android.gui.WindowInfo;
+
+parcelable WindowInfosUpdate cpp_header "gui/WindowInfosUpdate.h";
diff --git a/libs/gui/fuzzer/Android.bp b/libs/gui/fuzzer/Android.bp
new file mode 100644
index 0000000..82e1b5a
--- /dev/null
+++ b/libs/gui/fuzzer/Android.bp
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_native_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_native_license"],
+}
+
+cc_defaults {
+    name: "libgui_fuzzer_defaults",
+    static_libs: [
+        "android.hidl.token@1.0-utils",
+        "libbinder_random_parcel",
+        "libgui_aidl_static",
+        "libgui_window_info_static",
+        "libpdx",
+        "libgmock",
+        "libgui_mocks",
+        "libgmock_ndk",
+        "libgmock_main",
+        "libgtest_ndk_c++",
+        "libgmock_main_ndk",
+        "librenderengine_mocks",
+        "perfetto_trace_protos",
+        "libcompositionengine_mocks",
+        "perfetto_trace_protos",
+    ],
+    shared_libs: [
+        "android.hardware.configstore@1.0",
+        "android.hardware.configstore-utils",
+        "android.hardware.graphics.bufferqueue@1.0",
+        "android.hardware.graphics.bufferqueue@2.0",
+        "android.hardware.power-V4-cpp",
+        "android.hidl.token@1.0",
+        "libSurfaceFlingerProp",
+        "libgui",
+        "libbase",
+        "liblog",
+        "libEGL",
+        "libGLESv2",
+        "libbinder",
+        "libcutils",
+        "libhidlbase",
+        "libinput",
+        "libui",
+        "libutils",
+        "libnativewindow",
+        "libvndksupport",
+    ],
+    header_libs: [
+        "libdvr_headers",
+        "libui_fuzzableDataspaces_headers",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
+
+cc_fuzz {
+    name: "libgui_surfaceComposer_fuzzer",
+    srcs: [
+        "libgui_surfaceComposer_fuzzer.cpp",
+    ],
+    defaults: [
+        "libgui_fuzzer_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "libgui_surfaceComposerClient_fuzzer",
+    srcs: [
+        "libgui_surfaceComposerClient_fuzzer.cpp",
+    ],
+    defaults: [
+        "libgui_fuzzer_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "libgui_parcelable_fuzzer",
+    srcs: [
+        "libgui_parcelable_fuzzer.cpp",
+    ],
+    defaults: [
+        "libgui_fuzzer_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "libgui_bufferQueue_fuzzer",
+    srcs: [
+        "libgui_bufferQueue_fuzzer.cpp",
+    ],
+    defaults: [
+        "libgui_fuzzer_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "libgui_consumer_fuzzer",
+    srcs: [
+        "libgui_consumer_fuzzer.cpp",
+    ],
+    defaults: [
+        "libgui_fuzzer_defaults",
+    ],
+}
+
+cc_fuzz {
+    name: "libgui_displayEvent_fuzzer",
+    srcs: [
+        "libgui_displayEvent_fuzzer.cpp",
+    ],
+    defaults: [
+        "libgui_fuzzer_defaults",
+    ],
+}
diff --git a/libs/gui/fuzzer/README.md b/libs/gui/fuzzer/README.md
new file mode 100644
index 0000000..96e27c9
--- /dev/null
+++ b/libs/gui/fuzzer/README.md
@@ -0,0 +1,219 @@
+# Fuzzers for Libgui
+
+## Table of contents
++ [libgui_surfaceComposer_fuzzer](#libgui_surfaceComposer_fuzzer)
++ [libgui_surfaceComposerClient_fuzzer](#libgui_surfaceComposerClient_fuzzer)
++ [libgui_parcelable_fuzzer](#libgui_parcelable_fuzzer)
++ [libgui_bufferQueue_fuzzer](#libgui_bufferQueue_fuzzer)
++ [libgui_consumer_fuzzer](#libgui_consumer_fuzzer)
++ [libgui_displayEvent_fuzzer](#libgui_displayEvent_fuzzer)
+
+# <a name="libgui_surfaceComposer_fuzzer"></a> Fuzzer for SurfaceComposer
+
+SurfaceComposer supports the following parameters:
+1. SurfaceWidth (parameter name:`width`)
+2. SurfaceHeight (parameter name:`height`)
+3. TransactionStateFlags (parameter name:`flags`)
+4. TransformHint (parameter name:`outTransformHint`)
+5. SurfacePixelFormat (parameter name:`format`)
+6. LayerId (parameter name:`outLayerId`)
+7. SurfaceComposerTags (parameter name:`surfaceTag`)
+8. PowerBoostID (parameter name:`boostId`)
+9. VsyncSource (parameter name:`vsyncSource`)
+10. EventRegistrationFlags (parameter name:`eventRegistration`)
+11. FrameRateCompatibility (parameter name:`frameRateCompatibility`)
+12. ChangeFrameRateStrategy (parameter name:`changeFrameRateStrategy`)
+13. HdrTypes (parameter name:`hdrTypes`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+|`surfaceTag` | 0.`BnSurfaceComposer::BOOT_FINISHED`, 1.`BnSurfaceComposer::CREATE_CONNECTION`, 2.`BnSurfaceComposer::GET_STATIC_DISPLAY_INFO`, 3.`BnSurfaceComposer::CREATE_DISPLAY_EVENT_CONNECTION`, 4.`BnSurfaceComposer::CREATE_DISPLAY`, 5.`BnSurfaceComposer::DESTROY_DISPLAY`, 6.`BnSurfaceComposer::GET_PHYSICAL_DISPLAY_TOKEN`, 7.`BnSurfaceComposer::SET_TRANSACTION_STATE`, 8.`BnSurfaceComposer::AUTHENTICATE_SURFACE`, 9.`BnSurfaceComposer::GET_SUPPORTED_FRAME_TIMESTAMPS`, 10.`BnSurfaceComposer::GET_DISPLAY_STATE`, 11.`BnSurfaceComposer::CAPTURE_DISPLAY`, 12.`BnSurfaceComposer::CAPTURE_LAYERS`, 13.`BnSurfaceComposer::CLEAR_ANIMATION_FRAME_STATS`, 14.`BnSurfaceComposer::GET_ANIMATION_FRAME_STATS`, 15.`BnSurfaceComposer::SET_POWER_MODE`, 16.`BnSurfaceComposer::GET_DISPLAY_STATS`, 17.`BnSurfaceComposer::SET_ACTIVE_COLOR_MODE`, 18.`BnSurfaceComposer::ENABLE_VSYNC_INJECTIONS`, 19.`BnSurfaceComposer::INJECT_VSYNC`, 20.`BnSurfaceComposer::GET_LAYER_DEBUG_INFO`, 21.`BnSurfaceComposer::GET_COMPOSITION_PREFERENCE`, 22.`BnSurfaceComposer::GET_COLOR_MANAGEMENT`, 23.`BnSurfaceComposer::GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES`, 24.`BnSurfaceComposer::SET_DISPLAY_CONTENT_SAMPLING_ENABLED`, 25.`BnSurfaceComposer::GET_DISPLAYED_CONTENT_SAMPLE`, 26.`BnSurfaceComposer::GET_PROTECTED_CONTENT_SUPPORT`, 27.`BnSurfaceComposer::IS_WIDE_COLOR_DISPLAY`, 28.`BnSurfaceComposer::GET_DISPLAY_NATIVE_PRIMARIES`, 29.`BnSurfaceComposer::GET_PHYSICAL_DISPLAY_IDS`, 30.`BnSurfaceComposer::ADD_REGION_SAMPLING_LISTENER`, 31.`BnSurfaceComposer::REMOVE_REGION_SAMPLING_LISTENER`, 32.`BnSurfaceComposer::SET_DESIRED_DISPLAY_MODE_SPECS`, 33.`BnSurfaceComposer::GET_DESIRED_DISPLAY_MODE_SPECS`, 34.`BnSurfaceComposer::GET_DISPLAY_BRIGHTNESS_SUPPORT`, 35.`BnSurfaceComposer::SET_DISPLAY_BRIGHTNESS`, 36.`BnSurfaceComposer::CAPTURE_DISPLAY_BY_ID`, 37.`BnSurfaceComposer::NOTIFY_POWER_BOOST`, 38.`BnSurfaceComposer::SET_GLOBAL_SHADOW_SETTINGS`, 39.`BnSurfaceComposer::SET_AUTO_LOW_LATENCY_MODE`, 40.`BnSurfaceComposer::SET_GAME_CONTENT_TYPE`, 41.`BnSurfaceComposer::SET_FRAME_RATE`, 42.`BnSurfaceComposer::ACQUIRE_FRAME_RATE_FLEXIBILITY_TOKEN`, 43.`BnSurfaceComposer::SET_FRAME_TIMELINE_INFO`, 44.`BnSurfaceComposer::ADD_TRANSACTION_TRACE_LISTENER`, 45.`BnSurfaceComposer::GET_GPU_CONTEXT_PRIORITY`, 46.`BnSurfaceComposer::GET_MAX_ACQUIRED_BUFFER_COUNT`, 47.`BnSurfaceComposer::GET_DYNAMIC_DISPLAY_INFO`, 48.`BnSurfaceComposer::ADD_FPS_LISTENER`, 49.`BnSurfaceComposer::REMOVE_FPS_LISTENER`, 50.`BnSurfaceComposer::OVERRIDE_HDR_TYPES`, 51.`BnSurfaceComposer::ADD_HDR_LAYER_INFO_LISTENER`, 52.`BnSurfaceComposer::REMOVE_HDR_LAYER_INFO_LISTENER`, 53.`BnSurfaceComposer::ON_PULL_ATOM`, 54.`BnSurfaceComposer::ADD_TUNNEL_MODE_ENABLED_LISTENER`, 55.`BnSurfaceComposer::REMOVE_TUNNEL_MODE_ENABLED_LISTENER` | Value obtained from FuzzedDataProvider|
+|`boostId`| 0.`hardware::power::Boost::INTERACTION`, 1.`hardware::power::Boost::DISPLAY_UPDATE_IMMINENT`, 2.`hardware::power::Boost::ML_ACC`, 3.`hardware::power::Boost::AUDIO_LAUNCH`, 4.`hardware::power::Boost::CAMERA_LAUNCH`, 5.`hardware::power::Boost::CAMERA_SHOT` |Value obtained from FuzzedDataProvider|
+|`vsyncSource`| 0.`ISurfaceComposer::eVsyncSourceApp`, 1.`ISurfaceComposer::eVsyncSourceSurfaceFlinger`, |Value obtained from FuzzedDataProvider|
+|`eventRegistration`| 0.`ISurfaceComposer::EventRegistration::modeChanged`, 1.`ISurfaceComposer::EventRegistration::frameRateOverride` |Value obtained from FuzzedDataProvider|
+|`frameRateCompatibility`| 0.`ANATIVEWINDOW_FRAME_RATE_COMPATIBILITY_DEFAULT`, 1.`ANATIVEWINDOW_FRAME_RATE_COMPATIBILITY_FIXED_SOURCE` |Value obtained from FuzzedDataProvider|
+|`changeFrameRateStrategy`| 0.`ANATIVEWINDOW_CHANGE_FRAME_RATE_ONLY_IF_SEAMLESS`, 1.`ANATIVEWINDOW_CHANGE_FRAME_RATE_ALWAYS` |Value obtained from FuzzedDataProvider|
+|`hdrTypes`| 0.`ui::Hdr::DOLBY_VISION`, 1.`ui::Hdr::HDR10`, 2.`ui::Hdr::HLG`, 3.`ui::Hdr::HDR10_PLUS` |Value obtained from FuzzedDataProvider|
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) libgui_surfaceComposer_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/libgui_surfaceComposer_fuzzer/libgui_surfaceComposer_fuzzer
+```
+
+# <a name="libgui_surfaceComposerClient_fuzzer"></a> Fuzzer for SurfaceComposerClient
+
+SurfaceComposerClient supports the following parameters:
+1. SurfaceWidth (parameter name:`width`)
+2. SurfaceHeight (parameter name:`height`)
+3. TransactionStateFlags (parameter name:`flags`)
+4. TransformHint (parameter name:`outTransformHint`)
+5. SurfacePixelFormat (parameter name:`format`)
+6. LayerId (parameter name:`outLayerId`)
+7. SurfaceComposerClientTags (parameter name:`surfaceTag`)
+8. DefaultMode (parameter name:`defaultMode`)
+9. PrimaryRefreshRateMin (parameter name:`primaryRefreshRateMin`)
+10. PrimaryRefreshRateMax (parameter name:`primaryRefreshRateMax`)
+11. AppRefreshRateMin (parameter name:`appRefreshRateMin`)
+12. AppRefreshRateMax (parameter name:`appRefreshRateMax`)
+13. DisplayPowerMode (parameter name:`mode`)
+14. CacheId (parameter name:`cacheId`)
+15. DisplayBrightness (parameter name:`brightness`)
+16. PowerBoostID (parameter name:`boostId`)
+17. AtomId (parameter name:`atomId`)
+18. ComponentMask (parameter name:`componentMask`)
+19. MaxFrames (parameter name:`maxFrames`)
+20. TaskId (parameter name:`taskId`)
+21. Alpha (parameter name:`aplha`)
+22. CornerRadius (parameter name:`cornerRadius`)
+23. BackgroundBlurRadius (parameter name:`backgroundBlurRadius`)
+24. Half3Color (parameter name:`color`)
+25. LayerStack (parameter name:`layerStack`)
+26. Dataspace (parameter name:`dataspace`)
+27. Api (parameter name:`api`)
+28. Priority (parameter name:`priority`)
+29. TouchableRegionPointX (parameter name:`pointX`)
+30. TouchableRegionPointY (parameter name:`pointY`)
+31. ColorMode (parameter name:`colorMode`)
+32. WindowInfoFlags (parameter name:`flags`)
+33. WindowInfoTransformOrientation (parameter name:`transform`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+|`surfaceTag`| 0.`Tag::CREATE_SURFACE`, 1.`Tag::CREATE_WITH_SURFACE_PARENT`, 2.`Tag::CLEAR_LAYER_FRAME_STATS`, 3.`Tag::GET_LAYER_FRAME_STATS`, 4.`Tag::MIRROR_SURFACE`, 5.`Tag::LAST` |Value obtained from FuzzedDataProvider|
+|`mode`| 0.`gui::TouchOcclusionMode::BLOCK_UNTRUSTED`, 1.`gui::TouchOcclusionMode::USE_OPACITY`, 2.`gui::TouchOcclusionMode::ALLOW` |Value obtained from FuzzedDataProvider|
+|`boostId`| 0.`hardware::power::Boost::INTERACTION`, 1.`hardware::power::Boost::DISPLAY_UPDATE_IMMINENT`, 2.`hardware::power::Boost::ML_ACC`, 3.`hardware::power::Boost::AUDIO_LAUNCH`, 4.`hardware::power::Boost::CAMERA_LAUNCH`, 5.`hardware::power::Boost::CAMERA_SHOT` |Value obtained from FuzzedDataProvider|
+|`colorMode`|0.`ui::ColorMode::NATIVE`, 1.`ui::ColorMode::STANDARD_BT601_625`, 2.`ui::ColorMode::STANDARD_BT601_625_UNADJUSTED`, 3.`ui::ColorMode::STANDARD_BT601_525`, 4.`ui::ColorMode::STANDARD_BT601_525_UNADJUSTED`, 5.`ui::ColorMode::STANDARD_BT709`, 6.`ui::ColorMode::DCI_P3`, 7.`ui::ColorMode::SRGB`, 8.`ui::ColorMode::ADOBE_RGB`, 9.`ui::ColorMode::DISPLAY_P3`, 10.`ui::ColorMode::BT2020`, 11.`ui::ColorMode::BT2100_PQ`, 12.`ui::ColorMode::BT2100_HLG`, 13.`ui::ColorMode::DISPLAY_BT2020` |Value obtained from FuzzedDataProvider|
+|`flags`|0 .`gui::WindowInfo::Flag::ALLOW_LOCK_WHILE_SCREEN_ON`, 1.`gui::WindowInfo::Flag::DIM_BEHIND`, 2.`gui::WindowInfo::Flag::BLUR_BEHIND`, 3.`gui::WindowInfo::Flag::NOT_FOCUSABLE`, 4.`gui::WindowInfo::Flag::NOT_TOUCHABLE`, 5.`gui::WindowInfo::Flag::NOT_TOUCH_MODAL`, 6.`gui::WindowInfo::Flag::TOUCHABLE_WHEN_WAKING`, 7.`gui::WindowInfo::Flag::KEEP_SCREEN_ON`, 8.`gui::WindowInfo::Flag::LAYOUT_IN_SCREEN`, 9.`gui::WindowInfo::Flag::LAYOUT_NO_LIMITS`, 10.`gui::WindowInfo::Flag::FULLSCREEN`, 11.`gui::WindowInfo::Flag::FORCE_NOT_FULLSCREEN`, 12.`gui::WindowInfo::Flag::DITHER`, 13.`gui::WindowInfo::Flag::SECURE`, 14.`gui::WindowInfo::Flag::SCALED`, 15.`gui::WindowInfo::Flag::IGNORE_CHEEK_PRESSES`, 16.`gui::WindowInfo::Flag::LAYOUT_INSET_DECOR`, 17.`gui::WindowInfo::Flag::ALT_FOCUSABLE_IM`, 18.`gui::WindowInfo::Flag::WATCH_OUTSIDE_TOUCH`, 19.`gui::WindowInfo::Flag::SHOW_WHEN_LOCKED`, 20.`gui::WindowInfo::Flag::SHOW_WALLPAPER`, 21.`gui::WindowInfo::Flag::TURN_SCREEN_ON`, 22.`gui::WindowInfo::Flag::DISMISS_KEYGUARD`, 23.`gui::WindowInfo::Flag::SPLIT_TOUCH`, 24.`gui::WindowInfo::Flag::HARDWARE_ACCELERATED`, 25.`gui::WindowInfo::Flag::LAYOUT_IN_OVERSCAN`, 26.`gui::WindowInfo::Flag::TRANSLUCENT_STATUS`, 27.`gui::WindowInfo::Flag::TRANSLUCENT_NAVIGATION`, 28.`gui::WindowInfo::Flag::LOCAL_FOCUS_MODE`, 29.`gui::WindowInfo::Flag::SLIPPERY`, 30.`gui::WindowInfo::Flag::LAYOUT_ATTACHED_IN_DECOR`, 31.`gui::WindowInfo::Flag::DRAWS_SYSTEM_BAR_BACKGROUNDS`, |Value obtained from FuzzedDataProvider|
+|`dataspace`| 0.`ui::Dataspace::UNKNOWN`, 1.`ui::Dataspace::ARBITRARY`, 2.`ui::Dataspace::STANDARD_SHIFT`, 3.`ui::Dataspace::STANDARD_MASK`, 4.`ui::Dataspace::STANDARD_UNSPECIFIED`, 5.`ui::Dataspace::STANDARD_BT709`, 6.`ui::Dataspace::STANDARD_BT601_625`, 7.`ui::Dataspace::STANDARD_BT601_625_UNADJUSTED`, 8.`ui::Dataspace::STANDARD_BT601_525`, 9.`ui::Dataspace::STANDARD_BT601_525_UNADJUSTED`, 10.`ui::Dataspace::STANDARD_BT2020`, 11.`ui::Dataspace::STANDARD_BT2020_CONSTANT_LUMINANCE`, 12.`ui::Dataspace::STANDARD_BT470M`, 13.`ui::Dataspace::STANDARD_FILM`, 14.`ui::Dataspace::STANDARD_DCI_P3`, 15.`ui::Dataspace::STANDARD_ADOBE_RGB`, 16.`ui::Dataspace::TRANSFER_SHIFT`, 17.`ui::Dataspace::TRANSFER_MASK`, 18.`ui::Dataspace::TRANSFER_UNSPECIFIED`, 19.`ui::Dataspace::TRANSFER_LINEAR`, 20.`ui::Dataspace::TRANSFER_SRGB`, 21.`ui::Dataspace::TRANSFER_SMPTE_170M`, 22.`ui::Dataspace::TRANSFER_GAMMA2_2`, 23.`ui::Dataspace::TRANSFER_GAMMA2_6`, 24.`ui::Dataspace::TRANSFER_GAMMA2_8`, 25.`ui::Dataspace::TRANSFER_ST2084`, 26.`ui::Dataspace::TRANSFER_HLG`, 27.`ui::Dataspace::RANGE_SHIFT`, 28.`ui::Dataspace::RANGE_MASK`, 29.`ui::Dataspace::RANGE_UNSPECIFIED`, 30.`ui::Dataspace::RANGE_FULL`, 31.`ui::Dataspace::RANGE_LIMITED`, 32.`ui::Dataspace::RANGE_EXTENDED`, 33.`ui::Dataspace::SRGB_LINEAR`, 34.`ui::Dataspace::V0_SRGB_LINEAR`, 35.`ui::Dataspace::V0_SCRGB_LINEAR`, 36.`ui::Dataspace::SRGB`, 37.`ui::Dataspace::V0_SRGB`, 38.`ui::Dataspace::V0_SCRGB`, 39.`ui::Dataspace::JFIF`, 40.`ui::Dataspace::V0_JFIF`, 41.`ui::Dataspace::BT601_625`, 42.`ui::Dataspace::V0_BT601_625`, 43.`ui::Dataspace::BT601_525`, 44.`ui::Dataspace::V0_BT601_525`, 45.`ui::Dataspace::BT709`, 46.`ui::Dataspace::V0_BT709`, 47.`ui::Dataspace::DCI_P3_LINEAR`, 48.`ui::Dataspace::DCI_P3`, 49.`ui::Dataspace::DISPLAY_P3_LINEAR`, 50.`ui::Dataspace::DISPLAY_P3`, 51.`ui::Dataspace::ADOBE_RGB`, 52.`ui::Dataspace::BT2020_LINEAR`, 53.`ui::Dataspace::BT2020`, 54.`ui::Dataspace::BT2020_PQ`, 55.`ui::Dataspace::DEPTH`, 56.`ui::Dataspace::SENSOR`, 57.`ui::Dataspace::BT2020_ITU`, 58.`ui::Dataspace::BT2020_ITU_PQ`, 59.`ui::Dataspace::BT2020_ITU_HLG`, 60.`ui::Dataspace::BT2020_HLG`, 61.`ui::Dataspace::DISPLAY_BT2020`, 62.`ui::Dataspace::DYNAMIC_DEPTH`, 63.`ui::Dataspace::JPEG_APP_SEGMENTS`, 64.`ui::Dataspace::HEIF`, |Value obtained from FuzzedDataProvider|
+|`transform`| 0.`ui::Transform::ROT_0`, 1.`ui::Transform::FLIP_H`, 2.`ui::Transform::FLIP_V`, 3.`ui::Transform::ROT_90`, 4.`ui::Transform::ROT_180`, 5.`ui::Transform::ROT_270` |Value obtained from FuzzedDataProvider|
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) libgui_surfaceComposerClient_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/libgui_surfaceComposerClient_fuzzer/libgui_surfaceComposerClient_fuzzer
+```
+
+# <a name="libgui_parcelable_fuzzer"></a> Fuzzer for Libgui_Parcelable
+
+Libgui_Parcelable supports the following parameters:
+1. LayerMetadataKey (parameter name:`key`)
+2. Dataspace (parameter name:`mDataspace`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+|`key`| 0.`view::LayerMetadataKey::METADATA_OWNER_UID`, 1.`view::LayerMetadataKey::METADATA_WINDOW_TYPE`, 2.`view::LayerMetadataKey::METADATA_TASK_ID`, 3.`view::LayerMetadataKey::METADATA_MOUSE_CURSOR`, 4.`view::LayerMetadataKey::METADATA_ACCESSIBILITY_ID`, 5.`view::LayerMetadataKey::METADATA_OWNER_PID`, 6.`view::LayerMetadataKey::METADATA_DEQUEUE_TIME`, 7.`view::LayerMetadataKey::METADATA_GAME_MODE`, |Value obtained from FuzzedDataProvider|
+|`mDataSpace`| 0.`ui::Dataspace::UNKNOWN`, 1.`ui::Dataspace::ARBITRARY`, 2.`ui::Dataspace::STANDARD_SHIFT`, 3.`ui::Dataspace::STANDARD_MASK`, 4.`ui::Dataspace::STANDARD_UNSPECIFIED`, 5.`ui::Dataspace::STANDARD_BT709`, 6.`ui::Dataspace::STANDARD_BT601_625`, 7.`ui::Dataspace::STANDARD_BT601_625_UNADJUSTED`, 8.`ui::Dataspace::STANDARD_BT601_525`, 9.`ui::Dataspace::STANDARD_BT601_525_UNADJUSTED`, 10.`ui::Dataspace::STANDARD_BT2020`, 11.`ui::Dataspace::STANDARD_BT2020_CONSTANT_LUMINANCE`, 12.`ui::Dataspace::STANDARD_BT470M`, 13.`ui::Dataspace::STANDARD_FILM`, 14.`ui::Dataspace::STANDARD_DCI_P3`, 15.`ui::Dataspace::STANDARD_ADOBE_RGB`, 16.`ui::Dataspace::TRANSFER_SHIFT`, 17.`ui::Dataspace::TRANSFER_MASK`, 18.`ui::Dataspace::TRANSFER_UNSPECIFIED`, 19.`ui::Dataspace::TRANSFER_LINEAR`, 20.`ui::Dataspace::TRANSFER_SRGB`, 21.`ui::Dataspace::TRANSFER_SMPTE_170M`, 22.`ui::Dataspace::TRANSFER_GAMMA2_2`, 23.`ui::Dataspace::TRANSFER_GAMMA2_6`, 24.`ui::Dataspace::TRANSFER_GAMMA2_8`, 25.`ui::Dataspace::TRANSFER_ST2084`, 26.`ui::Dataspace::TRANSFER_HLG`, 27.`ui::Dataspace::RANGE_SHIFT`, 28.`ui::Dataspace::RANGE_MASK`, 29.`ui::Dataspace::RANGE_UNSPECIFIED`, 30.`ui::Dataspace::RANGE_FULL`, 31.`ui::Dataspace::RANGE_LIMITED`, 32.`ui::Dataspace::RANGE_EXTENDED`, 33.`ui::Dataspace::SRGB_LINEAR`, 34.`ui::Dataspace::V0_SRGB_LINEAR`, 35.`ui::Dataspace::V0_SCRGB_LINEAR`, 36.`ui::Dataspace::SRGB`, 37.`ui::Dataspace::V0_SRGB`, 38.`ui::Dataspace::V0_SCRGB`, 39.`ui::Dataspace::JFIF`, 40.`ui::Dataspace::V0_JFIF`, 41.`ui::Dataspace::BT601_625`, 42.`ui::Dataspace::V0_BT601_625`, 43.`ui::Dataspace::BT601_525`, 44.`ui::Dataspace::V0_BT601_525`, 45.`ui::Dataspace::BT709`, 46.`ui::Dataspace::V0_BT709`, 47.`ui::Dataspace::DCI_P3_LINEAR`, 48.`ui::Dataspace::DCI_P3`, 49.`ui::Dataspace::DISPLAY_P3_LINEAR`, 50.`ui::Dataspace::DISPLAY_P3`, 51.`ui::Dataspace::ADOBE_RGB`, 52.`ui::Dataspace::BT2020_LINEAR`, 53.`ui::Dataspace::BT2020`, 54.`ui::Dataspace::BT2020_PQ`, 55.`ui::Dataspace::DEPTH`, 56.`ui::Dataspace::SENSOR`, 57.`ui::Dataspace::BT2020_ITU`, 58.`ui::Dataspace::BT2020_ITU_PQ`, 59.`ui::Dataspace::BT2020_ITU_HLG`, 60.`ui::Dataspace::BT2020_HLG`, 61.`ui::Dataspace::DISPLAY_BT2020`, 62.`ui::Dataspace::DYNAMIC_DEPTH`, 63.`ui::Dataspace::JPEG_APP_SEGMENTS`, 64.`ui::Dataspace::HEIF`, |Value obtained from FuzzedDataProvider|
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) libgui_parcelable_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/libgui_parcelable_fuzzer/libgui_parcelable_fuzzer
+```
+
+# <a name="libgui_bufferQueue_fuzzer"></a> Fuzzer for BufferQueue
+
+BufferQueue supports the following parameters:
+1. SurfaceWidth (parameter name:`width`)
+2. SurfaceHeight (parameter name:`height`)
+3. TransactionStateFlags (parameter name:`flags`)
+4. TransformHint (parameter name:`outTransformHint`)
+5. SurfacePixelFormat (parameter name:`format`)
+6. LayerId (parameter name:`layerId`)
+7. BufferId (parameter name:`bufferId`)
+8. FrameNumber (parameter name:`frameNumber`)
+9. FrameRate (parameter name:`frameRate`)
+10. Compatibility (parameter name:`compatability`)
+11. LatchTime (parameter name:`latchTime`)
+12. AcquireTime (parameter name:`acquireTime`)
+13. RefreshTime (parameter name:`refreshTime`)
+14. DequeueTime (parameter name:`dequeueTime`)
+15. Slot (parameter name:`slot`)
+16. MaxBuffers (parameter name:`maxBuffers`)
+17. GenerationNumber (parameter name:`generationNumber`)
+18. Api (parameter name:`api`)
+19. Usage (parameter name:`usage`)
+20. MaxFrameNumber (parameter name:`maxFrameNumber`)
+21. BufferCount (parameter name:`bufferCount`)
+22. MaxAcquiredBufferCount (parameter name:`maxAcquredBufferCount`)
+23. Status (parameter name:`status`)
+24. ApiConnection (parameter name:`apiConnection`)
+25. Dataspace (parameter name:`dataspace`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+|`status`| 0.`OK`, 1.`NO_MEMORY`, 2.`NO_INIT`, 3.`BAD_VALUE`, 4.`DEAD_OBJECT`, 5.`INVALID_OPERATION`, 6.`TIMED_OUT`, 7.`WOULD_BLOCK`, 8.`UNKNOWN_ERROR`, 9.`ALREADY_EXISTS`, |Value obtained from FuzzedDataProvider|
+|`apiConnection`| 0.`BufferQueueCore::CURRENTLY_CONNECTED_API`, 1.`BufferQueueCore::NO_CONNECTED_API`, 2.`NATIVE_WINDOW_API_EGL`, 3.`NATIVE_WINDOW_API_CPU`, 4.`NATIVE_WINDOW_API_MEDIA`, 5.`NATIVE_WINDOW_API_CAMERA`, |Value obtained from FuzzedDataProvider|
+|`dataspace`| 0.`ui::Dataspace::UNKNOWN`, 1.`ui::Dataspace::ARBITRARY`, 2.`ui::Dataspace::STANDARD_SHIFT`, 3.`ui::Dataspace::STANDARD_MASK`, 4.`ui::Dataspace::STANDARD_UNSPECIFIED`, 5.`ui::Dataspace::STANDARD_BT709`, 6.`ui::Dataspace::STANDARD_BT601_625`, 7.`ui::Dataspace::STANDARD_BT601_625_UNADJUSTED`, 8.`ui::Dataspace::STANDARD_BT601_525`, 9.`ui::Dataspace::STANDARD_BT601_525_UNADJUSTED`, 10.`ui::Dataspace::STANDARD_BT2020`, 11.`ui::Dataspace::STANDARD_BT2020_CONSTANT_LUMINANCE`, 12.`ui::Dataspace::STANDARD_BT470M`, 13.`ui::Dataspace::STANDARD_FILM`, 14.`ui::Dataspace::STANDARD_DCI_P3`, 15.`ui::Dataspace::STANDARD_ADOBE_RGB`, 16.`ui::Dataspace::TRANSFER_SHIFT`, 17.`ui::Dataspace::TRANSFER_MASK`, 18.`ui::Dataspace::TRANSFER_UNSPECIFIED`, 19.`ui::Dataspace::TRANSFER_LINEAR`, 20.`ui::Dataspace::TRANSFER_SRGB`, 21.`ui::Dataspace::TRANSFER_SMPTE_170M`, 22.`ui::Dataspace::TRANSFER_GAMMA2_2`, 23.`ui::Dataspace::TRANSFER_GAMMA2_6`, 24.`ui::Dataspace::TRANSFER_GAMMA2_8`, 25.`ui::Dataspace::TRANSFER_ST2084`, 26.`ui::Dataspace::TRANSFER_HLG`, 27.`ui::Dataspace::RANGE_SHIFT`, 28.`ui::Dataspace::RANGE_MASK`, 29.`ui::Dataspace::RANGE_UNSPECIFIED`, 30.`ui::Dataspace::RANGE_FULL`, 31.`ui::Dataspace::RANGE_LIMITED`, 32.`ui::Dataspace::RANGE_EXTENDED`, 33.`ui::Dataspace::SRGB_LINEAR`, 34.`ui::Dataspace::V0_SRGB_LINEAR`, 35.`ui::Dataspace::V0_SCRGB_LINEAR`, 36.`ui::Dataspace::SRGB`, 37.`ui::Dataspace::V0_SRGB`, 38.`ui::Dataspace::V0_SCRGB`, 39.`ui::Dataspace::JFIF`, 40.`ui::Dataspace::V0_JFIF`, 41.`ui::Dataspace::BT601_625`, 42.`ui::Dataspace::V0_BT601_625`, 43.`ui::Dataspace::BT601_525`, 44.`ui::Dataspace::V0_BT601_525`, 45.`ui::Dataspace::BT709`, 46.`ui::Dataspace::V0_BT709`, 47.`ui::Dataspace::DCI_P3_LINEAR`, 48.`ui::Dataspace::DCI_P3`, 49.`ui::Dataspace::DISPLAY_P3_LINEAR`, 50.`ui::Dataspace::DISPLAY_P3`, 51.`ui::Dataspace::ADOBE_RGB`, 52.`ui::Dataspace::BT2020_LINEAR`, 53.`ui::Dataspace::BT2020`, 54.`ui::Dataspace::BT2020_PQ`, 55.`ui::Dataspace::DEPTH`, 56.`ui::Dataspace::SENSOR`, 57.`ui::Dataspace::BT2020_ITU`, 58.`ui::Dataspace::BT2020_ITU_PQ`, 59.`ui::Dataspace::BT2020_ITU_HLG`, 60.`ui::Dataspace::BT2020_HLG`, 61.`ui::Dataspace::DISPLAY_BT2020`, 62.`ui::Dataspace::DYNAMIC_DEPTH`, 63.`ui::Dataspace::JPEG_APP_SEGMENTS`, 64.`ui::Dataspace::HEIF`, |Value obtained from FuzzedDataProvider|
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) libgui_bufferQueue_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/libgui_bufferQueue_fuzzer/libgui_bufferQueue_fuzzer
+```
+
+# <a name="libgui_consumer_fuzzer"></a> Fuzzer for Libgui_Consumer
+
+Libgui_Consumer supports the following parameters:
+1. GraphicWidth (parameter name:`graphicWidth`)
+2. GraphicHeight (parameter name:`graphicHeight`)
+3. TransformHint (parameter name:`outTransformHint`)
+4. GraphicPixelFormat (parameter name:`format`)
+5. Usage (parameter name:`usage`)
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) libgui_consumer_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/libgui_consumer_fuzzer/libgui_consumer_fuzzer
+```
+
+# <a name="libgui_displayEvent_fuzzer"></a> Fuzzer for LibGui_DisplayEvent
+
+LibGui_DisplayEvent supports the following parameters:
+1. DisplayEventType (parameter name:`type`)
+2. Events (parameter name:`events`)
+3. VsyncSource (parameter name:`vsyncSource`)
+4. EventRegistrationFlags (parameter name:`flags`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+|`vsyncSource`| 0.`ISurfaceComposer::eVsyncSourceApp`, 1.`ISurfaceComposer::eVsyncSourceSurfaceFlinger`, |Value obtained from FuzzedDataProvider|
+|`flags`| 0.`ISurfaceComposer::EventRegistration::modeChanged`, 1.`ISurfaceComposer::EventRegistration::frameRateOverride`, |Value obtained from FuzzedDataProvider|
+|`type`| 0.`DisplayEventReceiver::DISPLAY_EVENT_NULL`, 1.`DisplayEventReceiver::DISPLAY_EVENT_VSYNC`, 2.`DisplayEventReceiver::DISPLAY_EVENT_HOTPLUG`, 3.`DisplayEventReceiver::DISPLAY_EVENT_MODE_CHANGE`, 4.`DisplayEventReceiver::DISPLAY_EVENT_FRAME_RATE_OVERRIDE`, 5.`DisplayEventReceiver::DISPLAY_EVENT_FRAME_RATE_OVERRIDE_FLUSH`, |Value obtained from FuzzedDataProvider|
+|`events`| 0.`Looper::EVENT_INPUT`, 1.`Looper::EVENT_OUTPUT`, 2.`Looper::EVENT_ERROR`, 3.`Looper::EVENT_HANGUP`, 4.`Looper::EVENT_INVALID`, |Value obtained from FuzzedDataProvider|
+
+#### Steps to run
+1. Build the fuzzer
+```
+  $ mm -j$(nproc) libgui_displayEvent_fuzzer
+```
+2. Run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/libgui_displayEvent_fuzzer/libgui_displayEvent_fuzzer
+```
diff --git a/libs/gui/fuzzer/libgui_bufferQueue_fuzzer.cpp b/libs/gui/fuzzer/libgui_bufferQueue_fuzzer.cpp
new file mode 100644
index 0000000..17f4c63
--- /dev/null
+++ b/libs/gui/fuzzer/libgui_bufferQueue_fuzzer.cpp
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <android-base/stringprintf.h>
+#include <gui/BufferQueueConsumer.h>
+#include <gui/BufferQueueCore.h>
+#include <gui/BufferQueueProducer.h>
+#include <gui/bufferqueue/2.0/types.h>
+#include <system/window.h>
+
+#include <libgui_fuzzer_utils.h>
+
+using namespace android;
+using namespace hardware::graphics::bufferqueue;
+using namespace V1_0::utils;
+using namespace V2_0::utils;
+
+constexpr int32_t kMaxBytes = 256;
+
+constexpr int32_t kError[] = {
+        OK,        NO_MEMORY,   NO_INIT,       BAD_VALUE,      DEAD_OBJECT, INVALID_OPERATION,
+        TIMED_OUT, WOULD_BLOCK, UNKNOWN_ERROR, ALREADY_EXISTS,
+};
+
+constexpr int32_t kAPIConnection[] = {
+        BufferQueueCore::CURRENTLY_CONNECTED_API,
+        BufferQueueCore::NO_CONNECTED_API,
+        NATIVE_WINDOW_API_EGL,
+        NATIVE_WINDOW_API_CPU,
+        NATIVE_WINDOW_API_MEDIA,
+        NATIVE_WINDOW_API_CAMERA,
+};
+
+class BufferQueueFuzzer {
+public:
+    BufferQueueFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+    void process();
+
+private:
+    void invokeTypes();
+    void invokeH2BGraphicBufferV1();
+    void invokeH2BGraphicBufferV2();
+    void invokeBufferQueueConsumer();
+    void invokeBufferQueueProducer();
+    void invokeBlastBufferQueue();
+    void invokeQuery(sp<BufferQueueProducer>);
+    void invokeQuery(sp<V1_0::utils::H2BGraphicBufferProducer>);
+    void invokeQuery(sp<V2_0::utils::H2BGraphicBufferProducer>);
+    void invokeAcquireBuffer(sp<BufferQueueConsumer>);
+    void invokeOccupancyTracker(sp<BufferQueueConsumer>);
+    sp<SurfaceControl> makeSurfaceControl();
+    sp<BLASTBufferQueue> makeBLASTBufferQueue(sp<SurfaceControl>);
+
+    FuzzedDataProvider mFdp;
+};
+
+class ManageResourceHandle {
+public:
+    ManageResourceHandle(FuzzedDataProvider* fdp) {
+        mNativeHandle = native_handle_create(0 /*numFds*/, 1 /*numInts*/);
+        mShouldOwn = fdp->ConsumeBool();
+        mStream = NativeHandle::create(mNativeHandle, mShouldOwn);
+    }
+    ~ManageResourceHandle() {
+        if (!mShouldOwn) {
+            native_handle_close(mNativeHandle);
+            native_handle_delete(mNativeHandle);
+        }
+    }
+    sp<NativeHandle> getStream() { return mStream; }
+
+private:
+    bool mShouldOwn;
+    sp<NativeHandle> mStream;
+    native_handle_t* mNativeHandle;
+};
+
+sp<SurfaceControl> BufferQueueFuzzer::makeSurfaceControl() {
+    sp<IBinder> handle;
+    const sp<FakeBnSurfaceComposerClient> testClient(new FakeBnSurfaceComposerClient());
+    sp<SurfaceComposerClient> client = new SurfaceComposerClient(testClient);
+    sp<BnGraphicBufferProducer> producer;
+    uint32_t layerId = mFdp.ConsumeIntegral<uint32_t>();
+    std::string layerName = base::StringPrintf("#%d", layerId);
+    return sp<SurfaceControl>::make(client, handle, layerId, layerName,
+                                    mFdp.ConsumeIntegral<int32_t>(),
+                                    mFdp.ConsumeIntegral<uint32_t>(),
+                                    mFdp.ConsumeIntegral<int32_t>(),
+                                    mFdp.ConsumeIntegral<uint32_t>(),
+                                    mFdp.ConsumeIntegral<uint32_t>());
+}
+
+sp<BLASTBufferQueue> BufferQueueFuzzer::makeBLASTBufferQueue(sp<SurfaceControl> surface) {
+    return sp<BLASTBufferQueue>::make(mFdp.ConsumeRandomLengthString(kMaxBytes), surface,
+                                      mFdp.ConsumeIntegral<uint32_t>(),
+                                      mFdp.ConsumeIntegral<uint32_t>(),
+                                      mFdp.ConsumeIntegral<int32_t>());
+}
+
+void BufferQueueFuzzer::invokeBlastBufferQueue() {
+    sp<SurfaceControl> surface = makeSurfaceControl();
+    sp<BLASTBufferQueue> queue = makeBLASTBufferQueue(surface);
+
+    BufferItem item;
+    queue->onFrameAvailable(item);
+    queue->onFrameReplaced(item);
+    uint64_t bufferId = mFdp.ConsumeIntegral<uint64_t>();
+    queue->onFrameDequeued(bufferId);
+    queue->onFrameCancelled(bufferId);
+
+    SurfaceComposerClient::Transaction next;
+    uint64_t frameNumber = mFdp.ConsumeIntegral<uint64_t>();
+    queue->mergeWithNextTransaction(&next, frameNumber);
+    queue->applyPendingTransactions(frameNumber);
+
+    queue->update(surface, mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<uint32_t>(),
+                  mFdp.ConsumeIntegral<int32_t>());
+    queue->setFrameRate(mFdp.ConsumeFloatingPoint<float>(), mFdp.ConsumeIntegral<int8_t>(),
+                        mFdp.ConsumeBool() /*shouldBeSeamless*/);
+    FrameTimelineInfo info;
+    queue->setFrameTimelineInfo(mFdp.ConsumeIntegral<uint64_t>(), info);
+
+    ManageResourceHandle handle(&mFdp);
+    queue->setSidebandStream(handle.getStream());
+
+    queue->getLastTransformHint();
+    queue->getLastAcquiredFrameNum();
+
+    CompositorTiming compTiming;
+    sp<Fence> previousFence = new Fence(memfd_create("pfd", MFD_ALLOW_SEALING));
+    sp<Fence> gpuFence = new Fence(memfd_create("gfd", MFD_ALLOW_SEALING));
+    FrameEventHistoryStats frameStats(frameNumber, gpuFence, compTiming,
+                                      mFdp.ConsumeIntegral<int64_t>(),
+                                      mFdp.ConsumeIntegral<int64_t>());
+    std::vector<SurfaceControlStats> stats;
+    sp<Fence> presentFence = new Fence(memfd_create("fd", MFD_ALLOW_SEALING));
+    SurfaceControlStats controlStats(surface, mFdp.ConsumeIntegral<int64_t>(),
+                                     mFdp.ConsumeIntegral<int64_t>(), presentFence, previousFence,
+                                     mFdp.ConsumeIntegral<uint32_t>(), frameStats,
+                                     mFdp.ConsumeIntegral<uint32_t>());
+    stats.push_back(controlStats);
+}
+
+void BufferQueueFuzzer::invokeQuery(sp<BufferQueueProducer> producer) {
+    int32_t value;
+    producer->query(mFdp.ConsumeIntegral<int32_t>(), &value);
+}
+
+void BufferQueueFuzzer::invokeQuery(sp<V1_0::utils::H2BGraphicBufferProducer> producer) {
+    int32_t value;
+    producer->query(mFdp.ConsumeIntegral<int32_t>(), &value);
+}
+
+void BufferQueueFuzzer::invokeQuery(sp<V2_0::utils::H2BGraphicBufferProducer> producer) {
+    int32_t value;
+    producer->query(mFdp.ConsumeIntegral<int32_t>(), &value);
+}
+
+void BufferQueueFuzzer::invokeBufferQueueProducer() {
+    sp<BufferQueueCore> core(new BufferQueueCore());
+    sp<BufferQueueProducer> producer(new BufferQueueProducer(core));
+    const sp<android::IProducerListener> listener;
+    android::IGraphicBufferProducer::QueueBufferOutput output;
+    uint32_t api = mFdp.ConsumeIntegral<uint32_t>();
+    producer->connect(listener, api, mFdp.ConsumeBool() /*producerControlledByApp*/, &output);
+
+    sp<GraphicBuffer> buffer;
+    int32_t slot = mFdp.ConsumeIntegral<int32_t>();
+    uint32_t maxBuffers = mFdp.ConsumeIntegral<uint32_t>();
+    producer->requestBuffer(slot, &buffer);
+    producer->setMaxDequeuedBufferCount(maxBuffers);
+    producer->setAsyncMode(mFdp.ConsumeBool() /*async*/);
+
+    android::IGraphicBufferProducer::QueueBufferInput input;
+    producer->attachBuffer(&slot, buffer);
+    producer->queueBuffer(slot, input, &output);
+
+    int32_t format = mFdp.ConsumeIntegral<int32_t>();
+    uint32_t width = mFdp.ConsumeIntegral<uint32_t>();
+    uint32_t height = mFdp.ConsumeIntegral<uint32_t>();
+    uint64_t usage = mFdp.ConsumeIntegral<uint64_t>();
+    uint64_t outBufferAge;
+    FrameEventHistoryDelta outTimestamps;
+    sp<android::Fence> fence;
+    producer->dequeueBuffer(&slot, &fence, width, height, format, usage, &outBufferAge,
+                            &outTimestamps);
+    producer->detachBuffer(slot);
+    producer->detachNextBuffer(&buffer, &fence);
+    producer->cancelBuffer(slot, fence);
+
+    invokeQuery(producer);
+
+    ManageResourceHandle handle(&mFdp);
+    producer->setSidebandStream(handle.getStream());
+
+    producer->allocateBuffers(width, height, format, usage);
+    producer->allowAllocation(mFdp.ConsumeBool() /*allow*/);
+    producer->setSharedBufferMode(mFdp.ConsumeBool() /*sharedBufferMode*/);
+    producer->setAutoRefresh(mFdp.ConsumeBool() /*autoRefresh*/);
+    producer->setLegacyBufferDrop(mFdp.ConsumeBool() /*drop*/);
+    producer->setAutoPrerotation(mFdp.ConsumeBool() /*autoPrerotation*/);
+
+    producer->setGenerationNumber(mFdp.ConsumeIntegral<uint32_t>());
+    producer->setDequeueTimeout(mFdp.ConsumeIntegral<uint32_t>());
+    producer->disconnect(api);
+}
+
+void BufferQueueFuzzer::invokeAcquireBuffer(sp<BufferQueueConsumer> consumer) {
+    BufferItem item;
+    consumer->acquireBuffer(&item, mFdp.ConsumeIntegral<uint32_t>(),
+                            mFdp.ConsumeIntegral<uint64_t>());
+}
+
+void BufferQueueFuzzer::invokeOccupancyTracker(sp<BufferQueueConsumer> consumer) {
+    String8 outResult;
+    String8 prefix((mFdp.ConsumeRandomLengthString(kMaxBytes)).c_str());
+    consumer->dumpState(prefix, &outResult);
+
+    std::vector<OccupancyTracker::Segment> outHistory;
+    consumer->getOccupancyHistory(mFdp.ConsumeBool() /*forceFlush*/, &outHistory);
+}
+
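+// Exercises the consumer side of the buffer queue: connect, attach/detach a buffer,
+// apply fuzzed configuration, release a buffer, then disconnect.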
+void BufferQueueFuzzer::invokeBufferQueueConsumer() {
+    sp<BufferQueueCore> core(new BufferQueueCore());
+    sp<BufferQueueConsumer> consumer(new BufferQueueConsumer(core));
+    sp<android::IConsumerListener> listener;
+    consumer->consumerConnect(listener, mFdp.ConsumeBool() /*controlledByApp*/);
+    invokeAcquireBuffer(consumer);
+
+    int32_t slot = mFdp.ConsumeIntegral<int32_t>();
+    sp<GraphicBuffer> buffer =
+            new GraphicBuffer(mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<uint32_t>(),
+                              mFdp.ConsumeIntegral<int32_t>(), mFdp.ConsumeIntegral<uint32_t>(),
+                              mFdp.ConsumeIntegral<uint64_t>());
+    consumer->attachBuffer(&slot, buffer);
+    consumer->detachBuffer(slot);
+
+    consumer->setDefaultBufferSize(mFdp.ConsumeIntegral<uint32_t>(),
+                                   mFdp.ConsumeIntegral<uint32_t>());
+    consumer->setMaxBufferCount(mFdp.ConsumeIntegral<int32_t>());
+    consumer->setMaxAcquiredBufferCount(mFdp.ConsumeIntegral<int32_t>());
+
+    String8 name((mFdp.ConsumeRandomLengthString(kMaxBytes)).c_str());
+    consumer->setConsumerName(name);
+    consumer->setDefaultBufferFormat(mFdp.ConsumeIntegral<int32_t>());
+    android_dataspace dataspace =
+            static_cast<android_dataspace>(mFdp.PickValueInArray(kDataspaces));
+    consumer->setDefaultBufferDataSpace(dataspace);
+
+    consumer->setTransformHint(mFdp.ConsumeIntegral<uint32_t>());
+    consumer->setConsumerUsageBits(mFdp.ConsumeIntegral<uint64_t>());
+    consumer->setConsumerIsProtected(mFdp.ConsumeBool() /*isProtected*/);
+    invokeOccupancyTracker(consumer);
+
+    sp<Fence> releaseFence = new Fence(memfd_create("fd", MFD_ALLOW_SEALING));
+    consumer->releaseBuffer(mFdp.ConsumeIntegral<int32_t>(), mFdp.ConsumeIntegral<uint64_t>(),
+                            EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, releaseFence);
+    consumer->consumerDisconnect();
+}
+
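+// Round-trips fuzzed status and connection-type values through the b2h/h2b HIDL converters.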
+void BufferQueueFuzzer::invokeTypes() {
+    HStatus hStatus;
+    int32_t status = mFdp.PickValueInArray(kError);
+    bool bufferNeedsReallocation = mFdp.ConsumeBool();
+    bool releaseAllBuffers = mFdp.ConsumeBool();
+    b2h(status, &hStatus, &bufferNeedsReallocation, &releaseAllBuffers);
+    h2b(hStatus, &status);
+
+    HConnectionType type;
+    int32_t apiConnection = mFdp.PickValueInArray(kAPIConnection);
+    b2h(apiConnection, &type);
+    h2b(type, &apiConnection);
+}
+
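+// Drives the HIDL 1.0 H2BGraphicBufferProducer wrapper backed by a gmock fake producer.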
+void BufferQueueFuzzer::invokeH2BGraphicBufferV1() {
+    sp<V1_0::utils::H2BGraphicBufferProducer> producer(
+            new V1_0::utils::H2BGraphicBufferProducer(new FakeGraphicBufferProducerV1()));
+    const sp<android::IProducerListener> listener;
+    android::IGraphicBufferProducer::QueueBufferOutput output;
+    uint32_t api = mFdp.ConsumeIntegral<uint32_t>();
+    producer->connect(listener, api, mFdp.ConsumeBool() /*producerControlledByApp*/, &output);
+
+    sp<GraphicBuffer> buffer;
+    int32_t slot = mFdp.ConsumeIntegral<int32_t>();
+    producer->requestBuffer(slot, &buffer);
+    producer->setMaxDequeuedBufferCount(mFdp.ConsumeIntegral<int32_t>());
+    producer->setAsyncMode(mFdp.ConsumeBool());
+
+    android::IGraphicBufferProducer::QueueBufferInput input;
+    input.fence = new Fence(memfd_create("ffd", MFD_ALLOW_SEALING));
+    producer->attachBuffer(&slot, buffer);
+    producer->queueBuffer(slot, input, &output);
+
+    int32_t format = mFdp.ConsumeIntegral<int32_t>();
+    uint32_t width = mFdp.ConsumeIntegral<uint32_t>();
+    uint32_t height = mFdp.ConsumeIntegral<uint32_t>();
+    uint64_t usage = mFdp.ConsumeIntegral<uint64_t>();
+    uint64_t outBufferAge;
+    FrameEventHistoryDelta outTimestamps;
+    sp<android::Fence> fence;
+    producer->dequeueBuffer(&slot, &fence, width, height, format, usage, &outBufferAge,
+                            &outTimestamps);
+    producer->detachBuffer(slot);
+    producer->cancelBuffer(slot, fence);
+
+    invokeQuery(producer);
+
+    ManageResourceHandle handle(&mFdp);
+    producer->setSidebandStream(handle.getStream());
+
+    producer->allocateBuffers(width, height, format, usage);
+    producer->allowAllocation(mFdp.ConsumeBool() /*allow*/);
+    producer->setSharedBufferMode(mFdp.ConsumeBool() /*sharedBufferMode*/);
+    producer->setAutoRefresh(mFdp.ConsumeBool() /*autoRefresh*/);
+
+    producer->setGenerationNumber(mFdp.ConsumeIntegral<uint32_t>());
+    producer->setDequeueTimeout(mFdp.ConsumeIntegral<uint32_t>());
+    producer->disconnect(api);
+}
+
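+// Drives the HIDL 2.0 H2BGraphicBufferProducer wrapper backed by a gmock fake producer.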
+void BufferQueueFuzzer::invokeH2BGraphicBufferV2() {
+    sp<V2_0::utils::H2BGraphicBufferProducer> producer(
+            new V2_0::utils::H2BGraphicBufferProducer(new FakeGraphicBufferProducerV2()));
+    const sp<android::IProducerListener> listener;
+    android::IGraphicBufferProducer::QueueBufferOutput output;
+    uint32_t api = mFdp.ConsumeIntegral<uint32_t>();
+    producer->connect(listener, api, mFdp.ConsumeBool() /*producerControlledByApp*/, &output);
+
+    sp<GraphicBuffer> buffer;
+    int32_t slot = mFdp.ConsumeIntegral<int32_t>();
+    producer->requestBuffer(slot, &buffer);
+    producer->setMaxDequeuedBufferCount(mFdp.ConsumeIntegral<uint32_t>());
+    producer->setAsyncMode(mFdp.ConsumeBool());
+
+    android::IGraphicBufferProducer::QueueBufferInput input;
+    input.fence = new Fence(memfd_create("ffd", MFD_ALLOW_SEALING));
+    producer->attachBuffer(&slot, buffer);
+    producer->queueBuffer(slot, input, &output);
+
+    int32_t format = mFdp.ConsumeIntegral<int32_t>();
+    uint32_t width = mFdp.ConsumeIntegral<uint32_t>();
+    uint32_t height = mFdp.ConsumeIntegral<uint32_t>();
+    uint64_t usage = mFdp.ConsumeIntegral<uint64_t>();
+    uint64_t outBufferAge;
+    FrameEventHistoryDelta outTimestamps;
+    sp<android::Fence> fence;
+    producer->dequeueBuffer(&slot, &fence, width, height, format, usage, &outBufferAge,
+                            &outTimestamps);
+    producer->detachBuffer(slot);
+    producer->cancelBuffer(slot, fence);
+
+    invokeQuery(producer);
+
+    ManageResourceHandle handle(&mFdp);
+    producer->setSidebandStream(handle.getStream());
+
+    producer->allocateBuffers(width, height, format, usage);
+    producer->allowAllocation(mFdp.ConsumeBool() /*allow*/);
+    producer->setSharedBufferMode(mFdp.ConsumeBool() /*sharedBufferMode*/);
+    producer->setAutoRefresh(mFdp.ConsumeBool() /*autoRefresh*/);
+
+    producer->setGenerationNumber(mFdp.ConsumeIntegral<uint32_t>());
+    producer->setDequeueTimeout(mFdp.ConsumeIntegral<uint32_t>());
+    producer->disconnect(api);
+}
+
+void BufferQueueFuzzer::process() {
+    invokeBlastBufferQueue();
+    invokeH2BGraphicBufferV1();
+    invokeH2BGraphicBufferV2();
+    invokeTypes();
+    invokeBufferQueueConsumer();
+    invokeBufferQueueProducer();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    BufferQueueFuzzer bufferQueueFuzzer(data, size);
+    bufferQueueFuzzer.process();
+    return 0;
+}
diff --git a/libs/gui/fuzzer/libgui_consumer_fuzzer.cpp b/libs/gui/fuzzer/libgui_consumer_fuzzer.cpp
new file mode 100644
index 0000000..24a046d
--- /dev/null
+++ b/libs/gui/fuzzer/libgui_consumer_fuzzer.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <gui/BufferQueueConsumer.h>
+#include <gui/BufferQueueCore.h>
+#include <gui/BufferQueueProducer.h>
+#include <gui/GLConsumer.h>
+#include <libgui_fuzzer_utils.h>
+
+using namespace android;
+
+constexpr int32_t kMinBuffer = 0;
+constexpr int32_t kMaxBuffer = 100000;
+
+class ConsumerFuzzer {
+public:
+    ConsumerFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+    void process();
+
+private:
+    FuzzedDataProvider mFdp;
+};
+
+void ConsumerFuzzer::process() {
+    sp<BufferQueueCore> core(new BufferQueueCore());
+    sp<IGraphicBufferConsumer> consumer(new BufferQueueConsumer(core));
+
+    uint64_t maxBuffers = mFdp.ConsumeIntegralInRange<uint64_t>(kMinBuffer, kMaxBuffer);
+    sp<CpuConsumer> cpu(
+            new CpuConsumer(consumer, maxBuffers, mFdp.ConsumeBool() /*controlledByApp*/));
+    CpuConsumer::LockedBuffer lockBuffer;
+    cpu->lockNextBuffer(&lockBuffer);
+    cpu->unlockBuffer(lockBuffer);
+    cpu->abandon();
+
+    uint32_t tex = mFdp.ConsumeIntegral<uint32_t>();
+    sp<GLConsumer> glConsumer(new GLConsumer(consumer, tex, GLConsumer::TEXTURE_EXTERNAL,
+                                             mFdp.ConsumeBool() /*useFenceSync*/,
+                                             mFdp.ConsumeBool() /*isControlledByApp*/));
+    sp<Fence> releaseFence = new Fence(memfd_create("rfd", MFD_ALLOW_SEALING));
+    glConsumer->setReleaseFence(releaseFence);
+    glConsumer->updateTexImage();
+    glConsumer->releaseTexImage();
+
+    sp<GraphicBuffer> buffer =
+            new GraphicBuffer(mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<uint32_t>(),
+                              mFdp.ConsumeIntegral<int32_t>(), mFdp.ConsumeIntegral<uint32_t>(),
+                              mFdp.ConsumeIntegral<uint64_t>());
+    float mtx[16];
+    glConsumer->getTransformMatrix(mtx);
+    glConsumer->computeTransformMatrix(mtx, buffer, getRect(&mFdp),
+                                       mFdp.ConsumeIntegral<uint32_t>(),
+                                       mFdp.ConsumeBool() /*filtering*/);
+    glConsumer->scaleDownCrop(getRect(&mFdp), mFdp.ConsumeIntegral<uint32_t>(),
+                              mFdp.ConsumeIntegral<uint32_t>());
+
+    glConsumer->setDefaultBufferSize(mFdp.ConsumeIntegral<uint32_t>(),
+                                     mFdp.ConsumeIntegral<uint32_t>());
+    glConsumer->setFilteringEnabled(mFdp.ConsumeBool() /*enabled*/);
+
+    glConsumer->setConsumerUsageBits(mFdp.ConsumeIntegral<uint64_t>());
+    glConsumer->attachToContext(tex);
+    glConsumer->abandon();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    ConsumerFuzzer consumerFuzzer(data, size);
+    consumerFuzzer.process();
+    return 0;
+}
diff --git a/libs/gui/fuzzer/libgui_displayEvent_fuzzer.cpp b/libs/gui/fuzzer/libgui_displayEvent_fuzzer.cpp
new file mode 100644
index 0000000..6e4f074
--- /dev/null
+++ b/libs/gui/fuzzer/libgui_displayEvent_fuzzer.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/gui/ISurfaceComposer.h>
+
+#include <libgui_fuzzer_utils.h>
+
+using namespace android;
+
+constexpr gui::ISurfaceComposer::VsyncSource kVsyncSource[] = {
+        gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
+        gui::ISurfaceComposer::VsyncSource::eVsyncSourceSurfaceFlinger,
+};
+
+constexpr gui::ISurfaceComposer::EventRegistration kEventRegistration[] = {
+        gui::ISurfaceComposer::EventRegistration::modeChanged,
+        gui::ISurfaceComposer::EventRegistration::frameRateOverride,
+};
+
+constexpr uint32_t kDisplayEvent[] = {
+        DisplayEventReceiver::DISPLAY_EVENT_NULL,
+        DisplayEventReceiver::DISPLAY_EVENT_VSYNC,
+        DisplayEventReceiver::DISPLAY_EVENT_HOTPLUG,
+        DisplayEventReceiver::DISPLAY_EVENT_MODE_CHANGE,
+        DisplayEventReceiver::DISPLAY_EVENT_FRAME_RATE_OVERRIDE,
+        DisplayEventReceiver::DISPLAY_EVENT_FRAME_RATE_OVERRIDE_FLUSH,
+};
+
+constexpr int32_t kEvents[] = {
+        Looper::EVENT_INPUT,  Looper::EVENT_OUTPUT,  Looper::EVENT_ERROR,
+        Looper::EVENT_HANGUP, Looper::EVENT_INVALID,
+};
+
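+// Fills in the event payload matching the fuzzed header type; other types keep the default payload.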
+DisplayEventReceiver::Event buildDisplayEvent(FuzzedDataProvider* fdp, uint32_t type,
+                                              DisplayEventReceiver::Event event) {
+    switch (type) {
+        case DisplayEventReceiver::DISPLAY_EVENT_VSYNC: {
+            event.vsync.count = fdp->ConsumeIntegral<uint32_t>();
+            event.vsync.vsyncData.frameInterval = fdp->ConsumeIntegral<uint64_t>();
+            event.vsync.vsyncData.preferredFrameTimelineIndex = fdp->ConsumeIntegral<uint32_t>();
+            for (size_t idx = 0; idx < gui::VsyncEventData::kFrameTimelinesCapacity; ++idx) {
+                event.vsync.vsyncData.frameTimelines[idx].vsyncId = fdp->ConsumeIntegral<int64_t>();
+                event.vsync.vsyncData.frameTimelines[idx].deadlineTimestamp =
+                        fdp->ConsumeIntegral<uint64_t>();
+                event.vsync.vsyncData.frameTimelines[idx].expectedPresentationTime =
+                        fdp->ConsumeIntegral<uint64_t>();
+            }
+            break;
+        }
+        case DisplayEventReceiver::DISPLAY_EVENT_HOTPLUG: {
+            event.hotplug = DisplayEventReceiver::Event::Hotplug{fdp->ConsumeBool() /*connected*/};
+            break;
+        }
+        case DisplayEventReceiver::DISPLAY_EVENT_MODE_CHANGE: {
+            event.modeChange =
+                    DisplayEventReceiver::Event::ModeChange{fdp->ConsumeIntegral<int32_t>(),
+                                                            fdp->ConsumeIntegral<int64_t>()};
+            break;
+        }
+        case DisplayEventReceiver::DISPLAY_EVENT_FRAME_RATE_OVERRIDE:
+        case DisplayEventReceiver::DISPLAY_EVENT_FRAME_RATE_OVERRIDE_FLUSH: {
+            event.frameRateOverride =
+                    DisplayEventReceiver::Event::FrameRateOverride{fdp->ConsumeIntegral<uint32_t>(),
+                                                                   fdp->ConsumeFloatingPoint<
+                                                                           float>()};
+            break;
+        }
+    }
+    return event;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fdp(data, size);
+    sp<Looper> looper;
+    sp<FakeDisplayEventDispatcher> dispatcher(
+            new FakeDisplayEventDispatcher(looper, fdp.PickValueInArray(kVsyncSource),
+                                           fdp.PickValueInArray(kEventRegistration)));
+
+    dispatcher->initialize();
+    DisplayEventReceiver::Event event;
+    uint32_t type = fdp.PickValueInArray(kDisplayEvent);
+    PhysicalDisplayId displayId;
+    event.header =
+            DisplayEventReceiver::Event::Header{type, displayId, fdp.ConsumeIntegral<int64_t>()};
+    event = buildDisplayEvent(&fdp, type, event);
+
+    dispatcher->injectEvent(event);
+    dispatcher->handleEvent(0, fdp.PickValueInArray(kEvents), nullptr);
+    return 0;
+}
diff --git a/libs/gui/fuzzer/libgui_fuzzer_utils.h b/libs/gui/fuzzer/libgui_fuzzer_utils.h
new file mode 100644
index 0000000..8c003d8
--- /dev/null
+++ b/libs/gui/fuzzer/libgui_fuzzer_utils.h
@@ -0,0 +1,315 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <android/gui/BnRegionSamplingListener.h>
+#include <android/gui/BnSurfaceComposer.h>
+#include <android/gui/BnSurfaceComposerClient.h>
+#include <android/gui/IDisplayEventConnection.h>
+#include <android/gui/ISurfaceComposerClient.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <gmock/gmock.h>
+#include <gui/BLASTBufferQueue.h>
+#include <gui/DisplayEventDispatcher.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/LayerDebugInfo.h>
+#include <gui/LayerState.h>
+#include <gui/bufferqueue/1.0/H2BGraphicBufferProducer.h>
+#include <gui/bufferqueue/2.0/H2BGraphicBufferProducer.h>
+#include <ui/fuzzer/FuzzableDataspaces.h>
+
+namespace android {
+
+constexpr uint32_t kOrientation[] = {
+        ui::Transform::ROT_0,  ui::Transform::FLIP_H,  ui::Transform::FLIP_V,
+        ui::Transform::ROT_90, ui::Transform::ROT_180, ui::Transform::ROT_270,
+};
+
+Rect getRect(FuzzedDataProvider* fdp) {
+    const int32_t left = fdp->ConsumeIntegral<int32_t>();
+    const int32_t top = fdp->ConsumeIntegral<int32_t>();
+    const int32_t right = fdp->ConsumeIntegral<int32_t>();
+    const int32_t bottom = fdp->ConsumeIntegral<int32_t>();
+    return Rect(left, top, right, bottom);
+}
+
+gui::DisplayBrightness getBrightness(FuzzedDataProvider* fdp) {
+    static constexpr float kMinBrightness = 0;
+    static constexpr float kMaxBrightness = 1;
+    gui::DisplayBrightness brightness;
+    brightness.sdrWhitePoint =
+            fdp->ConsumeFloatingPointInRange<float>(kMinBrightness, kMaxBrightness);
+    brightness.sdrWhitePointNits =
+            fdp->ConsumeFloatingPointInRange<float>(kMinBrightness, kMaxBrightness);
+    brightness.displayBrightness =
+            fdp->ConsumeFloatingPointInRange<float>(kMinBrightness, kMaxBrightness);
+    brightness.displayBrightnessNits =
+            fdp->ConsumeFloatingPointInRange<float>(kMinBrightness, kMaxBrightness);
+    return brightness;
+}
+
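+// gmock fake of the ISurfaceComposer AIDL interface so the fuzzers can run without SurfaceFlinger.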
+class FakeBnSurfaceComposer : public gui::BnSurfaceComposer {
+public:
+    MOCK_METHOD(binder::Status, bootFinished, (), (override));
+    MOCK_METHOD(binder::Status, createDisplayEventConnection,
+                (gui::ISurfaceComposer::VsyncSource, gui::ISurfaceComposer::EventRegistration,
+                 const sp<IBinder>& /*layerHandle*/, sp<gui::IDisplayEventConnection>*),
+                (override));
+    MOCK_METHOD(binder::Status, createConnection, (sp<gui::ISurfaceComposerClient>*), (override));
+    MOCK_METHOD(binder::Status, createDisplay, (const std::string&, bool, float, sp<IBinder>*),
+                (override));
+    MOCK_METHOD(binder::Status, destroyDisplay, (const sp<IBinder>&), (override));
+    MOCK_METHOD(binder::Status, getPhysicalDisplayIds, (std::vector<int64_t>*), (override));
+    MOCK_METHOD(binder::Status, getPhysicalDisplayToken, (int64_t, sp<IBinder>*), (override));
+    MOCK_METHOD(binder::Status, setPowerMode, (const sp<IBinder>&, int), (override));
+    MOCK_METHOD(binder::Status, getSupportedFrameTimestamps, (std::vector<FrameEvent>*),
+                (override));
+    MOCK_METHOD(binder::Status, getDisplayStats, (const sp<IBinder>&, gui::DisplayStatInfo*),
+                (override));
+    MOCK_METHOD(binder::Status, getDisplayState, (const sp<IBinder>&, gui::DisplayState*),
+                (override));
+    MOCK_METHOD(binder::Status, getStaticDisplayInfo, (int64_t, gui::StaticDisplayInfo*),
+                (override));
+    MOCK_METHOD(binder::Status, getDynamicDisplayInfoFromId, (int64_t, gui::DynamicDisplayInfo*),
+                (override));
+    MOCK_METHOD(binder::Status, getDynamicDisplayInfoFromToken,
+                (const sp<IBinder>&, gui::DynamicDisplayInfo*), (override));
+    MOCK_METHOD(binder::Status, getDisplayNativePrimaries,
+                (const sp<IBinder>&, gui::DisplayPrimaries*), (override));
+    MOCK_METHOD(binder::Status, setActiveColorMode, (const sp<IBinder>&, int), (override));
+    MOCK_METHOD(binder::Status, setBootDisplayMode, (const sp<IBinder>&, int), (override));
+    MOCK_METHOD(binder::Status, clearBootDisplayMode, (const sp<IBinder>&), (override));
+    MOCK_METHOD(binder::Status, getBootDisplayModeSupport, (bool*), (override));
+    MOCK_METHOD(binder::Status, getHdrConversionCapabilities,
+                (std::vector<gui::HdrConversionCapability>*), (override));
+    MOCK_METHOD(binder::Status, setHdrConversionStrategy,
+                (const gui::HdrConversionStrategy&, int32_t*), (override));
+    MOCK_METHOD(binder::Status, getHdrOutputConversionSupport, (bool*), (override));
+    MOCK_METHOD(binder::Status, setAutoLowLatencyMode, (const sp<IBinder>&, bool), (override));
+    MOCK_METHOD(binder::Status, setGameContentType, (const sp<IBinder>&, bool), (override));
+    MOCK_METHOD(binder::Status, captureDisplay,
+                (const DisplayCaptureArgs&, const sp<IScreenCaptureListener>&), (override));
+    MOCK_METHOD(binder::Status, captureDisplayById, (int64_t, const sp<IScreenCaptureListener>&),
+                (override));
+    MOCK_METHOD(binder::Status, captureLayers,
+                (const LayerCaptureArgs&, const sp<IScreenCaptureListener>&), (override));
+    MOCK_METHOD(binder::Status, clearAnimationFrameStats, (), (override));
+    MOCK_METHOD(binder::Status, getAnimationFrameStats, (gui::FrameStats*), (override));
+    MOCK_METHOD(binder::Status, overrideHdrTypes, (const sp<IBinder>&, const std::vector<int32_t>&),
+                (override));
+    MOCK_METHOD(binder::Status, onPullAtom, (int32_t, gui::PullAtomData*), (override));
+    MOCK_METHOD(binder::Status, getLayerDebugInfo, (std::vector<gui::LayerDebugInfo>*), (override));
+    MOCK_METHOD(binder::Status, getColorManagement, (bool*), (override));
+    MOCK_METHOD(binder::Status, getCompositionPreference, (gui::CompositionPreference*),
+                (override));
+    MOCK_METHOD(binder::Status, getDisplayedContentSamplingAttributes,
+                (const sp<IBinder>&, gui::ContentSamplingAttributes*), (override));
+    MOCK_METHOD(binder::Status, setDisplayContentSamplingEnabled,
+                (const sp<IBinder>&, bool, int8_t, int64_t), (override));
+    MOCK_METHOD(binder::Status, getDisplayedContentSample,
+                (const sp<IBinder>&, int64_t, int64_t, gui::DisplayedFrameStats*), (override));
+    MOCK_METHOD(binder::Status, getProtectedContentSupport, (bool*), (override));
+    MOCK_METHOD(binder::Status, isWideColorDisplay, (const sp<IBinder>&, bool*), (override));
+    MOCK_METHOD(binder::Status, addRegionSamplingListener,
+                (const gui::ARect&, const sp<IBinder>&, const sp<gui::IRegionSamplingListener>&),
+                (override));
+    MOCK_METHOD(binder::Status, removeRegionSamplingListener,
+                (const sp<gui::IRegionSamplingListener>&), (override));
+    MOCK_METHOD(binder::Status, addFpsListener, (int32_t, const sp<gui::IFpsListener>&),
+                (override));
+    MOCK_METHOD(binder::Status, removeFpsListener, (const sp<gui::IFpsListener>&), (override));
+    MOCK_METHOD(binder::Status, addTunnelModeEnabledListener,
+                (const sp<gui::ITunnelModeEnabledListener>&), (override));
+    MOCK_METHOD(binder::Status, removeTunnelModeEnabledListener,
+                (const sp<gui::ITunnelModeEnabledListener>&), (override));
+    MOCK_METHOD(binder::Status, setDesiredDisplayModeSpecs,
+                (const sp<IBinder>&, const gui::DisplayModeSpecs&), (override));
+    MOCK_METHOD(binder::Status, getDesiredDisplayModeSpecs,
+                (const sp<IBinder>&, gui::DisplayModeSpecs*), (override));
+    MOCK_METHOD(binder::Status, getDisplayBrightnessSupport, (const sp<IBinder>&, bool*),
+                (override));
+    MOCK_METHOD(binder::Status, setDisplayBrightness,
+                (const sp<IBinder>&, const gui::DisplayBrightness&), (override));
+    MOCK_METHOD(binder::Status, addHdrLayerInfoListener,
+                (const sp<IBinder>&, const sp<gui::IHdrLayerInfoListener>&), (override));
+    MOCK_METHOD(binder::Status, removeHdrLayerInfoListener,
+                (const sp<IBinder>&, const sp<gui::IHdrLayerInfoListener>&), (override));
+    MOCK_METHOD(binder::Status, notifyPowerBoost, (int), (override));
+    MOCK_METHOD(binder::Status, setGlobalShadowSettings,
+                (const gui::Color&, const gui::Color&, float, float, float), (override));
+    MOCK_METHOD(binder::Status, getDisplayDecorationSupport,
+                (const sp<IBinder>&, std::optional<gui::DisplayDecorationSupport>*), (override));
+    MOCK_METHOD(binder::Status, setOverrideFrameRate, (int32_t, float), (override));
+    MOCK_METHOD(binder::Status, getGpuContextPriority, (int32_t*), (override));
+    MOCK_METHOD(binder::Status, getMaxAcquiredBufferCount, (int32_t*), (override));
+    MOCK_METHOD(binder::Status, addWindowInfosListener, (const sp<gui::IWindowInfosListener>&),
+                (override));
+    MOCK_METHOD(binder::Status, removeWindowInfosListener, (const sp<gui::IWindowInfosListener>&),
+                (override));
+    MOCK_METHOD(binder::Status, getOverlaySupport, (gui::OverlayProperties*), (override));
+};
+
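+// gmock fake of the ISurfaceComposerClient AIDL interface used to back SurfaceComposerClient objects.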
+class FakeBnSurfaceComposerClient : public gui::BnSurfaceComposerClient {
+public:
+    MOCK_METHOD(binder::Status, createSurface,
+                (const std::string& name, int32_t flags, const sp<IBinder>& parent,
+                 const gui::LayerMetadata& metadata, gui::CreateSurfaceResult* outResult),
+                (override));
+
+    MOCK_METHOD(binder::Status, clearLayerFrameStats, (const sp<IBinder>& handle), (override));
+
+    MOCK_METHOD(binder::Status, getLayerFrameStats,
+                (const sp<IBinder>& handle, gui::FrameStats* outStats), (override));
+
+    MOCK_METHOD(binder::Status, mirrorSurface,
+                (const sp<IBinder>& mirrorFromHandle, gui::CreateSurfaceResult* outResult),
+                (override));
+
+    MOCK_METHOD(binder::Status, mirrorDisplay,
+                (int64_t displayId, gui::CreateSurfaceResult* outResult), (override));
+};
+
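+// DisplayEventDispatcher with mocked dispatch callbacks so injected events can be handled in-process.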
+class FakeDisplayEventDispatcher : public DisplayEventDispatcher {
+public:
+    FakeDisplayEventDispatcher(const sp<Looper>& looper,
+                               gui::ISurfaceComposer::VsyncSource vsyncSource,
+                               gui::ISurfaceComposer::EventRegistration eventRegistration)
+          : DisplayEventDispatcher(looper, vsyncSource, eventRegistration){};
+
+    MOCK_METHOD4(dispatchVsync, void(nsecs_t, PhysicalDisplayId, uint32_t, VsyncEventData));
+    MOCK_METHOD3(dispatchHotplug, void(nsecs_t, PhysicalDisplayId, bool));
+    MOCK_METHOD4(dispatchModeChanged, void(nsecs_t, PhysicalDisplayId, int32_t, nsecs_t));
+    MOCK_METHOD2(dispatchNullEvent, void(nsecs_t, PhysicalDisplayId));
+    MOCK_METHOD3(dispatchFrameRateOverrides,
+                 void(nsecs_t, PhysicalDisplayId, std::vector<FrameRateOverride>));
+};
+
+} // namespace android
+
+namespace android::hardware {
+
+namespace graphics::bufferqueue::V1_0::utils {
+
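+// gmock fake of the HIDL 1.0 buffer queue producer; ON_CALL defaults return success for the
+// methods the fuzzer exercises.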
+class FakeGraphicBufferProducerV1 : public HGraphicBufferProducer {
+public:
+    FakeGraphicBufferProducerV1() {
+        ON_CALL(*this, setMaxDequeuedBufferCount).WillByDefault([]() { return 0; });
+        ON_CALL(*this, setAsyncMode).WillByDefault([]() { return 0; });
+        ON_CALL(*this, detachBuffer).WillByDefault([]() { return 0; });
+        ON_CALL(*this, cancelBuffer).WillByDefault([]() { return 0; });
+        ON_CALL(*this, disconnect).WillByDefault([]() { return 0; });
+        ON_CALL(*this, setSidebandStream).WillByDefault([]() { return 0; });
+        ON_CALL(*this, allowAllocation).WillByDefault([]() { return 0; });
+        ON_CALL(*this, setGenerationNumber).WillByDefault([]() { return 0; });
+        ON_CALL(*this, setSharedBufferMode).WillByDefault([]() { return 0; });
+        ON_CALL(*this, setAutoRefresh).WillByDefault([]() { return 0; });
+        ON_CALL(*this, setDequeueTimeout).WillByDefault([]() { return 0; });
+        ON_CALL(*this, setLegacyBufferDrop).WillByDefault([]() { return 0; });
+    };
+    MOCK_METHOD2(requestBuffer, Return<void>(int, requestBuffer_cb));
+    MOCK_METHOD1(setMaxDequeuedBufferCount, Return<int32_t>(int32_t));
+    MOCK_METHOD1(setAsyncMode, Return<int32_t>(bool));
+    MOCK_METHOD6(dequeueBuffer,
+                 Return<void>(uint32_t, uint32_t, graphics::common::V1_0::PixelFormat, uint32_t,
+                              bool, dequeueBuffer_cb));
+    MOCK_METHOD1(detachBuffer, Return<int32_t>(int));
+    MOCK_METHOD1(detachNextBuffer, Return<void>(detachNextBuffer_cb));
+    MOCK_METHOD2(attachBuffer, Return<void>(const media::V1_0::AnwBuffer&, attachBuffer_cb));
+    MOCK_METHOD3(
+            queueBuffer,
+            Return<void>(
+                    int,
+                    const graphics::bufferqueue::V1_0::IGraphicBufferProducer::QueueBufferInput&,
+                    queueBuffer_cb));
+    MOCK_METHOD2(cancelBuffer, Return<int32_t>(int, const hidl_handle&));
+    MOCK_METHOD2(query, Return<void>(int32_t, query_cb));
+    MOCK_METHOD4(connect,
+                 Return<void>(const sp<graphics::bufferqueue::V1_0::IProducerListener>&, int32_t,
+                              bool, connect_cb));
+    MOCK_METHOD2(disconnect,
+                 Return<int32_t>(
+                         int, graphics::bufferqueue::V1_0::IGraphicBufferProducer::DisconnectMode));
+    MOCK_METHOD1(setSidebandStream, Return<int32_t>(const hidl_handle&));
+    MOCK_METHOD4(allocateBuffers,
+                 Return<void>(uint32_t, uint32_t, graphics::common::V1_0::PixelFormat, uint32_t));
+    MOCK_METHOD1(allowAllocation, Return<int32_t>(bool));
+    MOCK_METHOD1(setGenerationNumber, Return<int32_t>(uint32_t));
+    MOCK_METHOD1(getConsumerName, Return<void>(getConsumerName_cb));
+    MOCK_METHOD1(setSharedBufferMode, Return<int32_t>(bool));
+    MOCK_METHOD1(setAutoRefresh, Return<int32_t>(bool));
+    MOCK_METHOD1(setDequeueTimeout, Return<int32_t>(nsecs_t));
+    MOCK_METHOD1(setLegacyBufferDrop, Return<int32_t>(bool));
+    MOCK_METHOD1(getLastQueuedBuffer, Return<void>(getLastQueuedBuffer_cb));
+    MOCK_METHOD1(getFrameTimestamps, Return<void>(getFrameTimestamps_cb));
+    MOCK_METHOD1(getUniqueId, Return<void>(getUniqueId_cb));
+};
+
+}; // namespace graphics::bufferqueue::V1_0::utils
+
+namespace graphics::bufferqueue::V2_0::utils {
+
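+// gmock fake of the HIDL 2.0 buffer queue producer with benign default return values.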
+class FakeGraphicBufferProducerV2 : public HGraphicBufferProducer {
+public:
+    FakeGraphicBufferProducerV2() {
+        ON_CALL(*this, setMaxDequeuedBufferCount).WillByDefault([]() { return Status::OK; });
+        ON_CALL(*this, setAsyncMode).WillByDefault([]() { return Status::OK; });
+        ON_CALL(*this, detachBuffer).WillByDefault([]() { return Status::OK; });
+        ON_CALL(*this, cancelBuffer).WillByDefault([]() { return Status::OK; });
+        ON_CALL(*this, disconnect).WillByDefault([]() { return Status::OK; });
+        ON_CALL(*this, allocateBuffers).WillByDefault([]() { return Status::OK; });
+        ON_CALL(*this, allowAllocation).WillByDefault([]() { return Status::OK; });
+        ON_CALL(*this, setGenerationNumber).WillByDefault([]() { return Status::OK; });
+        ON_CALL(*this, setDequeueTimeout).WillByDefault([]() { return Status::OK; });
+        ON_CALL(*this, getUniqueId).WillByDefault([]() { return 0; });
+    };
+    MOCK_METHOD2(requestBuffer, Return<void>(int, requestBuffer_cb));
+    MOCK_METHOD1(setMaxDequeuedBufferCount, Return<graphics::bufferqueue::V2_0::Status>(int));
+    MOCK_METHOD1(setAsyncMode, Return<graphics::bufferqueue::V2_0::Status>(bool));
+    MOCK_METHOD2(
+            dequeueBuffer,
+            Return<void>(
+                    const graphics::bufferqueue::V2_0::IGraphicBufferProducer::DequeueBufferInput&,
+                    dequeueBuffer_cb));
+    MOCK_METHOD1(detachBuffer, Return<graphics::bufferqueue::V2_0::Status>(int));
+    MOCK_METHOD1(detachNextBuffer, Return<void>(detachNextBuffer_cb));
+    MOCK_METHOD3(attachBuffer,
+                 Return<void>(const graphics::common::V1_2::HardwareBuffer&, uint32_t,
+                              attachBuffer_cb));
+    MOCK_METHOD3(
+            queueBuffer,
+            Return<void>(
+                    int,
+                    const graphics::bufferqueue::V2_0::IGraphicBufferProducer::QueueBufferInput&,
+                    queueBuffer_cb));
+    MOCK_METHOD2(cancelBuffer,
+                 Return<graphics::bufferqueue::V2_0::Status>(int, const hidl_handle&));
+    MOCK_METHOD2(query, Return<void>(int32_t, query_cb));
+    MOCK_METHOD4(connect,
+                 Return<void>(const sp<graphics::bufferqueue::V2_0::IProducerListener>&,
+                              graphics::bufferqueue::V2_0::ConnectionType, bool, connect_cb));
+    MOCK_METHOD1(disconnect,
+                 Return<graphics::bufferqueue::V2_0::Status>(
+                         graphics::bufferqueue::V2_0::ConnectionType));
+    MOCK_METHOD4(allocateBuffers,
+                 Return<graphics::bufferqueue::V2_0::Status>(uint32_t, uint32_t, uint32_t,
+                                                             uint64_t));
+    MOCK_METHOD1(allowAllocation, Return<graphics::bufferqueue::V2_0::Status>(bool));
+    MOCK_METHOD1(setGenerationNumber, Return<graphics::bufferqueue::V2_0::Status>(uint32_t));
+    MOCK_METHOD1(getConsumerName, Return<void>(getConsumerName_cb));
+    MOCK_METHOD1(setDequeueTimeout, Return<graphics::bufferqueue::V2_0::Status>(int64_t));
+    MOCK_METHOD0(getUniqueId, Return<uint64_t>());
+};
+
+}; // namespace graphics::bufferqueue::V2_0::utils
+}; // namespace android::hardware
diff --git a/libs/gui/fuzzer/libgui_parcelable_fuzzer.cpp b/libs/gui/fuzzer/libgui_parcelable_fuzzer.cpp
new file mode 100644
index 0000000..9f0f6ca
--- /dev/null
+++ b/libs/gui/fuzzer/libgui_parcelable_fuzzer.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <gui/BufferQueueConsumer.h>
+#include <gui/BufferQueueCore.h>
+#include <gui/BufferQueueProducer.h>
+#include <gui/LayerMetadata.h>
+#include <gui/OccupancyTracker.h>
+#include <gui/StreamSplitter.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceControl.h>
+#include <gui/view/Surface.h>
+#include <libgui_fuzzer_utils.h>
+#include "android/view/LayerMetadataKey.h"
+
+using namespace android;
+
+constexpr int32_t kMaxBytes = 256;
+constexpr int32_t kMatrixSize = 4;
+constexpr int32_t kLayerMetadataKeyCount = 8;
+
+constexpr uint32_t kMetadataKey[] = {
+        (uint32_t)view::LayerMetadataKey::METADATA_OWNER_UID,
+        (uint32_t)view::LayerMetadataKey::METADATA_WINDOW_TYPE,
+        (uint32_t)view::LayerMetadataKey::METADATA_TASK_ID,
+        (uint32_t)view::LayerMetadataKey::METADATA_MOUSE_CURSOR,
+        (uint32_t)view::LayerMetadataKey::METADATA_ACCESSIBILITY_ID,
+        (uint32_t)view::LayerMetadataKey::METADATA_OWNER_PID,
+        (uint32_t)view::LayerMetadataKey::METADATA_DEQUEUE_TIME,
+        (uint32_t)view::LayerMetadataKey::METADATA_GAME_MODE,
+};
+
+class ParcelableFuzzer {
+public:
+    ParcelableFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+    void process();
+
+private:
+    void invokeStreamSplitter();
+    void invokeOccupancyTracker();
+    void invokeLayerDebugInfo();
+    void invokeLayerMetadata();
+    void invokeViewSurface();
+
+    FuzzedDataProvider mFdp;
+};
+
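+// Round-trips a view::Surface through a Parcel, with and without the name field pre-written.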
+void ParcelableFuzzer::invokeViewSurface() {
+    view::Surface surface;
+    surface.name = String16((mFdp.ConsumeRandomLengthString(kMaxBytes)).c_str());
+    Parcel parcel;
+    surface.writeToParcel(&parcel);
+    parcel.setDataPosition(0);
+    surface.readFromParcel(&parcel);
+    bool nameAlreadyWritten = mFdp.ConsumeBool();
+    surface.writeToParcel(&parcel, nameAlreadyWritten);
+    parcel.setDataPosition(0);
+    surface.readFromParcel(&parcel, mFdp.ConsumeBool());
+}
+
+void ParcelableFuzzer::invokeLayerMetadata() {
+    std::unordered_map<uint32_t, std::vector<uint8_t>> map;
+    for (size_t idx = 0; idx < kLayerMetadataKeyCount; ++idx) {
+        std::vector<uint8_t> data;
+        for (size_t idx1 = 0; idx1 < mFdp.ConsumeIntegral<uint32_t>(); ++idx1) {
+            data.push_back(mFdp.ConsumeIntegral<uint8_t>());
+        }
+        map[kMetadataKey[idx]] = data;
+    }
+    LayerMetadata metadata(map);
+    uint32_t key = mFdp.PickValueInArray(kMetadataKey);
+    metadata.setInt32(key, mFdp.ConsumeIntegral<int32_t>());
+    metadata.itemToString(key, (mFdp.ConsumeRandomLengthString(kMaxBytes)).c_str());
+
+    Parcel parcel;
+    metadata.writeToParcel(&parcel);
+    parcel.setDataPosition(0);
+    metadata.readFromParcel(&parcel);
+}
+
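+// Populates LayerDebugInfo with fuzzed values and round-trips it through a Parcel.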
+void ParcelableFuzzer::invokeLayerDebugInfo() {
+    gui::LayerDebugInfo info;
+    info.mName = mFdp.ConsumeRandomLengthString(kMaxBytes);
+    info.mParentName = mFdp.ConsumeRandomLengthString(kMaxBytes);
+    info.mType = mFdp.ConsumeRandomLengthString(kMaxBytes);
+    info.mLayerStack = mFdp.ConsumeIntegral<uint32_t>();
+    info.mX = mFdp.ConsumeFloatingPoint<float>();
+    info.mY = mFdp.ConsumeFloatingPoint<float>();
+    info.mZ = mFdp.ConsumeIntegral<uint32_t>();
+    info.mWidth = mFdp.ConsumeIntegral<int32_t>();
+    info.mHeight = mFdp.ConsumeIntegral<int32_t>();
+    info.mActiveBufferWidth = mFdp.ConsumeIntegral<int32_t>();
+    info.mActiveBufferHeight = mFdp.ConsumeIntegral<int32_t>();
+    info.mActiveBufferStride = mFdp.ConsumeIntegral<int32_t>();
+    info.mActiveBufferFormat = mFdp.ConsumeIntegral<int32_t>();
+    info.mNumQueuedFrames = mFdp.ConsumeIntegral<int32_t>();
+
+    info.mFlags = mFdp.ConsumeIntegral<uint32_t>();
+    info.mPixelFormat = mFdp.ConsumeIntegral<int32_t>();
+    info.mTransparentRegion = Region(getRect(&mFdp));
+    info.mVisibleRegion = Region(getRect(&mFdp));
+    info.mSurfaceDamageRegion = Region(getRect(&mFdp));
+    info.mCrop = getRect(&mFdp);
+    info.mDataSpace = static_cast<android_dataspace>(mFdp.PickValueInArray(kDataspaces));
+    info.mColor = half4(mFdp.ConsumeFloatingPoint<float>(), mFdp.ConsumeFloatingPoint<float>(),
+                        mFdp.ConsumeFloatingPoint<float>(), mFdp.ConsumeFloatingPoint<float>());
+    for (size_t idx = 0; idx < kMatrixSize; ++idx) {
+        info.mMatrix[idx / 2][idx % 2] = mFdp.ConsumeFloatingPoint<float>();
+    }
+    info.mIsOpaque = mFdp.ConsumeBool();
+    info.mContentDirty = mFdp.ConsumeBool();
+    info.mStretchEffect.width = mFdp.ConsumeFloatingPoint<float>();
+    info.mStretchEffect.height = mFdp.ConsumeFloatingPoint<float>();
+    info.mStretchEffect.vectorX = mFdp.ConsumeFloatingPoint<float>();
+    info.mStretchEffect.vectorY = mFdp.ConsumeFloatingPoint<float>();
+    info.mStretchEffect.maxAmountX = mFdp.ConsumeFloatingPoint<float>();
+    info.mStretchEffect.maxAmountY = mFdp.ConsumeFloatingPoint<float>();
+    info.mStretchEffect.mappedChildBounds =
+            FloatRect(mFdp.ConsumeFloatingPoint<float>(), mFdp.ConsumeFloatingPoint<float>(),
+                      mFdp.ConsumeFloatingPoint<float>(), mFdp.ConsumeFloatingPoint<float>());
+
+    Parcel parcel;
+    info.writeToParcel(&parcel);
+    parcel.setDataPosition(0);
+    info.readFromParcel(&parcel);
+}
+
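+// Parcels and unparcels an OccupancyTracker::Segment built from fuzzed values.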
+void ParcelableFuzzer::invokeOccupancyTracker() {
+    nsecs_t totalTime = mFdp.ConsumeIntegral<uint32_t>();
+    size_t numFrames = mFdp.ConsumeIntegral<size_t>();
+    float occupancyAverage = mFdp.ConsumeFloatingPoint<float>();
+    OccupancyTracker::Segment segment(totalTime, numFrames, occupancyAverage,
+                                      mFdp.ConsumeBool() /*usedThirdBuffer*/);
+    Parcel parcel;
+    segment.writeToParcel(&parcel);
+    parcel.setDataPosition(0);
+    segment.readFromParcel(&parcel);
+}
+
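+// Creates a StreamSplitter over a fresh buffer queue, adds an output, and sets a fuzzed name.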
+void ParcelableFuzzer::invokeStreamSplitter() {
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    sp<StreamSplitter> splitter;
+    StreamSplitter::createSplitter(consumer, &splitter);
+    splitter->addOutput(producer);
+    std::string name = mFdp.ConsumeRandomLengthString(kMaxBytes);
+    splitter->setName(String8(name.c_str()));
+}
+
+void ParcelableFuzzer::process() {
+    invokeStreamSplitter();
+    invokeOccupancyTracker();
+    invokeLayerDebugInfo();
+    invokeLayerMetadata();
+    invokeViewSurface();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    ParcelableFuzzer libGuiFuzzer(data, size);
+    libGuiFuzzer.process();
+    return 0;
+}
diff --git a/libs/gui/fuzzer/libgui_surfaceComposerClient_fuzzer.cpp b/libs/gui/fuzzer/libgui_surfaceComposerClient_fuzzer.cpp
new file mode 100644
index 0000000..57720dd
--- /dev/null
+++ b/libs/gui/fuzzer/libgui_surfaceComposerClient_fuzzer.cpp
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <android/hardware/power/Boost.h>
+#include <fuzzbinder/libbinder_driver.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <libgui_fuzzer_utils.h>
+#include "android-base/stringprintf.h"
+
+using namespace android;
+
+constexpr int32_t kRandomStringMaxBytes = 256;
+
+constexpr ui::ColorMode kColormodes[] = {ui::ColorMode::NATIVE,
+                                         ui::ColorMode::STANDARD_BT601_625,
+                                         ui::ColorMode::STANDARD_BT601_625_UNADJUSTED,
+                                         ui::ColorMode::STANDARD_BT601_525,
+                                         ui::ColorMode::STANDARD_BT601_525_UNADJUSTED,
+                                         ui::ColorMode::STANDARD_BT709,
+                                         ui::ColorMode::DCI_P3,
+                                         ui::ColorMode::SRGB,
+                                         ui::ColorMode::ADOBE_RGB,
+                                         ui::ColorMode::DISPLAY_P3,
+                                         ui::ColorMode::BT2020,
+                                         ui::ColorMode::BT2100_PQ,
+                                         ui::ColorMode::BT2100_HLG,
+                                         ui::ColorMode::DISPLAY_BT2020};
+
+constexpr hardware::power::Boost kBoost[] = {
+        hardware::power::Boost::INTERACTION,   hardware::power::Boost::DISPLAY_UPDATE_IMMINENT,
+        hardware::power::Boost::ML_ACC,        hardware::power::Boost::AUDIO_LAUNCH,
+        hardware::power::Boost::CAMERA_LAUNCH, hardware::power::Boost::CAMERA_SHOT,
+};
+
+constexpr gui::TouchOcclusionMode kMode[] = {
+        gui::TouchOcclusionMode::BLOCK_UNTRUSTED,
+        gui::TouchOcclusionMode::USE_OPACITY,
+        gui::TouchOcclusionMode::ALLOW,
+};
+
+constexpr gui::WindowInfo::Flag kFlags[] = {
+        gui::WindowInfo::Flag::ALLOW_LOCK_WHILE_SCREEN_ON,
+        gui::WindowInfo::Flag::DIM_BEHIND,
+        gui::WindowInfo::Flag::BLUR_BEHIND,
+        gui::WindowInfo::Flag::NOT_FOCUSABLE,
+        gui::WindowInfo::Flag::NOT_TOUCHABLE,
+        gui::WindowInfo::Flag::NOT_TOUCH_MODAL,
+        gui::WindowInfo::Flag::TOUCHABLE_WHEN_WAKING,
+        gui::WindowInfo::Flag::KEEP_SCREEN_ON,
+        gui::WindowInfo::Flag::LAYOUT_IN_SCREEN,
+        gui::WindowInfo::Flag::LAYOUT_NO_LIMITS,
+        gui::WindowInfo::Flag::FULLSCREEN,
+        gui::WindowInfo::Flag::FORCE_NOT_FULLSCREEN,
+        gui::WindowInfo::Flag::DITHER,
+        gui::WindowInfo::Flag::SECURE,
+        gui::WindowInfo::Flag::SCALED,
+        gui::WindowInfo::Flag::IGNORE_CHEEK_PRESSES,
+        gui::WindowInfo::Flag::LAYOUT_INSET_DECOR,
+        gui::WindowInfo::Flag::ALT_FOCUSABLE_IM,
+        gui::WindowInfo::Flag::WATCH_OUTSIDE_TOUCH,
+        gui::WindowInfo::Flag::SHOW_WHEN_LOCKED,
+        gui::WindowInfo::Flag::SHOW_WALLPAPER,
+        gui::WindowInfo::Flag::TURN_SCREEN_ON,
+        gui::WindowInfo::Flag::DISMISS_KEYGUARD,
+        gui::WindowInfo::Flag::SPLIT_TOUCH,
+        gui::WindowInfo::Flag::HARDWARE_ACCELERATED,
+        gui::WindowInfo::Flag::LAYOUT_IN_OVERSCAN,
+        gui::WindowInfo::Flag::TRANSLUCENT_STATUS,
+        gui::WindowInfo::Flag::TRANSLUCENT_NAVIGATION,
+        gui::WindowInfo::Flag::LOCAL_FOCUS_MODE,
+        gui::WindowInfo::Flag::SLIPPERY,
+        gui::WindowInfo::Flag::LAYOUT_ATTACHED_IN_DECOR,
+        gui::WindowInfo::Flag::DRAWS_SYSTEM_BAR_BACKGROUNDS,
+};
+
+constexpr gui::WindowInfo::Type kType[] = {
+        gui::WindowInfo::Type::UNKNOWN,
+        gui::WindowInfo::Type::FIRST_APPLICATION_WINDOW,
+        gui::WindowInfo::Type::BASE_APPLICATION,
+        gui::WindowInfo::Type::APPLICATION,
+        gui::WindowInfo::Type::APPLICATION_STARTING,
+        gui::WindowInfo::Type::LAST_APPLICATION_WINDOW,
+        gui::WindowInfo::Type::FIRST_SUB_WINDOW,
+        gui::WindowInfo::Type::APPLICATION_PANEL,
+        gui::WindowInfo::Type::APPLICATION_MEDIA,
+        gui::WindowInfo::Type::APPLICATION_SUB_PANEL,
+        gui::WindowInfo::Type::APPLICATION_ATTACHED_DIALOG,
+        gui::WindowInfo::Type::APPLICATION_MEDIA_OVERLAY,
+};
+
+constexpr gui::WindowInfo::InputConfig kFeatures[] = {
+        gui::WindowInfo::InputConfig::NO_INPUT_CHANNEL,
+        gui::WindowInfo::InputConfig::DISABLE_USER_ACTIVITY,
+        gui::WindowInfo::InputConfig::DROP_INPUT,
+        gui::WindowInfo::InputConfig::DROP_INPUT_IF_OBSCURED,
+        gui::WindowInfo::InputConfig::SPY,
+        gui::WindowInfo::InputConfig::INTERCEPTS_STYLUS,
+};
+
+class SurfaceComposerClientFuzzer {
+public:
+    SurfaceComposerClientFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+    void process();
+
+private:
+    void invokeSurfaceComposerClient();
+    void invokeSurfaceComposerClientBinder();
+    void invokeSurfaceComposerTransaction();
+    void getWindowInfo(gui::WindowInfo*);
+    sp<SurfaceControl> makeSurfaceControl();
+    BlurRegion getBlurRegion();
+    void fuzzOnPullAtom();
+    gui::DisplayModeSpecs getDisplayModeSpecs();
+
+    FuzzedDataProvider mFdp;
+};
+
+gui::DisplayModeSpecs SurfaceComposerClientFuzzer::getDisplayModeSpecs() {
+    const auto getRefreshRateRange = [&] {
+        gui::DisplayModeSpecs::RefreshRateRanges::RefreshRateRange range;
+        range.min = mFdp.ConsumeFloatingPoint<float>();
+        range.max = mFdp.ConsumeFloatingPoint<float>();
+        return range;
+    };
+
+    const auto getRefreshRateRanges = [&] {
+        gui::DisplayModeSpecs::RefreshRateRanges ranges;
+        ranges.physical = getRefreshRateRange();
+        ranges.render = getRefreshRateRange();
+        return ranges;
+    };
+
+    String8 displayName((mFdp.ConsumeRandomLengthString(kRandomStringMaxBytes)).c_str());
+    sp<IBinder> displayToken =
+            SurfaceComposerClient::createDisplay(displayName, mFdp.ConsumeBool() /*secure*/);
+    gui::DisplayModeSpecs specs;
+    specs.defaultMode = mFdp.ConsumeIntegral<int32_t>();
+    specs.allowGroupSwitching = mFdp.ConsumeBool();
+    specs.primaryRanges = getRefreshRateRanges();
+    specs.appRequestRanges = getRefreshRateRanges();
+    return specs;
+}
+
+BlurRegion SurfaceComposerClientFuzzer::getBlurRegion() {
+    int32_t left = mFdp.ConsumeIntegral<int32_t>();
+    int32_t right = mFdp.ConsumeIntegral<int32_t>();
+    int32_t top = mFdp.ConsumeIntegral<int32_t>();
+    int32_t bottom = mFdp.ConsumeIntegral<int32_t>();
+    uint32_t blurRadius = mFdp.ConsumeIntegral<uint32_t>();
+    float alpha = mFdp.ConsumeFloatingPoint<float>();
+    float cornerRadiusTL = mFdp.ConsumeFloatingPoint<float>();
+    float cornerRadiusTR = mFdp.ConsumeFloatingPoint<float>();
+    float cornerRadiusBL = mFdp.ConsumeFloatingPoint<float>();
+    float cornerRadiusBR = mFdp.ConsumeFloatingPoint<float>();
+    return BlurRegion{blurRadius,     cornerRadiusTL, cornerRadiusTR, cornerRadiusBL,
+                      cornerRadiusBR, alpha,          left,           top,
+                      right,          bottom};
+}
+
+void SurfaceComposerClientFuzzer::getWindowInfo(gui::WindowInfo* windowInfo) {
+    windowInfo->id = mFdp.ConsumeIntegral<int32_t>();
+    windowInfo->name = mFdp.ConsumeRandomLengthString(kRandomStringMaxBytes);
+    windowInfo->layoutParamsFlags = mFdp.PickValueInArray(kFlags);
+    windowInfo->layoutParamsType = mFdp.PickValueInArray(kType);
+    windowInfo->frameLeft = mFdp.ConsumeIntegral<int32_t>();
+    windowInfo->frameTop = mFdp.ConsumeIntegral<int32_t>();
+    windowInfo->frameRight = mFdp.ConsumeIntegral<int32_t>();
+    windowInfo->frameBottom = mFdp.ConsumeIntegral<int32_t>();
+    windowInfo->surfaceInset = mFdp.ConsumeIntegral<int32_t>();
+    windowInfo->alpha = mFdp.ConsumeFloatingPointInRange<float>(0, 1);
+    ui::Transform transform(mFdp.PickValueInArray(kOrientation));
+    windowInfo->transform = transform;
+    windowInfo->touchableRegion = Region(getRect(&mFdp));
+    windowInfo->replaceTouchableRegionWithCrop = mFdp.ConsumeBool();
+    windowInfo->touchOcclusionMode = mFdp.PickValueInArray(kMode);
+    windowInfo->ownerPid = mFdp.ConsumeIntegral<int32_t>();
+    windowInfo->ownerUid = mFdp.ConsumeIntegral<int32_t>();
+    windowInfo->packageName = mFdp.ConsumeRandomLengthString(kRandomStringMaxBytes);
+    windowInfo->inputConfig = mFdp.PickValueInArray(kFeatures);
+}
+
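+// Builds a SurfaceControl backed by a fake ISurfaceComposerClient, with fuzzed geometry and flags.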
+sp<SurfaceControl> SurfaceComposerClientFuzzer::makeSurfaceControl() {
+    sp<IBinder> handle;
+    const sp<FakeBnSurfaceComposerClient> testClient(new FakeBnSurfaceComposerClient());
+    sp<SurfaceComposerClient> client = new SurfaceComposerClient(testClient);
+    sp<BnGraphicBufferProducer> producer;
+    uint32_t width = mFdp.ConsumeIntegral<uint32_t>();
+    uint32_t height = mFdp.ConsumeIntegral<uint32_t>();
+    uint32_t transformHint = mFdp.ConsumeIntegral<uint32_t>();
+    uint32_t flags = mFdp.ConsumeIntegral<uint32_t>();
+    int32_t format = mFdp.ConsumeIntegral<int32_t>();
+    int32_t layerId = mFdp.ConsumeIntegral<int32_t>();
+    std::string layerName = base::StringPrintf("#%d", layerId);
+    return new SurfaceControl(client, handle, layerId, layerName, width, height, format,
+                              transformHint, flags);
+}
+
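+// Applies fuzzed state to a Transaction and a WindowInfo, then round-trips both through Parcels.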
+void SurfaceComposerClientFuzzer::invokeSurfaceComposerTransaction() {
+    sp<SurfaceControl> surface = makeSurfaceControl();
+
+    SurfaceComposerClient::Transaction transaction;
+    int32_t layer = mFdp.ConsumeIntegral<int32_t>();
+    transaction.setLayer(surface, layer);
+
+    sp<SurfaceControl> relativeSurface = makeSurfaceControl();
+    transaction.setRelativeLayer(surface, relativeSurface, layer);
+
+    Region transparentRegion(getRect(&mFdp));
+    transaction.setTransparentRegionHint(surface, transparentRegion);
+    transaction.setAlpha(surface, mFdp.ConsumeFloatingPoint<float>());
+
+    transaction.setCornerRadius(surface, mFdp.ConsumeFloatingPoint<float>());
+    transaction.setBackgroundBlurRadius(surface, mFdp.ConsumeFloatingPoint<float>());
+    std::vector<BlurRegion> regions;
+    uint32_t vectorSize = mFdp.ConsumeIntegralInRange<uint32_t>(0, 100);
+    regions.reserve(vectorSize);
+    for (size_t idx = 0; idx < vectorSize; ++idx) {
+        regions.push_back(getBlurRegion());
+    }
+    transaction.setBlurRegions(surface, regions);
+
+    transaction.setLayerStack(surface, {mFdp.ConsumeIntegral<uint32_t>()});
+    half3 color = {mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<uint32_t>(),
+                   mFdp.ConsumeIntegral<uint32_t>()};
+    transaction.setColor(surface, color);
+    transaction.setBackgroundColor(surface, color, mFdp.ConsumeFloatingPoint<float>(),
+                                   mFdp.PickValueInArray(kDataspaces));
+
+    transaction.setApi(surface, mFdp.ConsumeIntegral<int32_t>());
+    transaction.setFrameRateSelectionPriority(surface, mFdp.ConsumeIntegral<int32_t>());
+    transaction.setColorSpaceAgnostic(surface, mFdp.ConsumeBool() /*agnostic*/);
+
+    gui::WindowInfo windowInfo;
+    getWindowInfo(&windowInfo);
+    transaction.setInputWindowInfo(surface, windowInfo);
+    Parcel windowParcel;
+    windowInfo.writeToParcel(&windowParcel);
+    windowParcel.setDataPosition(0);
+    windowInfo.readFromParcel(&windowParcel);
+
+    windowInfo.addTouchableRegion(getRect(&mFdp));
+    int32_t pointX = mFdp.ConsumeIntegral<int32_t>();
+    int32_t pointY = mFdp.ConsumeIntegral<int32_t>();
+    windowInfo.touchableRegionContainsPoint(pointX, pointY);
+    windowInfo.frameContainsPoint(pointX, pointY);
+
+    Parcel transactionParcel;
+    transaction.writeToParcel(&transactionParcel);
+    transactionParcel.setDataPosition(0);
+    transaction.readFromParcel(&transactionParcel);
+    SurfaceComposerClient::Transaction::createFromParcel(&transactionParcel);
+}
+
+void SurfaceComposerClientFuzzer::fuzzOnPullAtom() {
+    std::string outData;
+    bool success;
+    SurfaceComposerClient::onPullAtom(mFdp.ConsumeIntegral<int32_t>(), &outData, &success);
+}
+
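+// Exercises the static SurfaceComposerClient display, power, and listener APIs with fuzzed arguments.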
+void SurfaceComposerClientFuzzer::invokeSurfaceComposerClient() {
+    String8 displayName((mFdp.ConsumeRandomLengthString(kRandomStringMaxBytes)).c_str());
+    sp<IBinder> displayToken =
+            SurfaceComposerClient::createDisplay(displayName, mFdp.ConsumeBool() /*secure*/);
+    SurfaceComposerClient::setDesiredDisplayModeSpecs(displayToken, getDisplayModeSpecs());
+
+    ui::ColorMode colorMode = mFdp.PickValueInArray(kColormodes);
+    SurfaceComposerClient::setActiveColorMode(displayToken, colorMode);
+    SurfaceComposerClient::setAutoLowLatencyMode(displayToken, mFdp.ConsumeBool() /*on*/);
+    SurfaceComposerClient::setGameContentType(displayToken, mFdp.ConsumeBool() /*on*/);
+    SurfaceComposerClient::setDisplayPowerMode(displayToken, mFdp.ConsumeIntegral<int32_t>());
+    SurfaceComposerClient::doUncacheBufferTransaction(mFdp.ConsumeIntegral<uint64_t>());
+
+    SurfaceComposerClient::setDisplayBrightness(displayToken, getBrightness(&mFdp));
+    hardware::power::Boost boostId = mFdp.PickValueInArray(kBoost);
+    SurfaceComposerClient::notifyPowerBoost((int32_t)boostId);
+
+    String8 surfaceName((mFdp.ConsumeRandomLengthString(kRandomStringMaxBytes)).c_str());
+    sp<BBinder> handle(new BBinder());
+    sp<BnGraphicBufferProducer> producer;
+    sp<Surface> surfaceParent(
+            new Surface(producer, mFdp.ConsumeBool() /*controlledByApp*/, handle));
+
+    fuzzOnPullAtom();
+    SurfaceComposerClient::setDisplayContentSamplingEnabled(displayToken,
+                                                            mFdp.ConsumeBool() /*enable*/,
+                                                            mFdp.ConsumeIntegral<uint8_t>(),
+                                                            mFdp.ConsumeIntegral<uint64_t>());
+
+    sp<IBinder> stopLayerHandle;
+    sp<gui::IRegionSamplingListener> listener = sp<gui::IRegionSamplingListenerDefault>::make();
+    sp<gui::IRegionSamplingListenerDelegator> sampleListener =
+            new gui::IRegionSamplingListenerDelegator(listener);
+    SurfaceComposerClient::addRegionSamplingListener(getRect(&mFdp), stopLayerHandle,
+                                                     sampleListener);
+    sp<gui::IFpsListenerDefault> fpsListener;
+    SurfaceComposerClient::addFpsListener(mFdp.ConsumeIntegral<int32_t>(), fpsListener);
+}
+
+void SurfaceComposerClientFuzzer::invokeSurfaceComposerClientBinder() {
+    sp<FakeBnSurfaceComposerClient> client(new FakeBnSurfaceComposerClient());
+    fuzzService(client.get(), std::move(mFdp));
+}
+
+void SurfaceComposerClientFuzzer::process() {
+    invokeSurfaceComposerClient();
+    invokeSurfaceComposerTransaction();
+    invokeSurfaceComposerClientBinder();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    SurfaceComposerClientFuzzer surfaceComposerClientFuzzer(data, size);
+    surfaceComposerClientFuzzer.process();
+    return 0;
+}
diff --git a/libs/gui/fuzzer/libgui_surfaceComposer_fuzzer.cpp b/libs/gui/fuzzer/libgui_surfaceComposer_fuzzer.cpp
new file mode 100644
index 0000000..6d5427b
--- /dev/null
+++ b/libs/gui/fuzzer/libgui_surfaceComposer_fuzzer.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fuzzbinder/libbinder_driver.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <libgui_fuzzer_utils.h>
+
+using namespace android;
+
+class SurfaceComposerFuzzer {
+public:
+    SurfaceComposerFuzzer(const uint8_t* data, size_t size) : mFdp(data, size) {}
+    void process();
+
+private:
+    FuzzedDataProvider mFdp;
+};
+
+void SurfaceComposerFuzzer::process() {
+    sp<FakeBnSurfaceComposer> composer(new FakeBnSurfaceComposer());
+    fuzzService(composer.get(), std::move(mFdp));
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    SurfaceComposerFuzzer surfaceComposerFuzzer(data, size);
+    surfaceComposerFuzzer.process();
+    return 0;
+}
diff --git a/libs/gui/include/gui/AidlStatusUtil.h b/libs/gui/include/gui/AidlStatusUtil.h
new file mode 100644
index 0000000..55be27b
--- /dev/null
+++ b/libs/gui/include/gui/AidlStatusUtil.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <binder/Status.h>
+
+// Extracted from frameworks/av/media/libaudioclient/include/media/AidlConversionUtil.h
+namespace android::gui::aidl_utils {
+
+/**
+ * Return the equivalent Android status_t from a binder exception code.
+ *
+ * Generally one should use statusTFromBinderStatus() instead.
+ *
+ * Exception codes can be generated from a remote Java service exception; translate
+ * them for use on the native side.
+ *
+ * Note: for EX_TRANSACTION_FAILED and EX_SERVICE_SPECIFIC a more detailed error code
+ * can be found from transactionError() or serviceSpecificErrorCode().
+ */
+static inline status_t statusTFromExceptionCode(int32_t exceptionCode) {
+    using namespace ::android::binder;
+    switch (exceptionCode) {
+        case Status::EX_NONE:
+            return OK;
+        case Status::EX_SECURITY: // Java SecurityException, rethrows locally in Java
+            return PERMISSION_DENIED;
+        case Status::EX_BAD_PARCELABLE:   // Java BadParcelableException, rethrows in Java
+        case Status::EX_ILLEGAL_ARGUMENT: // Java IllegalArgumentException, rethrows in Java
+        case Status::EX_NULL_POINTER:     // Java NullPointerException, rethrows in Java
+            return BAD_VALUE;
+        case Status::EX_ILLEGAL_STATE:         // Java IllegalStateException, rethrows in Java
+        case Status::EX_UNSUPPORTED_OPERATION: // Java UnsupportedOperationException, rethrows
+            return INVALID_OPERATION;
+        case Status::EX_HAS_REPLY_HEADER: // Native strictmode violation
+        case Status::EX_PARCELABLE: // Java bootclass loader (not standard exception), rethrows
+        case Status::EX_NETWORK_MAIN_THREAD: // Java NetworkOnMainThreadException, rethrows
+        case Status::EX_TRANSACTION_FAILED:  // Native - see error code
+        case Status::EX_SERVICE_SPECIFIC:    // Java ServiceSpecificException,
+                                             // rethrows in Java with integer error code
+            return UNKNOWN_ERROR;
+    }
+    return UNKNOWN_ERROR;
+}
+
+/**
+ * Return the equivalent Android status_t from a binder status.
+ *
+ * Used to handle errors from an AIDL method declaration
+ *
+ * [oneway] void method(type0 param0, ...)
+ *
+ * or the following (where return_type is not a status_t)
+ *
+ * return_type method(type0 param0, ...)
+ */
+static inline status_t statusTFromBinderStatus(const ::android::binder::Status &status) {
+    return status.isOk() ? OK // check OK,
+        : status.serviceSpecificErrorCode() // service-side error, not standard Java exception
+                                            // (fromServiceSpecificError)
+        ?: status.transactionError() // a native binder transaction error (fromStatusT)
+        ?: statusTFromExceptionCode(status.exceptionCode()); // a service-side error with a
+                                                    // standard Java exception (fromExceptionCode)
+}
+
+/**
+ * Return a binder::Status from native service status.
+ *
+ * This is used for methods not returning an explicit status_t,
+ * where Java callers expect an exception, not an integer return value.
+ */
+static inline ::android::binder::Status binderStatusFromStatusT(
+        status_t status, const char *optionalMessage = nullptr) {
+    const char *const emptyIfNull = optionalMessage == nullptr ? "" : optionalMessage;
+    // From binder::Status instructions:
+    //  Prefer a generic exception code when possible, then a service specific
+    //  code, and finally a status_t for low level failures or legacy support.
+    //  Exception codes and service specific errors map to nicer exceptions for
+    //  Java clients.
+
+    using namespace ::android::binder;
+    switch (status) {
+        case OK:
+            return Status::ok();
+        case PERMISSION_DENIED: // throw SecurityException on Java side
+            return Status::fromExceptionCode(Status::EX_SECURITY, emptyIfNull);
+        case BAD_VALUE: // throw IllegalArgumentException on Java side
+            return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT, emptyIfNull);
+        case INVALID_OPERATION: // throw IllegalStateException on Java side
+            return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE, emptyIfNull);
+    }
+
+    // A service specific error will not show on status.transactionError() so
+    // be sure to use statusTFromBinderStatus() for reliable error handling.
+
+    // throw a ServiceSpecificException.
+    return Status::fromServiceSpecificError(status, emptyIfNull);
+}
+
+} // namespace android::gui::aidl_utils
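
For orientation, here is a minimal usage sketch of the helpers above on both sides of an AIDL
call. The IExampleService interface, its doSomething() method, and the doWork() helper are
hypothetical names used purely for illustration; only the conversion functions come from this
header.

    #include <gui/AidlStatusUtil.h>

    using ::android::binder::Status;
    using ::android::gui::aidl_utils::binderStatusFromStatusT;
    using ::android::gui::aidl_utils::statusTFromBinderStatus;

    // Client side: collapse the binder::Status of a (hypothetical) AIDL call into a
    // single status_t for legacy native error handling.
    android::status_t callRemote(const android::sp<IExampleService>& service) {
        Status status = service->doSomething();
        return statusTFromBinderStatus(status); // OK, PERMISSION_DENIED, BAD_VALUE, ...
    }

    // Service side: map a native status_t onto a binder::Status so that Java callers
    // see an exception rather than an integer return value.
    Status ExampleService::doSomething() {
        android::status_t err = doWork(); // hypothetical native helper
        return binderStatusFromStatusT(err, "doSomething failed");
    }
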
diff --git a/libs/gui/include/gui/BLASTBufferQueue.h b/libs/gui/include/gui/BLASTBufferQueue.h
index 40ffea6..a49a859 100644
--- a/libs/gui/include/gui/BLASTBufferQueue.h
+++ b/libs/gui/include/gui/BLASTBufferQueue.h
@@ -44,25 +44,25 @@
             mCurrentlyConnected(false),
             mPreviouslyConnected(false) {}
 
-    void onDisconnect() override;
+    void onDisconnect() override EXCLUDES(mMutex);
     void addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
-                                  FrameEventHistoryDelta* outDelta) override REQUIRES(mMutex);
+                                  FrameEventHistoryDelta* outDelta) override EXCLUDES(mMutex);
     void updateFrameTimestamps(uint64_t frameNumber, nsecs_t refreshStartTime,
                                const sp<Fence>& gpuCompositionDoneFence,
                                const sp<Fence>& presentFence, const sp<Fence>& prevReleaseFence,
                                CompositorTiming compositorTiming, nsecs_t latchTime,
-                               nsecs_t dequeueReadyTime) REQUIRES(mMutex);
-    void getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect);
+                               nsecs_t dequeueReadyTime) EXCLUDES(mMutex);
+    void getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) EXCLUDES(mMutex);
 
     void resizeFrameEventHistory(size_t newSize);
 
 protected:
-    void onSidebandStreamChanged() override REQUIRES(mMutex);
+    void onSidebandStreamChanged() override EXCLUDES(mMutex);
 
 private:
     const wp<BLASTBufferQueue> mBLASTBufferQueue;
 
-    uint64_t mCurrentFrameNumber = 0;
+    uint64_t mCurrentFrameNumber GUARDED_BY(mMutex) = 0;
 
     Mutex mMutex;
     ConsumerFrameEventHistory mFrameEventHistory GUARDED_BY(mMutex);
@@ -71,9 +71,7 @@
     bool mPreviouslyConnected GUARDED_BY(mMutex);
 };
 
-class BLASTBufferQueue
-    : public ConsumerBase::FrameAvailableListener, public BufferItemConsumer::BufferFreedListener
-{
+class BLASTBufferQueue : public ConsumerBase::FrameAvailableListener {
 public:
     BLASTBufferQueue(const std::string& name, bool updateDestinationFrame = true);
     BLASTBufferQueue(const std::string& name, const sp<SurfaceControl>& surface, int width,
@@ -85,7 +83,6 @@
     sp<Surface> getSurface(bool includeSurfaceControlHandle);
     bool isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const;
 
-    void onBufferFreed(const wp<GraphicBuffer>&/* graphicBuffer*/) override { /* TODO */ }
     void onFrameReplaced(const BufferItem& item) override;
     void onFrameAvailable(const BufferItem& item) override;
     void onFrameDequeued(const uint64_t) override;
@@ -99,10 +96,11 @@
                                std::optional<uint32_t> currentMaxAcquiredBufferCount);
     void releaseBufferCallbackLocked(const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
                                      std::optional<uint32_t> currentMaxAcquiredBufferCount,
-                                     bool fakeRelease);
-    void syncNextTransaction(std::function<void(SurfaceComposerClient::Transaction*)> callback,
+                                     bool fakeRelease) REQUIRES(mMutex);
+    bool syncNextTransaction(std::function<void(SurfaceComposerClient::Transaction*)> callback,
                              bool acquireSingleBuffer = true);
     void stopContinuousSyncTransaction();
+    void clearSyncTransaction();
 
     void mergeWithNextTransaction(SurfaceComposerClient::Transaction* t, uint64_t frameNumber);
     void applyPendingTransactions(uint64_t frameNumber);
@@ -117,15 +115,12 @@
 
     uint32_t getLastTransformHint() const;
     uint64_t getLastAcquiredFrameNum();
-    void abandon();
 
     /**
-     * Set a callback to be invoked when we are hung. The boolean parameter
-     * indicates whether the hang is due to an unfired fence.
-     * TODO: The boolean is always true atm, unfired fence is
-     * the only case we detect.
+     * Set a callback to be invoked when we are hung. The string parameter
+     * indicates the reason for the hang.
      */
-    void setTransactionHangCallback(std::function<void(bool)> callback);
+    void setTransactionHangCallback(std::function<void(const std::string&)> callback);
 
     virtual ~BLASTBufferQueue();
 
@@ -161,7 +156,7 @@
     // mNumAcquired (buffers that queued to SF)  mPendingRelease.size() (buffers that are held by
     // blast). This counter is read by android studio profiler.
     std::string mQueuedBufferTrace;
-    sp<SurfaceControl> mSurfaceControl;
+    sp<SurfaceControl> mSurfaceControl GUARDED_BY(mMutex);
 
     mutable std::mutex mMutex;
     std::condition_variable mCallbackCV;
@@ -173,6 +168,11 @@
     int32_t mNumFrameAvailable GUARDED_BY(mMutex) = 0;
     int32_t mNumAcquired GUARDED_BY(mMutex) = 0;
 
+    // A value used to identify if a producer has been changed for the same SurfaceControl.
+    // This is needed to know when the frame number has been reset to make sure we don't
+    // latch stale buffers and that we don't wait on barriers from an old producer.
+    uint32_t mProducerId = 0;
+
     // Keep a reference to the submitted buffers so we can release when surfaceflinger drops the
     // buffer or the buffer has been presented and a new buffer is ready to be presented.
     std::unordered_map<ReleaseCallbackId, BufferItem, ReleaseBufferCallbackIdHash> mSubmitted
@@ -249,7 +249,7 @@
 
     // Queues up transactions using this token in SurfaceFlinger. This prevents queued up
     // transactions from other parts of the client from blocking this transaction.
-    const sp<IBinder> mApplyToken GUARDED_BY(mMutex) = new BBinder();
+    const sp<IBinder> mApplyToken GUARDED_BY(mMutex) = sp<BBinder>::make();
 
     // Guards access to mDequeueTimestamps since we cannot hold to mMutex in onFrameDequeued or
     // we will deadlock.
@@ -263,7 +263,7 @@
     // callback for them.
     std::queue<sp<SurfaceControl>> mSurfaceControlsWithPendingCallback GUARDED_BY(mMutex);
 
-    uint32_t mCurrentMaxAcquiredBufferCount;
+    uint32_t mCurrentMaxAcquiredBufferCount GUARDED_BY(mMutex);
 
     // Flag to determine if syncTransaction should only acquire a single buffer and then clear or
     // continue to acquire buffers until explicitly cleared
@@ -287,10 +287,10 @@
     // need to set this flag, notably only in the case where we are transitioning from a previous
     // transaction applied by us (one way, may not yet have reached server) and an upcoming
     // transaction that will be applied by some sync consumer.
-    bool mAppliedLastTransaction = false;
-    uint64_t mLastAppliedFrameNumber = 0;
+    bool mAppliedLastTransaction GUARDED_BY(mMutex) = false;
+    uint64_t mLastAppliedFrameNumber GUARDED_BY(mMutex) = 0;
 
-    std::function<void(bool)> mTransactionHangCallback;
+    std::function<void(const std::string&)> mTransactionHangCallback;
 
     std::unordered_set<uint64_t> mSyncedFrameNumbers GUARDED_BY(mMutex);
 };
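
Two changes in the hunk above are worth spelling out: the thread-safety annotations flip from
REQUIRES(mMutex) to EXCLUDES(mMutex), documenting that these listener entry points acquire
mMutex themselves and therefore must not be called with it already held, and
setTransactionHangCallback now reports a human-readable reason string instead of a bool.
A minimal sketch of registering the new callback (the logging is illustrative only):

    #include <gui/BLASTBufferQueue.h>
    #include <log/log.h>

    void registerHangLogger(const android::sp<android::BLASTBufferQueue>& bbq) {
        bbq->setTransactionHangCallback([](const std::string& reason) {
            // The string carries the reason for the hang, e.g. an unsignaled fence.
            ALOGE("BLASTBufferQueue transaction hang: %s", reason.c_str());
        });
    }
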
diff --git a/libs/gui/include/gui/Choreographer.h b/libs/gui/include/gui/Choreographer.h
new file mode 100644
index 0000000..1df9b11
--- /dev/null
+++ b/libs/gui/include/gui/Choreographer.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/choreographer.h>
+#include <gui/DisplayEventDispatcher.h>
+#include <jni.h>
+#include <utils/Looper.h>
+
+#include <mutex>
+#include <queue>
+#include <thread>
+
+namespace android {
+using gui::VsyncEventData;
+
+struct FrameCallback {
+    AChoreographer_frameCallback callback;
+    AChoreographer_frameCallback64 callback64;
+    AChoreographer_vsyncCallback vsyncCallback;
+    void* data;
+    nsecs_t dueTime;
+
+    inline bool operator<(const FrameCallback& rhs) const {
+        // Note that this is intentionally flipped because we want callbacks due sooner to be at
+        // the head of the queue
+        return dueTime > rhs.dueTime;
+    }
+};
+
+struct RefreshRateCallback {
+    AChoreographer_refreshRateCallback callback;
+    void* data;
+    bool firstCallbackFired = false;
+};
+
+class Choreographer;
+
+/**
+ * Implementation of AChoreographerFrameCallbackData.
+ */
+struct ChoreographerFrameCallbackDataImpl {
+    int64_t frameTimeNanos{0};
+
+    VsyncEventData vsyncEventData;
+
+    const Choreographer* choreographer;
+};
+
+class Choreographer : public DisplayEventDispatcher, public MessageHandler {
+public:
+    struct Context {
+        std::mutex lock;
+        std::vector<Choreographer*> ptrs GUARDED_BY(lock);
+        std::map<AVsyncId, int64_t> startTimes GUARDED_BY(lock);
+        bool registeredToDisplayManager GUARDED_BY(lock) = false;
+
+        std::atomic<nsecs_t> mLastKnownVsync = -1;
+    };
+    static Context gChoreographers;
+
+    explicit Choreographer(const sp<Looper>& looper, const sp<IBinder>& layerHandle = nullptr)
+            EXCLUDES(gChoreographers.lock);
+    void postFrameCallbackDelayed(AChoreographer_frameCallback cb,
+                                  AChoreographer_frameCallback64 cb64,
+                                  AChoreographer_vsyncCallback vsyncCallback, void* data,
+                                  nsecs_t delay);
+    void registerRefreshRateCallback(AChoreographer_refreshRateCallback cb, void* data)
+            EXCLUDES(gChoreographers.lock);
+    void unregisterRefreshRateCallback(AChoreographer_refreshRateCallback cb, void* data);
+    // Drains the queue of pending vsync periods and dispatches refresh rate
+    // updates to callbacks.
+    // The assumption is that this method is only called on a single
+    // processing thread, either by looper or by AChoreographer_handleEvents
+    void handleRefreshRateUpdates();
+    void scheduleLatestConfigRequest();
+
+    enum {
+        MSG_SCHEDULE_CALLBACKS = 0,
+        MSG_SCHEDULE_VSYNC = 1,
+        MSG_HANDLE_REFRESH_RATE_UPDATES = 2,
+    };
+    virtual void handleMessage(const Message& message) override;
+
+    static void initJVM(JNIEnv* env);
+    static Choreographer* getForThread();
+    static void signalRefreshRateCallbacks(nsecs_t vsyncPeriod) EXCLUDES(gChoreographers.lock);
+    static int64_t getStartTimeNanosForVsyncId(AVsyncId vsyncId) EXCLUDES(gChoreographers.lock);
+    virtual ~Choreographer() override EXCLUDES(gChoreographers.lock);
+    int64_t getFrameInterval() const;
+    bool inCallback() const;
+
+private:
+    Choreographer(const Choreographer&) = delete;
+
+    void dispatchVsync(nsecs_t timestamp, PhysicalDisplayId displayId, uint32_t count,
+                       VsyncEventData vsyncEventData) override;
+    void dispatchHotplug(nsecs_t timestamp, PhysicalDisplayId displayId, bool connected) override;
+    void dispatchModeChanged(nsecs_t timestamp, PhysicalDisplayId displayId, int32_t modeId,
+                             nsecs_t vsyncPeriod) override;
+    void dispatchNullEvent(nsecs_t, PhysicalDisplayId) override;
+    void dispatchFrameRateOverrides(nsecs_t timestamp, PhysicalDisplayId displayId,
+                                    std::vector<FrameRateOverride> overrides) override;
+
+    void scheduleCallbacks();
+
+    ChoreographerFrameCallbackDataImpl createFrameCallbackData(nsecs_t timestamp) const;
+    void registerStartTime() const;
+
+    std::mutex mLock;
+    // Protected by mLock
+    std::priority_queue<FrameCallback> mFrameCallbacks;
+    std::vector<RefreshRateCallback> mRefreshRateCallbacks;
+
+    nsecs_t mLatestVsyncPeriod = -1;
+    VsyncEventData mLastVsyncEventData;
+    bool mInCallback = false;
+
+    const sp<Looper> mLooper;
+    const std::thread::id mThreadId;
+
+    // Approximation of num_threads_using_choreographer * num_frames_of_history with leeway.
+    static constexpr size_t kMaxStartTimes = 250;
+};
+
+} // namespace android
\ No newline at end of file
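
A side note on FrameCallback::operator< above: std::priority_queue is a max-heap, so inverting
the comparison makes the callback with the smallest dueTime surface at top(). A standalone
sketch of the same idea:

    #include <cassert>
    #include <queue>

    struct Due {
        long long dueTime;
        // Inverted on purpose: a later due time compares as "less", i.e. lower priority.
        bool operator<(const Due& rhs) const { return dueTime > rhs.dueTime; }
    };

    int main() {
        std::priority_queue<Due> queue;
        queue.push({30});
        queue.push({10});
        queue.push({20});
        assert(queue.top().dueTime == 10); // the earliest-due entry is dispatched first
        return 0;
    }
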
diff --git a/libs/gui/include/gui/CompositorTiming.h b/libs/gui/include/gui/CompositorTiming.h
new file mode 100644
index 0000000..cb8ca7a
--- /dev/null
+++ b/libs/gui/include/gui/CompositorTiming.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <utils/Timers.h>
+
+namespace android::gui {
+
+// Expected timing of the next composited frame, based on the timing of the latest frames.
+struct CompositorTiming {
+    static constexpr nsecs_t kDefaultVsyncPeriod = 16'666'667;
+
+    CompositorTiming() = default;
+    CompositorTiming(nsecs_t vsyncDeadline, nsecs_t vsyncPeriod, nsecs_t vsyncPhase,
+                     nsecs_t presentLatency);
+
+    // Time point when compositing is expected to start.
+    nsecs_t deadline = 0;
+
+    // Duration between consecutive frames. In other words, the VSYNC period.
+    nsecs_t interval = kDefaultVsyncPeriod;
+
+    // Duration between composite start and present. For missed frames, the extra latency is rounded
+    // to a multiple of the VSYNC period, such that the remainder (presentLatency % interval) always
+    // evaluates to the VSYNC phase offset.
+    nsecs_t presentLatency = kDefaultVsyncPeriod;
+};
+
+} // namespace android::gui
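
A quick worked example of the presentLatency invariant described above, using the default 60 Hz
period and an illustrative 4 ms vsync phase offset:

    #include <cstdint>

    constexpr int64_t kInterval = 16'666'667;         // kDefaultVsyncPeriod, ~60 Hz
    constexpr int64_t kPhase = 4'000'000;             // illustrative vsync phase offset
    constexpr int64_t kOnTime = kInterval + kPhase;   // present latency with no missed frame
    constexpr int64_t kOneMiss = kOnTime + kInterval; // a missed frame adds exactly one period
    static_assert(kOnTime % kInterval == kPhase);
    static_assert(kOneMiss % kInterval == kPhase);    // the remainder still yields the phase
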
diff --git a/libs/gui/include/gui/DisplayCaptureArgs.h b/libs/gui/include/gui/DisplayCaptureArgs.h
index ec884cf..2676e0a 100644
--- a/libs/gui/include/gui/DisplayCaptureArgs.h
+++ b/libs/gui/include/gui/DisplayCaptureArgs.h
@@ -22,8 +22,11 @@
 #include <binder/IBinder.h>
 #include <binder/Parcel.h>
 #include <binder/Parcelable.h>
+#include <gui/SpHash.h>
 #include <ui/GraphicTypes.h>
 #include <ui/PixelFormat.h>
+#include <ui/Rect.h>
+#include <unordered_set>
 
 namespace android::gui {
 
@@ -38,7 +41,7 @@
     bool captureSecureLayers{false};
     int32_t uid{UNSET_UID};
     // Force capture to be in a color space. If the value is ui::Dataspace::UNKNOWN, the captured
-    // result will be in the display's colorspace.
+    // result will be in a colorspace appropriate for capturing the display contents.
     // The display may use a non-RGB dataspace (e.g. Display P3), which can cause the captured
     // pixel data to differ from sRGB and fail color checks in tests.
     // NOTE: In normal cases, we want the screen to be captured in the display's colorspace.
@@ -54,6 +57,17 @@
 
     bool grayscale = false;
 
+    std::unordered_set<sp<IBinder>, SpHash<IBinder>> excludeHandles;
+
+    // Hint that the caller will use the screenshot animation as part of a transition animation.
+    // The canonical example would be screen rotation - in such a case any color shift in the
+    // screenshot is a detractor so composition in the display's colorspace is required.
+    // Otherwise, the system may choose a colorspace that is more appropriate for use-cases
+    // such as file encoding or for blending HDR content into an app's UI, where the display's
+    // exact colorspace is not an appropriate intermediate result.
+    // Note that if the caller is requesting a specific dataspace, this hint does nothing.
+    bool hintForSeamlessTransition = false;
+
     virtual status_t writeToParcel(Parcel* output) const;
     virtual status_t readFromParcel(const Parcel* input);
 };
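
A minimal sketch of requesting a rotation-style capture with the new hint. This assumes the
fields shown in the hunk above live on (or are inherited by) gui::DisplayCaptureArgs, the type
this header is named after; any additional setup a real capture needs is omitted:

    android::gui::DisplayCaptureArgs args;
    args.hintForSeamlessTransition = true; // e.g. screen rotation: keep the display's colorspace
    args.captureSecureLayers = false;
    args.grayscale = false;
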
diff --git a/libs/gui/include/gui/DisplayEventDispatcher.h b/libs/gui/include/gui/DisplayEventDispatcher.h
index a342539..140efa6 100644
--- a/libs/gui/include/gui/DisplayEventDispatcher.h
+++ b/libs/gui/include/gui/DisplayEventDispatcher.h
@@ -23,10 +23,11 @@
 
 class DisplayEventDispatcher : public LooperCallback {
 public:
-    explicit DisplayEventDispatcher(
-            const sp<Looper>& looper,
-            ISurfaceComposer::VsyncSource vsyncSource = ISurfaceComposer::eVsyncSourceApp,
-            ISurfaceComposer::EventRegistrationFlags eventRegistration = {});
+    explicit DisplayEventDispatcher(const sp<Looper>& looper,
+                                    gui::ISurfaceComposer::VsyncSource vsyncSource =
+                                            gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
+                                    EventRegistrationFlags eventRegistration = {},
+                                    const sp<IBinder>& layerHandle = nullptr);
 
     status_t initialize();
     void dispose();
diff --git a/libs/gui/include/gui/DisplayEventReceiver.h b/libs/gui/include/gui/DisplayEventReceiver.h
index cf7a4e5..7fd6c35 100644
--- a/libs/gui/include/gui/DisplayEventReceiver.h
+++ b/libs/gui/include/gui/DisplayEventReceiver.h
@@ -20,20 +20,26 @@
 #include <stdint.h>
 #include <sys/types.h>
 
+#include <ftl/flags.h>
+
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 #include <utils/Timers.h>
 
+#include <android/gui/ISurfaceComposer.h>
 #include <binder/IInterface.h>
-#include <gui/ISurfaceComposer.h>
 #include <gui/VsyncEventData.h>
 
+#include <ui/DisplayId.h>
+
 // ----------------------------------------------------------------------------
 
 namespace android {
 
 // ----------------------------------------------------------------------------
 
+using EventRegistrationFlags = ftl::Flags<gui::ISurfaceComposer::EventRegistration>;
+
 using gui::IDisplayEventConnection;
 using gui::ParcelableVsyncEventData;
 using gui::VsyncEventData;
@@ -111,9 +117,10 @@
      * To receive ModeChanged and/or FrameRateOverrides events specify this in
      * the constructor. Other events start being delivered immediately.
      */
-    explicit DisplayEventReceiver(
-            ISurfaceComposer::VsyncSource vsyncSource = ISurfaceComposer::eVsyncSourceApp,
-            ISurfaceComposer::EventRegistrationFlags eventRegistration = {});
+    explicit DisplayEventReceiver(gui::ISurfaceComposer::VsyncSource vsyncSource =
+                                          gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
+                                  EventRegistrationFlags eventRegistration = {},
+                                  const sp<IBinder>& layerHandle = nullptr);
 
     /*
      * ~DisplayEventReceiver severs the connection with SurfaceFlinger, new events
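
With the constructor above, a receiver that only needs the default app vsync events can spell
out the new defaults explicitly; a minimal sketch (only the constructor signature changes in
this hunk):

    #include <gui/DisplayEventReceiver.h>

    void createAppVsyncReceiver() {
        android::DisplayEventReceiver receiver(
                android::gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
                /*eventRegistration=*/{},
                /*layerHandle=*/nullptr);
    }
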
diff --git a/libs/gui/include/gui/DisplayInfo.h b/libs/gui/include/gui/DisplayInfo.h
index 74f33a2..42b62c7 100644
--- a/libs/gui/include/gui/DisplayInfo.h
+++ b/libs/gui/include/gui/DisplayInfo.h
@@ -41,6 +41,8 @@
     status_t writeToParcel(android::Parcel*) const override;
 
     status_t readFromParcel(const android::Parcel*) override;
+
+    void dump(std::string& result, const char* prefix = "") const;
 };
 
 } // namespace android::gui
\ No newline at end of file
diff --git a/libs/gui/include/gui/FenceMonitor.h b/libs/gui/include/gui/FenceMonitor.h
new file mode 100644
index 0000000..62cedde
--- /dev/null
+++ b/libs/gui/include/gui/FenceMonitor.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+#include <deque>
+#include <mutex>
+
+#include <ui/Fence.h>
+
+namespace android::gui {
+
+class FenceMonitor {
+public:
+    explicit FenceMonitor(const char* name);
+    void queueFence(const sp<Fence>& fence);
+
+private:
+    void loop();
+    void threadLoop();
+
+    const char* mName;
+    uint32_t mFencesQueued;
+    uint32_t mFencesSignaled;
+    std::deque<sp<Fence>> mQueue;
+    std::condition_variable mCondition;
+    std::mutex mMutex;
+};
+
+} // namespace android::gui
\ No newline at end of file
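
A minimal usage sketch of the new monitor; the null-fence guard and the Fence::NO_FENCE
placeholder stand in for a real present/release fence obtained elsewhere:

    #include <gui/FenceMonitor.h>
    #include <ui/Fence.h>

    void trackPresentFence(const android::sp<android::Fence>& fence) {
        static android::gui::FenceMonitor monitor("presentFence");
        monitor.queueFence(fence != nullptr ? fence : android::Fence::NO_FENCE);
    }
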
diff --git a/libs/gui/include/gui/FrameTimestamps.h b/libs/gui/include/gui/FrameTimestamps.h
index 968aa2b..3d1be4d 100644
--- a/libs/gui/include/gui/FrameTimestamps.h
+++ b/libs/gui/include/gui/FrameTimestamps.h
@@ -17,6 +17,9 @@
 #ifndef ANDROID_GUI_FRAMETIMESTAMPS_H
 #define ANDROID_GUI_FRAMETIMESTAMPS_H
 
+#include <android/gui/FrameEvent.h>
+
+#include <gui/CompositorTiming.h>
 #include <ui/FenceTime.h>
 #include <utils/Flattenable.h>
 #include <utils/StrongPointer.h>
@@ -31,22 +34,8 @@
 struct FrameEvents;
 class FrameEventHistoryDelta;
 
-
-// Identifiers for all the events that may be recorded or reported.
-enum class FrameEvent {
-    POSTED,
-    REQUESTED_PRESENT,
-    LATCH,
-    ACQUIRE,
-    FIRST_REFRESH_START,
-    LAST_REFRESH_START,
-    GPU_COMPOSITION_DONE,
-    DISPLAY_PRESENT,
-    DEQUEUE_READY,
-    RELEASE,
-    EVENT_COUNT, // Not an actual event.
-};
-
+using gui::CompositorTiming;
+using gui::FrameEvent;
 
 // A collection of timestamps corresponding to a single frame.
 struct FrameEvents {
@@ -96,12 +85,6 @@
     std::shared_ptr<FenceTime> releaseFence{FenceTime::NO_FENCE};
 };
 
-struct CompositorTiming {
-    nsecs_t deadline{0};
-    nsecs_t interval{16666667};
-    nsecs_t presentLatency{16666667};
-};
-
 // A short history of frames that are synchronized between the consumer and
 // producer via deltas.
 class FrameEventHistory {
diff --git a/libs/gui/include/gui/GLConsumer.h b/libs/gui/include/gui/GLConsumer.h
index 2f538ff..ba268ab 100644
--- a/libs/gui/include/gui/GLConsumer.h
+++ b/libs/gui/include/gui/GLConsumer.h
@@ -138,6 +138,10 @@
             const sp<GraphicBuffer>& buf, const Rect& cropRect,
             uint32_t transform, bool filtering);
 
+    static void computeTransformMatrix(float outTransform[16], float bufferWidth,
+                                       float bufferHeight, PixelFormat pixelFormat,
+                                       const Rect& cropRect, uint32_t transform, bool filtering);
+
     // Scale the crop down horizontally or vertically such that it has the
     // same aspect ratio as the buffer does.
     static Rect scaleDownCrop(const Rect& crop, uint32_t bufferWidth,
diff --git a/libs/gui/include/gui/ISurfaceComposer.h b/libs/gui/include/gui/ISurfaceComposer.h
index a610e94..7c150d5 100644
--- a/libs/gui/include/gui/ISurfaceComposer.h
+++ b/libs/gui/include/gui/ISurfaceComposer.h
@@ -16,19 +16,18 @@
 
 #pragma once
 
+#include <android/gui/CachingHint.h>
 #include <android/gui/DisplayBrightness.h>
+#include <android/gui/FrameTimelineInfo.h>
 #include <android/gui/IDisplayEventConnection.h>
 #include <android/gui/IFpsListener.h>
 #include <android/gui/IHdrLayerInfoListener.h>
 #include <android/gui/IRegionSamplingListener.h>
 #include <android/gui/IScreenCaptureListener.h>
-#include <android/gui/ITransactionTraceListener.h>
 #include <android/gui/ITunnelModeEnabledListener.h>
 #include <android/gui/IWindowInfosListener.h>
 #include <binder/IBinder.h>
 #include <binder/IInterface.h>
-#include <ftl/flags.h>
-#include <gui/FrameTimelineInfo.h>
 #include <gui/ITransactionCompletedListener.h>
 #include <gui/SpHash.h>
 #include <math/vec4.h>
@@ -57,17 +56,14 @@
 namespace android {
 
 struct client_cache_t;
-struct ComposerState;
+class ComposerState;
 struct DisplayStatInfo;
 struct DisplayState;
 struct InputWindowCommands;
-class LayerDebugInfo;
 class HdrCapabilities;
-class IGraphicBufferProducer;
-class ISurfaceComposerClient;
 class Rect;
-enum class FrameEvent;
 
+using gui::FrameTimelineInfo;
 using gui::IDisplayEventConnection;
 using gui::IRegionSamplingListener;
 using gui::IScreenCaptureListener;
@@ -77,6 +73,7 @@
 
 struct DisplayCaptureArgs;
 struct LayerCaptureArgs;
+class LayerDebugInfo;
 
 } // namespace gui
 
@@ -85,7 +82,6 @@
 struct DisplayMode;
 struct DisplayState;
 struct DynamicDisplayInfo;
-struct StaticDisplayInfo;
 
 } // namespace ui
 
@@ -97,11 +93,8 @@
 public:
     DECLARE_META_INTERFACE(SurfaceComposer)
 
-    static constexpr size_t MAX_LAYERS = 4096;
-
     // flags for setTransactionState()
     enum {
-        eSynchronous = 0x01,
         eAnimation = 0x02,
 
         // Explicit indication that this transaction and others to follow will likely result in a
@@ -110,328 +103,20 @@
         // (sf vsync offset - debug.sf.early_phase_offset_ns). SurfaceFlinger will continue to be
         // in the early configuration until it receives eEarlyWakeupEnd. These flags are
         // expected to be used by WindowManager only and are guarded by
-        // android.permission.ACCESS_SURFACE_FLINGER
+        // android.permission.WAKEUP_SURFACE_FLINGER
         eEarlyWakeupStart = 0x08,
         eEarlyWakeupEnd = 0x10,
         eOneWay = 0x20
     };
 
-    enum VsyncSource {
-        eVsyncSourceApp = 0,
-        eVsyncSourceSurfaceFlinger = 1
-    };
-
-    enum class EventRegistration {
-        modeChanged = 1 << 0,
-        frameRateOverride = 1 << 1,
-    };
-
-    using EventRegistrationFlags = ftl::Flags<EventRegistration>;
-
-    /*
-     * Create a connection with SurfaceFlinger.
-     */
-    virtual sp<ISurfaceComposerClient> createConnection() = 0;
-
-    /* return an IDisplayEventConnection */
-    virtual sp<IDisplayEventConnection> createDisplayEventConnection(
-            VsyncSource vsyncSource = eVsyncSourceApp,
-            EventRegistrationFlags eventRegistration = {}) = 0;
-
     /* open/close transactions. requires ACCESS_SURFACE_FLINGER permission */
     virtual status_t setTransactionState(
-            const FrameTimelineInfo& frameTimelineInfo, const Vector<ComposerState>& state,
+            const FrameTimelineInfo& frameTimelineInfo, Vector<ComposerState>& state,
             const Vector<DisplayState>& displays, uint32_t flags, const sp<IBinder>& applyToken,
-            const InputWindowCommands& inputWindowCommands, int64_t desiredPresentTime,
-            bool isAutoTimestamp, const client_cache_t& uncacheBuffer, bool hasListenerCallbacks,
-            const std::vector<ListenerCallbacks>& listenerCallbacks, uint64_t transactionId) = 0;
-
-    /* signal that we're done booting.
-     * Requires ACCESS_SURFACE_FLINGER permission
-     */
-    virtual void bootFinished() = 0;
-
-    /* verify that an IGraphicBufferProducer was created by SurfaceFlinger.
-     */
-    virtual bool authenticateSurfaceTexture(
-            const sp<IGraphicBufferProducer>& surface) const = 0;
-
-    /* Returns the frame timestamps supported by SurfaceFlinger.
-     */
-    virtual status_t getSupportedFrameTimestamps(
-            std::vector<FrameEvent>* outSupported) const = 0;
-
-    /**
-     * Gets immutable information about given physical display.
-     */
-    virtual status_t getStaticDisplayInfo(const sp<IBinder>& display, ui::StaticDisplayInfo*) = 0;
-
-    /**
-     * Gets dynamic information about given physical display.
-     */
-    virtual status_t getDynamicDisplayInfo(const sp<IBinder>& display, ui::DynamicDisplayInfo*) = 0;
-
-    virtual status_t getDisplayNativePrimaries(const sp<IBinder>& display,
-            ui::DisplayPrimaries& primaries) = 0;
-    virtual status_t setActiveColorMode(const sp<IBinder>& display,
-            ui::ColorMode colorMode) = 0;
-
-    /**
-     * Sets the user-preferred display mode that a device should boot in.
-     */
-    virtual status_t setBootDisplayMode(const sp<IBinder>& display, ui::DisplayModeId) = 0;
-
-    /* Clears the frame statistics for animations.
-     *
-     * Requires the ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t clearAnimationFrameStats() = 0;
-
-    /* Gets the frame statistics for animations.
-     *
-     * Requires the ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t getAnimationFrameStats(FrameStats* outStats) const = 0;
-
-    /* Overrides the supported HDR modes for the given display device.
-     *
-     * Requires the ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t overrideHdrTypes(const sp<IBinder>& display,
-                                      const std::vector<ui::Hdr>& hdrTypes) = 0;
-
-    /* Pulls surfaceflinger atoms global stats and layer stats to pipe to statsd.
-     *
-     * Requires the calling uid be from system server.
-     */
-    virtual status_t onPullAtom(const int32_t atomId, std::string* outData, bool* success) = 0;
-
-    virtual status_t enableVSyncInjections(bool enable) = 0;
-
-    virtual status_t injectVSync(nsecs_t when) = 0;
-
-    /* Gets the list of active layers in Z order for debugging purposes
-     *
-     * Requires the ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t getLayerDebugInfo(std::vector<LayerDebugInfo>* outLayers) = 0;
-
-    virtual status_t getColorManagement(bool* outGetColorManagement) const = 0;
-
-    /* Gets the composition preference of the default data space and default pixel format,
-     * as well as the wide color gamut data space and wide color gamut pixel format.
-     * If the wide color gamut data space is V0_SRGB, then it implies that the platform
-     * has no wide color gamut support.
-     *
-     * Requires the ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t getCompositionPreference(ui::Dataspace* defaultDataspace,
-                                              ui::PixelFormat* defaultPixelFormat,
-                                              ui::Dataspace* wideColorGamutDataspace,
-                                              ui::PixelFormat* wideColorGamutPixelFormat) const = 0;
-    /*
-     * Requires the ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t getDisplayedContentSamplingAttributes(const sp<IBinder>& display,
-                                                           ui::PixelFormat* outFormat,
-                                                           ui::Dataspace* outDataspace,
-                                                           uint8_t* outComponentMask) const = 0;
-
-    /* Turns on the color sampling engine on the display.
-     *
-     * Requires the ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t setDisplayContentSamplingEnabled(const sp<IBinder>& display, bool enable,
-                                                      uint8_t componentMask,
-                                                      uint64_t maxFrames) = 0;
-
-    /* Returns statistics on the color profile of the last frame displayed for a given display
-     *
-     * Requires the ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t getDisplayedContentSample(const sp<IBinder>& display, uint64_t maxFrames,
-                                               uint64_t timestamp,
-                                               DisplayedFrameStats* outStats) const = 0;
-
-    /*
-     * Gets whether SurfaceFlinger can support protected content in GPU composition.
-     * Requires the ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t getProtectedContentSupport(bool* outSupported) const = 0;
-
-    /* Registers a listener to stream median luma updates from SurfaceFlinger.
-     *
-     * The sampling area is bounded by both samplingArea and the given stopLayerHandle
-     * (i.e., only layers behind the stop layer will be captured and sampled).
-     *
-     * Multiple listeners may be provided so long as they have independent listeners.
-     * If multiple listeners are provided, the effective sampling region for each listener will
-     * be bounded by whichever stop layer has a lower Z value.
-     *
-     * Requires the same permissions as captureLayers and captureScreen.
-     */
-    virtual status_t addRegionSamplingListener(const Rect& samplingArea,
-                                               const sp<IBinder>& stopLayerHandle,
-                                               const sp<IRegionSamplingListener>& listener) = 0;
-
-    /*
-     * Removes a listener that was streaming median luma updates from SurfaceFlinger.
-     */
-    virtual status_t removeRegionSamplingListener(const sp<IRegionSamplingListener>& listener) = 0;
-
-    /* Registers a listener that streams fps updates from SurfaceFlinger.
-     *
-     * The listener will stream fps updates for the layer tree rooted at the layer denoted by the
-     * task ID, i.e., the layer must have the task ID as part of its layer metadata with key
-     * METADATA_TASK_ID. If there is no such layer, then no fps is expected to be reported.
-     *
-     * Multiple listeners may be supported.
-     *
-     * Requires the READ_FRAME_BUFFER permission.
-     */
-    virtual status_t addFpsListener(int32_t taskId, const sp<gui::IFpsListener>& listener) = 0;
-    /*
-     * Removes a listener that was streaming fps updates from SurfaceFlinger.
-     */
-    virtual status_t removeFpsListener(const sp<gui::IFpsListener>& listener) = 0;
-
-    /* Registers a listener to receive tunnel mode enabled updates from SurfaceFlinger.
-     *
-     * Requires ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t addTunnelModeEnabledListener(
-            const sp<gui::ITunnelModeEnabledListener>& listener) = 0;
-
-    /*
-     * Removes a listener that was receiving tunnel mode enabled updates from SurfaceFlinger.
-     *
-     * Requires ACCESS_SURFACE_FLINGER permission.
-     */
-    virtual status_t removeTunnelModeEnabledListener(
-            const sp<gui::ITunnelModeEnabledListener>& listener) = 0;
-
-    /* Sets the refresh rate boundaries for the display.
-     *
-     * The primary refresh rate range represents display manager's general guidance on the display
-     * modes we'll consider when switching refresh rates. Unless we get an explicit signal from an
-     * app, we should stay within this range.
-     *
-     * The app request refresh rate range allows us to consider more display modes when switching
-     * refresh rates. Although we should generally stay within the primary range, specific
-     * considerations, such as layer frame rate settings specified via the setFrameRate() api, may
-     * cause us to go outside the primary range. We never go outside the app request range. The app
-     * request range will be greater than or equal to the primary refresh rate range, never smaller.
-     *
-     * defaultMode is used to narrow the list of display modes SurfaceFlinger will consider
-     * switching between. Only modes with a mode group and resolution matching defaultMode
-     * will be considered for switching. The defaultMode corresponds to an ID of mode in the list
-     * of supported modes returned from getDynamicDisplayInfo().
-     */
-    virtual status_t setDesiredDisplayModeSpecs(
-            const sp<IBinder>& displayToken, ui::DisplayModeId defaultMode,
-            bool allowGroupSwitching, float primaryRefreshRateMin, float primaryRefreshRateMax,
-            float appRequestRefreshRateMin, float appRequestRefreshRateMax) = 0;
-
-    virtual status_t getDesiredDisplayModeSpecs(const sp<IBinder>& displayToken,
-                                                ui::DisplayModeId* outDefaultMode,
-                                                bool* outAllowGroupSwitching,
-                                                float* outPrimaryRefreshRateMin,
-                                                float* outPrimaryRefreshRateMax,
-                                                float* outAppRequestRefreshRateMin,
-                                                float* outAppRequestRefreshRateMax) = 0;
-
-    /*
-     * Sets the global configuration for all the shadows drawn by SurfaceFlinger. Shadow follows
-     * material design guidelines.
-     *
-     * ambientColor
-     *      Color to the ambient shadow. The alpha is premultiplied.
-     *
-     * spotColor
-     *      Color to the spot shadow. The alpha is premultiplied. The position of the spot shadow
-     *      depends on the light position.
-     *
-     * lightPosY/lightPosZ
-     *      Position of the light used to cast the spot shadow. The X value is always the display
-     *      width / 2.
-     *
-     * lightRadius
-     *      Radius of the light casting the shadow.
-     */
-    virtual status_t setGlobalShadowSettings(const half4& ambientColor, const half4& spotColor,
-                                             float lightPosY, float lightPosZ,
-                                             float lightRadius) = 0;
-
-    /*
-     * Gets whether a display supports DISPLAY_DECORATION layers.
-     *
-     * displayToken
-     *      The token of the display.
-     * outSupport
-     *      An output parameter for whether/how the display supports
-     *      DISPLAY_DECORATION layers.
-     *
-     * Returns NO_ERROR upon success. Otherwise,
-     *      NAME_NOT_FOUND if the display is invalid, or
-     *      BAD_VALUE      if the output parameter is invalid.
-     */
-    virtual status_t getDisplayDecorationSupport(
-            const sp<IBinder>& displayToken,
-            std::optional<aidl::android::hardware::graphics::common::DisplayDecorationSupport>*
-                    outSupport) const = 0;
-
-    /*
-     * Sets the intended frame rate for a surface. See ANativeWindow_setFrameRate() for more info.
-     */
-    virtual status_t setFrameRate(const sp<IGraphicBufferProducer>& surface, float frameRate,
-                                  int8_t compatibility, int8_t changeFrameRateStrategy) = 0;
-
-    /*
-     * Set the override frame rate for a specified uid by GameManagerService.
-     * Passing the frame rate and uid to SurfaceFlinger to update the override mapping
-     * in the scheduler.
-     */
-    virtual status_t setOverrideFrameRate(uid_t uid, float frameRate) = 0;
-
-    /*
-     * Sets the frame timeline vsync info received from choreographer that corresponds to next
-     * buffer submitted on that surface.
-     */
-    virtual status_t setFrameTimelineInfo(const sp<IGraphicBufferProducer>& surface,
-                                          const FrameTimelineInfo& frameTimelineInfo) = 0;
-
-    /*
-     * Adds a TransactionTraceListener to listen for transaction tracing state updates.
-     */
-    virtual status_t addTransactionTraceListener(
-            const sp<gui::ITransactionTraceListener>& listener) = 0;
-
-    /**
-     * Gets priority of the RenderEngine in SurfaceFlinger.
-     */
-    virtual int getGPUContextPriority() = 0;
-
-    /**
-     * Gets the number of buffers SurfaceFlinger would need acquire. This number
-     * would be propagated to the client via MIN_UNDEQUEUED_BUFFERS so that the
-     * client could allocate enough buffers to match SF expectations of the
-     * pipeline depth. SurfaceFlinger will make sure that it will give the app at
-     * least the time configured as the 'appDuration' before trying to latch
-     * the buffer.
-     *
-     * The total buffers needed for a given configuration is basically the
-     * numbers of vsyncs a single buffer is used across the stack. For the default
-     * configuration a buffer is held ~1 vsync by the app, ~1 vsync by SurfaceFlinger
-     * and 1 vsync by the display. The extra buffers are calculated as the
-     * number of additional buffers on top of the 2 buffers already present
-     * in MIN_UNDEQUEUED_BUFFERS.
-     */
-    virtual status_t getMaxAcquiredBufferCount(int* buffers) const = 0;
-
-    virtual status_t addWindowInfosListener(
-            const sp<gui::IWindowInfosListener>& windowInfosListener) const = 0;
-    virtual status_t removeWindowInfosListener(
-            const sp<gui::IWindowInfosListener>& windowInfosListener) const = 0;
+            InputWindowCommands inputWindowCommands, int64_t desiredPresentTime,
+            bool isAutoTimestamp, const std::vector<client_cache_t>& uncacheBuffer,
+            bool hasListenerCallbacks, const std::vector<ListenerCallbacks>& listenerCallbacks,
+            uint64_t transactionId, const std::vector<uint64_t>& mergedTransactionIds) = 0;
 };
 
 // ----------------------------------------------------------------------------
@@ -442,77 +127,77 @@
         // Note: BOOT_FINISHED must remain this value, it is called from
         // Java by ActivityManagerService.
         BOOT_FINISHED = IBinder::FIRST_CALL_TRANSACTION,
-        CREATE_CONNECTION,
-        GET_STATIC_DISPLAY_INFO,
-        CREATE_DISPLAY_EVENT_CONNECTION,
-        CREATE_DISPLAY,             // Deprecated. Autogenerated by .aidl now.
-        DESTROY_DISPLAY,            // Deprecated. Autogenerated by .aidl now.
-        GET_PHYSICAL_DISPLAY_TOKEN, // Deprecated. Autogenerated by .aidl now.
+        CREATE_CONNECTION,               // Deprecated. Autogenerated by .aidl now.
+        GET_STATIC_DISPLAY_INFO,         // Deprecated. Autogenerated by .aidl now.
+        CREATE_DISPLAY_EVENT_CONNECTION, // Deprecated. Autogenerated by .aidl now.
+        CREATE_DISPLAY,                  // Deprecated. Autogenerated by .aidl now.
+        DESTROY_DISPLAY,                 // Deprecated. Autogenerated by .aidl now.
+        GET_PHYSICAL_DISPLAY_TOKEN,      // Deprecated. Autogenerated by .aidl now.
         SET_TRANSACTION_STATE,
-        AUTHENTICATE_SURFACE,
-        GET_SUPPORTED_FRAME_TIMESTAMPS,
-        GET_DISPLAY_MODES,       // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
-        GET_ACTIVE_DISPLAY_MODE, // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
+        AUTHENTICATE_SURFACE,           // Deprecated. Autogenerated by .aidl now.
+        GET_SUPPORTED_FRAME_TIMESTAMPS, // Deprecated. Autogenerated by .aidl now.
+        GET_DISPLAY_MODES,              // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
+        GET_ACTIVE_DISPLAY_MODE,        // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
         GET_DISPLAY_STATE,
-        CAPTURE_DISPLAY, // Deprecated. Autogenerated by .aidl now.
-        CAPTURE_LAYERS,  // Deprecated. Autogenerated by .aidl now.
-        CLEAR_ANIMATION_FRAME_STATS,
-        GET_ANIMATION_FRAME_STATS,
-        SET_POWER_MODE, // Deprecated. Autogenerated by .aidl now.
+        CAPTURE_DISPLAY,             // Deprecated. Autogenerated by .aidl now.
+        CAPTURE_LAYERS,              // Deprecated. Autogenerated by .aidl now.
+        CLEAR_ANIMATION_FRAME_STATS, // Deprecated. Autogenerated by .aidl now.
+        GET_ANIMATION_FRAME_STATS,   // Deprecated. Autogenerated by .aidl now.
+        SET_POWER_MODE,              // Deprecated. Autogenerated by .aidl now.
         GET_DISPLAY_STATS,
-        GET_HDR_CAPABILITIES,    // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
-        GET_DISPLAY_COLOR_MODES, // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
-        GET_ACTIVE_COLOR_MODE,   // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
-        SET_ACTIVE_COLOR_MODE,
-        ENABLE_VSYNC_INJECTIONS,
-        INJECT_VSYNC,
-        GET_LAYER_DEBUG_INFO,
-        GET_COMPOSITION_PREFERENCE,
-        GET_COLOR_MANAGEMENT,
-        GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES,
-        SET_DISPLAY_CONTENT_SAMPLING_ENABLED,
+        GET_HDR_CAPABILITIES,       // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
+        GET_DISPLAY_COLOR_MODES,    // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
+        GET_ACTIVE_COLOR_MODE,      // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
+        SET_ACTIVE_COLOR_MODE,      // Deprecated. Autogenerated by .aidl now.
+        ENABLE_VSYNC_INJECTIONS,    // Deprecated. Autogenerated by .aidl now.
+        INJECT_VSYNC,               // Deprecated. Autogenerated by .aidl now.
+        GET_LAYER_DEBUG_INFO,       // Deprecated. Autogenerated by .aidl now.
+        GET_COMPOSITION_PREFERENCE, // Deprecated. Autogenerated by .aidl now.
+        GET_COLOR_MANAGEMENT,       // Deprecated. Autogenerated by .aidl now.
+        GET_DISPLAYED_CONTENT_SAMPLING_ATTRIBUTES, // Deprecated. Autogenerated by .aidl now.
+        SET_DISPLAY_CONTENT_SAMPLING_ENABLED,      // Deprecated. Autogenerated by .aidl now.
         GET_DISPLAYED_CONTENT_SAMPLE,
-        GET_PROTECTED_CONTENT_SUPPORT,
-        IS_WIDE_COLOR_DISPLAY, // Deprecated. Autogenerated by .aidl now.
-        GET_DISPLAY_NATIVE_PRIMARIES,
-        GET_PHYSICAL_DISPLAY_IDS, // Deprecated. Autogenerated by .aidl now.
-        ADD_REGION_SAMPLING_LISTENER,
-        REMOVE_REGION_SAMPLING_LISTENER,
-        SET_DESIRED_DISPLAY_MODE_SPECS,
-        GET_DESIRED_DISPLAY_MODE_SPECS,
-        GET_DISPLAY_BRIGHTNESS_SUPPORT, // Deprecated. Autogenerated by .aidl now.
-        SET_DISPLAY_BRIGHTNESS,         // Deprecated. Autogenerated by .aidl now.
-        CAPTURE_DISPLAY_BY_ID,          // Deprecated. Autogenerated by .aidl now.
-        NOTIFY_POWER_BOOST,             // Deprecated. Autogenerated by .aidl now.
+        GET_PROTECTED_CONTENT_SUPPORT,   // Deprecated. Autogenerated by .aidl now.
+        IS_WIDE_COLOR_DISPLAY,           // Deprecated. Autogenerated by .aidl now.
+        GET_DISPLAY_NATIVE_PRIMARIES,    // Deprecated. Autogenerated by .aidl now.
+        GET_PHYSICAL_DISPLAY_IDS,        // Deprecated. Autogenerated by .aidl now.
+        ADD_REGION_SAMPLING_LISTENER,    // Deprecated. Autogenerated by .aidl now.
+        REMOVE_REGION_SAMPLING_LISTENER, // Deprecated. Autogenerated by .aidl now.
+        SET_DESIRED_DISPLAY_MODE_SPECS,  // Deprecated. Autogenerated by .aidl now.
+        GET_DESIRED_DISPLAY_MODE_SPECS,  // Deprecated. Autogenerated by .aidl now.
+        GET_DISPLAY_BRIGHTNESS_SUPPORT,  // Deprecated. Autogenerated by .aidl now.
+        SET_DISPLAY_BRIGHTNESS,          // Deprecated. Autogenerated by .aidl now.
+        CAPTURE_DISPLAY_BY_ID,           // Deprecated. Autogenerated by .aidl now.
+        NOTIFY_POWER_BOOST,              // Deprecated. Autogenerated by .aidl now.
         SET_GLOBAL_SHADOW_SETTINGS,
         GET_AUTO_LOW_LATENCY_MODE_SUPPORT, // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
         SET_AUTO_LOW_LATENCY_MODE,         // Deprecated. Autogenerated by .aidl now.
         GET_GAME_CONTENT_TYPE_SUPPORT,     // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
         SET_GAME_CONTENT_TYPE,             // Deprecated. Use GET_DYNAMIC_DISPLAY_INFO instead.
-        SET_FRAME_RATE,
+        SET_FRAME_RATE,                    // Deprecated. Autogenerated by .aidl now.
         // Deprecated. Use DisplayManager.setShouldAlwaysRespectAppRequestedMode(true);
         ACQUIRE_FRAME_RATE_FLEXIBILITY_TOKEN,
-        SET_FRAME_TIMELINE_INFO,
-        ADD_TRANSACTION_TRACE_LISTENER,
+        SET_FRAME_TIMELINE_INFO,        // Deprecated. Autogenerated by .aidl now.
+        ADD_TRANSACTION_TRACE_LISTENER, // Deprecated. Autogenerated by .aidl now.
         GET_GPU_CONTEXT_PRIORITY,
         GET_MAX_ACQUIRED_BUFFER_COUNT,
-        GET_DYNAMIC_DISPLAY_INFO,
-        ADD_FPS_LISTENER,
-        REMOVE_FPS_LISTENER,
-        OVERRIDE_HDR_TYPES,
-        ADD_HDR_LAYER_INFO_LISTENER,    // Deprecated. Autogenerated by .aidl now.
-        REMOVE_HDR_LAYER_INFO_LISTENER, // Deprecated. Autogenerated by .aidl now.
-        ON_PULL_ATOM,
-        ADD_TUNNEL_MODE_ENABLED_LISTENER,
-        REMOVE_TUNNEL_MODE_ENABLED_LISTENER,
-        ADD_WINDOW_INFOS_LISTENER,
-        REMOVE_WINDOW_INFOS_LISTENER,
-        GET_PRIMARY_PHYSICAL_DISPLAY_ID, // Deprecated. Autogenerated by .aidl now.
+        GET_DYNAMIC_DISPLAY_INFO,            // Deprecated. Autogenerated by .aidl now.
+        ADD_FPS_LISTENER,                    // Deprecated. Autogenerated by .aidl now.
+        REMOVE_FPS_LISTENER,                 // Deprecated. Autogenerated by .aidl now.
+        OVERRIDE_HDR_TYPES,                  // Deprecated. Autogenerated by .aidl now.
+        ADD_HDR_LAYER_INFO_LISTENER,         // Deprecated. Autogenerated by .aidl now.
+        REMOVE_HDR_LAYER_INFO_LISTENER,      // Deprecated. Autogenerated by .aidl now.
+        ON_PULL_ATOM,                        // Deprecated. Autogenerated by .aidl now.
+        ADD_TUNNEL_MODE_ENABLED_LISTENER,    // Deprecated. Autogenerated by .aidl now.
+        REMOVE_TUNNEL_MODE_ENABLED_LISTENER, // Deprecated. Autogenerated by .aidl now.
+        ADD_WINDOW_INFOS_LISTENER,           // Deprecated. Autogenerated by .aidl now.
+        REMOVE_WINDOW_INFOS_LISTENER,        // Deprecated. Autogenerated by .aidl now.
+        GET_PRIMARY_PHYSICAL_DISPLAY_ID,     // Deprecated. Autogenerated by .aidl now.
         GET_DISPLAY_DECORATION_SUPPORT,
         GET_BOOT_DISPLAY_MODE_SUPPORT, // Deprecated. Autogenerated by .aidl now.
-        SET_BOOT_DISPLAY_MODE,
-        CLEAR_BOOT_DISPLAY_MODE, // Deprecated. Autogenerated by .aidl now.
-        SET_OVERRIDE_FRAME_RATE,
+        SET_BOOT_DISPLAY_MODE,         // Deprecated. Autogenerated by .aidl now.
+        CLEAR_BOOT_DISPLAY_MODE,       // Deprecated. Autogenerated by .aidl now.
+        SET_OVERRIDE_FRAME_RATE,       // Deprecated. Autogenerated by .aidl now.
         // Always append new enum to the end.
     };
 
diff --git a/libs/gui/include/gui/ISurfaceComposerClient.h b/libs/gui/include/gui/ISurfaceComposerClient.h
deleted file mode 100644
index 9e9e191..0000000
--- a/libs/gui/include/gui/ISurfaceComposerClient.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <binder/IInterface.h>
-#include <binder/SafeInterface.h>
-#include <gui/LayerMetadata.h>
-#include <ui/PixelFormat.h>
-
-#include <unordered_map>
-
-namespace android {
-
-class FrameStats;
-class IGraphicBufferProducer;
-
-class ISurfaceComposerClient : public IInterface {
-public:
-    DECLARE_META_INTERFACE(SurfaceComposerClient)
-
-    // flags for createSurface()
-    enum { // (keep in sync with SurfaceControl.java)
-        eHidden = 0x00000004,
-        eDestroyBackbuffer = 0x00000020,
-        eSkipScreenshot = 0x00000040,
-        eSecure = 0x00000080,
-        eNonPremultiplied = 0x00000100,
-        eOpaque = 0x00000400,
-        eProtectedByApp = 0x00000800,
-        eProtectedByDRM = 0x00001000,
-        eCursorWindow = 0x00002000,
-        eNoColorFill = 0x00004000,
-
-        eFXSurfaceBufferQueue = 0x00000000,
-        eFXSurfaceEffect = 0x00020000,
-        eFXSurfaceBufferState = 0x00040000,
-        eFXSurfaceContainer = 0x00080000,
-        eFXSurfaceMask = 0x000F0000,
-    };
-
-    // TODO(b/172002646):  Clean up the Surface Creation Arguments
-    /*
-     * Requires ACCESS_SURFACE_FLINGER permission
-     */
-    virtual status_t createSurface(const String8& name, uint32_t w, uint32_t h, PixelFormat format,
-                                   uint32_t flags, const sp<IBinder>& parent,
-                                   LayerMetadata metadata, sp<IBinder>* handle,
-                                   sp<IGraphicBufferProducer>* gbp, int32_t* outLayerId,
-                                   uint32_t* outTransformHint) = 0;
-
-    /*
-     * Requires ACCESS_SURFACE_FLINGER permission
-     */
-    virtual status_t createWithSurfaceParent(const String8& name, uint32_t w, uint32_t h,
-                                             PixelFormat format, uint32_t flags,
-                                             const sp<IGraphicBufferProducer>& parent,
-                                             LayerMetadata metadata, sp<IBinder>* handle,
-                                             sp<IGraphicBufferProducer>* gbp, int32_t* outLayerId,
-                                             uint32_t* outTransformHint) = 0;
-
-    /*
-     * Requires ACCESS_SURFACE_FLINGER permission
-     */
-    virtual status_t clearLayerFrameStats(const sp<IBinder>& handle) const = 0;
-
-    /*
-     * Requires ACCESS_SURFACE_FLINGER permission
-     */
-    virtual status_t getLayerFrameStats(const sp<IBinder>& handle, FrameStats* outStats) const = 0;
-
-    virtual status_t mirrorSurface(const sp<IBinder>& mirrorFromHandle, sp<IBinder>* outHandle,
-                                   int32_t* outLayerId) = 0;
-};
-
-class BnSurfaceComposerClient : public SafeBnInterface<ISurfaceComposerClient> {
-public:
-    BnSurfaceComposerClient()
-          : SafeBnInterface<ISurfaceComposerClient>("BnSurfaceComposerClient") {}
-
-    status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
-};
-
-} // namespace android
diff --git a/libs/gui/include/gui/ITransactionCompletedListener.h b/libs/gui/include/gui/ITransactionCompletedListener.h
index cc136bb..39bcb4a 100644
--- a/libs/gui/include/gui/ITransactionCompletedListener.h
+++ b/libs/gui/include/gui/ITransactionCompletedListener.h
@@ -40,10 +40,15 @@
 class CallbackId : public Parcelable {
 public:
     int64_t id;
-    enum class Type : int32_t { ON_COMPLETE, ON_COMMIT } type;
+    enum class Type : int32_t {
+        ON_COMPLETE = 0,
+        ON_COMMIT = 1,
+        /*reserved for serialization = 2*/
+    } type;
+    bool includeJankData; // Only respected for ON_COMPLETE callbacks.
 
     CallbackId() {}
-    CallbackId(int64_t id, Type type) : id(id), type(type) {}
+    CallbackId(int64_t id, Type type) : id(id), type(type), includeJankData(false) {}
     status_t writeToParcel(Parcel* output) const override;
     status_t readFromParcel(const Parcel* input) override;
 
@@ -132,7 +137,7 @@
 
     SurfaceStats() = default;
     SurfaceStats(const sp<IBinder>& sc, std::variant<nsecs_t, sp<Fence>> acquireTimeOrFence,
-                 const sp<Fence>& prevReleaseFence, uint32_t hint,
+                 const sp<Fence>& prevReleaseFence, std::optional<uint32_t> hint,
                  uint32_t currentMaxAcquiredBuffersCount, FrameEventHistoryStats frameEventStats,
                  std::vector<JankData> jankData, ReleaseCallbackId previousReleaseCallbackId)
           : surfaceControl(sc),
@@ -147,7 +152,7 @@
     sp<IBinder> surfaceControl;
     std::variant<nsecs_t, sp<Fence>> acquireTimeOrFence = -1;
     sp<Fence> previousReleaseFence;
-    uint32_t transformHint = 0;
+    std::optional<uint32_t> transformHint = 0;
     uint32_t currentMaxAcquiredBufferCount = 0;
     FrameEventHistoryStats eventStats;
     std::vector<JankData> jankData;
@@ -194,7 +199,10 @@
 
     virtual void onReleaseBuffer(ReleaseCallbackId callbackId, sp<Fence> releaseFence,
                                  uint32_t currentMaxAcquiredBufferCount) = 0;
-    virtual void onTransactionQueueStalled() = 0;
+
+    virtual void onTransactionQueueStalled(const String8& name) = 0;
+
+    virtual void onTrustedPresentationChanged(int id, bool inTrustedPresentationState) = 0;
 };
 
 class BnTransactionCompletedListener : public SafeBnInterface<ITransactionCompletedListener> {
diff --git a/libs/gui/include/gui/JankInfo.h b/libs/gui/include/gui/JankInfo.h
index ce9716f..1dddeba 100644
--- a/libs/gui/include/gui/JankInfo.h
+++ b/libs/gui/include/gui/JankInfo.h
@@ -24,9 +24,9 @@
     None = 0x0,
     // Jank that occurs in the layers below SurfaceFlinger
     DisplayHAL = 0x1,
-    // SF took too long on the CPU
+    // SF took too long on the CPU; deadline missed during HWC
     SurfaceFlingerCpuDeadlineMissed = 0x2,
-    // SF took too long on the GPU
+    // SF took too long on the GPU; deadline missed during GPU composition
     SurfaceFlingerGpuDeadlineMissed = 0x4,
     // Either App or GPU took too long on the frame
     AppDeadlineMissed = 0x8,
diff --git a/libs/gui/include/gui/LayerCaptureArgs.h b/libs/gui/include/gui/LayerCaptureArgs.h
index 05ff9d5..fae2bcc 100644
--- a/libs/gui/include/gui/LayerCaptureArgs.h
+++ b/libs/gui/include/gui/LayerCaptureArgs.h
@@ -20,14 +20,11 @@
 #include <sys/types.h>
 
 #include <gui/DisplayCaptureArgs.h>
-#include <gui/SpHash.h>
-#include <unordered_set>
 
 namespace android::gui {
 
 struct LayerCaptureArgs : CaptureArgs {
     sp<IBinder> layerHandle;
-    std::unordered_set<sp<IBinder>, SpHash<IBinder>> excludeHandles;
     bool childrenOnly{false};
 
     status_t writeToParcel(Parcel* output) const override;
diff --git a/libs/gui/include/gui/LayerDebugInfo.h b/libs/gui/include/gui/LayerDebugInfo.h
index af834d7..dbb80e5 100644
--- a/libs/gui/include/gui/LayerDebugInfo.h
+++ b/libs/gui/include/gui/LayerDebugInfo.h
@@ -25,7 +25,7 @@
 #include <string>
 #include <math/vec4.h>
 
-namespace android {
+namespace android::gui {
 
 /* Class for transporting debug info from SurfaceFlinger to authorized
  * recipients.  The class is intended to be a data container. There are
@@ -52,7 +52,7 @@
     uint32_t mZ = 0 ;
     int32_t mWidth = -1;
     int32_t mHeight = -1;
-    Rect mCrop = Rect::INVALID_RECT;
+    android::Rect mCrop = android::Rect::INVALID_RECT;
     half4 mColor = half4(1.0_hf, 1.0_hf, 1.0_hf, 0.0_hf);
     uint32_t mFlags = 0;
     PixelFormat mPixelFormat = PIXEL_FORMAT_NONE;
@@ -71,4 +71,4 @@
 
 std::string to_string(const LayerDebugInfo& info);
 
-} // namespace android
+} // namespace android::gui
diff --git a/libs/gui/include/gui/LayerMetadata.h b/libs/gui/include/gui/LayerMetadata.h
index 27f4d37..9cf62bc 100644
--- a/libs/gui/include/gui/LayerMetadata.h
+++ b/libs/gui/include/gui/LayerMetadata.h
@@ -20,7 +20,7 @@
 
 #include <unordered_map>
 
-namespace android {
+namespace android::gui {
 
 enum {
     METADATA_OWNER_UID = 1,
@@ -30,7 +30,8 @@
     METADATA_ACCESSIBILITY_ID = 5,
     METADATA_OWNER_PID = 6,
     METADATA_DEQUEUE_TIME = 7,
-    METADATA_GAME_MODE = 8
+    METADATA_GAME_MODE = 8,
+    METADATA_CALLING_UID = 9,
 };
 
 struct LayerMetadata : public Parcelable {
@@ -65,8 +66,18 @@
     Standard = 1,
     Performance = 2,
     Battery = 3,
+    Custom = 4,
 
-    ftl_last = Battery
+    ftl_last = Custom
 };
 
-} // namespace android
+} // namespace android::gui
+
+using android::gui::METADATA_ACCESSIBILITY_ID;
+using android::gui::METADATA_DEQUEUE_TIME;
+using android::gui::METADATA_GAME_MODE;
+using android::gui::METADATA_MOUSE_CURSOR;
+using android::gui::METADATA_OWNER_PID;
+using android::gui::METADATA_OWNER_UID;
+using android::gui::METADATA_TASK_ID;
+using android::gui::METADATA_WINDOW_TYPE;
diff --git a/libs/gui/include/gui/LayerState.h b/libs/gui/include/gui/LayerState.h
index 0071d48..a6f503e 100644
--- a/libs/gui/include/gui/LayerState.h
+++ b/libs/gui/include/gui/LayerState.h
@@ -21,6 +21,8 @@
 #include <stdint.h>
 #include <sys/types.h>
 
+#include <android/gui/IWindowInfosReportedListener.h>
+#include <android/gui/TrustedPresentationThresholds.h>
 #include <android/native_window.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/ITransactionCompletedListener.h>
@@ -51,7 +53,11 @@
 namespace android {
 
 class Parcel;
-class ISurfaceComposerClient;
+
+using gui::ISurfaceComposerClient;
+using gui::LayerMetadata;
+
+using gui::TrustedPresentationThresholds;
 
 struct client_cache_t {
     wp<IBinder> token = nullptr;
@@ -62,6 +68,19 @@
     bool isValid() const { return token != nullptr; }
 };
 
+class TrustedPresentationListener : public Parcelable {
+public:
+    sp<ITransactionCompletedListener> callbackInterface;
+    int callbackId = -1;
+
+    void invoke(bool presentedWithinThresholds) {
+        callbackInterface->onTrustedPresentationChanged(callbackId, presentedWithinThresholds);
+    }
+
+    status_t writeToParcel(Parcel* parcel) const;
+    status_t readFromParcel(const Parcel* parcel);
+};
+
 class BufferData : public Parcelable {
 public:
     virtual ~BufferData() = default;
@@ -92,6 +111,7 @@
     uint64_t frameNumber = 0;
     bool hasBarrier = false;
     uint64_t barrierFrameNumber = 0;
+    uint32_t producerId = 0;
 
     // Listens to when the buffer is safe to be released. This is used for blast
     // layers only. The callback includes a release fence as well as the graphic
@@ -130,7 +150,7 @@
         eLayerOpaque = 0x02,         // SURFACE_OPAQUE
         eLayerSkipScreenshot = 0x40, // SKIP_SCREENSHOT
         eLayerSecure = 0x80,         // SECURE
-        // Queue up BufferStateLayer buffers instead of dropping the oldest buffer when this flag is
+        // Queue up layer buffers instead of dropping the oldest buffer when this flag is
         // set. This blocks the client until all the buffers have been presented. If the buffers
         // have presentation timestamps, then we may drop buffers.
         eEnableBackpressure = 0x100,       // ENABLE_BACKPRESSURE
@@ -140,30 +160,33 @@
         // This is needed to maintain compatibility for SurfaceView scaling behavior.
         // See SurfaceView scaling behavior for more details.
         eIgnoreDestinationFrame = 0x400,
+        eLayerIsRefreshRateIndicator = 0x800, // REFRESH_RATE_INDICATOR
     };
 
     enum {
         ePositionChanged = 0x00000001,
         eLayerChanged = 0x00000002,
-        eSizeChanged = 0x00000004,
+        eTrustedPresentationInfoChanged = 0x00000004,
         eAlphaChanged = 0x00000008,
         eMatrixChanged = 0x00000010,
         eTransparentRegionChanged = 0x00000020,
         eFlagsChanged = 0x00000040,
         eLayerStackChanged = 0x00000080,
+        eFlushJankData = 0x00000100,
+        eCachingHintChanged = 0x00000200,
         eDimmingEnabledChanged = 0x00000400,
         eShadowRadiusChanged = 0x00000800,
-        /* unused 0x00001000, */
+        eRenderBorderChanged = 0x00001000,
         eBufferCropChanged = 0x00002000,
         eRelativeLayerChanged = 0x00004000,
         eReparent = 0x00008000,
         eColorChanged = 0x00010000,
-        eDestroySurface = 0x00020000,
-        eTransformChanged = 0x00040000,
+        /* unused = 0x00020000, */
+        eBufferTransformChanged = 0x00040000,
         eTransformToDisplayInverseChanged = 0x00080000,
         eCropChanged = 0x00100000,
         eBufferChanged = 0x00200000,
-        /* unused 0x00400000, */
+        eDefaultFrameRateCompatibilityChanged = 0x00400000,
         eDataspaceChanged = 0x00800000,
         eHdrMetadataChanged = 0x01000000,
         eSurfaceDamageRegionChanged = 0x02000000,
@@ -188,7 +211,9 @@
         eAutoRefreshChanged = 0x1000'00000000,
         eStretchChanged = 0x2000'00000000,
         eTrustedOverlayChanged = 0x4000'00000000,
-        eDropInputModeChanged = 0x8000'00000000
+        eDropInputModeChanged = 0x8000'00000000,
+        eExtendedRangeBrightnessChanged = 0x10000'00000000,
+
     };
 
     layer_state_t();
@@ -196,7 +221,63 @@
     void merge(const layer_state_t& other);
     status_t write(Parcel& output) const;
     status_t read(const Parcel& input);
+    // Compares two layer_state_t structs and returns a set of change flags describing all the
+    // states that are different.
+    uint64_t diff(const layer_state_t& other) const;
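+    // Illustrative use of diff(); `newState`, `oldState`, and `recomputeVisibleRegion` are
+    // assumed names for this sketch, not part of the header:
+    //   uint64_t changes = newState.diff(oldState);
+    //   if (changes & layer_state_t::GEOMETRY_CHANGES) recomputeVisibleRegion();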
     bool hasBufferChanges() const;
+
+    // Layer hierarchy updates.
+    static constexpr uint64_t HIERARCHY_CHANGES = layer_state_t::eLayerChanged |
+            layer_state_t::eRelativeLayerChanged | layer_state_t::eReparent |
+            layer_state_t::eLayerStackChanged;
+
+    // Geometry updates.
+    static constexpr uint64_t GEOMETRY_CHANGES = layer_state_t::eBufferCropChanged |
+            layer_state_t::eBufferTransformChanged | layer_state_t::eCornerRadiusChanged |
+            layer_state_t::eCropChanged | layer_state_t::eDestinationFrameChanged |
+            layer_state_t::eMatrixChanged | layer_state_t::ePositionChanged |
+            layer_state_t::eTransformToDisplayInverseChanged |
+            layer_state_t::eTransparentRegionChanged;
+
+    // Buffer and related updates.
+    static constexpr uint64_t BUFFER_CHANGES = layer_state_t::eApiChanged |
+            layer_state_t::eBufferChanged | layer_state_t::eBufferCropChanged |
+            layer_state_t::eBufferTransformChanged | layer_state_t::eDataspaceChanged |
+            layer_state_t::eSidebandStreamChanged | layer_state_t::eSurfaceDamageRegionChanged |
+            layer_state_t::eTransformToDisplayInverseChanged |
+            layer_state_t::eTransparentRegionChanged |
+            layer_state_t::eExtendedRangeBrightnessChanged;
+
+    // Content updates.
+    static constexpr uint64_t CONTENT_CHANGES = layer_state_t::BUFFER_CHANGES |
+            layer_state_t::eAlphaChanged | layer_state_t::eAutoRefreshChanged |
+            layer_state_t::eBackgroundBlurRadiusChanged | layer_state_t::eBackgroundColorChanged |
+            layer_state_t::eBlurRegionsChanged | layer_state_t::eColorChanged |
+            layer_state_t::eColorSpaceAgnosticChanged | layer_state_t::eColorTransformChanged |
+            layer_state_t::eCornerRadiusChanged | layer_state_t::eDimmingEnabledChanged |
+            layer_state_t::eHdrMetadataChanged | layer_state_t::eRenderBorderChanged |
+            layer_state_t::eShadowRadiusChanged | layer_state_t::eStretchChanged;
+
+    // Changes which invalidate the layer's visible region in CE.
+    static constexpr uint64_t CONTENT_DIRTY = layer_state_t::CONTENT_CHANGES |
+            layer_state_t::GEOMETRY_CHANGES | layer_state_t::HIERARCHY_CHANGES;
+
+    // Changes affecting child states.
+    static constexpr uint64_t AFFECTS_CHILDREN = layer_state_t::GEOMETRY_CHANGES |
+            layer_state_t::HIERARCHY_CHANGES | layer_state_t::eAlphaChanged |
+            layer_state_t::eColorTransformChanged | layer_state_t::eCornerRadiusChanged |
+            layer_state_t::eFlagsChanged | layer_state_t::eTrustedOverlayChanged |
+            layer_state_t::eFrameRateChanged | layer_state_t::eFixedTransformHintChanged;
+
+    // Changes affecting data sent to input.
+    static constexpr uint64_t INPUT_CHANGES = layer_state_t::GEOMETRY_CHANGES |
+            layer_state_t::HIERARCHY_CHANGES | layer_state_t::eInputInfoChanged |
+            layer_state_t::eDropInputModeChanged | layer_state_t::eTrustedOverlayChanged;
+
+    // Changes that affect the visible region on a display.
+    static constexpr uint64_t VISIBLE_REGION_CHANGES =
+            layer_state_t::GEOMETRY_CHANGES | layer_state_t::HIERARCHY_CHANGES;
+
     bool hasValidBuffer() const;
     void sanitize(int32_t permissions);
 
@@ -207,6 +288,11 @@
         float dsdy{0};
         status_t write(Parcel& output) const;
         status_t read(const Parcel& input);
+        inline bool operator==(const matrix22_t& other) const {
+            return std::tie(dsdx, dtdx, dtdy, dsdy) ==
+                    std::tie(other.dsdx, other.dtdx, other.dtdy, other.dsdy);
+        }
+        inline bool operator!=(const matrix22_t& other) const { return !(*this == other); }
     };
     sp<IBinder> surface;
     int32_t layerId;
@@ -214,28 +300,23 @@
     float x;
     float y;
     int32_t z;
-    uint32_t w;
-    uint32_t h;
     ui::LayerStack layerStack = ui::DEFAULT_LAYER_STACK;
-    float alpha;
     uint32_t flags;
     uint32_t mask;
     uint8_t reserved;
     matrix22_t matrix;
     float cornerRadius;
     uint32_t backgroundBlurRadius;
-    sp<SurfaceControl> reparentSurfaceControl;
 
     sp<SurfaceControl> relativeLayerSurfaceControl;
 
     sp<SurfaceControl> parentSurfaceControlForChild;
 
-    half3 color;
+    half4 color;
 
     // non POD must be last. see write/read
     Region transparentRegion;
-
-    uint32_t transform;
+    uint32_t bufferTransform;
     bool transformToDisplayInverse;
     Rect crop;
     std::shared_ptr<BufferData> bufferData = nullptr;
@@ -247,13 +328,13 @@
     mat4 colorTransform;
     std::vector<BlurRegion> blurRegions;
 
-    sp<gui::WindowInfoHandle> windowInfoHandle = new gui::WindowInfoHandle();
+    sp<gui::WindowInfoHandle> windowInfoHandle = sp<gui::WindowInfoHandle>::make();
 
     LayerMetadata metadata;
 
     // The following refer to the alpha, and dataspace, respectively of
     // the background color layer
-    float bgColorAlpha;
+    half4 bgColor;
     ui::Dataspace bgColorDataspace;
 
     // A color space agnostic layer means the color of this layer can be
@@ -273,6 +354,9 @@
     int8_t frameRateCompatibility;
     int8_t changeFrameRateStrategy;
 
+    // Default frame rate compatibility used to set the layer refresh rate vote type.
+    int8_t defaultFrameRateCompatibility;
+
     // Set by window manager indicating the layer and all its children are
     // in a different orientation than the display. The hint suggests that
     // the graphic producers should receive a transform hint as if the
@@ -291,6 +375,11 @@
     // should be trusted for input occlusion detection purposes
     bool isTrustedOverlay;
 
+    // Flag to indicate if border needs to be enabled on the layer
+    bool borderEnabled;
+    float borderWidth;
+    half4 borderColor;
+
     // Stretch effect to be applied to this layer
     StretchEffect stretchEffect;
 
@@ -301,9 +390,17 @@
     gui::DropInputMode dropInputMode;
 
     bool dimmingEnabled;
+    float currentHdrSdrRatio = 1.f;
+    float desiredHdrSdrRatio = 1.f;
+
+    gui::CachingHint cachingHint = gui::CachingHint::Enabled;
+
+    TrustedPresentationThresholds trustedPresentationThresholds;
+    TrustedPresentationListener trustedPresentationListener;
 };
 
-struct ComposerState {
+class ComposerState {
+public:
     layer_state_t state;
     status_t write(Parcel& output) const;
     status_t read(const Parcel& input);
@@ -353,7 +450,9 @@
 
 struct InputWindowCommands {
     std::vector<gui::FocusRequest> focusRequests;
-    bool syncInputWindows{false};
+    std::unordered_set<sp<gui::IWindowInfosReportedListener>,
+                       SpHash<gui::IWindowInfosReportedListener>>
+            windowInfosReportedListeners;
 
     // Merges the passed in commands and returns true if there were any changes.
     bool merge(const InputWindowCommands& other);
diff --git a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl b/libs/gui/include/gui/LayerStatePermissions.h
similarity index 62%
copy from libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
copy to libs/gui/include/gui/LayerStatePermissions.h
index 6929a6c..a90f30c 100644
--- a/libs/binder/aidl/android/content/pm/IPackageChangeObserver.aidl
+++ b/libs/gui/include/gui/LayerStatePermissions.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020 The Android Open Source Project
+ * Copyright (C) 2023 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,16 @@
  * limitations under the License.
  */
 
-package android.content.pm;
+#include <stdint.h>
+#include <string>
+#include <unordered_map>
 
-import android.content.pm.PackageChangeEvent;
+namespace android {
+class LayerStatePermissions {
+public:
+    static uint32_t getTransactionPermissions(int pid, int uid);
 
-/**
- * This is a non-blocking notification when a package has changed.
- *
- * @hide
- */
-oneway interface IPackageChangeObserver {
-  void onPackageChanged(in PackageChangeEvent event);
-}
+private:
+    static std::unordered_map<std::string, int> mPermissionMap;
+};
+} // namespace android
\ No newline at end of file
diff --git a/libs/gui/include/gui/ScreenCaptureResults.h b/libs/gui/include/gui/ScreenCaptureResults.h
index 724c11c..6e17791 100644
--- a/libs/gui/include/gui/ScreenCaptureResults.h
+++ b/libs/gui/include/gui/ScreenCaptureResults.h
@@ -19,6 +19,7 @@
 #include <binder/Parcel.h>
 #include <binder/Parcelable.h>
 #include <ui/Fence.h>
+#include <ui/FenceResult.h>
 #include <ui/GraphicBuffer.h>
 
 namespace android::gui {
@@ -31,11 +32,10 @@
     status_t readFromParcel(const android::Parcel* parcel) override;
 
     sp<GraphicBuffer> buffer;
-    sp<Fence> fence = Fence::NO_FENCE;
+    FenceResult fenceResult = Fence::NO_FENCE;
     bool capturedSecureLayers{false};
     bool capturedHdrLayers{false};
     ui::Dataspace capturedDataspace{ui::Dataspace::V0_SRGB};
-    status_t result = OK;
 };
 
 } // namespace android::gui
diff --git a/libs/gui/include/gui/Surface.h b/libs/gui/include/gui/Surface.h
index 4a552b6..39a59e4 100644
--- a/libs/gui/include/gui/Surface.h
+++ b/libs/gui/include/gui/Surface.h
@@ -17,8 +17,8 @@
 #ifndef ANDROID_GUI_SURFACE_H
 #define ANDROID_GUI_SURFACE_H
 
+#include <android/gui/FrameTimelineInfo.h>
 #include <gui/BufferQueueDefs.h>
-#include <gui/FrameTimelineInfo.h>
 #include <gui/HdrMetadata.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/IProducerListener.h>
@@ -41,6 +41,8 @@
 
 class ISurfaceComposer;
 
+using gui::FrameTimelineInfo;
+
 /* This is the same as ProducerListener except that onBuffersDiscarded is
  * called with a vector of graphic buffers instead of buffer slots.
  */
@@ -203,8 +205,8 @@
             nsecs_t* outDisplayPresentTime, nsecs_t* outDequeueReadyTime,
             nsecs_t* outReleaseTime);
 
-    status_t getWideColorSupport(bool* supported);
-    status_t getHdrSupport(bool* supported);
+    status_t getWideColorSupport(bool* supported) __attribute__((__deprecated__));
+    status_t getHdrSupport(bool* supported) __attribute__((__deprecated__));
 
     status_t getUniqueId(uint64_t* outId) const;
     status_t getConsumerUsage(uint64_t* outUsage) const;
@@ -301,6 +303,10 @@
     int dispatchGetLastQueuedBuffer2(va_list args);
     int dispatchSetFrameTimelineInfo(va_list args);
 
+    std::mutex mNameMutex;
+    std::string mName;
+    const char* getDebugName();
+
 protected:
     virtual int dequeueBuffer(ANativeWindowBuffer** buffer, int* fenceFd);
     virtual int cancelBuffer(ANativeWindowBuffer* buffer, int fenceFd);
diff --git a/libs/gui/include/gui/SurfaceComposerClient.h b/libs/gui/include/gui/SurfaceComposerClient.h
index 9033e17..fb57f63 100644
--- a/libs/gui/include/gui/SurfaceComposerClient.h
+++ b/libs/gui/include/gui/SurfaceComposerClient.h
@@ -38,6 +38,9 @@
 #include <ui/GraphicTypes.h>
 #include <ui/PixelFormat.h>
 #include <ui/Rotation.h>
+#include <ui/StaticDisplayInfo.h>
+
+#include <android/gui/ISurfaceComposerClient.h>
 
 #include <gui/CpuConsumer.h>
 #include <gui/ISurfaceComposer.h>
@@ -52,20 +55,22 @@
 namespace android {
 
 class HdrCapabilities;
-class ISurfaceComposerClient;
 class IGraphicBufferProducer;
 class ITunnelModeEnabledListener;
 class Region;
+class TransactionCompletedListener;
 
 using gui::DisplayCaptureArgs;
 using gui::IRegionSamplingListener;
+using gui::ISurfaceComposerClient;
 using gui::LayerCaptureArgs;
+using gui::LayerMetadata;
 
 struct SurfaceControlStats {
     SurfaceControlStats(const sp<SurfaceControl>& sc, nsecs_t latchTime,
                         std::variant<nsecs_t, sp<Fence>> acquireTimeOrFence,
                         const sp<Fence>& presentFence, const sp<Fence>& prevReleaseFence,
-                        uint32_t hint, FrameEventHistoryStats eventStats,
+                        std::optional<uint32_t> hint, FrameEventHistoryStats eventStats,
                         uint32_t currentMaxAcquiredBufferCount)
           : surfaceControl(sc),
             latchTime(latchTime),
@@ -81,7 +86,7 @@
     std::variant<nsecs_t, sp<Fence>> acquireTimeOrFence = -1;
     sp<Fence> presentFence;
     sp<Fence> previousReleaseFence;
-    uint32_t transformHint = 0;
+    std::optional<uint32_t> transformHint = 0;
     FrameEventHistoryStats frameEventStats;
     uint32_t currentMaxAcquiredBufferCount = 0;
 };
@@ -102,6 +107,8 @@
                            const sp<Fence>& /*presentFence*/,
                            const SurfaceStats& /*stats*/)>;
 
+using TrustedPresentationCallback = std::function<void(void*, bool)>;
+
 // ---------------------------------------------------------------------------
 
 class ReleaseCallbackThread {
@@ -141,32 +148,28 @@
     status_t linkToComposerDeath(const sp<IBinder::DeathRecipient>& recipient,
             void* cookie = nullptr, uint32_t flags = 0);
 
+    // Notify the SurfaceComposerClient that the boot procedure has completed
+    static status_t bootFinished();
+
     // Get transactional state of given display.
     static status_t getDisplayState(const sp<IBinder>& display, ui::DisplayState*);
 
     // Get immutable information about given physical display.
-    static status_t getStaticDisplayInfo(const sp<IBinder>& display, ui::StaticDisplayInfo*);
+    static status_t getStaticDisplayInfo(int64_t, ui::StaticDisplayInfo*);
 
-    // Get dynamic information about given physical display.
-    static status_t getDynamicDisplayInfo(const sp<IBinder>& display, ui::DynamicDisplayInfo*);
+    // Get dynamic information about given physical display from display id
+    static status_t getDynamicDisplayInfoFromId(int64_t, ui::DynamicDisplayInfo*);
 
     // Shorthand for the active display mode from getDynamicDisplayInfo().
     // TODO(b/180391891): Update clients to use getDynamicDisplayInfo and remove this function.
     static status_t getActiveDisplayMode(const sp<IBinder>& display, ui::DisplayMode*);
 
     // Sets the refresh rate boundaries for the display.
-    static status_t setDesiredDisplayModeSpecs(
-            const sp<IBinder>& displayToken, ui::DisplayModeId defaultMode,
-            bool allowGroupSwitching, float primaryRefreshRateMin, float primaryRefreshRateMax,
-            float appRequestRefreshRateMin, float appRequestRefreshRateMax);
+    static status_t setDesiredDisplayModeSpecs(const sp<IBinder>& displayToken,
+                                               const gui::DisplayModeSpecs&);
     // Gets the refresh rate boundaries for the display.
     static status_t getDesiredDisplayModeSpecs(const sp<IBinder>& displayToken,
-                                               ui::DisplayModeId* outDefaultMode,
-                                               bool* outAllowGroupSwitching,
-                                               float* outPrimaryRefreshRateMin,
-                                               float* outPrimaryRefreshRateMax,
-                                               float* outAppRequestRefreshRateMin,
-                                               float* outAppRequestRefreshRateMax);
+                                               gui::DisplayModeSpecs*);
 
     // Get the coordinates of the display's native color primaries
     static status_t getDisplayNativePrimaries(const sp<IBinder>& display,
@@ -178,11 +181,24 @@
 
     // Gets if boot display mode operations are supported on a device
     static status_t getBootDisplayModeSupport(bool* support);
+
+    // Gets the overlay properties of the device
+    static status_t getOverlaySupport(gui::OverlayProperties* outProperties);
+
     // Sets the user-preferred display mode that a device should boot in
     static status_t setBootDisplayMode(const sp<IBinder>& display, ui::DisplayModeId);
     // Clears the user-preferred display mode
     static status_t clearBootDisplayMode(const sp<IBinder>& display);
 
+    // Gets the HDR conversion capabilities of the device
+    static status_t getHdrConversionCapabilities(std::vector<gui::HdrConversionCapability>*);
+    // Sets the HDR conversion strategy for the device. Outputs the preferred HDR output type
+    // when HdrConversionStrategy has autoAllowedHdrTypes set, and Hdr::INVALID in other cases.
+    static status_t setHdrConversionStrategy(gui::HdrConversionStrategy hdrConversionStrategy,
+                                             ui::Hdr* outPreferredHdrOutputType);
+    // Returns whether HDR conversion is supported by the device.
+    static status_t getHdrOutputConversionSupport(bool* isSupported);
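+    // Illustrative call sequence for the conversion API above; variable names are assumptions
+    // for the sketch only:
+    //   ui::Hdr preferredHdrType;
+    //   status_t err = SurfaceComposerClient::setHdrConversionStrategy(strategy, &preferredHdrType);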
+
     // Sets the frame rate of a particular app (uid). This is currently called
     // by GameManager.
     static status_t setOverrideFrameRate(uid_t uid, float frameRate);
@@ -218,7 +234,7 @@
     /**
      * Gets the context priority of surface flinger's render engine.
      */
-    static int getGPUContextPriority();
+    static int getGpuContextPriority();
 
     /**
      * Uncaches a buffer in ISurfaceComposer. It must be uncached via a transaction so that it is
@@ -314,7 +330,7 @@
                                      uint32_t w,          // width in pixel
                                      uint32_t h,          // height in pixel
                                      PixelFormat format,  // pixel-format desired
-                                     uint32_t flags = 0,  // usage flags
+                                     int32_t flags = 0,   // usage flags
                                      const sp<IBinder>& parentHandle = nullptr, // parentHandle
                                      LayerMetadata metadata = LayerMetadata(),  // metadata
                                      uint32_t* outTransformHint = nullptr);
@@ -324,21 +340,11 @@
                                   uint32_t h,          // height in pixel
                                   PixelFormat format,  // pixel-format desired
                                   sp<SurfaceControl>* outSurface,
-                                  uint32_t flags = 0,                        // usage flags
+                                  int32_t flags = 0,                         // usage flags
                                   const sp<IBinder>& parentHandle = nullptr, // parentHandle
                                   LayerMetadata metadata = LayerMetadata(),  // metadata
                                   uint32_t* outTransformHint = nullptr);
 
-    //! Create a surface
-    sp<SurfaceControl> createWithSurfaceParent(const String8& name,       // name of the surface
-                                               uint32_t w,                // width in pixel
-                                               uint32_t h,                // height in pixel
-                                               PixelFormat format,        // pixel-format desired
-                                               uint32_t flags = 0,        // usage flags
-                                               Surface* parent = nullptr, // parent
-                                               LayerMetadata metadata = LayerMetadata(), // metadata
-                                               uint32_t* outTransformHint = nullptr);
-
     // Creates a mirrored hierarchy for the mirrorFromSurface. This returns a SurfaceControl
     // which is a parent of the root of the mirrored hierarchy.
     //
@@ -350,24 +356,20 @@
     //      B               B'
     sp<SurfaceControl> mirrorSurface(SurfaceControl* mirrorFromSurface);
 
+    sp<SurfaceControl> mirrorDisplay(DisplayId displayId);
+
     //! Create a virtual display
-    static sp<IBinder> createDisplay(const String8& displayName, bool secure);
+    static sp<IBinder> createDisplay(const String8& displayName, bool secure,
+                                     float requestedRefereshRate = 0);
 
     //! Destroy a virtual display
     static void destroyDisplay(const sp<IBinder>& display);
 
     //! Get stable IDs for connected physical displays
     static std::vector<PhysicalDisplayId> getPhysicalDisplayIds();
-    static status_t getPrimaryPhysicalDisplayId(PhysicalDisplayId*);
-    static std::optional<PhysicalDisplayId> getInternalDisplayId();
 
     //! Get token for a physical display given its stable ID
     static sp<IBinder> getPhysicalDisplayToken(PhysicalDisplayId displayId);
-    static sp<IBinder> getInternalDisplayToken();
-
-    static status_t enableVSyncInjections(bool enable);
-
-    static status_t injectVSync(nsecs_t when);
 
     struct SCHash {
         std::size_t operator()(const sp<SurfaceControl>& sc) const {
@@ -396,26 +398,43 @@
         std::unordered_set<sp<SurfaceControl>, SCHash> surfaceControls;
     };
 
+    struct PresentationCallbackRAII : public RefBase {
+        sp<TransactionCompletedListener> mTcl;
+        int mId;
+        PresentationCallbackRAII(TransactionCompletedListener* tcl, int id);
+        virtual ~PresentationCallbackRAII();
+    };
+
     class Transaction : public Parcelable {
     private:
+        static sp<IBinder> sApplyToken;
         void releaseBufferIfOverwriting(const layer_state_t& state);
+        static void mergeFrameTimelineInfo(FrameTimelineInfo& t, const FrameTimelineInfo& other);
+        static void clearFrameTimelineInfo(FrameTimelineInfo& t);
 
     protected:
         std::unordered_map<sp<IBinder>, ComposerState, IBinderHash> mComposerStates;
         SortedVector<DisplayState> mDisplayStates;
         std::unordered_map<sp<ITransactionCompletedListener>, CallbackInfo, TCLHash>
                 mListenerCallbacks;
+        std::vector<client_cache_t> mUncacheBuffers;
+
+        // We keep track of the last MAX_MERGE_HISTORY_LENGTH merged transaction ids.
+        // Ordered most recently merged to least recently merged.
+        static const size_t MAX_MERGE_HISTORY_LENGTH = 10u;
+        std::vector<uint64_t> mMergedTransactionIds;
 
         uint64_t mId;
 
-        uint32_t mForceSynchronous = 0;
         uint32_t mTransactionNestCount = 0;
         bool mAnimation = false;
         bool mEarlyWakeupStart = false;
         bool mEarlyWakeupEnd = false;
 
-        // Indicates that the Transaction contains a buffer that should be cached
-        bool mContainsBuffer = false;
+        // Indicates that the Transaction may contain buffers that should be cached. The reason this
+        // is only a guess is that buffers can be removed before cache is called. This is only a
+        // hint that at some point a buffer was added to this transaction before apply was called.
+        bool mMayContainBuffer = false;
 
         // mDesiredPresentTime is the time in nanoseconds that the client would like the transaction
         // to be presented. When it is not possible to present at exactly that time, it will be
@@ -468,16 +487,17 @@
         // The id is updated every time the transaction is applied.
         uint64_t getId();
 
+        std::vector<uint64_t> getMergedTransactionIds();
+
         status_t apply(bool synchronous = false, bool oneWay = false);
         // Merge another transaction in to this one, clearing other
         // as if it had been applied.
         Transaction& merge(Transaction&& other);
         Transaction& show(const sp<SurfaceControl>& sc);
         Transaction& hide(const sp<SurfaceControl>& sc);
-        Transaction& setPosition(const sp<SurfaceControl>& sc,
-                float x, float y);
-        Transaction& setSize(const sp<SurfaceControl>& sc,
-                uint32_t w, uint32_t h);
+        Transaction& setPosition(const sp<SurfaceControl>& sc, float x, float y);
+        // b/243180033 remove once functions are not called from vendor code
+        Transaction& setSize(const sp<SurfaceControl>&, uint32_t, uint32_t) { return *this; }
         Transaction& setLayer(const sp<SurfaceControl>& sc,
                 int32_t z);
 
@@ -527,7 +547,8 @@
         Transaction& setBuffer(const sp<SurfaceControl>& sc, const sp<GraphicBuffer>& buffer,
                                const std::optional<sp<Fence>>& fence = std::nullopt,
                                const std::optional<uint64_t>& frameNumber = std::nullopt,
-                               ReleaseBufferCallback callback = nullptr);
+                               uint32_t producerId = 0, ReleaseBufferCallback callback = nullptr);
+        Transaction& unsetBuffer(const sp<SurfaceControl>& sc);
         std::shared_ptr<BufferData> getAndClearBuffer(const sp<SurfaceControl>& sc);
 
         /**
@@ -551,6 +572,9 @@
         Transaction& setBufferHasBarrier(const sp<SurfaceControl>& sc,
                                          uint64_t barrierFrameNumber);
         Transaction& setDataspace(const sp<SurfaceControl>& sc, ui::Dataspace dataspace);
+        Transaction& setExtendedRangeBrightness(const sp<SurfaceControl>& sc,
+                                                float currentBufferRatio, float desiredRatio);
+        Transaction& setCachingHint(const sp<SurfaceControl>& sc, gui::CachingHint cachingHint);
         Transaction& setHdrMetadata(const sp<SurfaceControl>& sc, const HdrMetadata& hdrMetadata);
         Transaction& setSurfaceDamageRegion(const sp<SurfaceControl>& sc,
                                             const Region& surfaceDamageRegion);
@@ -572,12 +596,67 @@
         Transaction& addTransactionCommittedCallback(
                 TransactionCompletedCallbackTakesContext callback, void* callbackContext);
 
+        /**
+         * Set a callback to receive feedback about the presentation of a layer.
+         * When the layer is presented according to the passed in Thresholds,
+         * it is said to "enter the state", and receives the callback with true.
+         * When the conditions fall out of thresholds, it is then said to leave the
+         * state.
+         *
+         * There are a few simple thresholds:
+         *    minAlpha: Lower bound on computed alpha
+         *    minFractionRendered: Lower bounds on fraction of pixels that
+         *    were rendered.
+         *    stabilityThresholdMs: A time that alpha and fraction rendered
+         *    must remain within bounds before we can "enter the state"
+         *
+         * The fraction of pixels rendered is a computation based on scale, crop
+         * and occlusion. The calculation may be somewhat counterintuitive, so we
+         * can work through an example. Imagine we have a layer with a 100x100 buffer
+         * which is occluded by (10x100) pixels on the left, and cropped by (100x10) pixels
+         * on the top. Furthermore imagine this layer is scaled by 0.9 in both dimensions.
+         * (c=crop, o=occluded, b=both, x=none)
+         *      b c c c
+         *      o x x x
+         *      o x x x
+         *      o x x x
+         *
+         * We start by computing fr=xscale*yscale=0.9*0.9=0.81, indicating
+         * that "81%" of the pixels were rendered. This corresponds to every 100
+         * source pixels being displayed in 81 pixels. This is somewhat of an abuse of
+         * language, as the information of merged pixels isn't totally lost, but
+         * we err on the conservative side.
+         *
+         * We then repeat a similar process for the crop and covered regions and
+         * accumulate the results: fr = fr * (fractionNotCropped) * (fractionNotCovered)
+         * So for this example we would get 0.9*0.9*0.9*0.9=0.65...
+         *
+         * Notice that this is not completely accurate, as we have double counted
+         * the region marked as b. However we only wanted a "lower bound" and so it
+         * is ok to err in this direction. Selection of the threshold will ultimately
+         * be somewhat arbitrary, and so some of the decisions in this API are
+         * similarly arbitrary.
+         *
+         * The caller must keep "PresentationCallbackRAII" alive, or the callback
+         * in SurfaceComposerClient will be unregistered.
+         */
+        Transaction& setTrustedPresentationCallback(const sp<SurfaceControl>& sc,
+                                                    TrustedPresentationCallback callback,
+                                                    const TrustedPresentationThresholds& thresholds,
+                                                    void* context,
+                                                    sp<PresentationCallbackRAII>& outCallbackOwner);
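+
+        // A minimal caller-side sketch of the callback above. The threshold field names follow
+        // the descriptions in the comment and, like `onTrustedChanged`, are assumptions for
+        // illustration only:
+        //   gui::TrustedPresentationThresholds thresholds; // minAlpha, minFractionRendered, ...
+        //   sp<SurfaceComposerClient::PresentationCallbackRAII> callbackOwner; // keep alive
+        //   SurfaceComposerClient::Transaction()
+        //           .setTrustedPresentationCallback(sc, onTrustedChanged, thresholds,
+        //                                           /*context=*/nullptr, callbackOwner)
+        //           .apply();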
+
+        // Clear local memory in SCC
+        Transaction& clearTrustedPresentationCallback(const sp<SurfaceControl>& sc);
+
         // ONLY FOR BLAST ADAPTER
         Transaction& notifyProducerDisconnect(const sp<SurfaceControl>& sc);
 
         Transaction& setInputWindowInfo(const sp<SurfaceControl>& sc, const gui::WindowInfo& info);
         Transaction& setFocusedWindow(const gui::FocusRequest& request);
-        Transaction& syncInputWindows();
+
+        Transaction& addWindowInfosReportedListener(
+                sp<gui::IWindowInfosReportedListener> windowInfosReportedListener);
 
         // Set a color transform matrix on the given layer on the built-in display.
         Transaction& setColorTransform(const sp<SurfaceControl>& sc, const mat3& matrix,
@@ -590,6 +669,9 @@
         Transaction& setFrameRate(const sp<SurfaceControl>& sc, float frameRate,
                                   int8_t compatibility, int8_t changeFrameRateStrategy);
 
+        Transaction& setDefaultFrameRateCompatibility(const sp<SurfaceControl>& sc,
+                                                      int8_t compatibility);
+
         // Set by window manager indicating the layer and all its children are
         // in a different orientation than the display. The hint suggests that
         // the graphic producers should receive a transform hint as if the
@@ -636,6 +718,9 @@
                                          const Rect& destinationFrame);
         Transaction& setDropInputMode(const sp<SurfaceControl>& sc, gui::DropInputMode mode);
 
+        Transaction& enableBorder(const sp<SurfaceControl>& sc, bool shouldEnable, float width,
+                                  const half4& color);
+
         status_t setDisplaySurface(const sp<IBinder>& token,
                 const sp<IGraphicBufferProducer>& bufferProducer);
 
@@ -666,7 +751,12 @@
          *
          * TODO (b/213644870): Remove all permissioned things from Transaction
          */
-        void sanitize();
+        void sanitize(int pid, int uid);
+
+        static sp<IBinder> getDefaultApplyToken();
+        static void setDefaultApplyToken(sp<IBinder> applyToken);
+
+        static status_t sendSurfaceFlushJankDataTransaction(const sp<SurfaceControl>& sc);
     };
 
     status_t clearLayerFrameStats(const sp<IBinder>& token) const;
@@ -714,6 +804,12 @@
     ReleaseCallbackThread mReleaseCallbackThread;
 
 private:
+    // Get dynamic information about given physical display from token
+    static status_t getDynamicDisplayInfoFromToken(const sp<IBinder>& display,
+                                                   ui::DynamicDisplayInfo*);
+
+    static void getDynamicDisplayInfoInternal(gui::DynamicDisplayInfo& ginfo,
+                                              ui::DynamicDisplayInfo*& outInfo);
     virtual void onFirstRef();
 
     mutable     Mutex                       mLock;
@@ -779,7 +875,10 @@
     // This is protected by mSurfaceStatsListenerMutex, but GUARDED_BY isn't supported for
     // std::recursive_mutex
     std::multimap<int32_t, SurfaceStatsCallbackEntry> mSurfaceStatsListeners;
-    std::unordered_map<void*, std::function<void()>> mQueueStallListeners;
+    std::unordered_map<void*, std::function<void(const std::string&)>> mQueueStallListeners;
+
+    std::unordered_map<int, std::tuple<TrustedPresentationCallback, void*>>
+            mTrustedPresentationCallbacks;
 
 public:
     static sp<TransactionCompletedListener> getInstance();
@@ -792,14 +891,22 @@
             const std::unordered_set<sp<SurfaceControl>, SurfaceComposerClient::SCHash>&
                     surfaceControls,
             CallbackId::Type callbackType);
+    CallbackId addCallbackFunctionLocked(
+            const TransactionCompletedCallback& callbackFunction,
+            const std::unordered_set<sp<SurfaceControl>, SurfaceComposerClient::SCHash>&
+                    surfaceControls,
+            CallbackId::Type callbackType) REQUIRES(mMutex);
 
-    void addSurfaceControlToCallbacks(
-            const sp<SurfaceControl>& surfaceControl,
-            const std::unordered_set<CallbackId, CallbackIdHash>& callbackIds);
+    void addSurfaceControlToCallbacks(SurfaceComposerClient::CallbackInfo& callbackInfo,
+                                      const sp<SurfaceControl>& surfaceControl);
 
-    void addQueueStallListener(std::function<void()> stallListener, void* id);
+    void addQueueStallListener(std::function<void(const std::string&)> stallListener, void* id);
     void removeQueueStallListener(void *id);
 
+    sp<SurfaceComposerClient::PresentationCallbackRAII> addTrustedPresentationCallback(
+            TrustedPresentationCallback tpc, int id, void* context);
+    void clearTrustedPresentationCallback(int id);
+
     /*
      * Adds a jank listener to be informed about SurfaceFlinger's jank classification for a specific
      * surface. Jank classifications arrive as part of the transaction callbacks about previous
@@ -828,10 +935,12 @@
     // For Testing Only
     static void setInstance(const sp<TransactionCompletedListener>&);
 
-    void onTransactionQueueStalled() override;
+    void onTransactionQueueStalled(const String8& reason) override;
+
+    void onTrustedPresentationChanged(int id, bool presentedWithinThresholds) override;
 
 private:
-    ReleaseBufferCallback popReleaseBufferCallbackLocked(const ReleaseCallbackId&);
+    ReleaseBufferCallback popReleaseBufferCallbackLocked(const ReleaseCallbackId&) REQUIRES(mMutex);
     static sp<TransactionCompletedListener> sInstance;
 };
 
diff --git a/libs/gui/include/gui/SurfaceControl.h b/libs/gui/include/gui/SurfaceControl.h
index b72cf83..344b957 100644
--- a/libs/gui/include/gui/SurfaceControl.h
+++ b/libs/gui/include/gui/SurfaceControl.h
@@ -24,17 +24,19 @@
 #include <utils/RefBase.h>
 #include <utils/threads.h>
 
+#include <android/gui/ISurfaceComposerClient.h>
+
 #include <ui/FrameStats.h>
 #include <ui/PixelFormat.h>
 #include <ui/Region.h>
 
-#include <gui/ISurfaceComposerClient.h>
 #include <math/vec3.h>
 
 namespace android {
 
 // ---------------------------------------------------------------------------
 
+class Choreographer;
 class IGraphicBufferProducer;
 class Surface;
 class SurfaceComposerClient;
@@ -77,6 +79,10 @@
     sp<IBinder> getHandle() const;
     sp<IBinder> getLayerStateHandle() const;
     int32_t getLayerId() const;
+    const std::string& getName() const;
+
+    // TODO(b/267195698): Consider renaming.
+    std::shared_ptr<Choreographer> getChoreographer();
 
     sp<IGraphicBufferProducer> getIGraphicBufferProducer();
 
@@ -93,9 +99,9 @@
     explicit SurfaceControl(const sp<SurfaceControl>& other);
 
     SurfaceControl(const sp<SurfaceComposerClient>& client, const sp<IBinder>& handle,
-                   const sp<IGraphicBufferProducer>& gbp, int32_t layerId,
-                   uint32_t width = 0, uint32_t height = 0, PixelFormat format = 0,
-                   uint32_t transformHint = 0, uint32_t flags = 0);
+                   int32_t layerId, const std::string& layerName, uint32_t width = 0,
+                   uint32_t height = 0, PixelFormat format = 0, uint32_t transformHint = 0,
+                   uint32_t flags = 0);
 
     sp<SurfaceControl> getParentingLayer();
 
@@ -115,19 +121,20 @@
     status_t validate() const;
 
     sp<SurfaceComposerClient>   mClient;
-    sp<IBinder>                 mHandle;
-    sp<IGraphicBufferProducer>  mGraphicBufferProducer;
+    sp<IBinder> mHandle;
     mutable Mutex               mLock;
     mutable sp<Surface>         mSurfaceData;
     mutable sp<BLASTBufferQueue> mBbq;
     mutable sp<SurfaceControl> mBbqChild;
     int32_t mLayerId = 0;
+    std::string mName;
     uint32_t mTransformHint = 0;
     uint32_t mWidth = 0;
     uint32_t mHeight = 0;
     PixelFormat mFormat = PIXEL_FORMAT_NONE;
     uint32_t mCreateFlags = 0;
     uint64_t mFallbackFrameNumber = 100;
+    std::shared_ptr<Choreographer> mChoreographer;
 };
 
 }; // namespace android
diff --git a/libs/gui/include/gui/SyncScreenCaptureListener.h b/libs/gui/include/gui/SyncScreenCaptureListener.h
index 0784fbc..bcf565a 100644
--- a/libs/gui/include/gui/SyncScreenCaptureListener.h
+++ b/libs/gui/include/gui/SyncScreenCaptureListener.h
@@ -34,7 +34,9 @@
     ScreenCaptureResults waitForResults() {
         std::future<ScreenCaptureResults> resultsFuture = resultsPromise.get_future();
         const auto screenCaptureResults = resultsFuture.get();
-        screenCaptureResults.fence->waitForever("");
+        if (screenCaptureResults.fenceResult.ok()) {
+            screenCaptureResults.fenceResult.value()->waitForever("");
+        }
         return screenCaptureResults;
     }
 
@@ -42,4 +44,4 @@
     std::promise<ScreenCaptureResults> resultsPromise;
 };
 
-} // namespace android
\ No newline at end of file
+} // namespace android
diff --git a/libs/gui/include/gui/TraceUtils.h b/libs/gui/include/gui/TraceUtils.h
index 0009615..441b833 100644
--- a/libs/gui/include/gui/TraceUtils.h
+++ b/libs/gui/include/gui/TraceUtils.h
@@ -21,13 +21,20 @@
 #include <cutils/trace.h>
 #include <utils/Trace.h>
 
-#define ATRACE_FORMAT(fmt, ...)           \
-    TraceUtils::TraceEnder __traceEnder = \
-            (TraceUtils::atraceFormatBegin(fmt, ##__VA_ARGS__), TraceUtils::TraceEnder())
+#define ATRACE_FORMAT(fmt, ...)                                                 \
+    TraceUtils::TraceEnder traceEnder =                                         \
+            (CC_UNLIKELY(ATRACE_ENABLED()) &&                                   \
+                     (TraceUtils::atraceFormatBegin(fmt, ##__VA_ARGS__), true), \
+             TraceUtils::TraceEnder())
 
-#define ATRACE_FORMAT_BEGIN(fmt, ...) TraceUtils::atraceFormatBegin(fmt, ##__VA_ARGS__)
+#define ATRACE_FORMAT_INSTANT(fmt, ...) \
+    (CC_UNLIKELY(ATRACE_ENABLED()) && (TraceUtils::instantFormat(fmt, ##__VA_ARGS__), true))
 
-#define ATRACE_FORMAT_INSTANT(fmt, ...) TraceUtils::intantFormat(fmt, ##__VA_ARGS__)
+#define ALOGE_AND_TRACE(fmt, ...)                  \
+    do {                                           \
+        ALOGE(fmt, ##__VA_ARGS__);                 \
+        ATRACE_FORMAT_INSTANT(fmt, ##__VA_ARGS__); \
+    } while (false)
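+
+// Example usage (format strings are illustrative): ATRACE_FORMAT("composite %s", name) begins a
+// trace section that `traceEnder` closes when the enclosing scope exits, and the
+// CC_UNLIKELY(ATRACE_ENABLED()) short-circuit skips string formatting while tracing is disabled.
+// ALOGE_AND_TRACE("error %d", err) logs the message via ALOGE and also emits a trace instant.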
 
 namespace android {
 
@@ -39,8 +46,6 @@
     };
 
     static void atraceFormatBegin(const char* fmt, ...) {
-        if (CC_LIKELY(!ATRACE_ENABLED())) return;
-
         const int BUFFER_SIZE = 256;
         va_list ap;
         char buf[BUFFER_SIZE];
@@ -52,9 +57,7 @@
         ATRACE_BEGIN(buf);
     }
 
-    static void intantFormat(const char* fmt, ...) {
-        if (CC_LIKELY(!ATRACE_ENABLED())) return;
-
+    static void instantFormat(const char* fmt, ...) {
         const int BUFFER_SIZE = 256;
         va_list ap;
         char buf[BUFFER_SIZE];
@@ -65,7 +68,6 @@
 
         ATRACE_INSTANT(buf);
     }
+};
 
-}; // class TraceUtils
-
-} /* namespace android */
+} // namespace android
diff --git a/libs/gui/include/gui/TransactionTracing.h b/libs/gui/include/gui/TransactionTracing.h
deleted file mode 100644
index 9efba47..0000000
--- a/libs/gui/include/gui/TransactionTracing.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <android/gui/BnTransactionTraceListener.h>
-#include <utils/Mutex.h>
-
-namespace android {
-
-class TransactionTraceListener : public gui::BnTransactionTraceListener {
-    static std::mutex sMutex;
-    static sp<TransactionTraceListener> sInstance;
-
-    TransactionTraceListener();
-
-public:
-    static sp<TransactionTraceListener> getInstance();
-
-    binder::Status onToggled(bool enabled) override;
-
-    bool isTracingEnabled();
-
-private:
-    bool mTracingEnabled = false;
-};
-
-} // namespace android
diff --git a/libs/gui/include/gui/VsyncEventData.h b/libs/gui/include/gui/VsyncEventData.h
index 8e99539..b40a840 100644
--- a/libs/gui/include/gui/VsyncEventData.h
+++ b/libs/gui/include/gui/VsyncEventData.h
@@ -16,7 +16,7 @@
 
 #pragma once
 
-#include <gui/FrameTimelineInfo.h>
+#include <android/gui/FrameTimelineInfo.h>
 
 #include <array>
 
@@ -24,8 +24,8 @@
 // Plain Old Data (POD) vsync data structure. For example, it can be easily used in the
 // DisplayEventReceiver::Event union.
 struct VsyncEventData {
-    // Max amount of frame timelines is arbitrarily set to be reasonable.
-    static constexpr int64_t kFrameTimelinesLength = 7;
+    // Max capacity of frame timelines is arbitrarily set to be reasonable.
+    static constexpr int64_t kFrameTimelinesCapacity = 7;
 
     // The current frame interval in ns when this frame was scheduled.
     int64_t frameInterval;
@@ -33,6 +33,9 @@
     // Index into the frameTimelines that represents the platform's preferred frame timeline.
     uint32_t preferredFrameTimelineIndex;
 
+    // Number of frame timelines provided by the platform; max is kFrameTimelinesCapacity.
+    uint32_t frameTimelinesLength;
+
     struct alignas(8) FrameTimeline {
         // The Vsync Id corresponding to this vsync event. This will be used to
         // populate ISurfaceComposer::setFrameTimelineVsync and
@@ -45,7 +48,7 @@
 
         // The anticipated Vsync presentation time in nanos.
         int64_t expectedPresentationTime;
-    } frameTimelines[kFrameTimelinesLength]; // Sorted possible frame timelines.
+    } frameTimelines[kFrameTimelinesCapacity]; // Sorted possible frame timelines.
 
     // Gets the preferred frame timeline's vsync ID.
     int64_t preferredVsyncId() const;
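
A minimal consumer-side sketch of the new frameTimelinesLength field: iterate only the entries the platform filled in rather than the full kFrameTimelinesCapacity array. The helper is hypothetical; the namespace is assumed to follow the rest of libs/gui.

    #include <algorithm>
    #include <cstdint>
    #include <gui/VsyncEventData.h>

    // Returns the latest expected presentation time among the populated timelines.
    int64_t latestExpectedPresent(const android::gui::VsyncEventData& data) {
        int64_t latest = 0;
        for (uint32_t i = 0; i < data.frameTimelinesLength; i++) {
            latest = std::max(latest, data.frameTimelines[i].expectedPresentationTime);
        }
        return latest;
    }
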
diff --git a/libs/gui/include/gui/WindowInfo.h b/libs/gui/include/gui/WindowInfo.h
index 169f7f0..70b2ee8 100644
--- a/libs/gui/include/gui/WindowInfo.h
+++ b/libs/gui/include/gui/WindowInfo.h
@@ -171,6 +171,8 @@
                 static_cast<uint32_t>(os::InputConfig::SPY),
         INTERCEPTS_STYLUS =
                 static_cast<uint32_t>(os::InputConfig::INTERCEPTS_STYLUS),
+        CLONE =
+                static_cast<uint32_t>(os::InputConfig::CLONE),
         // clang-format on
     };
 
@@ -234,9 +236,12 @@
     Type layoutParamsType = Type::UNKNOWN;
     ftl::Flags<Flag> layoutParamsFlags;
 
-    void setInputConfig(ftl::Flags<InputConfig> config, bool value);
+    // The input token for the window to which focus should be transferred when this input window
+    // can be successfully focused. If null, this input window will not transfer its focus to
+    // any other window.
+    sp<IBinder> focusTransferTarget;
 
-    bool isClone = false;
+    void setInputConfig(ftl::Flags<InputConfig> config, bool value);
 
     void addTouchableRegion(const Rect& region);
 
@@ -272,6 +277,7 @@
     WindowInfoHandle(const WindowInfo& other);
 
     inline const WindowInfo* getInfo() const { return &mInfo; }
+    inline WindowInfo* editInfo() { return &mInfo; }
 
     sp<IBinder> getToken() const;
 
diff --git a/libs/gui/include/gui/WindowInfosListener.h b/libs/gui/include/gui/WindowInfosListener.h
index a18a498..02c8eb5 100644
--- a/libs/gui/include/gui/WindowInfosListener.h
+++ b/libs/gui/include/gui/WindowInfosListener.h
@@ -16,15 +16,13 @@
 
 #pragma once
 
-#include <gui/DisplayInfo.h>
-#include <gui/WindowInfo.h>
+#include <gui/WindowInfosUpdate.h>
 #include <utils/RefBase.h>
 
 namespace android::gui {
 
 class WindowInfosListener : public virtual RefBase {
 public:
-    virtual void onWindowInfosChanged(const std::vector<WindowInfo>&,
-                                      const std::vector<DisplayInfo>&) = 0;
+    virtual void onWindowInfosChanged(const WindowInfosUpdate& update) = 0;
 };
-} // namespace android::gui
\ No newline at end of file
+} // namespace android::gui
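
A sketch of a listener written against the new single-struct callback; the class name and logging are hypothetical.

    #define LOG_TAG "WindowInfosExample"
    #include <inttypes.h>
    #include <log/log.h>
    #include <gui/WindowInfosListener.h>
    #include <gui/WindowInfosUpdate.h>

    class LoggingWindowInfosListener : public android::gui::WindowInfosListener {
    public:
        void onWindowInfosChanged(const android::gui::WindowInfosUpdate& update) override {
            // The update bundles the window and display vectors that used to arrive as
            // two parameters, plus the vsync id and timestamp of the change.
            ALOGD("windows=%zu displays=%zu vsyncId=%" PRId64,
                  update.windowInfos.size(), update.displayInfos.size(), update.vsyncId);
        }
    };
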
diff --git a/libs/gui/include/gui/WindowInfosListenerReporter.h b/libs/gui/include/gui/WindowInfosListenerReporter.h
index 3b4aed4..38cb108 100644
--- a/libs/gui/include/gui/WindowInfosListenerReporter.h
+++ b/libs/gui/include/gui/WindowInfosListenerReporter.h
@@ -17,33 +17,32 @@
 #pragma once
 
 #include <android/gui/BnWindowInfosListener.h>
+#include <android/gui/ISurfaceComposer.h>
 #include <android/gui/IWindowInfosReportedListener.h>
 #include <binder/IBinder.h>
-#include <gui/ISurfaceComposer.h>
 #include <gui/SpHash.h>
 #include <gui/WindowInfosListener.h>
+#include <gui/WindowInfosUpdate.h>
 #include <unordered_set>
 
 namespace android {
-class ISurfaceComposer;
 
 class WindowInfosListenerReporter : public gui::BnWindowInfosListener {
 public:
     static sp<WindowInfosListenerReporter> getInstance();
-    binder::Status onWindowInfosChanged(const std::vector<gui::WindowInfo>&,
-                                        const std::vector<gui::DisplayInfo>&,
+    binder::Status onWindowInfosChanged(const gui::WindowInfosUpdate& update,
                                         const sp<gui::IWindowInfosReportedListener>&) override;
-
     status_t addWindowInfosListener(
-            const sp<gui::WindowInfosListener>& windowInfosListener, const sp<ISurfaceComposer>&,
+            const sp<gui::WindowInfosListener>& windowInfosListener,
+            const sp<gui::ISurfaceComposer>&,
             std::pair<std::vector<gui::WindowInfo>, std::vector<gui::DisplayInfo>>* outInitialInfo);
     status_t removeWindowInfosListener(const sp<gui::WindowInfosListener>& windowInfosListener,
-                                       const sp<ISurfaceComposer>& surfaceComposer);
-    void reconnect(const sp<ISurfaceComposer>&);
+                                       const sp<gui::ISurfaceComposer>& surfaceComposer);
+    void reconnect(const sp<gui::ISurfaceComposer>&);
 
 private:
     std::mutex mListenersMutex;
-    std::unordered_set<sp<gui::WindowInfosListener>, SpHash<gui::WindowInfosListener>>
+    std::unordered_set<sp<gui::WindowInfosListener>, gui::SpHash<gui::WindowInfosListener>>
             mWindowInfosListeners GUARDED_BY(mListenersMutex);
 
     std::vector<gui::WindowInfo> mLastWindowInfos GUARDED_BY(mListenersMutex);
diff --git a/libs/gui/include/gui/WindowInfosUpdate.h b/libs/gui/include/gui/WindowInfosUpdate.h
new file mode 100644
index 0000000..2ca59fb
--- /dev/null
+++ b/libs/gui/include/gui/WindowInfosUpdate.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <binder/Parcelable.h>
+#include <gui/DisplayInfo.h>
+#include <gui/WindowInfo.h>
+
+namespace android::gui {
+
+struct WindowInfosUpdate : public Parcelable {
+    WindowInfosUpdate() {}
+
+    WindowInfosUpdate(std::vector<WindowInfo> windowInfos, std::vector<DisplayInfo> displayInfos,
+                      int64_t vsyncId, int64_t timestamp)
+          : windowInfos(std::move(windowInfos)),
+            displayInfos(std::move(displayInfos)),
+            vsyncId(vsyncId),
+            timestamp(timestamp) {}
+
+    std::vector<WindowInfo> windowInfos;
+    std::vector<DisplayInfo> displayInfos;
+    int64_t vsyncId;
+    int64_t timestamp;
+
+    status_t writeToParcel(android::Parcel*) const override;
+    status_t readFromParcel(const android::Parcel*) override;
+};
+
+} // namespace android::gui
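
A parcel round-trip sketch for the new parcelable, assuming the usual write/read symmetry implemented in the corresponding WindowInfosUpdate.cpp; the helper is hypothetical.

    #include <binder/Parcel.h>
    #include <gui/WindowInfosUpdate.h>

    bool roundTrips(const android::gui::WindowInfosUpdate& in) {
        android::Parcel parcel;
        if (in.writeToParcel(&parcel) != android::OK) return false;
        parcel.setDataPosition(0);
        android::gui::WindowInfosUpdate out;
        return out.readFromParcel(&parcel) == android::OK &&
                out.vsyncId == in.vsyncId && out.timestamp == in.timestamp;
    }
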
diff --git a/libs/gui/include/gui/fake/BufferData.h b/libs/gui/include/gui/fake/BufferData.h
new file mode 100644
index 0000000..725d11c
--- /dev/null
+++ b/libs/gui/include/gui/fake/BufferData.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <gui/LayerState.h>
+
+namespace android::fake {
+
+// Class which exposes buffer properties from BufferData without holding on to an actual buffer
+class BufferData : public android::BufferData {
+public:
+    BufferData(uint64_t bufferId, uint32_t width, uint32_t height, int32_t pixelFormat,
+               uint64_t outUsage)
+          : mBufferId(bufferId),
+            mWidth(width),
+            mHeight(height),
+            mPixelFormat(pixelFormat),
+            mOutUsage(outUsage) {}
+    bool hasBuffer() const override { return mBufferId != 0; }
+    bool hasSameBuffer(const android::BufferData& other) const override {
+        return getId() == other.getId() && frameNumber == other.frameNumber;
+    }
+    uint32_t getWidth() const override { return mWidth; }
+    uint32_t getHeight() const override { return mHeight; }
+    uint64_t getId() const override { return mBufferId; }
+    PixelFormat getPixelFormat() const override { return mPixelFormat; }
+    uint64_t getUsage() const override { return mOutUsage; }
+
+private:
+    uint64_t mBufferId;
+    uint32_t mWidth;
+    uint32_t mHeight;
+    int32_t mPixelFormat;
+    uint64_t mOutUsage;
+};
+
+} // namespace android::fake
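
A test-side usage sketch for fake::BufferData; the helper and the concrete values are arbitrary, and std::shared_ptr is assumed to match how BufferData is passed around elsewhere.

    #include <memory>
    #include <gui/fake/BufferData.h>

    // Describe a 1280x720 RGBA buffer in a test without allocating a real GraphicBuffer.
    std::shared_ptr<android::BufferData> makeFakeBufferData() {
        return std::make_shared<android::fake::BufferData>(
                /*bufferId=*/1, /*width=*/1280, /*height=*/720,
                /*pixelFormat=*/1 /* PIXEL_FORMAT_RGBA_8888 */, /*outUsage=*/0);
    }
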
diff --git a/libs/gui/include/gui/test/CallbackUtils.h b/libs/gui/include/gui/test/CallbackUtils.h
index 08785b4..1c900e9 100644
--- a/libs/gui/include/gui/test/CallbackUtils.h
+++ b/libs/gui/include/gui/test/CallbackUtils.h
@@ -51,6 +51,7 @@
     enum Buffer {
         NOT_ACQUIRED = 0,
         ACQUIRED,
+        ACQUIRED_NULL,
     };
 
     enum PreviousBuffer {
@@ -133,17 +134,28 @@
               : mBufferResult(bufferResult), mPreviousBufferResult(previousBufferResult) {}
 
         void verifySurfaceControlStats(const SurfaceControlStats& surfaceControlStats,
-                                       nsecs_t latchTime) const {
+                                       nsecs_t /* latchTime */) const {
             const auto& [surfaceControl, latch, acquireTimeOrFence, presentFence,
                          previousReleaseFence, transformHint, frameEvents, ignore] =
-                surfaceControlStats;
+                    surfaceControlStats;
 
-            ASSERT_TRUE(std::holds_alternative<nsecs_t>(acquireTimeOrFence));
-            ASSERT_EQ(std::get<nsecs_t>(acquireTimeOrFence) > 0,
-                      mBufferResult == ExpectedResult::Buffer::ACQUIRED)
-                    << "bad acquire time";
-            ASSERT_LE(std::get<nsecs_t>(acquireTimeOrFence), latchTime)
-                    << "acquire time should be <= latch time";
+            nsecs_t acquireTime = -1;
+            if (std::holds_alternative<nsecs_t>(acquireTimeOrFence)) {
+                acquireTime = std::get<nsecs_t>(acquireTimeOrFence);
+            } else {
+                auto fence = std::get<sp<Fence>>(acquireTimeOrFence);
+                if (fence) {
+                    ASSERT_EQ(fence->wait(3000), NO_ERROR);
+                    acquireTime = fence->getSignalTime();
+                }
+            }
+
+            if (mBufferResult == ExpectedResult::Buffer::ACQUIRED) {
+                ASSERT_GT(acquireTime, 0) << "acquire time should be valid";
+            } else {
+                ASSERT_LE(acquireTime, 0) << "acquire time should not be valid";
+            }
+            ASSERT_EQ(acquireTime > 0, mBufferResult == ExpectedResult::Buffer::ACQUIRED);
 
             if (mPreviousBufferResult == ExpectedResult::PreviousBuffer::RELEASED) {
                 ASSERT_NE(previousReleaseFence, nullptr)
diff --git a/libs/gui/include/private/gui/ComposerServiceAIDL.h b/libs/gui/include/private/gui/ComposerServiceAIDL.h
index 9a96976..6352a58 100644
--- a/libs/gui/include/private/gui/ComposerServiceAIDL.h
+++ b/libs/gui/include/private/gui/ComposerServiceAIDL.h
@@ -20,6 +20,7 @@
 #include <sys/types.h>
 
 #include <android/gui/ISurfaceComposer.h>
+#include <ui/DisplayId.h>
 
 #include <utils/Singleton.h>
 #include <utils/StrongPointer.h>
@@ -50,28 +51,6 @@
     // Get a connection to the Composer Service.  This will block until
     // a connection is established. Returns null if permission is denied.
     static sp<gui::ISurfaceComposer> getComposerService();
-
-    // the following two methods are moved from ISurfaceComposer.h
-    // TODO(b/74619554): Remove this stopgap once the framework is display-agnostic.
-    std::optional<PhysicalDisplayId> getInternalDisplayId() const {
-        std::vector<int64_t> displayIds;
-        binder::Status status = mComposerService->getPhysicalDisplayIds(&displayIds);
-        return (!status.isOk() || displayIds.empty())
-                ? std::nullopt
-                : DisplayId::fromValue<PhysicalDisplayId>(
-                          static_cast<uint64_t>(displayIds.front()));
-    }
-
-    // TODO(b/74619554): Remove this stopgap once the framework is display-agnostic.
-    sp<IBinder> getInternalDisplayToken() const {
-        const auto displayId = getInternalDisplayId();
-        if (!displayId) return nullptr;
-        sp<IBinder> display;
-        binder::Status status =
-                mComposerService->getPhysicalDisplayToken(static_cast<int64_t>(displayId->value),
-                                                          &display);
-        return status.isOk() ? display : nullptr;
-    }
 };
 
 // ---------------------------------------------------------------------------
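
The removed getInternalDisplayId/getInternalDisplayToken helpers are replaced throughout this change by querying SurfaceComposerClient directly; a condensed form of that pattern, as used in the updated tests (the free function itself is hypothetical):

    #include <gui/SurfaceComposerClient.h>

    android::sp<android::IBinder> firstPhysicalDisplayToken() {
        const auto ids = android::SurfaceComposerClient::getPhysicalDisplayIds();
        if (ids.empty()) return nullptr;
        // The updated tests simply pick the first reported display.
        return android::SurfaceComposerClient::getPhysicalDisplayToken(ids.front());
    }
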
diff --git a/libs/gui/tests/Android.bp b/libs/gui/tests/Android.bp
index 0702e0f..cd35d2f 100644
--- a/libs/gui/tests/Android.bp
+++ b/libs/gui/tests/Android.bp
@@ -24,6 +24,7 @@
         "BLASTBufferQueue_test.cpp",
         "BufferItemConsumer_test.cpp",
         "BufferQueue_test.cpp",
+        "CompositorTiming_test.cpp",
         "CpuConsumer_test.cpp",
         "EndToEndNativeInputTest.cpp",
         "DisplayInfo_test.cpp",
diff --git a/libs/gui/tests/BLASTBufferQueue_test.cpp b/libs/gui/tests/BLASTBufferQueue_test.cpp
index b993289..a3ad680 100644
--- a/libs/gui/tests/BLASTBufferQueue_test.cpp
+++ b/libs/gui/tests/BLASTBufferQueue_test.cpp
@@ -19,6 +19,7 @@
 #include <gui/BLASTBufferQueue.h>
 
 #include <android/hardware/graphics/common/1.2/types.h>
+#include <gui/AidlStatusUtil.h>
 #include <gui/BufferQueueCore.h>
 #include <gui/BufferQueueProducer.h>
 #include <gui/FrameTimestamps.h>
@@ -31,6 +32,7 @@
 #include <private/gui/ComposerService.h>
 #include <private/gui/ComposerServiceAIDL.h>
 #include <ui/DisplayMode.h>
+#include <ui/DisplayState.h>
 #include <ui/GraphicBuffer.h>
 #include <ui/GraphicTypes.h>
 #include <ui/Transform.h>
@@ -115,15 +117,17 @@
         mBlastBufferQueueAdapter->syncNextTransaction(callback, acquireSingleBuffer);
     }
 
-    void syncNextTransaction(std::function<void(Transaction*)> callback,
+    bool syncNextTransaction(std::function<void(Transaction*)> callback,
                              bool acquireSingleBuffer = true) {
-        mBlastBufferQueueAdapter->syncNextTransaction(callback, acquireSingleBuffer);
+        return mBlastBufferQueueAdapter->syncNextTransaction(callback, acquireSingleBuffer);
     }
 
     void stopContinuousSyncTransaction() {
         mBlastBufferQueueAdapter->stopContinuousSyncTransaction();
     }
 
+    void clearSyncTransaction() { mBlastBufferQueueAdapter->clearSyncTransaction(); }
+
     int getWidth() { return mBlastBufferQueueAdapter->mSize.width; }
 
     int getHeight() { return mBlastBufferQueueAdapter->mSize.height; }
@@ -175,30 +179,35 @@
     BLASTBufferQueueTest() {
         const ::testing::TestInfo* const testInfo =
                 ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("Begin test: %s.%s", testInfo->test_case_name(), testInfo->name());
+        ALOGD("Begin test: %s.%s", testInfo->test_case_name(), testInfo->name());
     }
 
     ~BLASTBufferQueueTest() {
         const ::testing::TestInfo* const testInfo =
                 ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("End test:   %s.%s", testInfo->test_case_name(), testInfo->name());
+        ALOGD("End test:   %s.%s", testInfo->test_case_name(), testInfo->name());
     }
 
     void SetUp() {
         mComposer = ComposerService::getComposerService();
         mClient = new SurfaceComposerClient();
-        mDisplayToken = mClient->getInternalDisplayToken();
+        const auto ids = SurfaceComposerClient::getPhysicalDisplayIds();
+        ASSERT_FALSE(ids.empty());
+        // display 0 is picked as this test does not depend much on the display
+        mDisplayToken = SurfaceComposerClient::getPhysicalDisplayToken(ids.front());
         ASSERT_NE(nullptr, mDisplayToken.get());
         Transaction t;
         t.setDisplayLayerStack(mDisplayToken, ui::DEFAULT_LAYER_STACK);
         t.apply();
         t.clear();
 
-        ui::DisplayMode mode;
-        ASSERT_EQ(NO_ERROR, SurfaceComposerClient::getActiveDisplayMode(mDisplayToken, &mode));
-        const ui::Size& resolution = mode.resolution;
+        ui::DisplayState displayState;
+        ASSERT_EQ(NO_ERROR, SurfaceComposerClient::getDisplayState(mDisplayToken, &displayState));
+        const ui::Size& resolution = displayState.layerStackSpaceRect;
         mDisplayWidth = resolution.getWidth();
         mDisplayHeight = resolution.getHeight();
+        ALOGD("Display: %dx%d orientation:%d", mDisplayWidth, mDisplayHeight,
+              displayState.orientation);
 
         mSurfaceControl = mClient->createSurface(String8("TestSurface"), mDisplayWidth,
                                                  mDisplayHeight, PIXEL_FORMAT_RGBA_8888,
@@ -305,11 +314,12 @@
 
         const sp<SyncScreenCaptureListener> captureListener = new SyncScreenCaptureListener();
         binder::Status status = sf->captureDisplay(captureArgs, captureListener);
-        if (status.transactionError() != NO_ERROR) {
-            return status.transactionError();
+        status_t err = gui::aidl_utils::statusTFromBinderStatus(status);
+        if (err != NO_ERROR) {
+            return err;
         }
         captureResults = captureListener->waitForResults();
-        return captureResults.result;
+        return fenceStatus(captureResults.fenceResult);
     }
 
     void queueBuffer(sp<IGraphicBufferProducer> igbp, uint8_t r, uint8_t g, uint8_t b,
@@ -1103,7 +1113,11 @@
     ASSERT_NE(nullptr, adapter.getTransactionReadyCallback());
 
     auto callback2 = [](Transaction*) {};
-    adapter.syncNextTransaction(callback2);
+    ASSERT_FALSE(adapter.syncNextTransaction(callback2));
+
+    sp<IGraphicBufferProducer> igbProducer;
+    setUpProducer(adapter, igbProducer);
+    queueBuffer(igbProducer, 0, 255, 0, 0);
 
     std::unique_lock<std::mutex> lock(mutex);
     if (!receivedCallback) {
@@ -1115,6 +1129,37 @@
     ASSERT_TRUE(receivedCallback);
 }
 
+TEST_F(BLASTBufferQueueTest, ClearSyncTransaction) {
+    std::mutex mutex;
+    std::condition_variable callbackReceivedCv;
+    bool receivedCallback = false;
+
+    BLASTBufferQueueHelper adapter(mSurfaceControl, mDisplayWidth, mDisplayHeight);
+    ASSERT_EQ(nullptr, adapter.getTransactionReadyCallback());
+    auto callback = [&](Transaction*) {
+        std::unique_lock<std::mutex> lock(mutex);
+        receivedCallback = true;
+        callbackReceivedCv.notify_one();
+    };
+    adapter.syncNextTransaction(callback);
+    ASSERT_NE(nullptr, adapter.getTransactionReadyCallback());
+
+    adapter.clearSyncTransaction();
+
+    sp<IGraphicBufferProducer> igbProducer;
+    setUpProducer(adapter, igbProducer);
+    queueBuffer(igbProducer, 0, 255, 0, 0);
+
+    std::unique_lock<std::mutex> lock(mutex);
+    if (!receivedCallback) {
+        ASSERT_EQ(callbackReceivedCv.wait_for(lock, std::chrono::seconds(3)),
+                  std::cv_status::timeout)
+                << "did not receive callback";
+    }
+
+    ASSERT_FALSE(receivedCallback);
+}
+
 TEST_F(BLASTBufferQueueTest, SyncNextTransactionDropBuffer) {
     uint8_t r = 255;
     uint8_t g = 0;
@@ -1146,6 +1191,7 @@
     ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
     ASSERT_NO_FATAL_FAILURE(
             checkScreenCapture(r, g, b, {0, 0, (int32_t)mDisplayWidth, (int32_t)mDisplayHeight}));
+    sync.apply();
 }
 
 // This test will currently fail because the old surfacecontrol will steal the last presented buffer
diff --git a/libs/gui/tests/BufferItemConsumer_test.cpp b/libs/gui/tests/BufferItemConsumer_test.cpp
index fc6551c..6880678 100644
--- a/libs/gui/tests/BufferItemConsumer_test.cpp
+++ b/libs/gui/tests/BufferItemConsumer_test.cpp
@@ -68,7 +68,7 @@
     void HandleBufferFreed() {
         std::lock_guard<std::mutex> lock(mMutex);
         mFreedBufferCount++;
-        ALOGV("HandleBufferFreed, mFreedBufferCount=%d", mFreedBufferCount);
+        ALOGD("HandleBufferFreed, mFreedBufferCount=%d", mFreedBufferCount);
     }
 
     void DequeueBuffer(int* outSlot) {
@@ -80,7 +80,7 @@
                                                 nullptr, nullptr);
         ASSERT_GE(ret, 0);
 
-        ALOGV("dequeueBuffer: slot=%d", slot);
+        ALOGD("dequeueBuffer: slot=%d", slot);
         if (ret & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) {
             ret = mProducer->requestBuffer(slot, &mBuffers[slot]);
             ASSERT_EQ(NO_ERROR, ret);
@@ -89,7 +89,7 @@
     }
 
     void QueueBuffer(int slot) {
-        ALOGV("enqueueBuffer: slot=%d", slot);
+        ALOGD("enqueueBuffer: slot=%d", slot);
         IGraphicBufferProducer::QueueBufferInput bufferInput(
             0ULL, true, HAL_DATASPACE_UNKNOWN, Rect::INVALID_RECT,
             NATIVE_WINDOW_SCALING_MODE_FREEZE, 0, Fence::NO_FENCE);
@@ -104,12 +104,12 @@
         status_t ret = mBIC->acquireBuffer(&buffer, 0, false);
         ASSERT_EQ(NO_ERROR, ret);
 
-        ALOGV("acquireBuffer: slot=%d", buffer.mSlot);
+        ALOGD("acquireBuffer: slot=%d", buffer.mSlot);
         *outSlot = buffer.mSlot;
     }
 
     void ReleaseBuffer(int slot) {
-        ALOGV("releaseBuffer: slot=%d", slot);
+        ALOGD("releaseBuffer: slot=%d", slot);
         BufferItem buffer;
         buffer.mSlot = slot;
         buffer.mGraphicBuffer = mBuffers[slot];
diff --git a/libs/gui/tests/BufferQueue_test.cpp b/libs/gui/tests/BufferQueue_test.cpp
index d1208ee..2f1fd3e 100644
--- a/libs/gui/tests/BufferQueue_test.cpp
+++ b/libs/gui/tests/BufferQueue_test.cpp
@@ -49,14 +49,14 @@
     BufferQueueTest() {
         const ::testing::TestInfo* const testInfo =
             ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("Begin test: %s.%s", testInfo->test_case_name(),
+        ALOGD("Begin test: %s.%s", testInfo->test_case_name(),
                 testInfo->name());
     }
 
     ~BufferQueueTest() {
         const ::testing::TestInfo* const testInfo =
             ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("End test:   %s.%s", testInfo->test_case_name(),
+        ALOGD("End test:   %s.%s", testInfo->test_case_name(),
                 testInfo->name());
     }
 
diff --git a/libs/gui/tests/CompositorTiming_test.cpp b/libs/gui/tests/CompositorTiming_test.cpp
new file mode 100644
index 0000000..d8bb21d
--- /dev/null
+++ b/libs/gui/tests/CompositorTiming_test.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <gui/CompositorTiming.h>
+
+namespace android::test {
+namespace {
+
+constexpr nsecs_t kMillisecond = 1'000'000;
+constexpr nsecs_t kVsyncPeriod = 8'333'333;
+constexpr nsecs_t kVsyncPhase = -2'166'667;
+constexpr nsecs_t kIdealLatency = -kVsyncPhase;
+
+} // namespace
+
+TEST(CompositorTimingTest, InvalidVsyncPeriod) {
+    const nsecs_t vsyncDeadline = systemTime();
+    constexpr nsecs_t kInvalidVsyncPeriod = -1;
+
+    const gui::CompositorTiming timing(vsyncDeadline, kInvalidVsyncPeriod, kVsyncPhase,
+                                       kIdealLatency);
+
+    EXPECT_EQ(timing.deadline, 0);
+    EXPECT_EQ(timing.interval, gui::CompositorTiming::kDefaultVsyncPeriod);
+    EXPECT_EQ(timing.presentLatency, gui::CompositorTiming::kDefaultVsyncPeriod);
+}
+
+TEST(CompositorTimingTest, PresentLatencySnapping) {
+    for (nsecs_t presentDelay = 0, compositeTime = systemTime(); presentDelay < 10 * kVsyncPeriod;
+         presentDelay += kMillisecond, compositeTime += kVsyncPeriod) {
+        const nsecs_t presentLatency = kIdealLatency + presentDelay;
+        const nsecs_t vsyncDeadline = compositeTime + presentLatency + kVsyncPeriod;
+
+        const gui::CompositorTiming timing(vsyncDeadline, kVsyncPeriod, kVsyncPhase,
+                                           presentLatency);
+
+        EXPECT_EQ(timing.deadline, compositeTime + presentDelay + kVsyncPeriod);
+        EXPECT_EQ(timing.interval, kVsyncPeriod);
+
+        // The presentDelay should be rounded to a multiple of the VSYNC period, such that the
+        // remainder (presentLatency % interval) always evaluates to the VSYNC phase offset.
+        EXPECT_GE(timing.presentLatency, kIdealLatency);
+        EXPECT_EQ(timing.presentLatency % timing.interval, kIdealLatency);
+    }
+}
+
+} // namespace android::test
diff --git a/libs/gui/tests/CpuConsumer_test.cpp b/libs/gui/tests/CpuConsumer_test.cpp
index 00e32d9..0a14afa 100644
--- a/libs/gui/tests/CpuConsumer_test.cpp
+++ b/libs/gui/tests/CpuConsumer_test.cpp
@@ -62,7 +62,7 @@
         const ::testing::TestInfo* const test_info =
                 ::testing::UnitTest::GetInstance()->current_test_info();
         CpuConsumerTestParams params = GetParam();
-        ALOGV("** Starting test %s (%d x %d, %d, 0x%x)",
+        ALOGD("** Starting test %s (%d x %d, %d, 0x%x)",
                 test_info->name(),
                 params.width, params.height,
                 params.maxLockedBuffers, params.format);
@@ -582,7 +582,7 @@
     uint32_t stride[numInQueue];
 
     for (int i = 0; i < numInQueue; i++) {
-        ALOGV("Producing frame %d", i);
+        ALOGD("Producing frame %d", i);
         ASSERT_NO_FATAL_FAILURE(produceOneFrame(mANW, params, time[i],
                         &stride[i]));
     }
@@ -590,7 +590,7 @@
     // Consume
 
     for (int i = 0; i < numInQueue; i++) {
-        ALOGV("Consuming frame %d", i);
+        ALOGD("Consuming frame %d", i);
         CpuConsumer::LockedBuffer b;
         err = mCC->lockNextBuffer(&b);
         ASSERT_NO_ERROR(err, "getNextBuffer error: ");
@@ -624,7 +624,7 @@
     uint32_t stride;
 
     for (int i = 0; i < params.maxLockedBuffers + 1; i++) {
-        ALOGV("Producing frame %d", i);
+        ALOGD("Producing frame %d", i);
         ASSERT_NO_FATAL_FAILURE(produceOneFrame(mANW, params, time,
                         &stride));
     }
@@ -633,7 +633,7 @@
 
     std::vector<CpuConsumer::LockedBuffer> b(params.maxLockedBuffers);
     for (int i = 0; i < params.maxLockedBuffers; i++) {
-        ALOGV("Locking frame %d", i);
+        ALOGD("Locking frame %d", i);
         err = mCC->lockNextBuffer(&b[i]);
         ASSERT_NO_ERROR(err, "getNextBuffer error: ");
 
@@ -647,16 +647,16 @@
         checkAnyBuffer(b[i], GetParam().format);
     }
 
-    ALOGV("Locking frame %d (too many)", params.maxLockedBuffers);
+    ALOGD("Locking frame %d (too many)", params.maxLockedBuffers);
     CpuConsumer::LockedBuffer bTooMuch;
     err = mCC->lockNextBuffer(&bTooMuch);
     ASSERT_TRUE(err == NOT_ENOUGH_DATA) << "Allowing too many locks";
 
-    ALOGV("Unlocking frame 0");
+    ALOGD("Unlocking frame 0");
     err = mCC->unlockBuffer(b[0]);
     ASSERT_NO_ERROR(err, "Could not unlock buffer 0: ");
 
-    ALOGV("Locking frame %d (should work now)", params.maxLockedBuffers);
+    ALOGD("Locking frame %d (should work now)", params.maxLockedBuffers);
     err = mCC->lockNextBuffer(&bTooMuch);
     ASSERT_NO_ERROR(err, "Did not allow new lock after unlock");
 
@@ -669,11 +669,11 @@
 
     checkAnyBuffer(bTooMuch, GetParam().format);
 
-    ALOGV("Unlocking extra buffer");
+    ALOGD("Unlocking extra buffer");
     err = mCC->unlockBuffer(bTooMuch);
     ASSERT_NO_ERROR(err, "Could not unlock extra buffer: ");
 
-    ALOGV("Locking frame %d (no more available)", params.maxLockedBuffers + 1);
+    ALOGD("Locking frame %d (no more available)", params.maxLockedBuffers + 1);
     err = mCC->lockNextBuffer(&b[0]);
     ASSERT_EQ(BAD_VALUE, err) << "Not out of buffers somehow";
 
diff --git a/libs/gui/tests/DisplayEventStructLayout_test.cpp b/libs/gui/tests/DisplayEventStructLayout_test.cpp
index da88463..3949d70 100644
--- a/libs/gui/tests/DisplayEventStructLayout_test.cpp
+++ b/libs/gui/tests/DisplayEventStructLayout_test.cpp
@@ -35,6 +35,7 @@
     CHECK_OFFSET(DisplayEventReceiver::Event::VSync, count, 0);
     CHECK_OFFSET(DisplayEventReceiver::Event::VSync, vsyncData.frameInterval, 8);
     CHECK_OFFSET(DisplayEventReceiver::Event::VSync, vsyncData.preferredFrameTimelineIndex, 16);
+    CHECK_OFFSET(DisplayEventReceiver::Event::VSync, vsyncData.frameTimelinesLength, 20);
     CHECK_OFFSET(DisplayEventReceiver::Event::VSync, vsyncData.frameTimelines, 24);
     CHECK_OFFSET(DisplayEventReceiver::Event::VSync, vsyncData.frameTimelines[0].vsyncId, 24);
     CHECK_OFFSET(DisplayEventReceiver::Event::VSync, vsyncData.frameTimelines[0].deadlineTimestamp,
@@ -44,16 +45,16 @@
     // Also test the offsets of the last frame timeline. A loop is not used because the non-const
     // index cannot be used in static_assert.
     const int lastFrameTimelineOffset = /* Start of array */ 24 +
-            (VsyncEventData::kFrameTimelinesLength - 1) * /* Size of FrameTimeline */ 24;
+            (VsyncEventData::kFrameTimelinesCapacity - 1) * /* Size of FrameTimeline */ 24;
     CHECK_OFFSET(DisplayEventReceiver::Event::VSync,
-                 vsyncData.frameTimelines[VsyncEventData::kFrameTimelinesLength - 1].vsyncId,
+                 vsyncData.frameTimelines[VsyncEventData::kFrameTimelinesCapacity - 1].vsyncId,
                  lastFrameTimelineOffset);
     CHECK_OFFSET(DisplayEventReceiver::Event::VSync,
-                 vsyncData.frameTimelines[VsyncEventData::kFrameTimelinesLength - 1]
+                 vsyncData.frameTimelines[VsyncEventData::kFrameTimelinesCapacity - 1]
                          .deadlineTimestamp,
                  lastFrameTimelineOffset + 8);
     CHECK_OFFSET(DisplayEventReceiver::Event::VSync,
-                 vsyncData.frameTimelines[VsyncEventData::kFrameTimelinesLength - 1]
+                 vsyncData.frameTimelines[VsyncEventData::kFrameTimelinesCapacity - 1]
                          .expectedPresentationTime,
                  lastFrameTimelineOffset + 16);
 
diff --git a/libs/gui/tests/DisplayedContentSampling_test.cpp b/libs/gui/tests/DisplayedContentSampling_test.cpp
index b647aab..0a2750a 100644
--- a/libs/gui/tests/DisplayedContentSampling_test.cpp
+++ b/libs/gui/tests/DisplayedContentSampling_test.cpp
@@ -32,7 +32,10 @@
     void SetUp() {
         mComposerClient = new SurfaceComposerClient;
         ASSERT_EQ(OK, mComposerClient->initCheck());
-        mDisplayToken = mComposerClient->getInternalDisplayToken();
+        const auto ids = SurfaceComposerClient::getPhysicalDisplayIds();
+        ASSERT_FALSE(ids.empty());
+        // display 0 is picked for now, can extend to support all displays if needed
+        mDisplayToken = SurfaceComposerClient::getPhysicalDisplayToken(ids.front());
         ASSERT_TRUE(mDisplayToken);
     }
 
diff --git a/libs/gui/tests/EndToEndNativeInputTest.cpp b/libs/gui/tests/EndToEndNativeInputTest.cpp
index 2637f59..4ec7a06 100644
--- a/libs/gui/tests/EndToEndNativeInputTest.cpp
+++ b/libs/gui/tests/EndToEndNativeInputTest.cpp
@@ -164,7 +164,7 @@
     void assertFocusChange(bool hasFocus) {
         InputEvent *ev = consumeEvent();
         ASSERT_NE(ev, nullptr);
-        ASSERT_EQ(AINPUT_EVENT_TYPE_FOCUS, ev->getType());
+        ASSERT_EQ(InputEventType::FOCUS, ev->getType());
         FocusEvent *focusEvent = static_cast<FocusEvent *>(ev);
         EXPECT_EQ(hasFocus, focusEvent->getHasFocus());
     }
@@ -172,7 +172,7 @@
     void expectTap(int x, int y) {
         InputEvent* ev = consumeEvent();
         ASSERT_NE(ev, nullptr);
-        ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, ev->getType());
+        ASSERT_EQ(InputEventType::MOTION, ev->getType());
         MotionEvent* mev = static_cast<MotionEvent*>(ev);
         EXPECT_EQ(AMOTION_EVENT_ACTION_DOWN, mev->getAction());
         EXPECT_EQ(x, mev->getX(0));
@@ -181,7 +181,7 @@
 
         ev = consumeEvent();
         ASSERT_NE(ev, nullptr);
-        ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, ev->getType());
+        ASSERT_EQ(InputEventType::MOTION, ev->getType());
         mev = static_cast<MotionEvent*>(ev);
         EXPECT_EQ(AMOTION_EVENT_ACTION_UP, mev->getAction());
         EXPECT_EQ(0, mev->getFlags() & VERIFIED_MOTION_EVENT_FLAGS);
@@ -190,7 +190,7 @@
     void expectTapWithFlag(int x, int y, int32_t flags) {
         InputEvent *ev = consumeEvent();
         ASSERT_NE(ev, nullptr);
-        ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, ev->getType());
+        ASSERT_EQ(InputEventType::MOTION, ev->getType());
         MotionEvent *mev = static_cast<MotionEvent *>(ev);
         EXPECT_EQ(AMOTION_EVENT_ACTION_DOWN, mev->getAction());
         EXPECT_EQ(x, mev->getX(0));
@@ -199,7 +199,7 @@
 
         ev = consumeEvent();
         ASSERT_NE(ev, nullptr);
-        ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, ev->getType());
+        ASSERT_EQ(InputEventType::MOTION, ev->getType());
         mev = static_cast<MotionEvent *>(ev);
         EXPECT_EQ(AMOTION_EVENT_ACTION_UP, mev->getAction());
         EXPECT_EQ(flags, mev->getFlags() & flags);
@@ -208,7 +208,7 @@
     void expectTapInDisplayCoordinates(int displayX, int displayY) {
         InputEvent *ev = consumeEvent();
         ASSERT_NE(ev, nullptr);
-        ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, ev->getType());
+        ASSERT_EQ(InputEventType::MOTION, ev->getType());
         MotionEvent *mev = static_cast<MotionEvent *>(ev);
         EXPECT_EQ(AMOTION_EVENT_ACTION_DOWN, mev->getAction());
         const PointerCoords &coords = *mev->getRawPointerCoords(0 /*pointerIndex*/);
@@ -218,7 +218,7 @@
 
         ev = consumeEvent();
         ASSERT_NE(ev, nullptr);
-        ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, ev->getType());
+        ASSERT_EQ(InputEventType::MOTION, ev->getType());
         mev = static_cast<MotionEvent *>(ev);
         EXPECT_EQ(AMOTION_EVENT_ACTION_UP, mev->getAction());
         EXPECT_EQ(0, mev->getFlags() & VERIFIED_MOTION_EVENT_FLAGS);
@@ -227,7 +227,7 @@
     void expectKey(uint32_t keycode) {
         InputEvent *ev = consumeEvent();
         ASSERT_NE(ev, nullptr);
-        ASSERT_EQ(AINPUT_EVENT_TYPE_KEY, ev->getType());
+        ASSERT_EQ(InputEventType::KEY, ev->getType());
         KeyEvent *keyEvent = static_cast<KeyEvent *>(ev);
         EXPECT_EQ(AMOTION_EVENT_ACTION_DOWN, keyEvent->getAction());
         EXPECT_EQ(keycode, keyEvent->getKeyCode());
@@ -235,7 +235,7 @@
 
         ev = consumeEvent();
         ASSERT_NE(ev, nullptr);
-        ASSERT_EQ(AINPUT_EVENT_TYPE_KEY, ev->getType());
+        ASSERT_EQ(InputEventType::KEY, ev->getType());
         keyEvent = static_cast<KeyEvent *>(ev);
         EXPECT_EQ(AMOTION_EVENT_ACTION_UP, keyEvent->getAction());
         EXPECT_EQ(keycode, keyEvent->getKeyCode());
@@ -272,8 +272,6 @@
         FocusRequest request;
         request.token = mInputInfo.token;
         request.windowName = mInputInfo.name;
-        request.focusedToken = nullptr;
-        request.focusedWindowName = "";
         request.timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
         request.displayId = displayId;
         t.setFocusedWindow(request);
@@ -360,8 +358,10 @@
     void SetUp() {
         mComposerClient = new SurfaceComposerClient;
         ASSERT_EQ(NO_ERROR, mComposerClient->initCheck());
-
-        const auto display = mComposerClient->getInternalDisplayToken();
+        const auto ids = SurfaceComposerClient::getPhysicalDisplayIds();
+        ASSERT_FALSE(ids.empty());
+        // display 0 is picked for now, can extend to support all displays if needed
+        const auto display = SurfaceComposerClient::getPhysicalDisplayToken(ids.front());
         ASSERT_NE(display, nullptr);
 
         ui::DisplayMode mode;
@@ -510,6 +510,22 @@
     bgSurface->expectTap(1, 1);
 }
 
+TEST_F(InputSurfacesTest, input_respects_surface_insets_with_replaceTouchableRegionWithCrop) {
+    std::unique_ptr<InputSurface> bgSurface = makeSurface(100, 100);
+    std::unique_ptr<InputSurface> fgSurface = makeSurface(100, 100);
+    bgSurface->showAt(100, 100);
+
+    fgSurface->mInputInfo.surfaceInset = 5;
+    fgSurface->mInputInfo.replaceTouchableRegionWithCrop = true;
+    fgSurface->showAt(100, 100);
+
+    injectTap(106, 106);
+    fgSurface->expectTap(1, 1);
+
+    injectTap(101, 101);
+    bgSurface->expectTap(1, 1);
+}
+
 // Ensure a surface whose insets are cropped handles the touch offset correctly. ref:b/120413463
 TEST_F(InputSurfacesTest, input_respects_cropped_surface_insets) {
     std::unique_ptr<InputSurface> parentSurface = makeSurface(100, 100);
@@ -612,7 +628,7 @@
 
     // Expect no crash for overflow.
     injectTap(12, 24);
-    fgSurface->expectTap(6, 12);
+    bgSurface->expectTap(12, 24);
 }
 
 // Ensure we ignore transparent region when getting screen bounds when positioning input frame.
@@ -1219,32 +1235,6 @@
     surface->expectKey(AKEYCODE_V);
 }
 
-/**
- * When multiple DisplayDevices are mapped to the same layerStack, use the configuration for the
- * display that can receive input.
- */
-TEST_F(MultiDisplayTests, many_to_one_display_mapping) {
-    ui::LayerStack layerStack = ui::LayerStack::fromValue(42);
-    createDisplay(1000, 1000, false /*isSecure*/, layerStack, false /*receivesInput*/,
-                  100 /*offsetX*/, 100 /*offsetY*/);
-    createDisplay(1000, 1000, false /*isSecure*/, layerStack, true /*receivesInput*/,
-                  200 /*offsetX*/, 200 /*offsetY*/);
-    createDisplay(1000, 1000, false /*isSecure*/, layerStack, false /*receivesInput*/,
-                  300 /*offsetX*/, 300 /*offsetY*/);
-    std::unique_ptr<InputSurface> surface = makeSurface(100, 100);
-    surface->doTransaction([&](auto &t, auto &sc) { t.setLayerStack(sc, layerStack); });
-    surface->showAt(10, 10);
-
-    // Input injection happens in logical display coordinates.
-    injectTapOnDisplay(11, 11, layerStack.id);
-    // Expect that the display transform for the display that receives input was used.
-    surface->expectTapInDisplayCoordinates(211, 211);
-
-    surface->requestFocus(layerStack.id);
-    surface->assertFocusChange(true);
-    injectKeyOnDisplay(AKEYCODE_V, layerStack.id);
-}
-
 TEST_F(MultiDisplayTests, drop_input_for_secure_layer_on_nonsecure_display) {
     ui::LayerStack layerStack = ui::LayerStack::fromValue(42);
     createDisplay(1000, 1000, false /*isSecure*/, layerStack);
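
A minimal sketch of dispatching on the typed InputEventType enum that the assertions above now use in place of the AINPUT_EVENT_TYPE_* constants; the routing function is hypothetical.

    #include <input/Input.h>

    void route(const android::InputEvent& ev) {
        switch (ev.getType()) {
            case android::InputEventType::KEY:    /* handle key event */    break;
            case android::InputEventType::MOTION: /* handle motion event */ break;
            case android::InputEventType::FOCUS:  /* handle focus event */  break;
            default: break;
        }
    }
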
diff --git a/libs/gui/tests/GLTest.cpp b/libs/gui/tests/GLTest.cpp
index 73e8fbe..9024b70 100644
--- a/libs/gui/tests/GLTest.cpp
+++ b/libs/gui/tests/GLTest.cpp
@@ -31,7 +31,7 @@
 void GLTest::SetUp() {
     const ::testing::TestInfo* const testInfo =
         ::testing::UnitTest::GetInstance()->current_test_info();
-    ALOGV("Begin test: %s.%s", testInfo->test_case_name(), testInfo->name());
+    ALOGD("Begin test: %s.%s", testInfo->test_case_name(), testInfo->name());
 
     mEglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY);
     ASSERT_EQ(EGL_SUCCESS, eglGetError());
@@ -135,7 +135,7 @@
 
     const ::testing::TestInfo* const testInfo =
         ::testing::UnitTest::GetInstance()->current_test_info();
-    ALOGV("End test:   %s.%s", testInfo->test_case_name(), testInfo->name());
+    ALOGD("End test:   %s.%s", testInfo->test_case_name(), testInfo->name());
 }
 
 EGLint const* GLTest::getConfigAttribs() {
diff --git a/libs/gui/tests/IGraphicBufferProducer_test.cpp b/libs/gui/tests/IGraphicBufferProducer_test.cpp
index 3427731..e6cb89c 100644
--- a/libs/gui/tests/IGraphicBufferProducer_test.cpp
+++ b/libs/gui/tests/IGraphicBufferProducer_test.cpp
@@ -84,7 +84,7 @@
     virtual void SetUp() {
         const ::testing::TestInfo* const testInfo =
             ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("Begin test: %s.%s", testInfo->test_case_name(),
+        ALOGD("Begin test: %s.%s", testInfo->test_case_name(),
                 testInfo->name());
 
         mMC = new MockConsumer;
@@ -114,7 +114,7 @@
     virtual void TearDown() {
         const ::testing::TestInfo* const testInfo =
             ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("End test:   %s.%s", testInfo->test_case_name(),
+        ALOGD("End test:   %s.%s", testInfo->test_case_name(),
                 testInfo->name());
     }
 
diff --git a/libs/gui/tests/RegionSampling_test.cpp b/libs/gui/tests/RegionSampling_test.cpp
index c9106be..b18b544 100644
--- a/libs/gui/tests/RegionSampling_test.cpp
+++ b/libs/gui/tests/RegionSampling_test.cpp
@@ -19,14 +19,16 @@
 
 #include <android/gui/BnRegionSamplingListener.h>
 #include <binder/ProcessState.h>
+#include <gui/AidlStatusUtil.h>
 #include <gui/DisplayEventReceiver.h>
 #include <gui/ISurfaceComposer.h>
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
-#include <private/gui/ComposerService.h>
+#include <private/gui/ComposerServiceAIDL.h>
 #include <utils/Looper.h>
 
 using namespace std::chrono_literals;
+using android::gui::aidl_utils::statusTFromBinderStatus;
 
 namespace android::test {
 
@@ -242,24 +244,33 @@
 };
 
 TEST_F(RegionSamplingTest, invalidLayerHandle_doesNotCrash) {
-    sp<ISurfaceComposer> composer = ComposerService::getComposerService();
+    sp<gui::ISurfaceComposer> composer = ComposerServiceAIDL::getComposerService();
     sp<Listener> listener = new Listener();
-    const Rect sampleArea{100, 100, 200, 200};
+    gui::ARect sampleArea;
+    sampleArea.left = 100;
+    sampleArea.top = 100;
+    sampleArea.right = 200;
+    sampleArea.bottom = 200;
     // Passing in composer service as the layer handle should not crash, we'll
     // treat it as a layer that no longer exists and silently allow sampling to
     // occur.
-    status_t status = composer->addRegionSamplingListener(sampleArea,
-                                                          IInterface::asBinder(composer), listener);
-    ASSERT_EQ(NO_ERROR, status);
+    binder::Status status =
+            composer->addRegionSamplingListener(sampleArea, IInterface::asBinder(composer),
+                                                listener);
+    ASSERT_EQ(NO_ERROR, statusTFromBinderStatus(status));
     composer->removeRegionSamplingListener(listener);
 }
 
 TEST_F(RegionSamplingTest, DISABLED_CollectsLuma) {
     fill_render(rgba_green);
 
-    sp<ISurfaceComposer> composer = ComposerService::getComposerService();
+    sp<gui::ISurfaceComposer> composer = ComposerServiceAIDL::getComposerService();
     sp<Listener> listener = new Listener();
-    const Rect sampleArea{100, 100, 200, 200};
+    gui::ARect sampleArea;
+    sampleArea.left = 100;
+    sampleArea.top = 100;
+    sampleArea.right = 200;
+    sampleArea.bottom = 200;
     composer->addRegionSamplingListener(sampleArea, mTopLayer->getHandle(), listener);
 
     EXPECT_TRUE(listener->wait_event(300ms)) << "timed out waiting for luma event to be received";
@@ -271,9 +282,13 @@
 TEST_F(RegionSamplingTest, DISABLED_CollectsChangingLuma) {
     fill_render(rgba_green);
 
-    sp<ISurfaceComposer> composer = ComposerService::getComposerService();
+    sp<gui::ISurfaceComposer> composer = ComposerServiceAIDL::getComposerService();
     sp<Listener> listener = new Listener();
-    const Rect sampleArea{100, 100, 200, 200};
+    gui::ARect sampleArea;
+    sampleArea.left = 100;
+    sampleArea.top = 100;
+    sampleArea.right = 200;
+    sampleArea.bottom = 200;
     composer->addRegionSamplingListener(sampleArea, mTopLayer->getHandle(), listener);
 
     EXPECT_TRUE(listener->wait_event(300ms)) << "timed out waiting for luma event to be received";
@@ -291,13 +306,21 @@
 
 TEST_F(RegionSamplingTest, DISABLED_CollectsLumaFromTwoRegions) {
     fill_render(rgba_green);
-    sp<ISurfaceComposer> composer = ComposerService::getComposerService();
+    sp<gui::ISurfaceComposer> composer = ComposerServiceAIDL::getComposerService();
     sp<Listener> greenListener = new Listener();
-    const Rect greenSampleArea{100, 100, 200, 200};
+    gui::ARect greenSampleArea;
+    greenSampleArea.left = 100;
+    greenSampleArea.top = 100;
+    greenSampleArea.right = 200;
+    greenSampleArea.bottom = 200;
     composer->addRegionSamplingListener(greenSampleArea, mTopLayer->getHandle(), greenListener);
 
     sp<Listener> grayListener = new Listener();
-    const Rect graySampleArea{500, 100, 600, 200};
+    gui::ARect graySampleArea;
+    graySampleArea.left = 500;
+    graySampleArea.top = 100;
+    graySampleArea.right = 600;
+    graySampleArea.bottom = 200;
     composer->addRegionSamplingListener(graySampleArea, mTopLayer->getHandle(), grayListener);
 
     EXPECT_TRUE(grayListener->wait_event(300ms))
@@ -312,29 +335,49 @@
 }
 
 TEST_F(RegionSamplingTest, DISABLED_TestIfInvalidInputParameters) {
-    sp<ISurfaceComposer> composer = ComposerService::getComposerService();
+    sp<gui::ISurfaceComposer> composer = ComposerServiceAIDL::getComposerService();
     sp<Listener> listener = new Listener();
-    const Rect sampleArea{100, 100, 200, 200};
+
+    gui::ARect invalidRect;
+    invalidRect.left = Rect::INVALID_RECT.left;
+    invalidRect.top = Rect::INVALID_RECT.top;
+    invalidRect.right = Rect::INVALID_RECT.right;
+    invalidRect.bottom = Rect::INVALID_RECT.bottom;
+
+    gui::ARect sampleArea;
+    sampleArea.left = 100;
+    sampleArea.top = 100;
+    sampleArea.right = 200;
+    sampleArea.bottom = 200;
     // Invalid input sampleArea
     EXPECT_EQ(BAD_VALUE,
-              composer->addRegionSamplingListener(Rect::INVALID_RECT, mTopLayer->getHandle(),
-                                                  listener));
+              statusTFromBinderStatus(composer->addRegionSamplingListener(invalidRect,
+                                                                          mTopLayer->getHandle(),
+                                                                          listener)));
     listener->reset();
     // Invalid input binder
-    EXPECT_EQ(NO_ERROR, composer->addRegionSamplingListener(sampleArea, NULL, listener));
+    EXPECT_EQ(NO_ERROR,
+              statusTFromBinderStatus(
+                      composer->addRegionSamplingListener(sampleArea, NULL, listener)));
     // Invalid input listener
     EXPECT_EQ(BAD_VALUE,
-              composer->addRegionSamplingListener(sampleArea, mTopLayer->getHandle(), NULL));
-    EXPECT_EQ(BAD_VALUE, composer->removeRegionSamplingListener(NULL));
+              statusTFromBinderStatus(composer->addRegionSamplingListener(sampleArea,
+                                                                          mTopLayer->getHandle(),
+                                                                          NULL)));
+    EXPECT_EQ(BAD_VALUE, statusTFromBinderStatus(composer->removeRegionSamplingListener(NULL)));
     // remove the listener
     composer->removeRegionSamplingListener(listener);
 }
 
 TEST_F(RegionSamplingTest, DISABLED_TestCallbackAfterRemoveListener) {
     fill_render(rgba_green);
-    sp<ISurfaceComposer> composer = ComposerService::getComposerService();
+    sp<gui::ISurfaceComposer> composer = ComposerServiceAIDL::getComposerService();
     sp<Listener> listener = new Listener();
-    const Rect sampleArea{100, 100, 200, 200};
+    gui::ARect sampleArea;
+    sampleArea.left = 100;
+    sampleArea.top = 100;
+    sampleArea.right = 200;
+    sampleArea.bottom = 200;
     composer->addRegionSamplingListener(sampleArea, mTopLayer->getHandle(), listener);
     fill_render(rgba_green);
 
@@ -349,13 +392,18 @@
 }
 
 TEST_F(RegionSamplingTest, DISABLED_CollectsLumaFromMovingLayer) {
-    sp<ISurfaceComposer> composer = ComposerService::getComposerService();
+    sp<gui::ISurfaceComposer> composer = ComposerServiceAIDL::getComposerService();
     sp<Listener> listener = new Listener();
     Rect sampleArea{100, 100, 200, 200};
+    gui::ARect sampleAreaA;
+    sampleAreaA.left = sampleArea.left;
+    sampleAreaA.top = sampleArea.top;
+    sampleAreaA.right = sampleArea.right;
+    sampleAreaA.bottom = sampleArea.bottom;
 
     // Test: listener in (100, 100). See layer before move, no layer after move.
     fill_render(rgba_blue);
-    composer->addRegionSamplingListener(sampleArea, mTopLayer->getHandle(), listener);
+    composer->addRegionSamplingListener(sampleAreaA, mTopLayer->getHandle(), listener);
     EXPECT_TRUE(listener->wait_event(300ms)) << "timed out waiting for luma event to be received";
     EXPECT_NEAR(listener->luma(), luma_blue, error_margin);
     listener->reset();
@@ -367,7 +415,11 @@
     // Test: listener offset to (600, 600). No layer before move, see layer after move.
     fill_render(rgba_green);
     sampleArea.offsetTo(600, 600);
-    composer->addRegionSamplingListener(sampleArea, mTopLayer->getHandle(), listener);
+    sampleAreaA.left = sampleArea.left;
+    sampleAreaA.top = sampleArea.top;
+    sampleAreaA.right = sampleArea.right;
+    sampleAreaA.bottom = sampleArea.bottom;
+    composer->addRegionSamplingListener(sampleAreaA, mTopLayer->getHandle(), listener);
     EXPECT_TRUE(listener->wait_event(300ms)) << "timed out waiting for luma event to be received";
     EXPECT_NEAR(listener->luma(), luma_gray, error_margin);
     listener->reset();
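
The tests above convert Rect sample areas to gui::ARect field by field; a hypothetical helper capturing the same conversion in one place (not part of the change itself; the generated AIDL header path is assumed):

    #include <android/gui/ARect.h>
    #include <ui/Rect.h>

    static android::gui::ARect toARect(const android::Rect& rect) {
        android::gui::ARect out;
        out.left = rect.left;
        out.top = rect.top;
        out.right = rect.right;
        out.bottom = rect.bottom;
        return out;
    }
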
diff --git a/libs/gui/tests/SamplingDemo.cpp b/libs/gui/tests/SamplingDemo.cpp
index a083a22..f98437b 100644
--- a/libs/gui/tests/SamplingDemo.cpp
+++ b/libs/gui/tests/SamplingDemo.cpp
@@ -26,7 +26,7 @@
 #include <gui/ISurfaceComposer.h>
 #include <gui/SurfaceComposerClient.h>
 #include <gui/SurfaceControl.h>
-#include <private/gui/ComposerService.h>
+#include <private/gui/ComposerServiceAIDL.h>
 #include <utils/Trace.h>
 
 using namespace std::chrono_literals;
@@ -121,10 +121,22 @@
     const Rect backButtonArea{200, 1606, 248, 1654};
     sp<android::Button> backButton = new android::Button("BackButton", backButtonArea);
 
-    sp<ISurfaceComposer> composer = ComposerService::getComposerService();
-    composer->addRegionSamplingListener(homeButtonArea, homeButton->getStopLayerHandle(),
+    gui::ARect homeButtonAreaA;
+    homeButtonAreaA.left = 490;
+    homeButtonAreaA.top = 1606;
+    homeButtonAreaA.right = 590;
+    homeButtonAreaA.bottom = 1654;
+
+    gui::ARect backButtonAreaA;
+    backButtonAreaA.left = 200;
+    backButtonAreaA.top = 1606;
+    backButtonAreaA.right = 248;
+    backButtonAreaA.bottom = 1654;
+
+    sp<gui::ISurfaceComposer> composer = ComposerServiceAIDL::getComposerService();
+    composer->addRegionSamplingListener(homeButtonAreaA, homeButton->getStopLayerHandle(),
                                         homeButton);
-    composer->addRegionSamplingListener(backButtonArea, backButton->getStopLayerHandle(),
+    composer->addRegionSamplingListener(backButtonAreaA, backButton->getStopLayerHandle(),
                                         backButton);
 
     ProcessState::self()->startThreadPool();
diff --git a/libs/gui/tests/StreamSplitter_test.cpp b/libs/gui/tests/StreamSplitter_test.cpp
index b65cdda..2f14924 100644
--- a/libs/gui/tests/StreamSplitter_test.cpp
+++ b/libs/gui/tests/StreamSplitter_test.cpp
@@ -36,14 +36,14 @@
     StreamSplitterTest() {
         const ::testing::TestInfo* const testInfo =
             ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("Begin test: %s.%s", testInfo->test_case_name(),
+        ALOGD("Begin test: %s.%s", testInfo->test_case_name(),
                 testInfo->name());
     }
 
     ~StreamSplitterTest() {
         const ::testing::TestInfo* const testInfo =
             ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("End test:   %s.%s", testInfo->test_case_name(),
+        ALOGD("End test:   %s.%s", testInfo->test_case_name(),
                 testInfo->name());
     }
 };
diff --git a/libs/gui/tests/SurfaceTextureClient_test.cpp b/libs/gui/tests/SurfaceTextureClient_test.cpp
index c7458a3..82b6697 100644
--- a/libs/gui/tests/SurfaceTextureClient_test.cpp
+++ b/libs/gui/tests/SurfaceTextureClient_test.cpp
@@ -42,7 +42,7 @@
     virtual void SetUp() {
         const ::testing::TestInfo* const testInfo =
             ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("Begin test: %s.%s", testInfo->test_case_name(),
+        ALOGD("Begin test: %s.%s", testInfo->test_case_name(),
                 testInfo->name());
 
         sp<IGraphicBufferProducer> producer;
@@ -99,7 +99,7 @@
 
         const ::testing::TestInfo* const testInfo =
             ::testing::UnitTest::GetInstance()->current_test_info();
-        ALOGV("End test:   %s.%s", testInfo->test_case_name(),
+        ALOGD("End test:   %s.%s", testInfo->test_case_name(),
                 testInfo->name());
     }
 
diff --git a/libs/gui/tests/Surface_test.cpp b/libs/gui/tests/Surface_test.cpp
index cb977f0..4f4f1f5 100644
--- a/libs/gui/tests/Surface_test.cpp
+++ b/libs/gui/tests/Surface_test.cpp
@@ -24,6 +24,7 @@
 #include <android/hardware/configstore/1.0/ISurfaceFlingerConfigs.h>
 #include <binder/ProcessState.h>
 #include <configstore/Utils.h>
+#include <gui/AidlStatusUtil.h>
 #include <gui/BufferItemConsumer.h>
 #include <gui/IProducerListener.h>
 #include <gui/ISurfaceComposer.h>
@@ -212,11 +213,12 @@
 
         const sp<SyncScreenCaptureListener> captureListener = new SyncScreenCaptureListener();
         binder::Status status = sf->captureDisplay(captureArgs, captureListener);
-        if (status.transactionError() != NO_ERROR) {
-            return status.transactionError();
+        status_t err = gui::aidl_utils::statusTFromBinderStatus(status);
+        if (err != NO_ERROR) {
+            return err;
         }
         captureResults = captureListener->waitForResults();
-        return captureResults.result;
+        return fenceStatus(captureResults.fenceResult);
     }
 
     sp<Surface> mSurface;
@@ -261,7 +263,10 @@
     sp<ANativeWindow> anw(mSurface);
 
     // Verify the screenshot works with no protected buffers.
-    const sp<IBinder> display = ComposerServiceAIDL::getInstance().getInternalDisplayToken();
+    const auto ids = SurfaceComposerClient::getPhysicalDisplayIds();
+    ASSERT_FALSE(ids.empty());
+    // display 0 is picked for now, can extend to support all displays if needed
+    const sp<IBinder> display = SurfaceComposerClient::getPhysicalDisplayToken(ids.front());
     ASSERT_FALSE(display == nullptr);
 
     DisplayCaptureArgs captureArgs;
@@ -690,197 +695,17 @@
         mSupportsPresent = supportsPresent;
     }
 
-    sp<ISurfaceComposerClient> createConnection() override { return nullptr; }
-    sp<IDisplayEventConnection> createDisplayEventConnection(
-            ISurfaceComposer::VsyncSource, ISurfaceComposer::EventRegistrationFlags) override {
-        return nullptr;
-    }
-    status_t setTransactionState(const FrameTimelineInfo& /*frameTimelineInfo*/,
-                                 const Vector<ComposerState>& /*state*/,
-                                 const Vector<DisplayState>& /*displays*/, uint32_t /*flags*/,
-                                 const sp<IBinder>& /*applyToken*/,
-                                 const InputWindowCommands& /*inputWindowCommands*/,
-                                 int64_t /*desiredPresentTime*/, bool /*isAutoTimestamp*/,
-                                 const client_cache_t& /*cachedBuffer*/,
-                                 bool /*hasListenerCallbacks*/,
-                                 const std::vector<ListenerCallbacks>& /*listenerCallbacks*/,
-                                 uint64_t /*transactionId*/) override {
+    status_t setTransactionState(
+            const FrameTimelineInfo& /*frameTimelineInfo*/, Vector<ComposerState>& /*state*/,
+            const Vector<DisplayState>& /*displays*/, uint32_t /*flags*/,
+            const sp<IBinder>& /*applyToken*/, InputWindowCommands /*inputWindowCommands*/,
+            int64_t /*desiredPresentTime*/, bool /*isAutoTimestamp*/,
+            const std::vector<client_cache_t>& /*cachedBuffer*/, bool /*hasListenerCallbacks*/,
+            const std::vector<ListenerCallbacks>& /*listenerCallbacks*/, uint64_t /*transactionId*/,
+            const std::vector<uint64_t>& /*mergedTransactionIds*/) override {
         return NO_ERROR;
     }
 
-    void bootFinished() override {}
-    bool authenticateSurfaceTexture(
-            const sp<IGraphicBufferProducer>& /*surface*/) const override {
-        return false;
-    }
-
-    status_t getSupportedFrameTimestamps(std::vector<FrameEvent>* outSupported)
-            const override {
-        *outSupported = {
-                FrameEvent::REQUESTED_PRESENT,
-                FrameEvent::ACQUIRE,
-                FrameEvent::LATCH,
-                FrameEvent::FIRST_REFRESH_START,
-                FrameEvent::LAST_REFRESH_START,
-                FrameEvent::GPU_COMPOSITION_DONE,
-                FrameEvent::DEQUEUE_READY,
-                FrameEvent::RELEASE
-        };
-        if (mSupportsPresent) {
-            outSupported->push_back(
-                        FrameEvent::DISPLAY_PRESENT);
-        }
-        return NO_ERROR;
-    }
-
-    status_t getStaticDisplayInfo(const sp<IBinder>& /*display*/, ui::StaticDisplayInfo*) override {
-        return NO_ERROR;
-    }
-    status_t getDynamicDisplayInfo(const sp<IBinder>& /*display*/,
-                                   ui::DynamicDisplayInfo*) override {
-        return NO_ERROR;
-    }
-    status_t getDisplayNativePrimaries(const sp<IBinder>& /*display*/,
-            ui::DisplayPrimaries& /*primaries*/) override {
-        return NO_ERROR;
-    }
-    status_t setActiveColorMode(const sp<IBinder>& /*display*/, ColorMode /*colorMode*/) override {
-        return NO_ERROR;
-    }
-    status_t setBootDisplayMode(const sp<IBinder>& /*display*/, ui::DisplayModeId /*id*/) override {
-        return NO_ERROR;
-    }
-
-    status_t clearAnimationFrameStats() override { return NO_ERROR; }
-    status_t getAnimationFrameStats(FrameStats* /*outStats*/) const override {
-        return NO_ERROR;
-    }
-    status_t overrideHdrTypes(const sp<IBinder>& /*display*/,
-                              const std::vector<ui::Hdr>& /*hdrTypes*/) override {
-        return NO_ERROR;
-    }
-    status_t onPullAtom(const int32_t /*atomId*/, std::string* /*outData*/,
-                        bool* /*success*/) override {
-        return NO_ERROR;
-    }
-    status_t enableVSyncInjections(bool /*enable*/) override {
-        return NO_ERROR;
-    }
-    status_t injectVSync(nsecs_t /*when*/) override { return NO_ERROR; }
-    status_t getLayerDebugInfo(std::vector<LayerDebugInfo>* /*layers*/) override {
-        return NO_ERROR;
-    }
-    status_t getCompositionPreference(
-            ui::Dataspace* /*outDefaultDataspace*/, ui::PixelFormat* /*outDefaultPixelFormat*/,
-            ui::Dataspace* /*outWideColorGamutDataspace*/,
-            ui::PixelFormat* /*outWideColorGamutPixelFormat*/) const override {
-        return NO_ERROR;
-    }
-    status_t getDisplayedContentSamplingAttributes(const sp<IBinder>& /*display*/,
-                                                   ui::PixelFormat* /*outFormat*/,
-                                                   ui::Dataspace* /*outDataspace*/,
-                                                   uint8_t* /*outComponentMask*/) const override {
-        return NO_ERROR;
-    }
-    status_t setDisplayContentSamplingEnabled(const sp<IBinder>& /*display*/, bool /*enable*/,
-                                              uint8_t /*componentMask*/,
-                                              uint64_t /*maxFrames*/) override {
-        return NO_ERROR;
-    }
-    status_t getDisplayedContentSample(const sp<IBinder>& /*display*/, uint64_t /*maxFrames*/,
-                                       uint64_t /*timestamp*/,
-                                       DisplayedFrameStats* /*outStats*/) const override {
-        return NO_ERROR;
-    }
-
-    status_t getColorManagement(bool* /*outGetColorManagement*/) const override { return NO_ERROR; }
-    status_t getProtectedContentSupport(bool* /*outSupported*/) const override { return NO_ERROR; }
-
-    status_t addRegionSamplingListener(const Rect& /*samplingArea*/,
-                                       const sp<IBinder>& /*stopLayerHandle*/,
-                                       const sp<IRegionSamplingListener>& /*listener*/) override {
-        return NO_ERROR;
-    }
-    status_t removeRegionSamplingListener(
-            const sp<IRegionSamplingListener>& /*listener*/) override {
-        return NO_ERROR;
-    }
-    status_t addFpsListener(int32_t /*taskId*/, const sp<gui::IFpsListener>& /*listener*/) {
-        return NO_ERROR;
-    }
-    status_t removeFpsListener(const sp<gui::IFpsListener>& /*listener*/) { return NO_ERROR; }
-
-    status_t addTunnelModeEnabledListener(const sp<gui::ITunnelModeEnabledListener>& /*listener*/) {
-        return NO_ERROR;
-    }
-
-    status_t removeTunnelModeEnabledListener(
-            const sp<gui::ITunnelModeEnabledListener>& /*listener*/) {
-        return NO_ERROR;
-    }
-
-    status_t setDesiredDisplayModeSpecs(const sp<IBinder>& /*displayToken*/,
-                                        ui::DisplayModeId /*defaultMode*/,
-                                        bool /*allowGroupSwitching*/,
-                                        float /*primaryRefreshRateMin*/,
-                                        float /*primaryRefreshRateMax*/,
-                                        float /*appRequestRefreshRateMin*/,
-                                        float /*appRequestRefreshRateMax*/) {
-        return NO_ERROR;
-    }
-    status_t getDesiredDisplayModeSpecs(const sp<IBinder>& /*displayToken*/,
-                                        ui::DisplayModeId* /*outDefaultMode*/,
-                                        bool* /*outAllowGroupSwitching*/,
-                                        float* /*outPrimaryRefreshRateMin*/,
-                                        float* /*outPrimaryRefreshRateMax*/,
-                                        float* /*outAppRequestRefreshRateMin*/,
-                                        float* /*outAppRequestRefreshRateMax*/) override {
-        return NO_ERROR;
-    };
-
-    status_t setGlobalShadowSettings(const half4& /*ambientColor*/, const half4& /*spotColor*/,
-                                     float /*lightPosY*/, float /*lightPosZ*/,
-                                     float /*lightRadius*/) override {
-        return NO_ERROR;
-    }
-
-    status_t getDisplayDecorationSupport(
-            const sp<IBinder>& /*displayToken*/,
-            std::optional<DisplayDecorationSupport>* /*outSupport*/) const override {
-        return NO_ERROR;
-    }
-
-    status_t setFrameRate(const sp<IGraphicBufferProducer>& /*surface*/, float /*frameRate*/,
-                          int8_t /*compatibility*/, int8_t /*changeFrameRateStrategy*/) override {
-        return NO_ERROR;
-    }
-
-    status_t setFrameTimelineInfo(const sp<IGraphicBufferProducer>& /*surface*/,
-                                  const FrameTimelineInfo& /*frameTimelineInfo*/) override {
-        return NO_ERROR;
-    }
-
-    status_t addTransactionTraceListener(
-            const sp<gui::ITransactionTraceListener>& /*listener*/) override {
-        return NO_ERROR;
-    }
-
-    int getGPUContextPriority() override { return 0; };
-
-    status_t getMaxAcquiredBufferCount(int* /*buffers*/) const override { return NO_ERROR; }
-
-    status_t addWindowInfosListener(
-            const sp<gui::IWindowInfosListener>& /*windowInfosListener*/) const override {
-        return NO_ERROR;
-    }
-
-    status_t removeWindowInfosListener(
-            const sp<gui::IWindowInfosListener>& /*windowInfosListener*/) const override {
-        return NO_ERROR;
-    }
-
-    status_t setOverrideFrameRate(uid_t /*uid*/, float /*frameRate*/) override { return NO_ERROR; }
-
 protected:
     IBinder* onAsBinder() override { return nullptr; }
 
@@ -894,7 +719,23 @@
 
     void setSupportsPresent(bool supportsPresent) { mSupportsPresent = supportsPresent; }
 
+    binder::Status bootFinished() override { return binder::Status::ok(); }
+
+    binder::Status createDisplayEventConnection(
+            VsyncSource /*vsyncSource*/, EventRegistration /*eventRegistration*/,
+            const sp<IBinder>& /*layerHandle*/,
+            sp<gui::IDisplayEventConnection>* outConnection) override {
+        *outConnection = nullptr;
+        return binder::Status::ok();
+    }
+
+    binder::Status createConnection(sp<gui::ISurfaceComposerClient>* outClient) override {
+        *outClient = nullptr;
+        return binder::Status::ok();
+    }
+
     binder::Status createDisplay(const std::string& /*displayName*/, bool /*secure*/,
+                                 float /*requestedRefreshRate*/,
                                  sp<IBinder>* /*outDisplay*/) override {
         return binder::Status::ok();
     }
@@ -907,10 +748,6 @@
         return binder::Status::ok();
     }
 
-    binder::Status getPrimaryPhysicalDisplayId(int64_t* /*outDisplayId*/) override {
-        return binder::Status::ok();
-    }
-
     binder::Status getPhysicalDisplayToken(int64_t /*displayId*/,
                                            sp<IBinder>* /*outDisplay*/) override {
         return binder::Status::ok();
@@ -920,6 +757,21 @@
         return binder::Status::ok();
     }
 
+    binder::Status getSupportedFrameTimestamps(std::vector<FrameEvent>* outSupported) override {
+        *outSupported = {FrameEvent::REQUESTED_PRESENT,
+                         FrameEvent::ACQUIRE,
+                         FrameEvent::LATCH,
+                         FrameEvent::FIRST_REFRESH_START,
+                         FrameEvent::LAST_REFRESH_START,
+                         FrameEvent::GPU_COMPOSITION_DONE,
+                         FrameEvent::DEQUEUE_READY,
+                         FrameEvent::RELEASE};
+        if (mSupportsPresent) {
+            outSupported->push_back(FrameEvent::DISPLAY_PRESENT);
+        }
+        return binder::Status::ok();
+    }
+
     binder::Status getDisplayStats(const sp<IBinder>& /*display*/,
                                    gui::DisplayStatInfo* /*outStatInfo*/) override {
         return binder::Status::ok();
@@ -930,6 +782,35 @@
         return binder::Status::ok();
     }
 
+    binder::Status getStaticDisplayInfo(int64_t /*displayId*/,
+                                        gui::StaticDisplayInfo* /*outInfo*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getDynamicDisplayInfoFromId(int64_t /*displayId*/,
+                                               gui::DynamicDisplayInfo* /*outInfo*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getDynamicDisplayInfoFromToken(const sp<IBinder>& /*display*/,
+                                                  gui::DynamicDisplayInfo* /*outInfo*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getDisplayNativePrimaries(const sp<IBinder>& /*display*/,
+                                             gui::DisplayPrimaries* /*outPrimaries*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status setActiveColorMode(const sp<IBinder>& /*display*/, int /*colorMode*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status setBootDisplayMode(const sp<IBinder>& /*display*/,
+                                      int /*displayModeId*/) override {
+        return binder::Status::ok();
+    }
+
     binder::Status clearBootDisplayMode(const sp<IBinder>& /*display*/) override {
         return binder::Status::ok();
     }
@@ -938,6 +819,21 @@
         return binder::Status::ok();
     }
 
+    binder::Status getHdrConversionCapabilities(
+            std::vector<gui::HdrConversionCapability>*) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status setHdrConversionStrategy(
+            const gui::HdrConversionStrategy& /*hdrConversionStrategy*/,
+            int32_t* /*outPreferredHdrOutputType*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getHdrOutputConversionSupport(bool* /*outSupport*/) override {
+        return binder::Status::ok();
+    }
+
     binder::Status setAutoLowLatencyMode(const sp<IBinder>& /*display*/, bool /*on*/) override {
         return binder::Status::ok();
     }
@@ -960,11 +856,99 @@
         return binder::Status::ok();
     }
 
+    binder::Status clearAnimationFrameStats() override { return binder::Status::ok(); }
+
+    binder::Status getAnimationFrameStats(gui::FrameStats* /*outStats*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status overrideHdrTypes(const sp<IBinder>& /*display*/,
+                                    const std::vector<int32_t>& /*hdrTypes*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status onPullAtom(int32_t /*atomId*/, gui::PullAtomData* /*outPullData*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getLayerDebugInfo(std::vector<gui::LayerDebugInfo>* /*outLayers*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getColorManagement(bool* /*outGetColorManagement*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getCompositionPreference(gui::CompositionPreference* /*outPref*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getDisplayedContentSamplingAttributes(
+            const sp<IBinder>& /*display*/, gui::ContentSamplingAttributes* /*outAttrs*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status setDisplayContentSamplingEnabled(const sp<IBinder>& /*display*/, bool /*enable*/,
+                                                    int8_t /*componentMask*/,
+                                                    int64_t /*maxFrames*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getProtectedContentSupport(bool* /*outSupported*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getDisplayedContentSample(const sp<IBinder>& /*display*/, int64_t /*maxFrames*/,
+                                             int64_t /*timestamp*/,
+                                             gui::DisplayedFrameStats* /*outStats*/) override {
+        return binder::Status::ok();
+    }
+
     binder::Status isWideColorDisplay(const sp<IBinder>& /*token*/,
                                       bool* /*outIsWideColorDisplay*/) override {
         return binder::Status::ok();
     }
 
+    binder::Status addRegionSamplingListener(
+            const gui::ARect& /*samplingArea*/, const sp<IBinder>& /*stopLayerHandle*/,
+            const sp<gui::IRegionSamplingListener>& /*listener*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status removeRegionSamplingListener(
+            const sp<gui::IRegionSamplingListener>& /*listener*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status addFpsListener(int32_t /*taskId*/,
+                                  const sp<gui::IFpsListener>& /*listener*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status removeFpsListener(const sp<gui::IFpsListener>& /*listener*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status addTunnelModeEnabledListener(
+            const sp<gui::ITunnelModeEnabledListener>& /*listener*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status removeTunnelModeEnabledListener(
+            const sp<gui::ITunnelModeEnabledListener>& /*listener*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status setDesiredDisplayModeSpecs(const sp<IBinder>& /*displayToken*/,
+                                              const gui::DisplayModeSpecs&) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getDesiredDisplayModeSpecs(const sp<IBinder>& /*displayToken*/,
+                                              gui::DisplayModeSpecs*) override {
+        return binder::Status::ok();
+    }
+
     binder::Status getDisplayBrightnessSupport(const sp<IBinder>& /*displayToken*/,
                                                bool* /*outSupport*/) override {
         return binder::Status::ok();
@@ -989,6 +973,44 @@
 
     binder::Status notifyPowerBoost(int /*boostId*/) override { return binder::Status::ok(); }
 
+    binder::Status setGlobalShadowSettings(const gui::Color& /*ambientColor*/,
+                                           const gui::Color& /*spotColor*/, float /*lightPosY*/,
+                                           float /*lightPosZ*/, float /*lightRadius*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getDisplayDecorationSupport(
+            const sp<IBinder>& /*displayToken*/,
+            std::optional<gui::DisplayDecorationSupport>* /*outSupport*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status setOverrideFrameRate(int32_t /*uid*/, float /*frameRate*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getGpuContextPriority(int32_t* /*outPriority*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getMaxAcquiredBufferCount(int32_t* /*buffers*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status addWindowInfosListener(
+            const sp<gui::IWindowInfosListener>& /*windowInfosListener*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status removeWindowInfosListener(
+            const sp<gui::IWindowInfosListener>& /*windowInfosListener*/) override {
+        return binder::Status::ok();
+    }
+
+    binder::Status getOverlaySupport(gui::OverlayProperties* /*properties*/) override {
+        return binder::Status::ok();
+    }
+
 protected:
     IBinder* onAsBinder() override { return nullptr; }
 
@@ -1034,10 +1056,10 @@
 
 class TestSurface : public Surface {
 public:
-    TestSurface(const sp<IGraphicBufferProducer>& bufferProducer,
-            FenceToFenceTimeMap* fenceMap)
-        : Surface(bufferProducer),
-          mFakeSurfaceComposer(new FakeSurfaceComposer) {
+    TestSurface(const sp<IGraphicBufferProducer>& bufferProducer, FenceToFenceTimeMap* fenceMap)
+          : Surface(bufferProducer),
+            mFakeSurfaceComposer(new FakeSurfaceComposer),
+            mFakeSurfaceComposerAIDL(new FakeSurfaceComposerAIDL) {
         mFakeFrameEventHistory = new FakeProducerFrameEventHistory(fenceMap);
         mFrameEventHistory.reset(mFakeFrameEventHistory);
     }
@@ -1048,6 +1070,10 @@
         return mFakeSurfaceComposer;
     }
 
+    sp<gui::ISurfaceComposer> composerServiceAIDL() const override {
+        return mFakeSurfaceComposerAIDL;
+    }
+
     nsecs_t now() const override {
         return mNow;
     }
@@ -1058,6 +1084,7 @@
 
 public:
     sp<FakeSurfaceComposer> mFakeSurfaceComposer;
+    sp<FakeSurfaceComposerAIDL> mFakeSurfaceComposerAIDL;
     nsecs_t mNow = 0;
 
     // mFrameEventHistory owns the instance of FakeProducerFrameEventHistory,
@@ -1070,20 +1097,30 @@
 protected:
     struct FenceAndFenceTime {
         explicit FenceAndFenceTime(FenceToFenceTimeMap& fenceMap)
-           : mFence(new Fence),
-             mFenceTime(fenceMap.createFenceTimeForTest(mFence)) {}
-        sp<Fence> mFence { nullptr };
-        std::shared_ptr<FenceTime> mFenceTime { nullptr };
+              : mFenceTime(fenceMap.createFenceTimeForTest(mFence)) {}
+
+        sp<Fence> mFence = sp<Fence>::make();
+        std::shared_ptr<FenceTime> mFenceTime;
     };
 
+    static CompositorTiming makeCompositorTiming(nsecs_t deadline = 1'000'000'000,
+                                                 nsecs_t interval = 16'666'667,
+                                                 nsecs_t presentLatency = 50'000'000) {
+        CompositorTiming timing;
+        timing.deadline = deadline;
+        timing.interval = interval;
+        timing.presentLatency = presentLatency;
+        return timing;
+    }
+
     struct RefreshEvents {
         RefreshEvents(FenceToFenceTimeMap& fenceMap, nsecs_t refreshStart)
-          : mFenceMap(fenceMap),
-            kCompositorTiming(
-                {refreshStart, refreshStart + 1, refreshStart + 2 }),
-            kStartTime(refreshStart + 3),
-            kGpuCompositionDoneTime(refreshStart + 4),
-            kPresentTime(refreshStart + 5) {}
+              : mFenceMap(fenceMap),
+                kCompositorTiming(
+                        makeCompositorTiming(refreshStart, refreshStart + 1, refreshStart + 2)),
+                kStartTime(refreshStart + 3),
+                kGpuCompositionDoneTime(refreshStart + 4),
+                kPresentTime(refreshStart + 5) {}
 
         void signalPostCompositeFences() {
             mFenceMap.signalAllForTest(
@@ -1093,8 +1130,8 @@
 
         FenceToFenceTimeMap& mFenceMap;
 
-        FenceAndFenceTime mGpuCompositionDone { mFenceMap };
-        FenceAndFenceTime mPresent { mFenceMap };
+        FenceAndFenceTime mGpuCompositionDone{mFenceMap};
+        FenceAndFenceTime mPresent{mFenceMap};
 
         const CompositorTiming kCompositorTiming;
 
@@ -1360,11 +1397,7 @@
 // This test verifies that the frame timestamps are retrieved if explicitly
 // enabled via native_window_enable_frame_timestamps.
 TEST_F(GetFrameTimestampsTest, EnabledSimple) {
-    CompositorTiming initialCompositorTiming {
-        1000000000, // 1s deadline
-        16666667, // 16ms interval
-        50000000, // 50ms present latency
-    };
+    const CompositorTiming initialCompositorTiming = makeCompositorTiming();
     mCfeh->initializeCompositorTiming(initialCompositorTiming);
 
     enableFrameTimestamps();
@@ -1424,6 +1457,7 @@
 TEST_F(GetFrameTimestampsTest, QueryPresentSupported) {
     bool displayPresentSupported = true;
     mSurface->mFakeSurfaceComposer->setSupportsPresent(displayPresentSupported);
+    mSurface->mFakeSurfaceComposerAIDL->setSupportsPresent(displayPresentSupported);
 
     // Verify supported bits are forwarded.
     int supportsPresent = -1;
@@ -1435,6 +1469,7 @@
 TEST_F(GetFrameTimestampsTest, QueryPresentNotSupported) {
     bool displayPresentSupported = false;
     mSurface->mFakeSurfaceComposer->setSupportsPresent(displayPresentSupported);
+    mSurface->mFakeSurfaceComposerAIDL->setSupportsPresent(displayPresentSupported);
 
     // Verify supported bits are forwarded.
     int supportsPresent = -1;
@@ -1501,11 +1536,7 @@
 // This verifies the compositor timing is updated by refresh events
 // and piggybacked on a queue, dequeue, and enabling of timestamps.
 TEST_F(GetFrameTimestampsTest, CompositorTimingUpdatesBasic) {
-    CompositorTiming initialCompositorTiming {
-        1000000000, // 1s deadline
-        16666667, // 16ms interval
-        50000000, // 50ms present latency
-    };
+    const CompositorTiming initialCompositorTiming = makeCompositorTiming();
     mCfeh->initializeCompositorTiming(initialCompositorTiming);
 
     enableFrameTimestamps();
@@ -1586,11 +1617,7 @@
 // This verifies the compositor deadline properly snaps to the next
 // deadline based on the current time.
 TEST_F(GetFrameTimestampsTest, CompositorTimingDeadlineSnaps) {
-    CompositorTiming initialCompositorTiming {
-        1000000000, // 1s deadline
-        16666667, // 16ms interval
-        50000000, // 50ms present latency
-    };
+    const CompositorTiming initialCompositorTiming = makeCompositorTiming();
     mCfeh->initializeCompositorTiming(initialCompositorTiming);
 
     enableFrameTimestamps();
@@ -2012,6 +2039,7 @@
 TEST_F(GetFrameTimestampsTest, PresentUnsupportedNoSync) {
     enableFrameTimestamps();
     mSurface->mFakeSurfaceComposer->setSupportsPresent(false);
+    mSurface->mFakeSurfaceComposerAIDL->setSupportsPresent(false);
 
     // Dequeue and queue frame 1.
     const uint64_t fId1 = getNextFrameId();
diff --git a/libs/gui/tests/VsyncEventData_test.cpp b/libs/gui/tests/VsyncEventData_test.cpp
index f114522..a2138f2 100644
--- a/libs/gui/tests/VsyncEventData_test.cpp
+++ b/libs/gui/tests/VsyncEventData_test.cpp
@@ -36,6 +36,7 @@
     FrameTimeline timeline1 = FrameTimeline{4, 5, 6};
     data.vsync.frameTimelines[0] = timeline0;
     data.vsync.frameTimelines[1] = timeline1;
+    data.vsync.frameTimelinesLength = 2;
 
     Parcel p;
     data.writeToParcel(&p);
@@ -45,7 +46,8 @@
     data2.readFromParcel(&p);
     ASSERT_EQ(data.vsync.frameInterval, data2.vsync.frameInterval);
     ASSERT_EQ(data.vsync.preferredFrameTimelineIndex, data2.vsync.preferredFrameTimelineIndex);
-    for (int i = 0; i < VsyncEventData::kFrameTimelinesLength; i++) {
+    ASSERT_EQ(data.vsync.frameTimelinesLength, data2.vsync.frameTimelinesLength);
+    for (int i = 0; i < VsyncEventData::kFrameTimelinesCapacity; i++) {
         ASSERT_EQ(data.vsync.frameTimelines[i].vsyncId, data2.vsync.frameTimelines[i].vsyncId);
         ASSERT_EQ(data.vsync.frameTimelines[i].deadlineTimestamp,
                   data2.vsync.frameTimelines[i].deadlineTimestamp);
diff --git a/libs/gui/tests/WindowInfo_test.cpp b/libs/gui/tests/WindowInfo_test.cpp
index 99658cc..11b87ef 100644
--- a/libs/gui/tests/WindowInfo_test.cpp
+++ b/libs/gui/tests/WindowInfo_test.cpp
@@ -71,7 +71,7 @@
     i.applicationInfo.name = "ApplicationFooBar";
     i.applicationInfo.token = new BBinder();
     i.applicationInfo.dispatchingTimeoutMillis = 0x12345678ABCD;
-    i.isClone = true;
+    i.focusTransferTarget = new BBinder();
 
     Parcel p;
     i.writeToParcel(&p);
@@ -102,7 +102,7 @@
     ASSERT_EQ(i.replaceTouchableRegionWithCrop, i2.replaceTouchableRegionWithCrop);
     ASSERT_EQ(i.touchableRegionCropHandle, i2.touchableRegionCropHandle);
     ASSERT_EQ(i.applicationInfo, i2.applicationInfo);
-    ASSERT_EQ(i.isClone, i2.isClone);
+    ASSERT_EQ(i.focusTransferTarget, i2.focusTransferTarget);
 }
 
 TEST(InputApplicationInfo, Parcelling) {
diff --git a/libs/input/Android.bp b/libs/input/Android.bp
index b2fec79..869458c 100644
--- a/libs/input/Android.bp
+++ b/libs/input/Android.bp
@@ -26,7 +26,6 @@
 filegroup {
     name: "inputconstants_aidl",
     srcs: [
-        "android/os/BlockUntrustedTouchesMode.aidl",
         "android/os/IInputConstants.aidl",
         "android/os/InputEventInjectionResult.aidl",
         "android/os/InputEventInjectionSync.aidl",
@@ -42,34 +41,53 @@
         "-Wall",
         "-Wextra",
         "-Werror",
+        "-Wno-unused-parameter",
     ],
     srcs: [
         "Input.cpp",
         "InputDevice.cpp",
         "InputEventLabels.cpp",
+        "InputVerifier.cpp",
         "Keyboard.cpp",
         "KeyCharacterMap.cpp",
         "KeyLayoutMap.cpp",
+        "MotionPredictor.cpp",
         "PrintTools.cpp",
         "PropertyMap.cpp",
+        "TfLiteMotionPredictor.cpp",
         "TouchVideoFrame.cpp",
         "VelocityControl.cpp",
         "VelocityTracker.cpp",
+        "VirtualInputDevice.cpp",
         "VirtualKeyMap.cpp",
     ],
 
-    header_libs: ["jni_headers"],
+    header_libs: [
+        "flatbuffer_headers",
+        "jni_headers",
+        "tensorflow_headers",
+    ],
     export_header_lib_headers: ["jni_headers"],
 
+    generated_headers: [
+        "toolbox_input_labels",
+    ],
+
     shared_libs: [
         "libbase",
-        "liblog",
         "libcutils",
+        "liblog",
+        "libPlatformProperties",
         "libvintf",
     ],
 
+    ldflags: [
+        "-Wl,--exclude-libs=libtflite_static.a",
+    ],
+
     static_libs: [
         "libui-types",
+        "libtflite_static",
     ],
 
     export_static_lib_headers: [
@@ -89,7 +107,6 @@
             shared_libs: [
                 "libutils",
                 "libbinder",
-                "libui",
             ],
 
             static_libs: [
@@ -103,12 +120,18 @@
             sanitize: {
                 misc_undefined: ["integer"],
             },
+
+            required: [
+                "motion_predictor_model_prebuilt",
+            ],
         },
         host: {
             shared: {
                 enabled: false,
             },
             include_dirs: [
+                "bionic/libc/kernel/android/uapi/",
+                "bionic/libc/kernel/uapi",
                 "frameworks/native/libs/arect/include",
             ],
         },
@@ -144,6 +167,7 @@
 
 cc_defaults {
     name: "libinput_fuzz_defaults",
+    cpp_std: "c++20",
     host_supported: true,
     shared_libs: [
         "libutils",
diff --git a/libs/input/Input.cpp b/libs/input/Input.cpp
index 2581668..00925ba 100644
--- a/libs/input/Input.cpp
+++ b/libs/input/Input.cpp
@@ -21,7 +21,9 @@
 #include <cutils/compiler.h>
 #include <inttypes.h>
 #include <string.h>
+#include <optional>
 
+#include <android-base/file.h>
 #include <android-base/logging.h>
 #include <android-base/stringprintf.h>
 #include <cutils/compiler.h>
@@ -34,7 +36,7 @@
 #ifdef __linux__
 #include <binder/Parcel.h>
 #endif
-#ifdef __ANDROID__
+#if defined(__ANDROID__)
 #include <sys/random.h>
 #endif
 
@@ -44,25 +46,6 @@
 
 namespace {
 
-float transformAngle(const ui::Transform& transform, float angleRadians) {
-    // Construct and transform a vector oriented at the specified clockwise angle from vertical.
-    // Coordinate system: down is increasing Y, right is increasing X.
-    float x = sinf(angleRadians);
-    float y = -cosf(angleRadians);
-    vec2 transformedPoint = transform.transform(x, y);
-
-    // Determine how the origin is transformed by the matrix so that we
-    // can transform orientation vectors.
-    const vec2 origin = transform.transform(0, 0);
-
-    transformedPoint.x -= origin.x;
-    transformedPoint.y -= origin.y;
-
-    // Derive the transformed vector's clockwise angle from vertical.
-    // The return value of atan2f is in range [-pi, pi] which conforms to the orientation API.
-    return atan2f(transformedPoint.x, -transformedPoint.y);
-}
-
 bool shouldDisregardTransformation(uint32_t source) {
     // Do not apply any transformations to axes from joysticks, touchpads, or relative mice.
     return isFromSource(source, AINPUT_SOURCE_CLASS_JOYSTICK) ||
@@ -87,38 +70,41 @@
             return "AMBIGUOUS_GESTURE";
         case MotionClassification::DEEP_PRESS:
             return "DEEP_PRESS";
-    }
-}
-
-const char* motionToolTypeToString(int32_t toolType) {
-    switch (toolType) {
-        case AMOTION_EVENT_TOOL_TYPE_UNKNOWN:
-            return "UNKNOWN";
-        case AMOTION_EVENT_TOOL_TYPE_FINGER:
-            return "FINGER";
-        case AMOTION_EVENT_TOOL_TYPE_STYLUS:
-            return "STYLUS";
-        case AMOTION_EVENT_TOOL_TYPE_MOUSE:
-            return "MOUSE";
-        case AMOTION_EVENT_TOOL_TYPE_ERASER:
-            return "ERASER";
-        case AMOTION_EVENT_TOOL_TYPE_PALM:
-            return "PALM";
-        default:
-            return "INVALID";
+        case MotionClassification::TWO_FINGER_SWIPE:
+            return "TWO_FINGER_SWIPE";
+        case MotionClassification::MULTI_FINGER_SWIPE:
+            return "MULTI_FINGER_SWIPE";
+        case MotionClassification::PINCH:
+            return "PINCH";
     }
 }
 
 // --- IdGenerator ---
+#if defined(__ANDROID__)
+[[maybe_unused]]
+#endif
+static status_t
+getRandomBytes(uint8_t* data, size_t size) {
+    int ret = TEMP_FAILURE_RETRY(open("/dev/urandom", O_RDONLY | O_CLOEXEC | O_NOFOLLOW));
+    if (ret == -1) {
+        return -errno;
+    }
+
+    base::unique_fd fd(ret);
+    if (!base::ReadFully(fd, data, size)) {
+        return -errno;
+    }
+    return OK;
+}
+
 IdGenerator::IdGenerator(Source source) : mSource(source) {}
 
 int32_t IdGenerator::nextId() const {
     constexpr uint32_t SEQUENCE_NUMBER_MASK = ~SOURCE_MASK;
     int32_t id = 0;
 
-// Avoid building against syscall getrandom(2) on host, which will fail build on Mac. Host doesn't
-// use sequence number so just always return mSource.
-#ifdef __ANDROID__
+#if defined(__ANDROID__)
+    // On device, prefer 'getrandom' to '/dev/urandom' because it's faster.
     constexpr size_t BUF_LEN = sizeof(id);
     size_t totalBytes = 0;
     while (totalBytes < BUF_LEN) {
@@ -130,8 +116,17 @@
         }
         totalBytes += bytes;
     }
+#else
+#if defined(__linux__)
+    // On host, <sys/random.h> / GRND_NONBLOCK is not available
+    while (true) {
+        status_t result = getRandomBytes(reinterpret_cast<uint8_t*>(&id), sizeof(id));
+        if (result == OK) {
+            break;
+        }
+    }
+#endif // __linux__
 #endif // __ANDROID__
-
     return (id & SEQUENCE_NUMBER_MASK) | static_cast<int32_t>(mSource);
 }
 
@@ -156,28 +151,23 @@
     return roundTransformedCoords(transformedXy - transformedOrigin);
 }
 
-const char* inputEventTypeToString(int32_t type) {
-    switch (type) {
-        case AINPUT_EVENT_TYPE_KEY: {
-            return "KEY";
-        }
-        case AINPUT_EVENT_TYPE_MOTION: {
-            return "MOTION";
-        }
-        case AINPUT_EVENT_TYPE_FOCUS: {
-            return "FOCUS";
-        }
-        case AINPUT_EVENT_TYPE_CAPTURE: {
-            return "CAPTURE";
-        }
-        case AINPUT_EVENT_TYPE_DRAG: {
-            return "DRAG";
-        }
-        case AINPUT_EVENT_TYPE_TOUCH_MODE: {
-            return "TOUCH_MODE";
-        }
-    }
-    return "UNKNOWN";
+float transformAngle(const ui::Transform& transform, float angleRadians) {
+    // Construct and transform a vector oriented at the specified clockwise angle from vertical.
+    // Coordinate system: down is increasing Y, right is increasing X.
+    float x = sinf(angleRadians);
+    float y = -cosf(angleRadians);
+    vec2 transformedPoint = transform.transform(x, y);
+
+    // Determine how the origin is transformed by the matrix so that we
+    // can transform orientation vectors.
+    const vec2 origin = transform.transform(0, 0);
+
+    transformedPoint.x -= origin.x;
+    transformedPoint.y -= origin.y;
+
+    // Derive the transformed vector's clockwise angle from vertical.
+    // The return value of atan2f is in range [-pi, pi] which conforms to the orientation API.
+    return atan2f(transformedPoint.x, -transformedPoint.y);
 }
 
 std::string inputEventSourceToString(int32_t source) {
@@ -223,6 +213,10 @@
     return (source & test) == test;
 }
 
+bool isStylusToolType(ToolType toolType) {
+    return toolType == ToolType::STYLUS || toolType == ToolType::ERASER;
+}
+
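
isStylusToolType() (added above) gives callers one check covering both stylus-like tool types. A usage sketch, assuming a MotionEvent and pointer index supplied by the caller:

// Sketch: treat ERASER the same as STYLUS when routing pointer-specific logic.
if (isStylusToolType(event.getToolType(pointerIndex))) {
    // stylus or eraser: take the stylus-specific path
}
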
 VerifiedKeyEvent verifiedKeyEventFromKeyEvent(const KeyEvent& event) {
     return {{VerifiedInputEvent::Type::KEY, event.getDeviceId(), event.getEventTime(),
              event.getSource(), event.getDisplayId()},
@@ -269,13 +263,44 @@
     return idGen.nextId();
 }
 
+std::ostream& operator<<(std::ostream& out, const InputEvent& event) {
+    switch (event.getType()) {
+        case InputEventType::KEY: {
+            const KeyEvent& keyEvent = static_cast<const KeyEvent&>(event);
+            out << keyEvent;
+            return out;
+        }
+        case InputEventType::MOTION: {
+            const MotionEvent& motionEvent = static_cast<const MotionEvent&>(event);
+            out << motionEvent;
+            return out;
+        }
+        case InputEventType::FOCUS: {
+            out << "FocusEvent";
+            return out;
+        }
+        case InputEventType::CAPTURE: {
+            out << "CaptureEvent";
+            return out;
+        }
+        case InputEventType::DRAG: {
+            out << "DragEvent";
+            return out;
+        }
+        case InputEventType::TOUCH_MODE: {
+            out << "TouchModeEvent";
+            return out;
+        }
+    }
+}
+
 // --- KeyEvent ---
 
 const char* KeyEvent::getLabel(int32_t keyCode) {
     return InputEventLookup::getLabelByKeyCode(keyCode);
 }
 
-int32_t KeyEvent::getKeyCodeFromLabel(const char* label) {
+std::optional<int> KeyEvent::getKeyCodeFromLabel(const char* label) {
     return InputEventLookup::getKeyCodeByLabel(label);
 }
 
@@ -319,6 +344,28 @@
     return "UNKNOWN";
 }
 
+std::ostream& operator<<(std::ostream& out, const KeyEvent& event) {
+    out << "KeyEvent { action=" << KeyEvent::actionToString(event.getAction());
+
+    out << ", keycode=" << event.getKeyCode() << "(" << KeyEvent::getLabel(event.getKeyCode())
+        << ")";
+
+    if (event.getMetaState() != 0) {
+        out << ", metaState=" << event.getMetaState();
+    }
+
+    out << ", eventTime=" << event.getEventTime();
+    out << ", downTime=" << event.getDownTime();
+    out << ", flags=" << std::hex << event.getFlags() << std::dec;
+    out << ", repeatCount=" << event.getRepeatCount();
+    out << ", deviceId=" << event.getDeviceId();
+    out << ", source=" << inputEventSourceToString(event.getSource());
+    out << ", displayId=" << event.getDisplayId();
+    out << ", eventId=" << event.getId();
+    out << "}";
+    return out;
+}
+
 // --- PointerCoords ---
 
 float PointerCoords::getAxisValue(int32_t axis) const {
@@ -391,6 +438,8 @@
     for (uint32_t i = 0; i < count; i++) {
         values[i] = parcel->readFloat();
     }
+
+    isResampled = parcel->readBool();
     return OK;
 }
 
@@ -401,6 +450,8 @@
     for (uint32_t i = 0; i < count; i++) {
         parcel->writeFloat(values[i]);
     }
+
+    parcel->writeBool(isResampled);
     return OK;
 }
 #endif
@@ -420,15 +471,10 @@
             return false;
         }
     }
-    return true;
-}
-
-void PointerCoords::copyFrom(const PointerCoords& other) {
-    bits = other.bits;
-    uint32_t count = BitSet64::count(bits);
-    for (uint32_t i = 0; i < count; i++) {
-        values[i] = other.values[i];
+    if (isResampled != other.isResampled) {
+        return false;
     }
+    return true;
 }
 
 void PointerCoords::transform(const ui::Transform& transform) {
@@ -541,21 +587,21 @@
                                 &pointerCoords[getPointerCount()]);
 }
 
-int MotionEvent::getSurfaceRotation() const {
+std::optional<ui::Rotation> MotionEvent::getSurfaceRotation() const {
     // The surface rotation is the rotation from the window's coordinate space to that of the
     // display. Since the event's transform takes display space coordinates to window space, the
     // returned surface rotation is the inverse of the rotation for the surface.
     switch (mTransform.getOrientation()) {
         case ui::Transform::ROT_0:
-            return DISPLAY_ORIENTATION_0;
+            return ui::ROTATION_0;
         case ui::Transform::ROT_90:
-            return DISPLAY_ORIENTATION_270;
+            return ui::ROTATION_270;
         case ui::Transform::ROT_180:
-            return DISPLAY_ORIENTATION_180;
+            return ui::ROTATION_180;
         case ui::Transform::ROT_270:
-            return DISPLAY_ORIENTATION_90;
+            return ui::ROTATION_90;
         default:
-            return -1;
+            return std::nullopt;
     }
 }
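
getSurfaceRotation() now returns std::optional<ui::Rotation> instead of an int with a -1 sentinel, so callers must unwrap the value. A sketch, assuming ui::Rotation's enumerators map to 0/90/180/270 as in ui/Rotation.h:

// Sketch: handle the optional result instead of comparing against -1.
if (const std::optional<ui::Rotation> rotation = event.getSurfaceRotation()) {
    ALOGD("surface rotation: %d degrees", static_cast<int>(*rotation) * 90);
} else {
    ALOGD("transform is not a pure rotation");
}
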
 
@@ -752,7 +798,7 @@
         mPointerProperties.push_back({});
         PointerProperties& properties = mPointerProperties.back();
         properties.id = parcel->readInt32();
-        properties.toolType = parcel->readInt32();
+        properties.toolType = static_cast<ToolType>(parcel->readInt32());
     }
 
     while (sampleCount > 0) {
@@ -808,7 +854,7 @@
     for (size_t i = 0; i < pointerCount; i++) {
         const PointerProperties& properties = mPointerProperties[i];
         parcel->writeInt32(properties.id);
-        parcel->writeInt32(properties.toolType);
+        parcel->writeInt32(static_cast<int32_t>(properties.toolType));
     }
 
     const PointerCoords* pc = mSamplePointerCoords.data();
@@ -846,7 +892,7 @@
     return InputEventLookup::getAxisLabel(axis);
 }
 
-int32_t MotionEvent::getAxisFromLabel(const char* label) {
+std::optional<int> MotionEvent::getAxisFromLabel(const char* label) {
     return InputEventLookup::getAxisByLabel(label);
 }
 
@@ -972,9 +1018,9 @@
             out << ", x[" << i << "]=" << x;
             out << ", y[" << i << "]=" << y;
         }
-        int toolType = event.getToolType(i);
-        if (toolType != AMOTION_EVENT_TOOL_TYPE_FINGER) {
-            out << ", toolType[" << i << "]=" << toolType;
+        ToolType toolType = event.getToolType(i);
+        if (toolType != ToolType::FINGER) {
+            out << ", toolType[" << i << "]=" << ftl::enum_string(toolType);
         }
     }
     if (event.getButtonState() != 0) {
@@ -1126,44 +1172,51 @@
 
 void PooledInputEventFactory::recycle(InputEvent* event) {
     switch (event->getType()) {
-    case AINPUT_EVENT_TYPE_KEY:
-        if (mKeyEventPool.size() < mMaxPoolSize) {
-            mKeyEventPool.push(std::unique_ptr<KeyEvent>(static_cast<KeyEvent*>(event)));
-            return;
+        case InputEventType::KEY: {
+            if (mKeyEventPool.size() < mMaxPoolSize) {
+                mKeyEventPool.push(std::unique_ptr<KeyEvent>(static_cast<KeyEvent*>(event)));
+                return;
+            }
+            break;
         }
-        break;
-    case AINPUT_EVENT_TYPE_MOTION:
-        if (mMotionEventPool.size() < mMaxPoolSize) {
-            mMotionEventPool.push(std::unique_ptr<MotionEvent>(static_cast<MotionEvent*>(event)));
-            return;
+        case InputEventType::MOTION: {
+            if (mMotionEventPool.size() < mMaxPoolSize) {
+                mMotionEventPool.push(
+                        std::unique_ptr<MotionEvent>(static_cast<MotionEvent*>(event)));
+                return;
+            }
+            break;
         }
-        break;
-    case AINPUT_EVENT_TYPE_FOCUS:
-        if (mFocusEventPool.size() < mMaxPoolSize) {
-            mFocusEventPool.push(std::unique_ptr<FocusEvent>(static_cast<FocusEvent*>(event)));
-            return;
+        case InputEventType::FOCUS: {
+            if (mFocusEventPool.size() < mMaxPoolSize) {
+                mFocusEventPool.push(std::unique_ptr<FocusEvent>(static_cast<FocusEvent*>(event)));
+                return;
+            }
+            break;
         }
-        break;
-    case AINPUT_EVENT_TYPE_CAPTURE:
-        if (mCaptureEventPool.size() < mMaxPoolSize) {
-            mCaptureEventPool.push(
-                    std::unique_ptr<CaptureEvent>(static_cast<CaptureEvent*>(event)));
-            return;
+        case InputEventType::CAPTURE: {
+            if (mCaptureEventPool.size() < mMaxPoolSize) {
+                mCaptureEventPool.push(
+                        std::unique_ptr<CaptureEvent>(static_cast<CaptureEvent*>(event)));
+                return;
+            }
+            break;
         }
-        break;
-    case AINPUT_EVENT_TYPE_DRAG:
-        if (mDragEventPool.size() < mMaxPoolSize) {
-            mDragEventPool.push(std::unique_ptr<DragEvent>(static_cast<DragEvent*>(event)));
-            return;
+        case InputEventType::DRAG: {
+            if (mDragEventPool.size() < mMaxPoolSize) {
+                mDragEventPool.push(std::unique_ptr<DragEvent>(static_cast<DragEvent*>(event)));
+                return;
+            }
+            break;
         }
-        break;
-    case AINPUT_EVENT_TYPE_TOUCH_MODE:
-        if (mTouchModeEventPool.size() < mMaxPoolSize) {
-            mTouchModeEventPool.push(
-                    std::unique_ptr<TouchModeEvent>(static_cast<TouchModeEvent*>(event)));
-            return;
+        case InputEventType::TOUCH_MODE: {
+            if (mTouchModeEventPool.size() < mMaxPoolSize) {
+                mTouchModeEventPool.push(
+                        std::unique_ptr<TouchModeEvent>(static_cast<TouchModeEvent*>(event)));
+                return;
+            }
+            break;
         }
-        break;
     }
     delete event;
 }
diff --git a/libs/input/InputDevice.cpp b/libs/input/InputDevice.cpp
index a908969..9c7c0c1 100644
--- a/libs/input/InputDevice.cpp
+++ b/libs/input/InputDevice.cpp
@@ -22,6 +22,7 @@
 
 #include <android-base/stringprintf.h>
 #include <ftl/enum.h>
+#include <gui/constants.h>
 #include <input/InputDevice.h>
 #include <input/InputEventLabels.h>
 
@@ -166,7 +167,7 @@
 // --- InputDeviceInfo ---
 
 InputDeviceInfo::InputDeviceInfo() {
-    initialize(-1, 0, -1, InputDeviceIdentifier(), "", false, false);
+    initialize(-1, 0, -1, InputDeviceIdentifier(), "", false, false, ADISPLAY_ID_NONE);
 }
 
 InputDeviceInfo::InputDeviceInfo(const InputDeviceInfo& other)
@@ -177,9 +178,12 @@
         mAlias(other.mAlias),
         mIsExternal(other.mIsExternal),
         mHasMic(other.mHasMic),
+        mKeyboardLayoutInfo(other.mKeyboardLayoutInfo),
         mSources(other.mSources),
         mKeyboardType(other.mKeyboardType),
         mKeyCharacterMap(other.mKeyCharacterMap),
+        mUsiVersion(other.mUsiVersion),
+        mAssociatedDisplayId(other.mAssociatedDisplayId),
         mHasVibrator(other.mHasVibrator),
         mHasBattery(other.mHasBattery),
         mHasButtonUnderPad(other.mHasButtonUnderPad),
@@ -192,8 +196,8 @@
 }
 
 void InputDeviceInfo::initialize(int32_t id, int32_t generation, int32_t controllerNumber,
-        const InputDeviceIdentifier& identifier, const std::string& alias, bool isExternal,
-        bool hasMic) {
+                                 const InputDeviceIdentifier& identifier, const std::string& alias,
+                                 bool isExternal, bool hasMic, int32_t associatedDisplayId) {
     mId = id;
     mGeneration = generation;
     mControllerNumber = controllerNumber;
@@ -203,10 +207,12 @@
     mHasMic = hasMic;
     mSources = 0;
     mKeyboardType = AINPUT_KEYBOARD_TYPE_NONE;
+    mAssociatedDisplayId = associatedDisplayId;
     mHasVibrator = false;
     mHasBattery = false;
     mHasButtonUnderPad = false;
     mHasSensor = false;
+    mUsiVersion.reset();
     mMotionRanges.clear();
     mSensors.clear();
     mLights.clear();
@@ -265,6 +271,10 @@
     mKeyboardType = std::max(mKeyboardType, keyboardType);
 }
 
+void InputDeviceInfo::setKeyboardLayoutInfo(KeyboardLayoutInfo layoutInfo) {
+    mKeyboardLayoutInfo = std::move(layoutInfo);
+}
+
 std::vector<InputDeviceSensorInfo> InputDeviceInfo::getSensors() {
     std::vector<InputDeviceSensorInfo> infos;
     infos.reserve(mSensors.size());
diff --git a/libs/input/InputEventLabels.cpp b/libs/input/InputEventLabels.cpp
index 2d768ce..f99a7d6 100644
--- a/libs/input/InputEventLabels.cpp
+++ b/libs/input/InputEventLabels.cpp
@@ -16,6 +16,9 @@
 
 #include <input/InputEventLabels.h>
 
+#include <linux/input-event-codes.h>
+#include <linux/input.h>
+
 #define DEFINE_KEYCODE(key) { #key, AKEYCODE_##key }
 #define DEFINE_AXIS(axis) { #axis, AMOTION_EVENT_AXIS_##axis }
 #define DEFINE_LED(led) { #led, ALED_##led }
@@ -23,6 +26,8 @@
 
 namespace android {
 
+// clang-format off
+
 // NOTE: If you add a new keycode here you must also add it to several other files.
 //       Refer to frameworks/base/core/java/android/view/KeyEvent.java for the full list.
 #define KEYCODES_SEQUENCE \
@@ -330,7 +335,19 @@
     DEFINE_KEYCODE(DEMO_APP_1), \
     DEFINE_KEYCODE(DEMO_APP_2), \
     DEFINE_KEYCODE(DEMO_APP_3), \
-    DEFINE_KEYCODE(DEMO_APP_4)
+    DEFINE_KEYCODE(DEMO_APP_4), \
+    DEFINE_KEYCODE(KEYBOARD_BACKLIGHT_DOWN), \
+    DEFINE_KEYCODE(KEYBOARD_BACKLIGHT_UP), \
+    DEFINE_KEYCODE(KEYBOARD_BACKLIGHT_TOGGLE), \
+    DEFINE_KEYCODE(STYLUS_BUTTON_PRIMARY), \
+    DEFINE_KEYCODE(STYLUS_BUTTON_SECONDARY), \
+    DEFINE_KEYCODE(STYLUS_BUTTON_TERTIARY), \
+    DEFINE_KEYCODE(STYLUS_BUTTON_TAIL), \
+    DEFINE_KEYCODE(RECENT_APPS), \
+    DEFINE_KEYCODE(MACRO_1), \
+    DEFINE_KEYCODE(MACRO_2), \
+    DEFINE_KEYCODE(MACRO_3), \
+    DEFINE_KEYCODE(MACRO_4)
 
 // NOTE: If you add a new axis here you must also add it to several other files.
 //       Refer to frameworks/base/core/java/android/view/MotionEvent.java for the full list.
@@ -382,8 +399,12 @@
     DEFINE_AXIS(GENERIC_13), \
     DEFINE_AXIS(GENERIC_14), \
     DEFINE_AXIS(GENERIC_15), \
-    DEFINE_AXIS(GENERIC_16)
-
+    DEFINE_AXIS(GENERIC_16), \
+    DEFINE_AXIS(GESTURE_X_OFFSET), \
+    DEFINE_AXIS(GESTURE_Y_OFFSET), \
+    DEFINE_AXIS(GESTURE_SCROLL_X_DISTANCE), \
+    DEFINE_AXIS(GESTURE_SCROLL_Y_DISTANCE), \
+    DEFINE_AXIS(GESTURE_PINCH_SCALE_FACTOR)
 
 // NOTE: If you add new LEDs here, you must also add them to Input.h
 #define LEDS_SEQUENCE \
@@ -409,6 +430,8 @@
     DEFINE_FLAG(GESTURE), \
     DEFINE_FLAG(WAKE)
 
+// clang-format on
+
 // --- InputEventLookup ---
 const std::unordered_map<std::string, int> InputEventLookup::KEYCODES = {KEYCODES_SEQUENCE};
 
@@ -422,11 +445,11 @@
 
 const std::unordered_map<std::string, int> InputEventLookup::FLAGS = {FLAGS_SEQUENCE};
 
-int InputEventLookup::lookupValueByLabel(const std::unordered_map<std::string, int>& map,
-                                         const char* literal) {
+std::optional<int> InputEventLookup::lookupValueByLabel(
+        const std::unordered_map<std::string, int>& map, const char* literal) {
     std::string str(literal);
     auto it = map.find(str);
-    return it != map.end() ? it->second : 0;
+    return it != map.end() ? std::make_optional(it->second) : std::nullopt;
 }
 
 const char* InputEventLookup::lookupLabelByValue(const std::vector<InputEventLabel>& vec,
@@ -437,8 +460,8 @@
     return nullptr;
 }
 
-int32_t InputEventLookup::getKeyCodeByLabel(const char* label) {
-    return int32_t(lookupValueByLabel(KEYCODES, label));
+std::optional<int> InputEventLookup::getKeyCodeByLabel(const char* label) {
+    return lookupValueByLabel(KEYCODES, label);
 }
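
The lookup helpers now return std::optional<int>, so a missing label is distinguishable from a valid code of 0. A caller-side sketch (the "ENTER" label is illustrative; labels use the unprefixed names from KEYCODES_SEQUENCE):

// Sketch: an unknown label is now std::nullopt rather than 0.
if (const std::optional<int> keyCode = InputEventLookup::getKeyCodeByLabel("ENTER")) {
    ALOGD("ENTER -> keycode %d", *keyCode);
} else {
    ALOGW("unknown key label");
}
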
 
 const char* InputEventLookup::getLabelByKeyCode(int32_t keyCode) {
@@ -448,20 +471,101 @@
     return nullptr;
 }
 
-uint32_t InputEventLookup::getKeyFlagByLabel(const char* label) {
-    return uint32_t(lookupValueByLabel(FLAGS, label));
+std::optional<int> InputEventLookup::getKeyFlagByLabel(const char* label) {
+    return lookupValueByLabel(FLAGS, label);
 }
 
-int32_t InputEventLookup::getAxisByLabel(const char* label) {
-    return int32_t(lookupValueByLabel(AXES, label));
+std::optional<int> InputEventLookup::getAxisByLabel(const char* label) {
+    return lookupValueByLabel(AXES, label);
 }
 
 const char* InputEventLookup::getAxisLabel(int32_t axisId) {
     return lookupLabelByValue(AXES_NAMES, axisId);
 }
 
-int32_t InputEventLookup::getLedByLabel(const char* label) {
-    return int32_t(lookupValueByLabel(LEDS, label));
+std::optional<int> InputEventLookup::getLedByLabel(const char* label) {
+    return lookupValueByLabel(LEDS, label);
+}
+
+namespace {
+
+struct label {
+    const char* name;
+    int value;
+};
+
+#define LABEL(constant) \
+    { #constant, constant }
+#define LABEL_END \
+    { nullptr, -1 }
+
+static struct label ev_key_value_labels[] = {
+        {"UP", 0},
+        {"DOWN", 1},
+        {"REPEAT", 2},
+        LABEL_END,
+};
+
+#include "input.h-labels.h"
+
+#undef LABEL
+#undef LABEL_END
+
+std::string getLabel(const label* labels, int value) {
+    if (labels == nullptr) return std::to_string(value);
+    while (labels->name != nullptr && value != labels->value) {
+        labels++;
+    }
+    return labels->name != nullptr ? labels->name : std::to_string(value);
+}
+
+const label* getCodeLabelsForType(int32_t type) {
+    switch (type) {
+        case EV_SYN:
+            return syn_labels;
+        case EV_KEY:
+            return key_labels;
+        case EV_REL:
+            return rel_labels;
+        case EV_ABS:
+            return abs_labels;
+        case EV_SW:
+            return sw_labels;
+        case EV_MSC:
+            return msc_labels;
+        case EV_LED:
+            return led_labels;
+        case EV_REP:
+            return rep_labels;
+        case EV_SND:
+            return snd_labels;
+        case EV_FF:
+            return ff_labels;
+        case EV_FF_STATUS:
+            return ff_status_labels;
+        default:
+            return nullptr;
+    }
+}
+
+const label* getValueLabelsForTypeAndCode(int32_t type, int32_t code) {
+    if (type == EV_KEY) {
+        return ev_key_value_labels;
+    }
+    if (type == EV_MSC && code == ABS_MT_TOOL_TYPE) {
+        return mt_tool_labels;
+    }
+    return nullptr;
+}
+
+} // namespace
+
+EvdevEventLabel InputEventLookup::getLinuxEvdevLabel(int32_t type, int32_t code, int32_t value) {
+    return {
+            .type = getLabel(ev_labels, type),
+            .code = getLabel(getCodeLabelsForType(type), code),
+            .value = getLabel(getValueLabelsForTypeAndCode(type, code), value),
+    };
 }
 
 } // namespace android
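
getLinuxEvdevLabel() maps a raw evdev (type, code, value) triple to readable labels using the generated toolbox tables plus the small UP/DOWN/REPEAT table above. A usage sketch (assuming the generated input.h-labels.h tables resolve these constants as expected):

#include <linux/input-event-codes.h>

// Sketch: label a key-down event for logging.
const EvdevEventLabel label = InputEventLookup::getLinuxEvdevLabel(EV_KEY, KEY_A, /*value=*/1);
// Roughly: label.type == "EV_KEY", label.code == "KEY_A", label.value == "DOWN".
ALOGD("%s %s %s", label.type.c_str(), label.code.c_str(), label.value.c_str());
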
diff --git a/libs/input/InputTransport.cpp b/libs/input/InputTransport.cpp
index 6195052..f6b4648 100644
--- a/libs/input/InputTransport.cpp
+++ b/libs/input/InputTransport.cpp
@@ -5,20 +5,6 @@
 //
 #define LOG_TAG "InputTransport"
 
-//#define LOG_NDEBUG 0
-
-// Log debug messages about channel messages (send message, receive message)
-#define DEBUG_CHANNEL_MESSAGES 0
-
-// Log debug messages whenever InputChannel objects are created/destroyed
-static constexpr bool DEBUG_CHANNEL_LIFECYCLE = false;
-
-// Log debug messages about transport actions
-static constexpr bool DEBUG_TRANSPORT_ACTIONS = false;
-
-// Log debug messages about touch event resampling
-#define DEBUG_RESAMPLING 0
-
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
@@ -27,6 +13,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <android-base/properties.h>
 #include <android-base/stringprintf.h>
 #include <binder/Parcel.h>
 #include <cutils/properties.h>
@@ -36,6 +23,63 @@
 
 #include <input/InputTransport.h>
 
+namespace {
+
+/**
+ * Log debug messages about channel messages (send message, receive message).
+ * Enable this via "adb shell setprop log.tag.InputTransportMessages DEBUG"
+ * (requires restart)
+ */
+const bool DEBUG_CHANNEL_MESSAGES =
+        __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "Messages", ANDROID_LOG_INFO);
+
+/**
+ * Log debug messages whenever InputChannel objects are created/destroyed.
+ * Enable this via "adb shell setprop log.tag.InputTransportLifecycle DEBUG"
+ * (requires restart)
+ */
+const bool DEBUG_CHANNEL_LIFECYCLE =
+        __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "Lifecycle", ANDROID_LOG_INFO);
+
+/**
+ * Log debug messages relating to the consumer end of the transport channel.
+ * Enable this via "adb shell setprop log.tag.InputTransportConsumer DEBUG" (requires restart)
+ */
+const bool DEBUG_TRANSPORT_CONSUMER =
+        __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "Consumer", ANDROID_LOG_INFO);
+
+const bool IS_DEBUGGABLE_BUILD =
+#if defined(__ANDROID__)
+        android::base::GetBoolProperty("ro.debuggable", false);
+#else
+        true;
+#endif
+
+/**
+ * Log debug messages relating to the producer end of the transport channel.
+ * Enable this via "adb shell setprop log.tag.InputTransportPublisher DEBUG".
+ * This requires a restart on non-debuggable (e.g. user) builds, but should take effect immediately
+ * on debuggable builds (e.g. userdebug).
+ */
+bool debugTransportPublisher() {
+    if (!IS_DEBUGGABLE_BUILD) {
+        static const bool DEBUG_TRANSPORT_PUBLISHER =
+                __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "Publisher", ANDROID_LOG_INFO);
+        return DEBUG_TRANSPORT_PUBLISHER;
+    }
+    return __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "Publisher", ANDROID_LOG_INFO);
+}
+
+/**
+ * Log debug messages about touch event resampling.
+ * Enable this via "adb shell setprop log.tag.InputTransportResampling DEBUG" (requires restart)
+ */
+const bool DEBUG_RESAMPLING =
+        __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "Resampling", ANDROID_LOG_INFO);
+
+} // namespace
+
 using android::base::StringPrintf;
 
 namespace android {
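
The compile-time DEBUG_* switches are replaced with runtime checks against per-topic log tags, so a category can be enabled with "adb shell setprop log.tag.InputTransport<Topic> DEBUG" instead of rebuilding. A minimal sketch of the same gating pattern for a hypothetical topic (the "MyTopic" tag and function are illustrative):

#define LOG_TAG "InputTransport"
#include <log/log.h>

// Sketch: loggability is checked once at static-init time, then used to gate ALOGD_IF.
// Enable via "adb shell setprop log.tag.InputTransportMyTopic DEBUG" (restart needed,
// since the result is cached in this static).
static const bool DEBUG_MY_TOPIC =
        __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "MyTopic", ANDROID_LOG_INFO);

void doSomething() {
    ALOGD_IF(DEBUG_MY_TOPIC, "doSomething() called");
}
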
@@ -51,7 +95,7 @@
 
 // Latency added during resampling.  A few milliseconds doesn't hurt much but
 // reduces the impact of mispredicted touch positions.
-static const nsecs_t RESAMPLE_LATENCY = 5 * NANOS_PER_MS;
+const std::chrono::duration RESAMPLE_LATENCY = 5ms;
 
 // Minimum time difference between consecutive samples before attempting to resample.
 static const nsecs_t RESAMPLE_MIN_DELTA = 2 * NANOS_PER_MS;
@@ -76,6 +120,14 @@
  */
 static const char* PROPERTY_RESAMPLING_ENABLED = "ro.input.resampling";
 
+/**
+ * Crash if the events that are getting sent to the InputPublisher are inconsistent.
+ * Enable this via "adb shell setprop log.tag.InputTransportVerifyEvents DEBUG"
+ */
+static bool verifyEvents() {
+    return __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "VerifyEvents", ANDROID_LOG_INFO);
+}
+
 template<typename T>
 inline static T min(const T& a, const T& b) {
     return a < b ? a : b;
@@ -93,6 +145,10 @@
     return value ? "true" : "false";
 }
 
+static bool shouldResampleTool(ToolType toolType) {
+    return toolType == ToolType::FINGER || toolType == ToolType::UNKNOWN;
+}
+
 // --- InputMessage ---
 
 bool InputMessage::isValid(size_t actualSize) const {
@@ -132,7 +188,7 @@
             return valid;
         }
     }
-    ALOGE("Invalid message type: %" PRIu32, header.type);
+    ALOGE("Invalid message type: %s", ftl::enum_string(header.type).c_str());
     return false;
 }
 
@@ -267,6 +323,8 @@
                 memcpy(&msg->body.motion.pointers[i].coords.values[0],
                         &body.motion.pointers[i].coords.values[0],
                         count * (sizeof(body.motion.pointers[i].coords.values[0])));
+                msg->body.motion.pointers[i].coords.isResampled =
+                        body.motion.pointers[i].coords.isResampled;
             }
             break;
         }
@@ -320,15 +378,13 @@
 
 InputChannel::InputChannel(const std::string name, android::base::unique_fd fd, sp<IBinder> token)
       : mName(std::move(name)), mFd(std::move(fd)), mToken(std::move(token)) {
-    if (DEBUG_CHANNEL_LIFECYCLE) {
-        ALOGD("Input channel constructed: name='%s', fd=%d", getName().c_str(), getFd().get());
-    }
+    ALOGD_IF(DEBUG_CHANNEL_LIFECYCLE, "Input channel constructed: name='%s', fd=%d",
+             getName().c_str(), getFd().get());
 }
 
 InputChannel::~InputChannel() {
-    if (DEBUG_CHANNEL_LIFECYCLE) {
-        ALOGD("Input channel destroyed: name='%s', fd=%d", getName().c_str(), getFd().get());
-    }
+    ALOGD_IF(DEBUG_CHANNEL_LIFECYCLE, "Input channel destroyed: name='%s', fd=%d",
+             getName().c_str(), getFd().get());
 }
 
 status_t InputChannel::openInputChannelPair(const std::string& name,
@@ -373,10 +429,8 @@
 
     if (nWrite < 0) {
         int error = errno;
-#if DEBUG_CHANNEL_MESSAGES
-        ALOGD("channel '%s' ~ error sending message of type %d, %s", mName.c_str(),
-              msg->header.type, strerror(error));
-#endif
+        ALOGD_IF(DEBUG_CHANNEL_MESSAGES, "channel '%s' ~ error sending message of type %s, %s",
+                 mName.c_str(), ftl::enum_string(msg->header.type).c_str(), strerror(error));
         if (error == EAGAIN || error == EWOULDBLOCK) {
             return WOULD_BLOCK;
         }
@@ -387,16 +441,14 @@
     }
 
     if (size_t(nWrite) != msgLength) {
-#if DEBUG_CHANNEL_MESSAGES
-        ALOGD("channel '%s' ~ error sending message type %d, send was incomplete",
-                mName.c_str(), msg->header.type);
-#endif
+        ALOGD_IF(DEBUG_CHANNEL_MESSAGES,
+                 "channel '%s' ~ error sending message type %s, send was incomplete", mName.c_str(),
+                 ftl::enum_string(msg->header.type).c_str());
         return DEAD_OBJECT;
     }
 
-#if DEBUG_CHANNEL_MESSAGES
-    ALOGD("channel '%s' ~ sent message of type %d", mName.c_str(), msg->header.type);
-#endif
+    ALOGD_IF(DEBUG_CHANNEL_MESSAGES, "channel '%s' ~ sent message of type %s", mName.c_str(),
+             ftl::enum_string(msg->header.type).c_str());
     return OK;
 }
 
@@ -408,9 +460,8 @@
 
     if (nRead < 0) {
         int error = errno;
-#if DEBUG_CHANNEL_MESSAGES
-        ALOGD("channel '%s' ~ receive message failed, errno=%d", mName.c_str(), errno);
-#endif
+        ALOGD_IF(DEBUG_CHANNEL_MESSAGES, "channel '%s' ~ receive message failed, errno=%d",
+                 mName.c_str(), errno);
         if (error == EAGAIN || error == EWOULDBLOCK) {
             return WOULD_BLOCK;
         }
@@ -421,9 +472,8 @@
     }
 
     if (nRead == 0) { // check for EOF
-#if DEBUG_CHANNEL_MESSAGES
-        ALOGD("channel '%s' ~ receive message failed because peer was closed", mName.c_str());
-#endif
+        ALOGD_IF(DEBUG_CHANNEL_MESSAGES,
+                 "channel '%s' ~ receive message failed because peer was closed", mName.c_str());
         return DEAD_OBJECT;
     }
 
@@ -432,9 +482,8 @@
         return BAD_VALUE;
     }
 
-#if DEBUG_CHANNEL_MESSAGES
-    ALOGD("channel '%s' ~ received message of type %d", mName.c_str(), msg->header.type);
-#endif
+    ALOGD_IF(DEBUG_CHANNEL_MESSAGES, "channel '%s' ~ received message of type %s", mName.c_str(),
+             ftl::enum_string(msg->header.type).c_str());
     return OK;
 }
 
@@ -490,7 +539,8 @@
 
 // --- InputPublisher ---
 
-InputPublisher::InputPublisher(const std::shared_ptr<InputChannel>& channel) : mChannel(channel) {}
+InputPublisher::InputPublisher(const std::shared_ptr<InputChannel>& channel)
+      : mChannel(channel), mInputVerifier(channel->getName()) {}
 
 InputPublisher::~InputPublisher() {
 }
@@ -502,17 +552,19 @@
                                          int32_t metaState, int32_t repeatCount, nsecs_t downTime,
                                          nsecs_t eventTime) {
     if (ATRACE_ENABLED()) {
-        std::string message = StringPrintf("publishKeyEvent(inputChannel=%s, keyCode=%" PRId32 ")",
-                mChannel->getName().c_str(), keyCode);
+        std::string message =
+                StringPrintf("publishKeyEvent(inputChannel=%s, action=%s, keyCode=%s)",
+                             mChannel->getName().c_str(), KeyEvent::actionToString(action),
+                             KeyEvent::getLabel(keyCode));
         ATRACE_NAME(message.c_str());
     }
-    if (DEBUG_TRANSPORT_ACTIONS) {
-        ALOGD("channel '%s' publisher ~ publishKeyEvent: seq=%u, deviceId=%d, source=0x%x, "
-              "action=0x%x, flags=0x%x, keyCode=%d, scanCode=%d, metaState=0x%x, repeatCount=%d,"
-              "downTime=%" PRId64 ", eventTime=%" PRId64,
-              mChannel->getName().c_str(), seq, deviceId, source, action, flags, keyCode, scanCode,
-              metaState, repeatCount, downTime, eventTime);
-    }
+    ALOGD_IF(debugTransportPublisher(),
+             "channel '%s' publisher ~ %s: seq=%u, id=%d, deviceId=%d, source=%s, "
+             "action=%s, flags=0x%x, keyCode=%s, scanCode=%d, metaState=0x%x, repeatCount=%d, "
+             "downTime=%" PRId64 ", eventTime=%" PRId64,
+             mChannel->getName().c_str(), __func__, seq, eventId, deviceId,
+             inputEventSourceToString(source).c_str(), KeyEvent::actionToString(action), flags,
+             KeyEvent::getLabel(keyCode), scanCode, metaState, repeatCount, downTime, eventTime);
 
     if (!seq) {
         ALOGE("Attempted to publish a key event with sequence number 0.");
@@ -548,24 +600,29 @@
         uint32_t pointerCount, const PointerProperties* pointerProperties,
         const PointerCoords* pointerCoords) {
     if (ATRACE_ENABLED()) {
-        std::string message = StringPrintf(
-                "publishMotionEvent(inputChannel=%s, action=%" PRId32 ")",
-                mChannel->getName().c_str(), action);
+        std::string message = StringPrintf("publishMotionEvent(inputChannel=%s, action=%s)",
+                                           mChannel->getName().c_str(),
+                                           MotionEvent::actionToString(action).c_str());
         ATRACE_NAME(message.c_str());
     }
-    if (DEBUG_TRANSPORT_ACTIONS) {
+    if (verifyEvents()) {
+        mInputVerifier.processMovement(deviceId, action, pointerCount, pointerProperties,
+                                       pointerCoords, flags);
+    }
+    if (debugTransportPublisher()) {
         std::string transformString;
         transform.dump(transformString, "transform", "        ");
-        ALOGD("channel '%s' publisher ~ publishMotionEvent: seq=%u, deviceId=%d, source=0x%x, "
+        ALOGD("channel '%s' publisher ~ %s: seq=%u, id=%d, deviceId=%d, source=%s, "
               "displayId=%" PRId32 ", "
-              "action=0x%x, actionButton=0x%08x, flags=0x%x, edgeFlags=0x%x, "
+              "action=%s, actionButton=0x%08x, flags=0x%x, edgeFlags=0x%x, "
               "metaState=0x%x, buttonState=0x%x, classification=%s,"
               "xPrecision=%f, yPrecision=%f, downTime=%" PRId64 ", eventTime=%" PRId64 ", "
               "pointerCount=%" PRIu32 " \n%s",
-              mChannel->getName().c_str(), seq, deviceId, source, displayId, action, actionButton,
-              flags, edgeFlags, metaState, buttonState,
-              motionClassificationToString(classification), xPrecision, yPrecision, downTime,
-              eventTime, pointerCount, transformString.c_str());
+              mChannel->getName().c_str(), __func__, seq, eventId, deviceId,
+              inputEventSourceToString(source).c_str(), displayId,
+              MotionEvent::actionToString(action).c_str(), actionButton, flags, edgeFlags,
+              metaState, buttonState, motionClassificationToString(classification), xPrecision,
+              yPrecision, downTime, eventTime, pointerCount, transformString.c_str());
     }
 
     if (!seq) {
@@ -627,6 +684,8 @@
                                            mChannel->getName().c_str(), toString(hasFocus));
         ATRACE_NAME(message.c_str());
     }
+    ALOGD_IF(debugTransportPublisher(), "channel '%s' publisher ~ %s: seq=%u, id=%d, hasFocus=%s",
+             mChannel->getName().c_str(), __func__, seq, eventId, toString(hasFocus));
 
     InputMessage msg;
     msg.header.type = InputMessage::Type::FOCUS;
@@ -644,6 +703,9 @@
                              mChannel->getName().c_str(), toString(pointerCaptureEnabled));
         ATRACE_NAME(message.c_str());
     }
+    ALOGD_IF(debugTransportPublisher(),
+             "channel '%s' publisher ~ %s: seq=%u, id=%d, pointerCaptureEnabled=%s",
+             mChannel->getName().c_str(), __func__, seq, eventId, toString(pointerCaptureEnabled));
 
     InputMessage msg;
     msg.header.type = InputMessage::Type::CAPTURE;
@@ -661,6 +723,9 @@
                              mChannel->getName().c_str(), x, y, toString(isExiting));
         ATRACE_NAME(message.c_str());
     }
+    ALOGD_IF(debugTransportPublisher(),
+             "channel '%s' publisher ~ %s: seq=%u, id=%d, x=%f, y=%f, isExiting=%s",
+             mChannel->getName().c_str(), __func__, seq, eventId, x, y, toString(isExiting));
 
     InputMessage msg;
     msg.header.type = InputMessage::Type::DRAG;
@@ -679,6 +744,9 @@
                              mChannel->getName().c_str(), toString(isInTouchMode));
         ATRACE_NAME(message.c_str());
     }
+    ALOGD_IF(debugTransportPublisher(),
+             "channel '%s' publisher ~ %s: seq=%u, id=%d, isInTouchMode=%s",
+             mChannel->getName().c_str(), __func__, seq, eventId, toString(isInTouchMode));
 
     InputMessage msg;
     msg.header.type = InputMessage::Type::TOUCH_MODE;
@@ -689,16 +757,18 @@
 }
 
 android::base::Result<InputPublisher::ConsumerResponse> InputPublisher::receiveConsumerResponse() {
-    if (DEBUG_TRANSPORT_ACTIONS) {
-        ALOGD("channel '%s' publisher ~ %s", mChannel->getName().c_str(), __func__);
-    }
-
     InputMessage msg;
     status_t result = mChannel->receiveMessage(&msg);
     if (result) {
+        ALOGD_IF(debugTransportPublisher(), "channel '%s' publisher ~ %s: %s",
+                 mChannel->getName().c_str(), __func__, strerror(result));
         return android::base::Error(result);
     }
     if (msg.header.type == InputMessage::Type::FINISHED) {
+        ALOGD_IF(debugTransportPublisher(),
+                 "channel '%s' publisher ~ %s: finished: seq=%u, handled=%s",
+                 mChannel->getName().c_str(), __func__, msg.header.seq,
+                 toString(msg.body.finished.handled));
         return Finished{
                 .seq = msg.header.seq,
                 .handled = msg.body.finished.handled,
@@ -707,6 +777,8 @@
     }
 
     if (msg.header.type == InputMessage::Type::TIMELINE) {
+        ALOGD_IF(debugTransportPublisher(), "channel '%s' publisher ~ %s: timeline: id=%d",
+                 mChannel->getName().c_str(), __func__, msg.body.timeline.eventId);
         return Timeline{
                 .inputEventId = msg.body.timeline.eventId,
                 .graphicsTimeline = msg.body.timeline.graphicsTimeline,
@@ -721,7 +793,11 @@
 // --- InputConsumer ---
 
 InputConsumer::InputConsumer(const std::shared_ptr<InputChannel>& channel)
-      : mResampleTouch(isTouchResamplingEnabled()), mChannel(channel), mMsgDeferred(false) {}
+      : InputConsumer(channel, isTouchResamplingEnabled()) {}
+
+InputConsumer::InputConsumer(const std::shared_ptr<InputChannel>& channel,
+                             bool enableTouchResampling)
+      : mResampleTouch(enableTouchResampling), mChannel(channel), mMsgDeferred(false) {}
 
 InputConsumer::~InputConsumer() {
 }
@@ -732,10 +808,9 @@
 
 status_t InputConsumer::consume(InputEventFactoryInterface* factory, bool consumeBatches,
                                 nsecs_t frameTime, uint32_t* outSeq, InputEvent** outEvent) {
-    if (DEBUG_TRANSPORT_ACTIONS) {
-        ALOGD("channel '%s' consumer ~ consume: consumeBatches=%s, frameTime=%" PRId64,
-              mChannel->getName().c_str(), toString(consumeBatches), frameTime);
-    }
+    ALOGD_IF(DEBUG_TRANSPORT_CONSUMER,
+             "channel '%s' consumer ~ consume: consumeBatches=%s, frameTime=%" PRId64,
+             mChannel->getName().c_str(), toString(consumeBatches), frameTime);
 
     *outSeq = 0;
     *outEvent = nullptr;
@@ -751,17 +826,19 @@
             // Receive a fresh message.
             status_t result = mChannel->receiveMessage(&mMsg);
             if (result == OK) {
-                mConsumeTimes.emplace(mMsg.header.seq, systemTime(SYSTEM_TIME_MONOTONIC));
+                const auto [_, inserted] =
+                        mConsumeTimes.emplace(mMsg.header.seq, systemTime(SYSTEM_TIME_MONOTONIC));
+                LOG_ALWAYS_FATAL_IF(!inserted, "Already have a consume time for seq=%" PRIu32,
+                                    mMsg.header.seq);
             }
             if (result) {
                 // Consume the next batched event unless batches are being held for later.
                 if (consumeBatches || result != WOULD_BLOCK) {
                     result = consumeBatch(factory, frameTime, outSeq, outEvent);
                     if (*outEvent) {
-                        if (DEBUG_TRANSPORT_ACTIONS) {
-                            ALOGD("channel '%s' consumer ~ consumed batch event, seq=%u",
-                                  mChannel->getName().c_str(), *outSeq);
-                        }
+                        ALOGD_IF(DEBUG_TRANSPORT_CONSUMER,
+                                 "channel '%s' consumer ~ consumed batch event, seq=%u",
+                                 mChannel->getName().c_str(), *outSeq);
                         break;
                     }
                 }
@@ -777,11 +854,10 @@
                 initializeKeyEvent(keyEvent, &mMsg);
                 *outSeq = mMsg.header.seq;
                 *outEvent = keyEvent;
-                if (DEBUG_TRANSPORT_ACTIONS) {
-                    ALOGD("channel '%s' consumer ~ consumed key event, seq=%u",
-                          mChannel->getName().c_str(), *outSeq);
-                }
-            break;
+                ALOGD_IF(DEBUG_TRANSPORT_CONSUMER,
+                         "channel '%s' consumer ~ consumed key event, seq=%u",
+                         mChannel->getName().c_str(), *outSeq);
+                break;
             }
 
             case InputMessage::Type::MOTION: {
@@ -790,11 +866,10 @@
                     Batch& batch = mBatches[batchIndex];
                     if (canAddSample(batch, &mMsg)) {
                         batch.samples.push_back(mMsg);
-                        if (DEBUG_TRANSPORT_ACTIONS) {
-                            ALOGD("channel '%s' consumer ~ appended to batch event",
-                                  mChannel->getName().c_str());
-                        }
-                    break;
+                        ALOGD_IF(DEBUG_TRANSPORT_CONSUMER,
+                                 "channel '%s' consumer ~ appended to batch event",
+                                 mChannel->getName().c_str());
+                        break;
                     } else if (isPointerEvent(mMsg.body.motion.source) &&
                                mMsg.body.motion.action == AMOTION_EVENT_ACTION_CANCEL) {
                         // No need to process events that we are going to cancel anyways
@@ -815,12 +890,11 @@
                         if (result) {
                             return result;
                         }
-                        if (DEBUG_TRANSPORT_ACTIONS) {
-                            ALOGD("channel '%s' consumer ~ consumed batch event and "
-                                  "deferred current event, seq=%u",
-                                  mChannel->getName().c_str(), *outSeq);
-                        }
-                    break;
+                        ALOGD_IF(DEBUG_TRANSPORT_CONSUMER,
+                                 "channel '%s' consumer ~ consumed batch event and "
+                                 "deferred current event, seq=%u",
+                                 mChannel->getName().c_str(), *outSeq);
+                        break;
                     }
                 }
 
@@ -830,10 +904,9 @@
                     Batch batch;
                     batch.samples.push_back(mMsg);
                     mBatches.push_back(batch);
-                    if (DEBUG_TRANSPORT_ACTIONS) {
-                        ALOGD("channel '%s' consumer ~ started batch event",
-                              mChannel->getName().c_str());
-                    }
+                    ALOGD_IF(DEBUG_TRANSPORT_CONSUMER,
+                             "channel '%s' consumer ~ started batch event",
+                             mChannel->getName().c_str());
                     break;
                 }
 
@@ -845,10 +918,9 @@
                 *outSeq = mMsg.header.seq;
                 *outEvent = motionEvent;
 
-                if (DEBUG_TRANSPORT_ACTIONS) {
-                    ALOGD("channel '%s' consumer ~ consumed motion event, seq=%u",
-                          mChannel->getName().c_str(), *outSeq);
-                }
+                ALOGD_IF(DEBUG_TRANSPORT_CONSUMER,
+                         "channel '%s' consumer ~ consumed motion event, seq=%u",
+                         mChannel->getName().c_str(), *outSeq);
                 break;
             }
 
@@ -918,7 +990,7 @@
 
         nsecs_t sampleTime = frameTime;
         if (mResampleTouch) {
-            sampleTime -= RESAMPLE_LATENCY;
+            sampleTime -= std::chrono::nanoseconds(RESAMPLE_LATENCY).count();
         }
         ssize_t split = findSampleNoLaterThan(batch, sampleTime);
         if (split < 0) {
@@ -1065,13 +1137,12 @@
                     state.recentCoordinatesAreIdentical(id)) {
                 PointerCoords& msgCoords = msg.body.motion.pointers[i].coords;
                 const PointerCoords& resampleCoords = state.lastResample.getPointerById(id);
-#if DEBUG_RESAMPLING
-                ALOGD("[%d] - rewrite (%0.3f, %0.3f), old (%0.3f, %0.3f)", id,
-                        resampleCoords.getX(), resampleCoords.getY(),
-                        msgCoords.getX(), msgCoords.getY());
-#endif
+                ALOGD_IF(DEBUG_RESAMPLING, "[%d] - rewrite (%0.3f, %0.3f), old (%0.3f, %0.3f)", id,
+                         resampleCoords.getX(), resampleCoords.getY(), msgCoords.getX(),
+                         msgCoords.getY());
                 msgCoords.setAxisValue(AMOTION_EVENT_AXIS_X, resampleCoords.getX());
                 msgCoords.setAxisValue(AMOTION_EVENT_AXIS_Y, resampleCoords.getY());
+                msgCoords.isResampled = true;
             } else {
                 state.lastResample.idBits.clearBit(id);
             }
@@ -1089,17 +1160,13 @@
 
     ssize_t index = findTouchState(event->getDeviceId(), event->getSource());
     if (index < 0) {
-#if DEBUG_RESAMPLING
-        ALOGD("Not resampled, no touch state for device.");
-#endif
+        ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, no touch state for device.");
         return;
     }
 
     TouchState& touchState = mTouchStates[index];
     if (touchState.historySize < 1) {
-#if DEBUG_RESAMPLING
-        ALOGD("Not resampled, no history for device.");
-#endif
+        ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, no history for device.");
         return;
     }
 
@@ -1109,9 +1176,7 @@
     for (size_t i = 0; i < pointerCount; i++) {
         uint32_t id = event->getPointerId(i);
         if (!current->idBits.hasBit(id)) {
-#if DEBUG_RESAMPLING
-            ALOGD("Not resampled, missing id %d", id);
-#endif
+            ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, missing id %d", id);
             return;
         }
     }
@@ -1127,9 +1192,8 @@
         other = &future;
         nsecs_t delta = future.eventTime - current->eventTime;
         if (delta < RESAMPLE_MIN_DELTA) {
-#if DEBUG_RESAMPLING
-            ALOGD("Not resampled, delta time is too small: %" PRId64 " ns.", delta);
-#endif
+            ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, delta time is too small: %" PRId64 " ns.",
+                     delta);
             return;
         }
         alpha = float(sampleTime - current->eventTime) / delta;
@@ -1139,30 +1203,30 @@
         other = touchState.getHistory(1);
         nsecs_t delta = current->eventTime - other->eventTime;
         if (delta < RESAMPLE_MIN_DELTA) {
-#if DEBUG_RESAMPLING
-            ALOGD("Not resampled, delta time is too small: %" PRId64 " ns.", delta);
-#endif
+            ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, delta time is too small: %" PRId64 " ns.",
+                     delta);
             return;
         } else if (delta > RESAMPLE_MAX_DELTA) {
-#if DEBUG_RESAMPLING
-            ALOGD("Not resampled, delta time is too large: %" PRId64 " ns.", delta);
-#endif
+            ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, delta time is too large: %" PRId64 " ns.",
+                     delta);
             return;
         }
         nsecs_t maxPredict = current->eventTime + min(delta / 2, RESAMPLE_MAX_PREDICTION);
         if (sampleTime > maxPredict) {
-#if DEBUG_RESAMPLING
-            ALOGD("Sample time is too far in the future, adjusting prediction "
-                    "from %" PRId64 " to %" PRId64 " ns.",
-                    sampleTime - current->eventTime, maxPredict - current->eventTime);
-#endif
+            ALOGD_IF(DEBUG_RESAMPLING,
+                     "Sample time is too far in the future, adjusting prediction "
+                     "from %" PRId64 " to %" PRId64 " ns.",
+                     sampleTime - current->eventTime, maxPredict - current->eventTime);
             sampleTime = maxPredict;
         }
         alpha = float(current->eventTime - sampleTime) / delta;
     } else {
-#if DEBUG_RESAMPLING
-        ALOGD("Not resampled, insufficient data.");
-#endif
+        ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, insufficient data.");
+        return;
+    }
+
+    if (current->eventTime == sampleTime) {
+        // Prevents having 2 events with identical times and coordinates.
         return;
     }
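
As a worked illustration of the interpolation branch above (not part of the patch): with two received samples 8 ms apart and a sampleTime that sits 3 ms past the current sample after the RESAMPLE_LATENCY adjustment, alpha comes out to 0.375 and the resampled coordinate lands proportionally between the two samples. The numbers are made up, and the local lerp() is an assumption standing in for the file's existing helper.

#include <cstdint>
#include <cstdio>

// Conventional linear interpolation, matching how the resampler blends the
// current sample with the "other" sample: alpha=0 keeps a, alpha=1 reaches b,
// and a negative alpha (used by the extrapolation branch above) moves past a,
// away from b.
static float lerp(float a, float b, float alpha) {
    return a + alpha * (b - a);
}

int main() {
    const std::int64_t currentTime = 100'000'000;  // ns, latest received sample
    const std::int64_t futureTime = 108'000'000;   // ns, next sample waiting in the batch
    const std::int64_t sampleTime = 103'000'000;   // ns, frameTime minus RESAMPLE_LATENCY (5 ms)

    const float alpha = float(sampleTime - currentTime) / float(futureTime - currentTime);
    const float x = lerp(/*currentX=*/10.0f, /*futureX=*/18.0f, alpha);

    std::printf("alpha=%.3f resampledX=%.3f\n", alpha, x);  // alpha=0.375 resampledX=13.000
    return 0;
}
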
 
@@ -1179,6 +1243,8 @@
             // We maintain the previously resampled value for this pointer (stored in
             // oldLastResample) when the coordinates for this pointer haven't changed since then.
             // This way we don't introduce artificial jitter when pointers haven't actually moved.
+            // The isResampled flag isn't cleared as the values don't reflect what the device is
+            // actually reporting.
 
             // We know here that the coordinates for the pointer haven't changed because we
             // would've cleared the resampled bit in rewriteMessage if they had. We can't modify
@@ -1190,43 +1256,32 @@
         PointerCoords& resampledCoords = touchState.lastResample.pointers[i];
         const PointerCoords& currentCoords = current->getPointerById(id);
         resampledCoords.copyFrom(currentCoords);
-        if (other->idBits.hasBit(id)
-                && shouldResampleTool(event->getToolType(i))) {
+        if (other->idBits.hasBit(id) && shouldResampleTool(event->getToolType(i))) {
             const PointerCoords& otherCoords = other->getPointerById(id);
             resampledCoords.setAxisValue(AMOTION_EVENT_AXIS_X,
-                    lerp(currentCoords.getX(), otherCoords.getX(), alpha));
+                                         lerp(currentCoords.getX(), otherCoords.getX(), alpha));
             resampledCoords.setAxisValue(AMOTION_EVENT_AXIS_Y,
-                    lerp(currentCoords.getY(), otherCoords.getY(), alpha));
-#if DEBUG_RESAMPLING
-            ALOGD("[%d] - out (%0.3f, %0.3f), cur (%0.3f, %0.3f), "
-                    "other (%0.3f, %0.3f), alpha %0.3f",
-                    id, resampledCoords.getX(), resampledCoords.getY(),
-                    currentCoords.getX(), currentCoords.getY(),
-                    otherCoords.getX(), otherCoords.getY(),
-                    alpha);
-#endif
+                                         lerp(currentCoords.getY(), otherCoords.getY(), alpha));
+            resampledCoords.isResampled = true;
+            ALOGD_IF(DEBUG_RESAMPLING,
+                     "[%d] - out (%0.3f, %0.3f), cur (%0.3f, %0.3f), "
+                     "other (%0.3f, %0.3f), alpha %0.3f",
+                     id, resampledCoords.getX(), resampledCoords.getY(), currentCoords.getX(),
+                     currentCoords.getY(), otherCoords.getX(), otherCoords.getY(), alpha);
         } else {
-#if DEBUG_RESAMPLING
-            ALOGD("[%d] - out (%0.3f, %0.3f), cur (%0.3f, %0.3f)",
-                    id, resampledCoords.getX(), resampledCoords.getY(),
-                    currentCoords.getX(), currentCoords.getY());
-#endif
+            ALOGD_IF(DEBUG_RESAMPLING, "[%d] - out (%0.3f, %0.3f), cur (%0.3f, %0.3f)", id,
+                     resampledCoords.getX(), resampledCoords.getY(), currentCoords.getX(),
+                     currentCoords.getY());
         }
     }
 
     event->addSample(sampleTime, touchState.lastResample.pointers);
 }
 
-bool InputConsumer::shouldResampleTool(int32_t toolType) {
-    return toolType == AMOTION_EVENT_TOOL_TYPE_FINGER
-            || toolType == AMOTION_EVENT_TOOL_TYPE_UNKNOWN;
-}
-
 status_t InputConsumer::sendFinishedSignal(uint32_t seq, bool handled) {
-    if (DEBUG_TRANSPORT_ACTIONS) {
-        ALOGD("channel '%s' consumer ~ sendFinishedSignal: seq=%u, handled=%s",
-              mChannel->getName().c_str(), seq, toString(handled));
-    }
+    ALOGD_IF(DEBUG_TRANSPORT_CONSUMER,
+             "channel '%s' consumer ~ sendFinishedSignal: seq=%u, handled=%s",
+             mChannel->getName().c_str(), seq, toString(handled));
 
     if (!seq) {
         ALOGE("Attempted to send a finished signal with sequence number 0.");
@@ -1273,13 +1328,12 @@
 
 status_t InputConsumer::sendTimeline(int32_t inputEventId,
                                      std::array<nsecs_t, GraphicsTimeline::SIZE> graphicsTimeline) {
-    if (DEBUG_TRANSPORT_ACTIONS) {
-        ALOGD("channel '%s' consumer ~ sendTimeline: inputEventId=%" PRId32
-              ", gpuCompletedTime=%" PRId64 ", presentTime=%" PRId64,
-              mChannel->getName().c_str(), inputEventId,
-              graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME],
-              graphicsTimeline[GraphicsTimeline::PRESENT_TIME]);
-    }
+    ALOGD_IF(DEBUG_TRANSPORT_CONSUMER,
+             "channel '%s' consumer ~ sendTimeline: inputEventId=%" PRId32
+             ", gpuCompletedTime=%" PRId64 ", presentTime=%" PRId64,
+             mChannel->getName().c_str(), inputEventId,
+             graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME],
+             graphicsTimeline[GraphicsTimeline::PRESENT_TIME]);
 
     InputMessage msg;
     msg.header.type = InputMessage::Type::TIMELINE;
diff --git a/libs/input/InputVerifier.cpp b/libs/input/InputVerifier.cpp
new file mode 100644
index 0000000..eb75804
--- /dev/null
+++ b/libs/input/InputVerifier.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "InputVerifier"
+
+#include <android-base/logging.h>
+#include <input/InputVerifier.h>
+
+namespace android {
+
+/**
+ * Log all of the movements that are sent to this verifier. Helps to identify the streams that lead
+ * to inconsistent events.
+ * Enable this via "adb shell setprop log.tag.InputVerifierLogEvents DEBUG"
+ */
+static bool logEvents() {
+    return __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "LogEvents", ANDROID_LOG_INFO);
+}
+
+// --- InputVerifier ---
+
+InputVerifier::InputVerifier(const std::string& name) : mName(name) {}
+
+void InputVerifier::processMovement(int32_t deviceId, int32_t action, uint32_t pointerCount,
+                                    const PointerProperties* pointerProperties,
+                                    const PointerCoords* pointerCoords, int32_t flags) {
+    if (logEvents()) {
+        LOG(ERROR) << "Processing " << MotionEvent::actionToString(action) << " for device "
+                   << deviceId << " (" << pointerCount << " pointer"
+                   << (pointerCount == 1 ? "" : "s") << ") on " << mName;
+    }
+
+    switch (MotionEvent::getActionMasked(action)) {
+        case AMOTION_EVENT_ACTION_DOWN: {
+            auto [it, inserted] = mTouchingPointerIdsByDevice.insert({deviceId, {}});
+            if (!inserted) {
+                LOG(FATAL) << "Got ACTION_DOWN, but already have touching pointers " << it->second
+                           << " for device " << deviceId << " on " << mName;
+            }
+            it->second.set(pointerProperties[0].id);
+            break;
+        }
+        case AMOTION_EVENT_ACTION_POINTER_DOWN: {
+            auto it = mTouchingPointerIdsByDevice.find(deviceId);
+            if (it == mTouchingPointerIdsByDevice.end()) {
+                LOG(FATAL) << "Got POINTER_DOWN, but no touching pointers for device " << deviceId
+                           << " on " << mName;
+            }
+            it->second.set(pointerProperties[MotionEvent::getActionIndex(action)].id);
+            break;
+        }
+        case AMOTION_EVENT_ACTION_MOVE: {
+            ensureTouchingPointersMatch(deviceId, pointerCount, pointerProperties, "MOVE");
+            break;
+        }
+        case AMOTION_EVENT_ACTION_POINTER_UP: {
+            auto it = mTouchingPointerIdsByDevice.find(deviceId);
+            if (it == mTouchingPointerIdsByDevice.end()) {
+                LOG(FATAL) << "Got POINTER_UP, but no touching pointers for device " << deviceId
+                           << " on " << mName;
+            }
+            it->second.reset(pointerProperties[MotionEvent::getActionIndex(action)].id);
+            break;
+        }
+        case AMOTION_EVENT_ACTION_UP: {
+            auto it = mTouchingPointerIdsByDevice.find(deviceId);
+            if (it == mTouchingPointerIdsByDevice.end()) {
+                LOG(FATAL) << "Got ACTION_UP, but no record for deviceId " << deviceId << " on "
+                           << mName;
+            }
+            const auto& [_, touchingPointerIds] = *it;
+            if (touchingPointerIds.count() != 1) {
+                LOG(FATAL) << "Got ACTION_UP, but we have pointers: " << touchingPointerIds
+                           << " for deviceId " << deviceId << " on " << mName;
+            }
+            const int32_t pointerId = pointerProperties[0].id;
+            if (!touchingPointerIds.test(pointerId)) {
+                LOG(FATAL) << "Got ACTION_UP, but pointerId " << pointerId
+                           << " is not touching. Touching pointers: " << touchingPointerIds
+                           << " for deviceId " << deviceId << " on " << mName;
+            }
+            mTouchingPointerIdsByDevice.erase(it);
+            break;
+        }
+        case AMOTION_EVENT_ACTION_CANCEL: {
+            if ((flags & AMOTION_EVENT_FLAG_CANCELED) != AMOTION_EVENT_FLAG_CANCELED) {
+                LOG(FATAL) << "For ACTION_CANCEL, must set FLAG_CANCELED";
+            }
+            ensureTouchingPointersMatch(deviceId, pointerCount, pointerProperties, "CANCEL");
+            mTouchingPointerIdsByDevice.erase(deviceId);
+            break;
+        }
+    }
+}
+
+void InputVerifier::ensureTouchingPointersMatch(int32_t deviceId, uint32_t pointerCount,
+                                                const PointerProperties* pointerProperties,
+                                                const char* action) const {
+    auto it = mTouchingPointerIdsByDevice.find(deviceId);
+    if (it == mTouchingPointerIdsByDevice.end()) {
+        LOG(FATAL) << "Got " << action << ", but no touching pointers for device " << deviceId
+                   << " on " << mName;
+    }
+    const auto& [_, touchingPointerIds] = *it;
+    for (size_t i = 0; i < pointerCount; i++) {
+        const int32_t pointerId = pointerProperties[i].id;
+        if (!touchingPointerIds.test(pointerId)) {
+            LOG(FATAL) << "Got " << action << " for pointerId " << pointerId
+                       << " but the touching pointers are " << touchingPointerIds << " on "
+                       << mName;
+        }
+    }
+}
+
+} // namespace android
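
A minimal driving sketch for the new verifier, mirroring how publishMotionEvent() feeds it when verifyEvents() is enabled. This is not part of the patch; the channel name, device id, and coordinates are made up, and the processMovement() signature is the one introduced above.

#include <android/input.h>        // AMOTION_EVENT_ACTION_*, AMOTION_EVENT_AXIS_*
#include <input/Input.h>          // PointerProperties, PointerCoords
#include <input/InputVerifier.h>

void exampleVerifierUsage() {
    android::InputVerifier verifier("example-channel");

    android::PointerProperties props[1];
    props[0].clear();
    props[0].id = 0;

    android::PointerCoords coords[1];
    coords[0].clear();
    coords[0].setAxisValue(AMOTION_EVENT_AXIS_X, 10.f);
    coords[0].setAxisValue(AMOTION_EVENT_AXIS_Y, 20.f);

    // A consistent DOWN -> MOVE -> UP stream for a single pointer passes
    // silently; an inconsistent stream (e.g. MOVE with no preceding DOWN, or
    // CANCEL without FLAG_CANCELED) aborts via LOG(FATAL).
    verifier.processMovement(/*deviceId=*/1, AMOTION_EVENT_ACTION_DOWN, /*pointerCount=*/1,
                             props, coords, /*flags=*/0);
    verifier.processMovement(/*deviceId=*/1, AMOTION_EVENT_ACTION_MOVE, /*pointerCount=*/1,
                             props, coords, /*flags=*/0);
    verifier.processMovement(/*deviceId=*/1, AMOTION_EVENT_ACTION_UP, /*pointerCount=*/1,
                             props, coords, /*flags=*/0);
}
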
diff --git a/libs/input/KeyCharacterMap.cpp b/libs/input/KeyCharacterMap.cpp
index 7a379f5..d571917 100644
--- a/libs/input/KeyCharacterMap.cpp
+++ b/libs/input/KeyCharacterMap.cpp
@@ -43,7 +43,6 @@
 // Enables debug output for mapping.
 #define DEBUG_MAPPING 0
 
-
 namespace android {
 
 static const char* WHITESPACE = " \t\r";
@@ -86,75 +85,7 @@
 
 // --- KeyCharacterMap ---
 
-KeyCharacterMap::KeyCharacterMap(const std::string& filename)
-      : mType(KeyboardType::UNKNOWN), mLoadFileName(filename) {}
-
-KeyCharacterMap::KeyCharacterMap(const KeyCharacterMap& other)
-      : mType(other.mType),
-        mLoadFileName(other.mLoadFileName),
-        mLayoutOverlayApplied(other.mLayoutOverlayApplied),
-        mKeysByScanCode(other.mKeysByScanCode),
-        mKeysByUsageCode(other.mKeysByUsageCode) {
-    for (size_t i = 0; i < other.mKeys.size(); i++) {
-        mKeys.add(other.mKeys.keyAt(i), new Key(*other.mKeys.valueAt(i)));
-    }
-}
-
-KeyCharacterMap::~KeyCharacterMap() {
-    clear();
-}
-
-bool KeyCharacterMap::operator==(const KeyCharacterMap& other) const {
-    if (mType != other.mType) {
-        return false;
-    }
-    if (mLoadFileName != other.mLoadFileName) {
-        return false;
-    }
-    if (mLayoutOverlayApplied != other.mLayoutOverlayApplied) {
-        return false;
-    }
-    if (mKeys.size() != other.mKeys.size() ||
-        mKeysByScanCode.size() != other.mKeysByScanCode.size() ||
-        mKeysByUsageCode.size() != other.mKeysByUsageCode.size()) {
-        return false;
-    }
-
-    for (size_t i = 0; i < mKeys.size(); i++) {
-        if (mKeys.keyAt(i) != other.mKeys.keyAt(i)) {
-            return false;
-        }
-        const Key* key = mKeys.valueAt(i);
-        const Key* otherKey = other.mKeys.valueAt(i);
-        if (key->label != otherKey->label || key->number != otherKey->number) {
-            return false;
-        }
-    }
-
-    for (size_t i = 0; i < mKeysByScanCode.size(); i++) {
-        if (mKeysByScanCode.keyAt(i) != other.mKeysByScanCode.keyAt(i)) {
-            return false;
-        }
-        if (mKeysByScanCode.valueAt(i) != other.mKeysByScanCode.valueAt(i)) {
-            return false;
-        }
-    }
-
-    for (size_t i = 0; i < mKeysByUsageCode.size(); i++) {
-        if (mKeysByUsageCode.keyAt(i) != other.mKeysByUsageCode.keyAt(i)) {
-            return false;
-        }
-        if (mKeysByUsageCode.valueAt(i) != other.mKeysByUsageCode.valueAt(i)) {
-            return false;
-        }
-    }
-
-    return true;
-}
-
-bool KeyCharacterMap::operator!=(const KeyCharacterMap& other) const {
-    return !(*this == other);
-}
+KeyCharacterMap::KeyCharacterMap(const std::string& filename) : mLoadFileName(filename) {}
 
 base::Result<std::shared_ptr<KeyCharacterMap>> KeyCharacterMap::load(const std::string& filename,
                                                                      Format format) {
@@ -220,10 +151,6 @@
 void KeyCharacterMap::clear() {
     mKeysByScanCode.clear();
     mKeysByUsageCode.clear();
-    for (size_t i = 0; i < mKeys.size(); i++) {
-        Key* key = mKeys.editValueAt(i);
-        delete key;
-    }
     mKeys.clear();
     mLayoutOverlayApplied = false;
     mType = KeyboardType::UNKNOWN;
@@ -246,30 +173,27 @@
     if (mLayoutOverlayApplied) {
         reloadBaseFromFile();
     }
-    for (size_t i = 0; i < overlay.mKeys.size(); i++) {
-        int32_t keyCode = overlay.mKeys.keyAt(i);
-        Key* key = overlay.mKeys.valueAt(i);
-        ssize_t oldIndex = mKeys.indexOfKey(keyCode);
-        if (oldIndex >= 0) {
-            delete mKeys.valueAt(oldIndex);
-            mKeys.editValueAt(oldIndex) = new Key(*key);
-        } else {
-            mKeys.add(keyCode, new Key(*key));
-        }
+    for (const auto& [keyCode, key] : overlay.mKeys) {
+        mKeys.insert_or_assign(keyCode, key);
     }
 
-    for (size_t i = 0; i < overlay.mKeysByScanCode.size(); i++) {
-        mKeysByScanCode.replaceValueFor(overlay.mKeysByScanCode.keyAt(i),
-                                        overlay.mKeysByScanCode.valueAt(i));
+    for (const auto& [fromScanCode, toAndroidKeyCode] : overlay.mKeysByScanCode) {
+        mKeysByScanCode.insert_or_assign(fromScanCode, toAndroidKeyCode);
     }
 
-    for (size_t i = 0; i < overlay.mKeysByUsageCode.size(); i++) {
-        mKeysByUsageCode.replaceValueFor(overlay.mKeysByUsageCode.keyAt(i),
-                                         overlay.mKeysByUsageCode.valueAt(i));
+    for (const auto& [fromHidUsageCode, toAndroidKeyCode] : overlay.mKeysByUsageCode) {
+        mKeysByUsageCode.insert_or_assign(fromHidUsageCode, toAndroidKeyCode);
     }
     mLayoutOverlayApplied = true;
 }
 
+void KeyCharacterMap::clearLayoutOverlay() {
+    if (mLayoutOverlayApplied) {
+        reloadBaseFromFile();
+        mLayoutOverlayApplied = false;
+    }
+}
+
 KeyCharacterMap::KeyboardType KeyCharacterMap::getKeyboardType() const {
     return mType;
 }
@@ -280,8 +204,8 @@
 
 char16_t KeyCharacterMap::getDisplayLabel(int32_t keyCode) const {
     char16_t result = 0;
-    const Key* key;
-    if (getKey(keyCode, &key)) {
+    const Key* key = getKey(keyCode);
+    if (key != nullptr) {
         result = key->label;
     }
 #if DEBUG_MAPPING
@@ -292,8 +216,8 @@
 
 char16_t KeyCharacterMap::getNumber(int32_t keyCode) const {
     char16_t result = 0;
-    const Key* key;
-    if (getKey(keyCode, &key)) {
+    const Key* key = getKey(keyCode);
+    if (key != nullptr) {
         result = key->number;
     }
 #if DEBUG_MAPPING
@@ -304,9 +228,8 @@
 
 char16_t KeyCharacterMap::getCharacter(int32_t keyCode, int32_t metaState) const {
     char16_t result = 0;
-    const Key* key;
-    const Behavior* behavior;
-    if (getKeyBehavior(keyCode, metaState, &key, &behavior)) {
+    const Behavior* behavior = getKeyBehavior(keyCode, metaState);
+    if (behavior != nullptr) {
         result = behavior->character;
     }
 #if DEBUG_MAPPING
@@ -321,9 +244,8 @@
     outFallbackAction->metaState = 0;
 
     bool result = false;
-    const Key* key;
-    const Behavior* behavior;
-    if (getKeyBehavior(keyCode, metaState, &key, &behavior)) {
+    const Behavior* behavior = getKeyBehavior(keyCode, metaState);
+    if (behavior != nullptr) {
         if (behavior->fallbackKeyCode) {
             outFallbackAction->keyCode = behavior->fallbackKeyCode;
             outFallbackAction->metaState = metaState & ~behavior->metaState;
@@ -342,30 +264,26 @@
 char16_t KeyCharacterMap::getMatch(int32_t keyCode, const char16_t* chars, size_t numChars,
         int32_t metaState) const {
     char16_t result = 0;
-    const Key* key;
-    if (getKey(keyCode, &key)) {
+    const Key* key = getKey(keyCode);
+    if (key != nullptr) {
         // Try to find the most general behavior that maps to this character.
         // For example, the base key behavior will usually be last in the list.
         // However, if we find a perfect meta state match for one behavior then use that one.
-        for (const Behavior* behavior = key->firstBehavior; behavior; behavior = behavior->next) {
-            if (behavior->character) {
+        for (const Behavior& behavior : key->behaviors) {
+            if (behavior.character) {
                 for (size_t i = 0; i < numChars; i++) {
-                    if (behavior->character == chars[i]) {
-                        result = behavior->character;
-                        if ((behavior->metaState & metaState) == behavior->metaState) {
-                            goto ExactMatch;
+                    if (behavior.character == chars[i]) {
+                        result = behavior.character;
+                        if ((behavior.metaState & metaState) == behavior.metaState) {
+                            // Found exact match!
+                            return result;
                         }
                         break;
                     }
                 }
             }
         }
-    ExactMatch: ;
     }
-#if DEBUG_MAPPING
-    ALOGD("getMatch: keyCode=%d, chars=[%s], metaState=0x%08x ~ Result %d.", keyCode,
-          toString(chars, numChars).c_str(), metaState, result);
-#endif
     return result;
 }
 
@@ -402,11 +320,26 @@
     return true;
 }
 
+void KeyCharacterMap::addKeyRemapping(int32_t fromKeyCode, int32_t toKeyCode) {
+    if (fromKeyCode == toKeyCode) {
+        mKeyRemapping.erase(fromKeyCode);
+#if DEBUG_MAPPING
+        ALOGD("addKeyRemapping: Cleared remapping for keyCode=%d ~ Result Successful.", fromKeyCode);
+#endif
+        return;
+    }
+    mKeyRemapping.insert_or_assign(fromKeyCode, toKeyCode);
+#if DEBUG_MAPPING
+    ALOGD("addKeyRemapping: fromKeyCode=%d, toKeyCode=%d ~ Result Successful.", fromKeyCode,
+          toKeyCode);
+#endif
+}
+
 status_t KeyCharacterMap::mapKey(int32_t scanCode, int32_t usageCode, int32_t* outKeyCode) const {
     if (usageCode) {
-        ssize_t index = mKeysByUsageCode.indexOfKey(usageCode);
-        if (index >= 0) {
-            *outKeyCode = mKeysByUsageCode.valueAt(index);
+        const auto it = mKeysByUsageCode.find(usageCode);
+        if (it != mKeysByUsageCode.end()) {
+            *outKeyCode = it->second;
 #if DEBUG_MAPPING
             ALOGD("mapKey: scanCode=%d, usageCode=0x%08x ~ Result keyCode=%d.",
                     scanCode, usageCode, *outKeyCode);
@@ -415,9 +348,9 @@
         }
     }
     if (scanCode) {
-        ssize_t index = mKeysByScanCode.indexOfKey(scanCode);
-        if (index >= 0) {
-            *outKeyCode = mKeysByScanCode.valueAt(index);
+        const auto it = mKeysByScanCode.find(scanCode);
+        if (it != mKeysByScanCode.end()) {
+            *outKeyCode = it->second;
 #if DEBUG_MAPPING
             ALOGD("mapKey: scanCode=%d, usageCode=0x%08x ~ Result keyCode=%d.",
                     scanCode, usageCode, *outKeyCode);
@@ -433,72 +366,80 @@
     return NAME_NOT_FOUND;
 }
 
-void KeyCharacterMap::tryRemapKey(int32_t keyCode, int32_t metaState,
-                                  int32_t *outKeyCode, int32_t *outMetaState) const {
-    *outKeyCode = keyCode;
-    *outMetaState = metaState;
+int32_t KeyCharacterMap::applyKeyRemapping(int32_t fromKeyCode) const {
+    int32_t toKeyCode = fromKeyCode;
 
-    const Key* key;
-    const Behavior* behavior;
-    if (getKeyBehavior(keyCode, metaState, &key, &behavior)) {
+    const auto it = mKeyRemapping.find(fromKeyCode);
+    if (it != mKeyRemapping.end()) {
+        toKeyCode = it->second;
+    }
+#if DEBUG_MAPPING
+    ALOGD("applyKeyRemapping: keyCode=%d ~ replacement keyCode=%d.", fromKeyCode, toKeyCode);
+#endif
+    return toKeyCode;
+}
+
+std::pair<int32_t, int32_t> KeyCharacterMap::applyKeyBehavior(int32_t fromKeyCode,
+                                                              int32_t fromMetaState) const {
+    int32_t toKeyCode = fromKeyCode;
+    int32_t toMetaState = fromMetaState;
+
+    const Behavior* behavior = getKeyBehavior(fromKeyCode, fromMetaState);
+    if (behavior != nullptr) {
         if (behavior->replacementKeyCode) {
-            *outKeyCode = behavior->replacementKeyCode;
-            int32_t newMetaState = metaState & ~behavior->metaState;
+            toKeyCode = behavior->replacementKeyCode;
+            toMetaState = fromMetaState & ~behavior->metaState;
             // Reset dependent meta states.
             if (behavior->metaState & AMETA_ALT_ON) {
-                newMetaState &= ~(AMETA_ALT_LEFT_ON | AMETA_ALT_RIGHT_ON);
+                toMetaState &= ~(AMETA_ALT_LEFT_ON | AMETA_ALT_RIGHT_ON);
             }
             if (behavior->metaState & (AMETA_ALT_LEFT_ON | AMETA_ALT_RIGHT_ON)) {
-                newMetaState &= ~AMETA_ALT_ON;
+                toMetaState &= ~AMETA_ALT_ON;
             }
             if (behavior->metaState & AMETA_CTRL_ON) {
-                newMetaState &= ~(AMETA_CTRL_LEFT_ON | AMETA_CTRL_RIGHT_ON);
+                toMetaState &= ~(AMETA_CTRL_LEFT_ON | AMETA_CTRL_RIGHT_ON);
             }
             if (behavior->metaState & (AMETA_CTRL_LEFT_ON | AMETA_CTRL_RIGHT_ON)) {
-                newMetaState &= ~AMETA_CTRL_ON;
+                toMetaState &= ~AMETA_CTRL_ON;
             }
             if (behavior->metaState & AMETA_SHIFT_ON) {
-                newMetaState &= ~(AMETA_SHIFT_LEFT_ON | AMETA_SHIFT_RIGHT_ON);
+                toMetaState &= ~(AMETA_SHIFT_LEFT_ON | AMETA_SHIFT_RIGHT_ON);
             }
             if (behavior->metaState & (AMETA_SHIFT_LEFT_ON | AMETA_SHIFT_RIGHT_ON)) {
-                newMetaState &= ~AMETA_SHIFT_ON;
+                toMetaState &= ~AMETA_SHIFT_ON;
             }
             // ... and put universal bits back if needed
-            *outMetaState = normalizeMetaState(newMetaState);
+            toMetaState = normalizeMetaState(toMetaState);
         }
     }
 
 #if DEBUG_MAPPING
-    ALOGD("tryRemapKey: keyCode=%d, metaState=0x%08x ~ "
-            "replacement keyCode=%d, replacement metaState=0x%08x.",
-            keyCode, metaState, *outKeyCode, *outMetaState);
+    ALOGD("applyKeyBehavior: keyCode=%d, metaState=0x%08x ~ "
+          "replacement keyCode=%d, replacement metaState=0x%08x.",
+          fromKeyCode, fromMetaState, toKeyCode, toMetaState);
 #endif
+    return std::make_pair(toKeyCode, toMetaState);
 }
 
-bool KeyCharacterMap::getKey(int32_t keyCode, const Key** outKey) const {
-    ssize_t index = mKeys.indexOfKey(keyCode);
-    if (index >= 0) {
-        *outKey = mKeys.valueAt(index);
-        return true;
+const KeyCharacterMap::Key* KeyCharacterMap::getKey(int32_t keyCode) const {
+    auto it = mKeys.find(keyCode);
+    if (it != mKeys.end()) {
+        return &it->second;
     }
-    return false;
+    return nullptr;
 }
 
-bool KeyCharacterMap::getKeyBehavior(int32_t keyCode, int32_t metaState,
-        const Key** outKey, const Behavior** outBehavior) const {
-    const Key* key;
-    if (getKey(keyCode, &key)) {
-        const Behavior* behavior = key->firstBehavior;
-        while (behavior) {
-            if (matchesMetaState(metaState, behavior->metaState)) {
-                *outKey = key;
-                *outBehavior = behavior;
-                return true;
+const KeyCharacterMap::Behavior* KeyCharacterMap::getKeyBehavior(int32_t keyCode,
+                                                                 int32_t metaState) const {
+    const Key* key = getKey(keyCode);
+    if (key != nullptr) {
+        for (const Behavior& behavior : key->behaviors) {
+            if (matchesMetaState(metaState, behavior.metaState)) {
+                return &behavior;
             }
-            behavior = behavior->next;
         }
     }
-    return false;
+    return nullptr;
 }
 
 bool KeyCharacterMap::matchesMetaState(int32_t eventMetaState, int32_t behaviorMetaState) {
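
For orientation, a small sketch (not part of the patch) of how the new entry points fit together: addKeyRemapping() records an override, applyKeyRemapping() resolves it, and applyKeyBehavior() returns the (keyCode, metaState) pair that the removed tryRemapKey() used to produce through out-parameters. The .kcm path and the Format::BASE spelling are assumptions; load()'s signature is the one shown earlier in this diff.

#include <android/input.h>     // AMETA_NONE
#include <android/keycodes.h>  // AKEYCODE_*
#include <input/KeyCharacterMap.h>

#include <memory>

void exampleKeyRemapping() {
    auto ret = android::KeyCharacterMap::load("/system/usr/keychars/Generic.kcm",
                                              android::KeyCharacterMap::Format::BASE);
    if (!ret.ok()) {
        return;  // file missing or malformed
    }
    std::shared_ptr<android::KeyCharacterMap> map = *ret;

    // Remap A to B at the key-code level, then resolve a lookup.
    map->addKeyRemapping(AKEYCODE_A, AKEYCODE_B);
    const int32_t remapped = map->applyKeyRemapping(AKEYCODE_A);  // -> AKEYCODE_B

    // Apply any behavior-level replacement for the (possibly remapped) key.
    const auto [keyCode, metaState] = map->applyKeyBehavior(remapped, AMETA_NONE);
    (void)keyCode;
    (void)metaState;
}

Passing the same code for both arguments clears the override, as the addKeyRemapping() hunk above shows.
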
@@ -537,19 +478,17 @@
         return false;
     }
 
-    for (size_t i = 0; i < mKeys.size(); i++) {
-        const Key* key = mKeys.valueAt(i);
-
+    for (const auto& [keyCode, key] : mKeys) {
         // Try to find the most general behavior that maps to this character.
         // For example, the base key behavior will usually be last in the list.
         const Behavior* found = nullptr;
-        for (const Behavior* behavior = key->firstBehavior; behavior; behavior = behavior->next) {
-            if (behavior->character == ch) {
-                found = behavior;
+        for (const Behavior& behavior : key.behaviors) {
+            if (behavior.character == ch) {
+                found = &behavior;
             }
         }
-        if (found) {
-            *outKeyCode = mKeys.keyAt(i);
+        if (found != nullptr) {
+            *outKeyCode = keyCode;
             *outMetaState = found->metaState;
             return true;
         }
@@ -701,12 +640,7 @@
             return nullptr;
         }
 
-        Key* key = new Key();
-        key->label = label;
-        key->number = number;
-        map->mKeys.add(keyCode, key);
-
-        Behavior* lastBehavior = nullptr;
+        Key key{.label = label, .number = number};
         while (parcel->readInt32()) {
             int32_t metaState = parcel->readInt32();
             char16_t character = parcel->readInt32();
@@ -716,23 +650,31 @@
                 return nullptr;
             }
 
-            Behavior* behavior = new Behavior();
-            behavior->metaState = metaState;
-            behavior->character = character;
-            behavior->fallbackKeyCode = fallbackKeyCode;
-            behavior->replacementKeyCode = replacementKeyCode;
-            if (lastBehavior) {
-                lastBehavior->next = behavior;
-            } else {
-                key->firstBehavior = behavior;
-            }
-            lastBehavior = behavior;
+            key.behaviors.push_back({
+                    .metaState = metaState,
+                    .character = character,
+                    .fallbackKeyCode = fallbackKeyCode,
+                    .replacementKeyCode = replacementKeyCode,
+            });
         }
+        map->mKeys.emplace(keyCode, std::move(key));
 
         if (parcel->errorCheck()) {
             return nullptr;
         }
     }
+    size_t numKeyRemapping = parcel->readInt32();
+    if (parcel->errorCheck()) {
+        return nullptr;
+    }
+    for (size_t i = 0; i < numKeyRemapping; i++) {
+        int32_t key = parcel->readInt32();
+        int32_t value = parcel->readInt32();
+        map->mKeyRemapping.insert_or_assign(key, value);
+        if (parcel->errorCheck()) {
+            return nullptr;
+        }
+    }
     size_t numKeysByScanCode = parcel->readInt32();
     if (parcel->errorCheck()) {
         return nullptr;
@@ -740,7 +682,7 @@
     for (size_t i = 0; i < numKeysByScanCode; i++) {
         int32_t key = parcel->readInt32();
         int32_t value = parcel->readInt32();
-        map->mKeysByScanCode.add(key, value);
+        map->mKeysByScanCode.insert_or_assign(key, value);
         if (parcel->errorCheck()) {
             return nullptr;
         }
@@ -752,7 +694,7 @@
     for (size_t i = 0; i < numKeysByUsageCode; i++) {
         int32_t key = parcel->readInt32();
         int32_t value = parcel->readInt32();
-        map->mKeysByUsageCode.add(key, value);
+        map->mKeysByUsageCode.insert_or_assign(key, value);
         if (parcel->errorCheck()) {
             return nullptr;
         }
@@ -771,81 +713,46 @@
 
     size_t numKeys = mKeys.size();
     parcel->writeInt32(numKeys);
-    for (size_t i = 0; i < numKeys; i++) {
-        int32_t keyCode = mKeys.keyAt(i);
-        const Key* key = mKeys.valueAt(i);
+    for (const auto& [keyCode, key] : mKeys) {
         parcel->writeInt32(keyCode);
-        parcel->writeInt32(key->label);
-        parcel->writeInt32(key->number);
-        for (const Behavior* behavior = key->firstBehavior; behavior != nullptr;
-                behavior = behavior->next) {
+        parcel->writeInt32(key.label);
+        parcel->writeInt32(key.number);
+        for (const Behavior& behavior : key.behaviors) {
             parcel->writeInt32(1);
-            parcel->writeInt32(behavior->metaState);
-            parcel->writeInt32(behavior->character);
-            parcel->writeInt32(behavior->fallbackKeyCode);
-            parcel->writeInt32(behavior->replacementKeyCode);
+            parcel->writeInt32(behavior.metaState);
+            parcel->writeInt32(behavior.character);
+            parcel->writeInt32(behavior.fallbackKeyCode);
+            parcel->writeInt32(behavior.replacementKeyCode);
         }
         parcel->writeInt32(0);
     }
+    size_t numKeyRemapping = mKeyRemapping.size();
+    parcel->writeInt32(numKeyRemapping);
+    for (auto const& [fromAndroidKeyCode, toAndroidKeyCode] : mKeyRemapping) {
+        parcel->writeInt32(fromAndroidKeyCode);
+        parcel->writeInt32(toAndroidKeyCode);
+    }
     size_t numKeysByScanCode = mKeysByScanCode.size();
     parcel->writeInt32(numKeysByScanCode);
-    for (size_t i = 0; i < numKeysByScanCode; i++) {
-        parcel->writeInt32(mKeysByScanCode.keyAt(i));
-        parcel->writeInt32(mKeysByScanCode.valueAt(i));
+    for (auto const& [fromScanCode, toAndroidKeyCode] : mKeysByScanCode) {
+        parcel->writeInt32(fromScanCode);
+        parcel->writeInt32(toAndroidKeyCode);
     }
     size_t numKeysByUsageCode = mKeysByUsageCode.size();
     parcel->writeInt32(numKeysByUsageCode);
-    for (size_t i = 0; i < numKeysByUsageCode; i++) {
-        parcel->writeInt32(mKeysByUsageCode.keyAt(i));
-        parcel->writeInt32(mKeysByUsageCode.valueAt(i));
+    for (auto const& [fromUsageCode, toAndroidKeyCode] : mKeysByUsageCode) {
+        parcel->writeInt32(fromUsageCode);
+        parcel->writeInt32(toAndroidKeyCode);
     }
 }
 #endif // __linux__
 
-// --- KeyCharacterMap::Key ---
-
-KeyCharacterMap::Key::Key() :
-        label(0), number(0), firstBehavior(nullptr) {
-}
-
-KeyCharacterMap::Key::Key(const Key& other) :
-        label(other.label), number(other.number),
-        firstBehavior(other.firstBehavior ? new Behavior(*other.firstBehavior) : nullptr) {
-}
-
-KeyCharacterMap::Key::~Key() {
-    Behavior* behavior = firstBehavior;
-    while (behavior) {
-        Behavior* next = behavior->next;
-        delete behavior;
-        behavior = next;
-    }
-}
-
-
-// --- KeyCharacterMap::Behavior ---
-
-KeyCharacterMap::Behavior::Behavior() :
-        next(nullptr), metaState(0), character(0), fallbackKeyCode(0), replacementKeyCode(0) {
-}
-
-KeyCharacterMap::Behavior::Behavior(const Behavior& other) :
-        next(other.next ? new Behavior(*other.next) : nullptr),
-        metaState(other.metaState), character(other.character),
-        fallbackKeyCode(other.fallbackKeyCode),
-        replacementKeyCode(other.replacementKeyCode) {
-}
-
-
 // --- KeyCharacterMap::Parser ---
 
 KeyCharacterMap::Parser::Parser(KeyCharacterMap* map, Tokenizer* tokenizer, Format format) :
         mMap(map), mTokenizer(tokenizer), mFormat(format), mState(STATE_TOP) {
 }
 
-KeyCharacterMap::Parser::~Parser() {
-}
-
 status_t KeyCharacterMap::Parser::parse() {
     while (!mTokenizer->isEof()) {
 #if DEBUG_PARSER
@@ -888,9 +795,9 @@
 
             mTokenizer->skipDelimiters(WHITESPACE);
             if (!mTokenizer->isEol() && mTokenizer->peekChar() != '#') {
-                ALOGE("%s: Expected end of line or trailing comment, got '%s'.",
-                      mTokenizer->getLocation().c_str(), mTokenizer->peekRemainderOfLine().c_str());
-                return BAD_VALUE;
+                ALOGE("%s: Expected end of line or trailing comment, got '%s'.",
+                      mTokenizer->getLocation().c_str(), mTokenizer->peekRemainderOfLine().c_str());
+                return BAD_VALUE;
             }
         }
 
@@ -990,17 +897,17 @@
               mapUsage ? "usage" : "scan code", codeToken.c_str());
         return BAD_VALUE;
     }
-    KeyedVector<int32_t, int32_t>& map =
-            mapUsage ? mMap->mKeysByUsageCode : mMap->mKeysByScanCode;
-    if (map.indexOfKey(code) >= 0) {
+    std::map<int32_t, int32_t>& map = mapUsage ? mMap->mKeysByUsageCode : mMap->mKeysByScanCode;
+    const auto it = map.find(code);
+    if (it != map.end()) {
         ALOGE("%s: Duplicate entry for key %s '%s'.", mTokenizer->getLocation().c_str(),
-              mapUsage ? "usage" : "scan code", codeToken.c_str());
+              mapUsage ? "usage" : "scan code", codeToken.c_str());
         return BAD_VALUE;
     }
 
     mTokenizer->skipDelimiters(WHITESPACE);
     String8 keyCodeToken = mTokenizer->nextToken(WHITESPACE);
-    int32_t keyCode = InputEventLookup::getKeyCodeByLabel(keyCodeToken.c_str());
+    std::optional<int> keyCode = InputEventLookup::getKeyCodeByLabel(keyCodeToken.c_str());
     if (!keyCode) {
         ALOGE("%s: Expected key code label, got '%s'.", mTokenizer->getLocation().c_str(),
               keyCodeToken.c_str());
@@ -1011,21 +918,21 @@
     ALOGD("Parsed map key %s: code=%d, keyCode=%d.",
             mapUsage ? "usage" : "scan code", code, keyCode);
 #endif
-    map.add(code, keyCode);
+    map.insert_or_assign(code, *keyCode);
     return NO_ERROR;
 }
 
 status_t KeyCharacterMap::Parser::parseKey() {
     String8 keyCodeToken = mTokenizer->nextToken(WHITESPACE);
-    int32_t keyCode = InputEventLookup::getKeyCodeByLabel(keyCodeToken.c_str());
+    std::optional<int> keyCode = InputEventLookup::getKeyCodeByLabel(keyCodeToken.c_str());
     if (!keyCode) {
         ALOGE("%s: Expected key code label, got '%s'.", mTokenizer->getLocation().c_str(),
               keyCodeToken.c_str());
         return BAD_VALUE;
     }
-    if (mMap->mKeys.indexOfKey(keyCode) >= 0) {
+    if (mMap->mKeys.find(*keyCode) != mMap->mKeys.end()) {
         ALOGE("%s: Duplicate entry for key code '%s'.", mTokenizer->getLocation().c_str(),
-              keyCodeToken.c_str());
+              keyCodeToken.c_str());
         return BAD_VALUE;
     }
 
@@ -1037,31 +944,29 @@
         return BAD_VALUE;
     }
 
-#if DEBUG_PARSER
-    ALOGD("Parsed beginning of key: keyCode=%d.", keyCode);
-#endif
-    mKeyCode = keyCode;
-    mMap->mKeys.add(keyCode, new Key());
+    ALOGD_IF(DEBUG_PARSER, "Parsed beginning of key: keyCode=%d.", *keyCode);
+    mKeyCode = *keyCode;
+    mMap->mKeys.emplace(*keyCode, Key{});
     mState = STATE_KEY;
     return NO_ERROR;
 }
 
 status_t KeyCharacterMap::Parser::parseKeyProperty() {
-    Key* key = mMap->mKeys.valueFor(mKeyCode);
+    Key& key = mMap->mKeys[mKeyCode];
     String8 token = mTokenizer->nextToken(WHITESPACE_OR_PROPERTY_DELIMITER);
     if (token == "}") {
         mState = STATE_TOP;
         return finishKey(key);
     }
 
-    Vector<Property> properties;
+    std::vector<Property> properties;
 
     // Parse all comma-delimited property names up to the first colon.
     for (;;) {
         if (token == "label") {
-            properties.add(Property(PROPERTY_LABEL));
+            properties.emplace_back(PROPERTY_LABEL);
         } else if (token == "number") {
-            properties.add(Property(PROPERTY_NUMBER));
+            properties.emplace_back(PROPERTY_NUMBER);
         } else {
             int32_t metaState;
             status_t status = parseModifier(token.c_str(), &metaState);
@@ -1070,7 +975,7 @@
                       mTokenizer->getLocation().c_str(), token.c_str());
                 return status;
             }
-            properties.add(Property(PROPERTY_META, metaState));
+            properties.emplace_back(PROPERTY_META, metaState);
         }
 
         mTokenizer->skipDelimiters(WHITESPACE);
@@ -1135,7 +1040,7 @@
             } else if (token == "fallback") {
                 mTokenizer->skipDelimiters(WHITESPACE);
                 token = mTokenizer->nextToken(WHITESPACE);
-                int32_t keyCode = InputEventLookup::getKeyCodeByLabel(token.c_str());
+                std::optional<int> keyCode = InputEventLookup::getKeyCodeByLabel(token.c_str());
                 if (!keyCode) {
                     ALOGE("%s: Invalid key code label for fallback behavior, got '%s'.",
                           mTokenizer->getLocation().c_str(), token.c_str());
@@ -1146,12 +1051,12 @@
                           mTokenizer->getLocation().c_str());
                     return BAD_VALUE;
                 }
-                behavior.fallbackKeyCode = keyCode;
+                behavior.fallbackKeyCode = *keyCode;
                 haveFallback = true;
             } else if (token == "replace") {
                 mTokenizer->skipDelimiters(WHITESPACE);
                 token = mTokenizer->nextToken(WHITESPACE);
-                int32_t keyCode = InputEventLookup::getKeyCodeByLabel(token.c_str());
+                std::optional<int> keyCode = InputEventLookup::getKeyCodeByLabel(token.c_str());
                 if (!keyCode) {
                     ALOGE("%s: Invalid key code label for replace, got '%s'.",
                           mTokenizer->getLocation().c_str(), token.c_str());
@@ -1167,7 +1072,7 @@
                           mTokenizer->getLocation().c_str());
                     return BAD_VALUE;
                 }
-                behavior.replacementKeyCode = keyCode;
+                behavior.replacementKeyCode = *keyCode;
                 haveReplacement = true;
 
             } else {
@@ -1180,47 +1085,44 @@
     } while (!mTokenizer->isEol() && mTokenizer->peekChar() != '#');
 
     // Add the behavior.
-    for (size_t i = 0; i < properties.size(); i++) {
-        const Property& property = properties.itemAt(i);
+    for (const Property& property : properties) {
         switch (property.property) {
         case PROPERTY_LABEL:
-            if (key->label) {
-                ALOGE("%s: Duplicate label for key.", mTokenizer->getLocation().c_str());
-                return BAD_VALUE;
-            }
-            key->label = behavior.character;
+            if (key.label) {
+                ALOGE("%s: Duplicate label for key.", mTokenizer->getLocation().c_str());
+                return BAD_VALUE;
+            }
+            key.label = behavior.character;
 #if DEBUG_PARSER
-            ALOGD("Parsed key label: keyCode=%d, label=%d.", mKeyCode, key->label);
+            ALOGD("Parsed key label: keyCode=%d, label=%d.", mKeyCode, key.label);
 #endif
             break;
         case PROPERTY_NUMBER:
-            if (key->number) {
-                ALOGE("%s: Duplicate number for key.", mTokenizer->getLocation().c_str());
-                return BAD_VALUE;
+            if (key.number) {
+                ALOGE("%s: Duplicate number for key.", mTokenizer->getLocation().c_str());
+                return BAD_VALUE;
             }
-            key->number = behavior.character;
+            key.number = behavior.character;
 #if DEBUG_PARSER
-            ALOGD("Parsed key number: keyCode=%d, number=%d.", mKeyCode, key->number);
+            ALOGD("Parsed key number: keyCode=%d, number=%d.", mKeyCode, key.number);
 #endif
             break;
         case PROPERTY_META: {
-            for (Behavior* b = key->firstBehavior; b; b = b->next) {
-                if (b->metaState == property.metaState) {
+            for (const Behavior& b : key.behaviors) {
+                if (b.metaState == property.metaState) {
                     ALOGE("%s: Duplicate key behavior for modifier.",
                           mTokenizer->getLocation().c_str());
                     return BAD_VALUE;
-                }
+                }
             }
-            Behavior* newBehavior = new Behavior(behavior);
-            newBehavior->metaState = property.metaState;
-            newBehavior->next = key->firstBehavior;
-            key->firstBehavior = newBehavior;
-#if DEBUG_PARSER
-            ALOGD("Parsed key meta: keyCode=%d, meta=0x%x, char=%d, fallback=%d replace=%d.",
-                    mKeyCode,
-                    newBehavior->metaState, newBehavior->character,
-                    newBehavior->fallbackKeyCode, newBehavior->replacementKeyCode);
-#endif
+            Behavior newBehavior = behavior;
+            newBehavior.metaState = property.metaState;
+            key.behaviors.push_front(newBehavior);
+            ALOGD_IF(DEBUG_PARSER,
+                     "Parsed key meta: keyCode=%d, meta=0x%x, char=%d, fallback=%d replace=%d.",
+                     mKeyCode, key.behaviors.front().metaState, key.behaviors.front().character,
+                     key.behaviors.front().fallbackKeyCode,
+                     key.behaviors.front().replacementKeyCode);
             break;
         }
         }
@@ -1228,13 +1130,13 @@
     return NO_ERROR;
 }
 
-status_t KeyCharacterMap::Parser::finishKey(Key* key) {
+status_t KeyCharacterMap::Parser::finishKey(Key& key) {
     // Fill in default number property.
-    if (!key->number) {
+    if (!key.number) {
         char16_t digit = 0;
         char16_t symbol = 0;
-        for (Behavior* b = key->firstBehavior; b; b = b->next) {
-            char16_t ch = b->character;
+        for (const Behavior& b : key.behaviors) {
+            char16_t ch = b.character;
             if (ch) {
                 if (ch >= '0' && ch <= '9') {
                     digit = ch;
@@ -1245,7 +1147,7 @@
                 }
             }
         }
-        key->number = digit ? digit : symbol;
+        key.number = digit ? digit : symbol;
     }
     return NO_ERROR;
 }
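
The KeyCharacterMap.cpp changes above replace the hand-rolled Behavior* linked list (with its
manual copy constructor and destructor) and the KeyedVector containers with standard value-type
containers. The header side of the refactor is not part of this patch, so the sketch below is
only an assumption inferred from the new call sites (key.behaviors.push_front(), mKeys.emplace(),
std::map iteration in writeToParcel); the member and container choices are illustrative, not the
real declarations.

    #include <cstdint>
    #include <list>
    #include <map>

    struct Behavior {
        int32_t metaState = 0;
        char16_t character = 0;
        int32_t fallbackKeyCode = 0;
        int32_t replacementKeyCode = 0;
    };

    struct Key {
        char16_t label = 0;
        char16_t number = 0;
        // Replaces the old Behavior* firstBehavior chain; push_front() keeps the same
        // "most recently parsed behavior first" order as the old prepend
        // (newBehavior->next = key->firstBehavior).
        std::list<Behavior> behaviors;
    };

    // Replaces KeyedVector<int32_t, Key*> (shown here as std::map, though the real header may
    // use a different associative container); values are owned by the map, so no manual delete
    // is needed.
    std::map<int32_t, Key> keys;
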
diff --git a/libs/input/KeyLayoutMap.cpp b/libs/input/KeyLayoutMap.cpp
index 79b6cea..ddc9ea4 100644
--- a/libs/input/KeyLayoutMap.cpp
+++ b/libs/input/KeyLayoutMap.cpp
@@ -16,6 +16,7 @@
 
 #define LOG_TAG "KeyLayoutMap"
 
+#include <android-base/logging.h>
 #include <android/keycodes.h>
 #include <ftl/enum.h>
 #include <input/InputEventLabels.h>
@@ -54,6 +55,21 @@
 namespace android {
 namespace {
 
+std::optional<int> parseInt(const char* str) {
+    char* end;
+    errno = 0;
+    const int value = strtol(str, &end, 0);
+    if (end == str) {
+        LOG(ERROR) << "Could not parse " << str;
+        return {};
+    }
+    if (errno == ERANGE) {
+        LOG(ERROR) << "Out of bounds: " << str;
+        return {};
+    }
+    return value;
+}
+
 constexpr const char* WHITESPACE = " \t\r";
 
 template <InputDeviceSensorType S>
@@ -192,7 +208,8 @@
 }
 
 // Return pair of sensor type and sensor data index, for the input device abs code
-base::Result<std::pair<InputDeviceSensorType, int32_t>> KeyLayoutMap::mapSensor(int32_t absCode) {
+base::Result<std::pair<InputDeviceSensorType, int32_t>> KeyLayoutMap::mapSensor(
+        int32_t absCode) const {
     auto it = mSensorsByAbsCode.find(absCode);
     if (it == mSensorsByAbsCode.end()) {
         ALOGD_IF(DEBUG_MAPPING, "mapSensor: absCode=%d, ~ Failed.", absCode);
@@ -344,24 +361,23 @@
         codeToken = mTokenizer->nextToken(WHITESPACE);
     }
 
-    char* end;
-    int32_t code = int32_t(strtol(codeToken.c_str(), &end, 0));
-    if (*end) {
+    std::optional<int> code = parseInt(codeToken.c_str());
+    if (!code) {
         ALOGE("%s: Expected key %s number, got '%s'.", mTokenizer->getLocation().c_str(),
-              mapUsage ? "usage" : "scan code", codeToken.c_str());
+              mapUsage ? "usage" : "scan code", codeToken.c_str());
         return BAD_VALUE;
     }
     std::unordered_map<int32_t, Key>& map =
             mapUsage ? mMap->mKeysByUsageCode : mMap->mKeysByScanCode;
-    if (map.find(code) != map.end()) {
+    if (map.find(*code) != map.end()) {
         ALOGE("%s: Duplicate entry for key %s '%s'.", mTokenizer->getLocation().c_str(),
-              mapUsage ? "usage" : "scan code", codeToken.c_str());
+              mapUsage ? "usage" : "scan code", codeToken.c_str());
         return BAD_VALUE;
     }
 
     mTokenizer->skipDelimiters(WHITESPACE);
     String8 keyCodeToken = mTokenizer->nextToken(WHITESPACE);
-    int32_t keyCode = InputEventLookup::getKeyCodeByLabel(keyCodeToken.c_str());
+    std::optional<int> keyCode = InputEventLookup::getKeyCodeByLabel(keyCodeToken.c_str());
     if (!keyCode) {
         ALOGE("%s: Expected key code label, got '%s'.", mTokenizer->getLocation().c_str(),
               keyCodeToken.c_str());
@@ -374,42 +390,41 @@
         if (mTokenizer->isEol() || mTokenizer->peekChar() == '#') break;
 
         String8 flagToken = mTokenizer->nextToken(WHITESPACE);
-        uint32_t flag = InputEventLookup::getKeyFlagByLabel(flagToken.c_str());
+        std::optional<int> flag = InputEventLookup::getKeyFlagByLabel(flagToken.c_str());
         if (!flag) {
             ALOGE("%s: Expected key flag label, got '%s'.", mTokenizer->getLocation().c_str(),
                   flagToken.c_str());
             return BAD_VALUE;
         }
-        if (flags & flag) {
+        if (flags & *flag) {
             ALOGE("%s: Duplicate key flag '%s'.", mTokenizer->getLocation().c_str(),
-                  flagToken.c_str());
+                  flagToken.c_str());
             return BAD_VALUE;
         }
-        flags |= flag;
+        flags |= *flag;
     }
 
     ALOGD_IF(DEBUG_PARSER, "Parsed key %s: code=%d, keyCode=%d, flags=0x%08x.",
-             mapUsage ? "usage" : "scan code", code, keyCode, flags);
+             mapUsage ? "usage" : "scan code", *code, *keyCode, flags);
 
     Key key;
-    key.keyCode = keyCode;
+    key.keyCode = *keyCode;
     key.flags = flags;
-    map.insert({code, key});
+    map.insert({*code, key});
     return NO_ERROR;
 }
 
 status_t KeyLayoutMap::Parser::parseAxis() {
     String8 scanCodeToken = mTokenizer->nextToken(WHITESPACE);
-    char* end;
-    int32_t scanCode = int32_t(strtol(scanCodeToken.c_str(), &end, 0));
-    if (*end) {
+    std::optional<int> scanCode = parseInt(scanCodeToken.c_str());
+    if (!scanCode) {
         ALOGE("%s: Expected axis scan code number, got '%s'.", mTokenizer->getLocation().c_str(),
-              scanCodeToken.c_str());
+              scanCodeToken.c_str());
         return BAD_VALUE;
     }
-    if (mMap->mAxes.find(scanCode) != mMap->mAxes.end()) {
+    if (mMap->mAxes.find(*scanCode) != mMap->mAxes.end()) {
         ALOGE("%s: Duplicate entry for axis scan code '%s'.", mTokenizer->getLocation().c_str(),
-              scanCodeToken.c_str());
+              scanCodeToken.c_str());
         return BAD_VALUE;
     }
 
@@ -422,48 +437,53 @@
 
         mTokenizer->skipDelimiters(WHITESPACE);
         String8 axisToken = mTokenizer->nextToken(WHITESPACE);
-        axisInfo.axis = InputEventLookup::getAxisByLabel(axisToken.c_str());
-        if (axisInfo.axis < 0) {
-            ALOGE("%s: Expected inverted axis label, got '%s'.", mTokenizer->getLocation().c_str(),
-                  axisToken.c_str());
+        std::optional<int> axis = InputEventLookup::getAxisByLabel(axisToken.c_str());
+        if (!axis) {
+            ALOGE("%s: Expected inverted axis label, got '%s'.",
+                  mTokenizer->getLocation().c_str(), axisToken.c_str());
             return BAD_VALUE;
         }
+        axisInfo.axis = *axis;
     } else if (token == "split") {
         axisInfo.mode = AxisInfo::MODE_SPLIT;
 
         mTokenizer->skipDelimiters(WHITESPACE);
         String8 splitToken = mTokenizer->nextToken(WHITESPACE);
-        axisInfo.splitValue = int32_t(strtol(splitToken.c_str(), &end, 0));
-        if (*end) {
-            ALOGE("%s: Expected split value, got '%s'.", mTokenizer->getLocation().c_str(),
-                  splitToken.c_str());
+        std::optional<int> splitValue = parseInt(splitToken.c_str());
+        if (!splitValue) {
+            ALOGE("%s: Expected split value, got '%s'.",
+                  mTokenizer->getLocation().c_str(), splitToken.c_str());
             return BAD_VALUE;
         }
+        axisInfo.splitValue = *splitValue;
 
         mTokenizer->skipDelimiters(WHITESPACE);
         String8 lowAxisToken = mTokenizer->nextToken(WHITESPACE);
-        axisInfo.axis = InputEventLookup::getAxisByLabel(lowAxisToken.c_str());
-        if (axisInfo.axis < 0) {
-            ALOGE("%s: Expected low axis label, got '%s'.", mTokenizer->getLocation().c_str(),
-                  lowAxisToken.c_str());
+        std::optional<int> axis = InputEventLookup::getAxisByLabel(lowAxisToken.c_str());
+        if (!axis) {
+            ALOGE("%s: Expected low axis label, got '%s'.",
+                  mTokenizer->getLocation().c_str(), lowAxisToken.c_str());
             return BAD_VALUE;
         }
+        axisInfo.axis = *axis;
 
         mTokenizer->skipDelimiters(WHITESPACE);
         String8 highAxisToken = mTokenizer->nextToken(WHITESPACE);
-        axisInfo.highAxis = InputEventLookup::getAxisByLabel(highAxisToken.c_str());
-        if (axisInfo.highAxis < 0) {
-            ALOGE("%s: Expected high axis label, got '%s'.", mTokenizer->getLocation().c_str(),
-                  highAxisToken.c_str());
+        std::optional<int> highAxis = InputEventLookup::getAxisByLabel(highAxisToken.c_str());
+        if (!highAxis) {
+            ALOGE("%s: Expected high axis label, got '%s'.",
+                  mTokenizer->getLocation().c_str(), highAxisToken.c_str());
             return BAD_VALUE;
         }
+        axisInfo.highAxis = *highAxis;
     } else {
-        axisInfo.axis = InputEventLookup::getAxisByLabel(token.c_str());
-        if (axisInfo.axis < 0) {
+        std::optional<int> axis = InputEventLookup::getAxisByLabel(token.c_str());
+        if (!axis) {
             ALOGE("%s: Expected axis label, 'split' or 'invert', got '%s'.",
                   mTokenizer->getLocation().c_str(), token.c_str());
             return BAD_VALUE;
         }
+        axisInfo.axis = *axis;
     }
 
     for (;;) {
@@ -475,12 +495,13 @@
         if (keywordToken == "flat") {
             mTokenizer->skipDelimiters(WHITESPACE);
             String8 flatToken = mTokenizer->nextToken(WHITESPACE);
-            axisInfo.flatOverride = int32_t(strtol(flatToken.c_str(), &end, 0));
-            if (*end) {
-                ALOGE("%s: Expected flat value, got '%s'.", mTokenizer->getLocation().c_str(),
-                      flatToken.c_str());
+            std::optional<int> flatOverride = parseInt(flatToken.c_str());
+            if (!flatOverride) {
+                ALOGE("%s: Expected flat value, got '%s'.",
+                      mTokenizer->getLocation().c_str(), flatToken.c_str());
                 return BAD_VALUE;
             }
+            axisInfo.flatOverride = *flatOverride;
         } else {
             ALOGE("%s: Expected keyword 'flat', got '%s'.", mTokenizer->getLocation().c_str(),
                   keywordToken.c_str());
@@ -491,9 +512,9 @@
     ALOGD_IF(DEBUG_PARSER,
              "Parsed axis: scanCode=%d, mode=%d, axis=%d, highAxis=%d, "
              "splitValue=%d, flatOverride=%d.",
-             scanCode, axisInfo.mode, axisInfo.axis, axisInfo.highAxis, axisInfo.splitValue,
+             *scanCode, axisInfo.mode, axisInfo.axis, axisInfo.highAxis, axisInfo.splitValue,
              axisInfo.flatOverride);
-    mMap->mAxes.insert({scanCode, axisInfo});
+    mMap->mAxes.insert({*scanCode, axisInfo});
     return NO_ERROR;
 }
 
@@ -505,37 +526,36 @@
         mTokenizer->skipDelimiters(WHITESPACE);
         codeToken = mTokenizer->nextToken(WHITESPACE);
     }
-    char* end;
-    int32_t code = int32_t(strtol(codeToken.c_str(), &end, 0));
-    if (*end) {
+    std::optional<int> code = parseInt(codeToken.c_str());
+    if (!code) {
         ALOGE("%s: Expected led %s number, got '%s'.", mTokenizer->getLocation().c_str(),
-              mapUsage ? "usage" : "scan code", codeToken.c_str());
+              mapUsage ? "usage" : "scan code", codeToken.c_str());
         return BAD_VALUE;
     }
 
     std::unordered_map<int32_t, Led>& map =
             mapUsage ? mMap->mLedsByUsageCode : mMap->mLedsByScanCode;
-    if (map.find(code) != map.end()) {
+    if (map.find(*code) != map.end()) {
         ALOGE("%s: Duplicate entry for led %s '%s'.", mTokenizer->getLocation().c_str(),
-              mapUsage ? "usage" : "scan code", codeToken.c_str());
+              mapUsage ? "usage" : "scan code", codeToken.c_str());
         return BAD_VALUE;
     }
 
     mTokenizer->skipDelimiters(WHITESPACE);
     String8 ledCodeToken = mTokenizer->nextToken(WHITESPACE);
-    int32_t ledCode = InputEventLookup::getLedByLabel(ledCodeToken.c_str());
-    if (ledCode < 0) {
+    std::optional<int> ledCode = InputEventLookup::getLedByLabel(ledCodeToken.c_str());
+    if (!ledCode) {
         ALOGE("%s: Expected LED code label, got '%s'.", mTokenizer->getLocation().c_str(),
-              ledCodeToken.c_str());
+              ledCodeToken.c_str());
         return BAD_VALUE;
     }
 
     ALOGD_IF(DEBUG_PARSER, "Parsed led %s: code=%d, ledCode=%d.", mapUsage ? "usage" : "scan code",
-             code, ledCode);
+             *code, *ledCode);
 
     Led led;
-    led.ledCode = ledCode;
-    map.insert({code, led});
+    led.ledCode = *ledCode;
+    map.insert({*code, led});
     return NO_ERROR;
 }
 
@@ -573,16 +593,15 @@
 // sensor 0x05 GYROSCOPE Z
 status_t KeyLayoutMap::Parser::parseSensor() {
     String8 codeToken = mTokenizer->nextToken(WHITESPACE);
-    char* end;
-    int32_t code = int32_t(strtol(codeToken.c_str(), &end, 0));
-    if (*end) {
+    std::optional<int> code = parseInt(codeToken.c_str());
+    if (!code) {
         ALOGE("%s: Expected sensor %s number, got '%s'.", mTokenizer->getLocation().c_str(),
               "abs code", codeToken.c_str());
         return BAD_VALUE;
     }
 
     std::unordered_map<int32_t, Sensor>& map = mMap->mSensorsByAbsCode;
-    if (map.find(code) != map.end()) {
+    if (map.find(*code) != map.end()) {
         ALOGE("%s: Duplicate entry for sensor %s '%s'.", mTokenizer->getLocation().c_str(),
               "abs code", codeToken.c_str());
         return BAD_VALUE;
@@ -607,13 +626,13 @@
     }
     int32_t sensorDataIndex = indexOpt.value();
 
-    ALOGD_IF(DEBUG_PARSER, "Parsed sensor: abs code=%d, sensorType=%s, sensorDataIndex=%d.", code,
+    ALOGD_IF(DEBUG_PARSER, "Parsed sensor: abs code=%d, sensorType=%s, sensorDataIndex=%d.", *code,
              ftl::enum_string(sensorType).c_str(), sensorDataIndex);
 
     Sensor sensor;
     sensor.sensorType = sensorType;
     sensor.sensorDataIndex = sensorDataIndex;
-    map.emplace(code, sensor);
+    map.emplace(*code, sensor);
     return NO_ERROR;
 }
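
The parseInt() helper added to KeyLayoutMap.cpp above centralizes the strtol error handling that
was previously repeated inline. Below is a minimal standalone sketch of the same pattern;
parseIntSketch is a made-up name, not part of the library.

    #include <cerrno>
    #include <cstdlib>
    #include <iostream>
    #include <optional>
    #include <string>

    std::optional<int> parseIntSketch(const char* str) {
        char* end = nullptr;
        errno = 0;
        const long value = strtol(str, &end, 0);  // base 0 accepts decimal, 0x hex, and octal
        if (end == str) return std::nullopt;      // no digits were consumed
        if (errno == ERANGE) return std::nullopt; // value out of range for long
        return static_cast<int>(value);
    }

    int main() {
        for (const char* s : {"0x152", "35", "junk", "99999999999999999999"}) {
            const auto v = parseIntSketch(s);
            std::cout << s << " -> " << (v ? std::to_string(*v) : "nullopt") << '\n';
        }
    }

Note that, unlike the old inline checks that tested *end, the helper only rejects inputs with no
leading digits; trailing characters after a valid number are tolerated.
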
 
diff --git a/libs/input/Keyboard.cpp b/libs/input/Keyboard.cpp
index 5f06efa..0b5c7ff 100644
--- a/libs/input/Keyboard.cpp
+++ b/libs/input/Keyboard.cpp
@@ -16,9 +16,10 @@
 
 #define LOG_TAG "Keyboard"
 
+#include <limits.h>
 #include <stdlib.h>
 #include <unistd.h>
-#include <limits.h>
+#include <optional>
 
 #include <input/InputDevice.h>
 #include <input/InputEventLabels.h>
@@ -49,25 +50,25 @@
         const PropertyMap* deviceConfiguration) {
     // Use the configured key layout if available.
     if (deviceConfiguration) {
-        String8 keyLayoutName;
-        if (deviceConfiguration->tryGetProperty(String8("keyboard.layout"),
-                keyLayoutName)) {
-            status_t status = loadKeyLayout(deviceIdentifier, keyLayoutName.c_str());
+        std::optional<std::string> keyLayoutName =
+                deviceConfiguration->getString("keyboard.layout");
+        if (keyLayoutName.has_value()) {
+            status_t status = loadKeyLayout(deviceIdentifier, *keyLayoutName);
             if (status == NAME_NOT_FOUND) {
                 ALOGE("Configuration for keyboard device '%s' requested keyboard layout '%s' but "
                       "it was not found.",
-                      deviceIdentifier.name.c_str(), keyLayoutName.c_str());
+                      deviceIdentifier.name.c_str(), keyLayoutName->c_str());
             }
         }
 
-        String8 keyCharacterMapName;
-        if (deviceConfiguration->tryGetProperty(String8("keyboard.characterMap"),
-                keyCharacterMapName)) {
-            status_t status = loadKeyCharacterMap(deviceIdentifier, keyCharacterMapName.c_str());
+        std::optional<std::string> keyCharacterMapName =
+                deviceConfiguration->getString("keyboard.characterMap");
+        if (keyCharacterMapName.has_value()) {
+            status_t status = loadKeyCharacterMap(deviceIdentifier, *keyCharacterMapName);
             if (status == NAME_NOT_FOUND) {
                 ALOGE("Configuration for keyboard device '%s' requested keyboard character "
                       "map '%s' but it was not found.",
-                      deviceIdentifier.name.c_str(), keyCharacterMapName.c_str());
+                      deviceIdentifier.name.c_str(), keyCharacterMapName->c_str());
             }
         }
 
@@ -164,9 +165,7 @@
     if (config == nullptr) {
         return false;
     }
-    bool isSpecialFunction = false;
-    config->tryGetProperty(String8("keyboard.specialFunction"), isSpecialFunction);
-    return isSpecialFunction;
+    return config->getBool("keyboard.specialFunction").value_or(false);
 }
 
 bool isEligibleBuiltInKeyboard(const InputDeviceIdentifier& deviceIdentifier,
@@ -179,9 +178,7 @@
     }
 
     if (deviceConfiguration) {
-        bool builtIn = false;
-        if (deviceConfiguration->tryGetProperty(String8("keyboard.builtIn"), builtIn)
-                && builtIn) {
+        if (deviceConfiguration->getBool("keyboard.builtIn").value_or(false)) {
             return true;
         }
     }
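
The Keyboard.cpp changes above migrate from the String8 out-parameter API
(PropertyMap::tryGetProperty) to the std::optional getters introduced later in this patch
(PropertyMap::getString/getBool). A small sketch of the caller-side pattern; ConfigSketch is a
hypothetical stand-in, not the real PropertyMap.

    #include <optional>
    #include <string>

    struct ConfigSketch {
        std::optional<std::string> getString(const std::string&) const { return std::nullopt; }
        std::optional<bool> getBool(const std::string&) const { return std::nullopt; }
    };

    void configureSketch(const ConfigSketch& config) {
        // Missing keys are now an explicit std::nullopt instead of an untouched out-parameter.
        if (const auto layout = config.getString("keyboard.layout"); layout.has_value()) {
            // use *layout or layout->c_str() here
        }
        // value_or() collapses the old "default, overwrite if present" pattern into one line.
        const bool builtIn = config.getBool("keyboard.builtIn").value_or(false);
        (void)builtIn;
    }
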
diff --git a/libs/input/MotionPredictor.cpp b/libs/input/MotionPredictor.cpp
new file mode 100644
index 0000000..3037573
--- /dev/null
+++ b/libs/input/MotionPredictor.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MotionPredictor"
+
+#include <input/MotionPredictor.h>
+
+#include <cinttypes>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include <android-base/strings.h>
+#include <android/input.h>
+#include <log/log.h>
+
+#include <attestation/HmacKeyManager.h>
+#include <ftl/enum.h>
+#include <input/TfLiteMotionPredictor.h>
+
+namespace android {
+namespace {
+
+const int64_t PREDICTION_INTERVAL_NANOS =
+        12500000 / 3; // TODO(b/266747937): Get this from the model.
+
+/**
+ * Log debug messages about predictions.
+ * Enable this via "adb shell setprop log.tag.MotionPredictor DEBUG"
+ */
+bool isDebug() {
+    return __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG, ANDROID_LOG_INFO);
+}
+
+// Converts a prediction of some polar (r, phi) to Cartesian (x, y) when applied to an axis.
+TfLiteMotionPredictorSample::Point convertPrediction(
+        const TfLiteMotionPredictorSample::Point& axisFrom,
+        const TfLiteMotionPredictorSample::Point& axisTo, float r, float phi) {
+    const TfLiteMotionPredictorSample::Point axis = axisTo - axisFrom;
+    const float axis_phi = std::atan2(axis.y, axis.x);
+    const float x_delta = r * std::cos(axis_phi + phi);
+    const float y_delta = r * std::sin(axis_phi + phi);
+    return {.x = axisTo.x + x_delta, .y = axisTo.y + y_delta};
+}
+
+} // namespace
+
+// --- MotionPredictor ---
+
+MotionPredictor::MotionPredictor(nsecs_t predictionTimestampOffsetNanos,
+                                 std::function<bool()> checkMotionPredictionEnabled)
+      : mPredictionTimestampOffsetNanos(predictionTimestampOffsetNanos),
+        mCheckMotionPredictionEnabled(std::move(checkMotionPredictionEnabled)) {}
+
+android::base::Result<void> MotionPredictor::record(const MotionEvent& event) {
+    if (mLastEvent && mLastEvent->getDeviceId() != event.getDeviceId()) {
+        // We still have an active gesture for another device. The provided MotionEvent is not
+        // consistent with the previous gesture.
+        LOG(ERROR) << "Inconsistent event stream: last event is " << *mLastEvent << ", but "
+                   << __func__ << " is called with " << event;
+        return android::base::Error()
+                << "Inconsistent event stream: still have an active gesture from device "
+                << mLastEvent->getDeviceId() << ", but received " << event;
+    }
+    if (!isPredictionAvailable(event.getDeviceId(), event.getSource())) {
+        ALOGE("Prediction not supported for device %d's %s source", event.getDeviceId(),
+              inputEventSourceToString(event.getSource()).c_str());
+        return {};
+    }
+
+    // Initialise the model now that it's likely to be used.
+    if (!mModel) {
+        mModel = TfLiteMotionPredictorModel::create();
+    }
+
+    if (mBuffers == nullptr) {
+        mBuffers = std::make_unique<TfLiteMotionPredictorBuffers>(mModel->inputLength());
+    }
+
+    const int32_t action = event.getActionMasked();
+    if (action == AMOTION_EVENT_ACTION_UP || action == AMOTION_EVENT_ACTION_CANCEL) {
+        ALOGD_IF(isDebug(), "End of event stream");
+        mBuffers->reset();
+        mLastEvent.reset();
+        return {};
+    } else if (action != AMOTION_EVENT_ACTION_DOWN && action != AMOTION_EVENT_ACTION_MOVE) {
+        ALOGD_IF(isDebug(), "Skipping unsupported %s action",
+                 MotionEvent::actionToString(action).c_str());
+        return {};
+    }
+
+    if (event.getPointerCount() != 1) {
+        ALOGD_IF(isDebug(), "Prediction not supported for multiple pointers");
+        return {};
+    }
+
+    const ToolType toolType = event.getPointerProperties(0)->toolType;
+    if (toolType != ToolType::STYLUS) {
+        ALOGD_IF(isDebug(), "Prediction not supported for non-stylus tool: %s",
+                 ftl::enum_string(toolType).c_str());
+        return {};
+    }
+
+    for (size_t i = 0; i <= event.getHistorySize(); ++i) {
+        if (event.isResampled(0, i)) {
+            continue;
+        }
+        const PointerCoords* coords = event.getHistoricalRawPointerCoords(0, i);
+        mBuffers->pushSample(event.getHistoricalEventTime(i),
+                             {
+                                     .position.x = coords->getAxisValue(AMOTION_EVENT_AXIS_X),
+                                     .position.y = coords->getAxisValue(AMOTION_EVENT_AXIS_Y),
+                                     .pressure = event.getHistoricalPressure(0, i),
+                                     .tilt = event.getHistoricalAxisValue(AMOTION_EVENT_AXIS_TILT,
+                                                                          0, i),
+                                     .orientation = event.getHistoricalOrientation(0, i),
+                             });
+    }
+
+    if (!mLastEvent) {
+        mLastEvent = MotionEvent();
+    }
+    mLastEvent->copyFrom(&event, /*keepHistory=*/false);
+    return {};
+}
+
+std::unique_ptr<MotionEvent> MotionPredictor::predict(nsecs_t timestamp) {
+    if (mBuffers == nullptr || !mBuffers->isReady()) {
+        return nullptr;
+    }
+
+    LOG_ALWAYS_FATAL_IF(!mModel);
+    mBuffers->copyTo(*mModel);
+    LOG_ALWAYS_FATAL_IF(!mModel->invoke());
+
+    // Read out the predictions.
+    const std::span<const float> predictedR = mModel->outputR();
+    const std::span<const float> predictedPhi = mModel->outputPhi();
+    const std::span<const float> predictedPressure = mModel->outputPressure();
+
+    TfLiteMotionPredictorSample::Point axisFrom = mBuffers->axisFrom().position;
+    TfLiteMotionPredictorSample::Point axisTo = mBuffers->axisTo().position;
+
+    if (isDebug()) {
+        ALOGD("axisFrom: %f, %f", axisFrom.x, axisFrom.y);
+        ALOGD("axisTo: %f, %f", axisTo.x, axisTo.y);
+        ALOGD("mInputR: %s", base::Join(mModel->inputR(), ", ").c_str());
+        ALOGD("mInputPhi: %s", base::Join(mModel->inputPhi(), ", ").c_str());
+        ALOGD("mInputPressure: %s", base::Join(mModel->inputPressure(), ", ").c_str());
+        ALOGD("mInputTilt: %s", base::Join(mModel->inputTilt(), ", ").c_str());
+        ALOGD("mInputOrientation: %s", base::Join(mModel->inputOrientation(), ", ").c_str());
+        ALOGD("predictedR: %s", base::Join(predictedR, ", ").c_str());
+        ALOGD("predictedPhi: %s", base::Join(predictedPhi, ", ").c_str());
+        ALOGD("predictedPressure: %s", base::Join(predictedPressure, ", ").c_str());
+    }
+
+    LOG_ALWAYS_FATAL_IF(!mLastEvent);
+    const MotionEvent& event = *mLastEvent;
+    bool hasPredictions = false;
+    std::unique_ptr<MotionEvent> prediction = std::make_unique<MotionEvent>();
+    int64_t predictionTime = mBuffers->lastTimestamp();
+    const int64_t futureTime = timestamp + mPredictionTimestampOffsetNanos;
+
+    for (int i = 0; i < predictedR.size() && predictionTime <= futureTime; ++i) {
+        const TfLiteMotionPredictorSample::Point point =
+                convertPrediction(axisFrom, axisTo, predictedR[i], predictedPhi[i]);
+        // TODO(b/266747654): Stop predictions if confidence is < some threshold.
+
+        ALOGD_IF(isDebug(), "prediction %d: %f, %f", i, point.x, point.y);
+        PointerCoords coords;
+        coords.clear();
+        coords.setAxisValue(AMOTION_EVENT_AXIS_X, point.x);
+        coords.setAxisValue(AMOTION_EVENT_AXIS_Y, point.y);
+        // TODO(b/266747654): Stop predictions if predicted pressure is < some threshold.
+        coords.setAxisValue(AMOTION_EVENT_AXIS_PRESSURE, predictedPressure[i]);
+
+        predictionTime += PREDICTION_INTERVAL_NANOS;
+        if (i == 0) {
+            hasPredictions = true;
+            prediction->initialize(InputEvent::nextId(), event.getDeviceId(), event.getSource(),
+                                   event.getDisplayId(), INVALID_HMAC, AMOTION_EVENT_ACTION_MOVE,
+                                   event.getActionButton(), event.getFlags(), event.getEdgeFlags(),
+                                   event.getMetaState(), event.getButtonState(),
+                                   event.getClassification(), event.getTransform(),
+                                   event.getXPrecision(), event.getYPrecision(),
+                                   event.getRawXCursorPosition(), event.getRawYCursorPosition(),
+                                   event.getRawTransform(), event.getDownTime(), predictionTime,
+                                   event.getPointerCount(), event.getPointerProperties(), &coords);
+        } else {
+            prediction->addSample(predictionTime, &coords);
+        }
+
+        axisFrom = axisTo;
+        axisTo = point;
+    }
+    // TODO(b/266747511): Interpolate to futureTime?
+    if (!hasPredictions) {
+        return nullptr;
+    }
+    return prediction;
+}
+
+bool MotionPredictor::isPredictionAvailable(int32_t /*deviceId*/, int32_t source) {
+    // Global flag override
+    if (!mCheckMotionPredictionEnabled()) {
+        ALOGD_IF(isDebug(), "Prediction not available due to flag override");
+        return false;
+    }
+
+    // Prediction is only supported for stylus sources.
+    if (!isFromSource(source, AINPUT_SOURCE_STYLUS)) {
+        ALOGD_IF(isDebug(), "Prediction not available for non-stylus source: %s",
+                 inputEventSourceToString(source).c_str());
+        return false;
+    }
+    return true;
+}
+
+} // namespace android
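
The convertPrediction() helper in the new MotionPredictor.cpp interprets the model's polar output
(r, phi) relative to the direction of the last input segment (axisFrom to axisTo) and applies it
at axisTo. The snippet below is a standalone numeric illustration of that math, not the library
code.

    #include <cmath>
    #include <iostream>

    struct Point { float x, y; };

    Point convert(Point axisFrom, Point axisTo, float r, float phi) {
        // Angle of the reference axis, measured counter-clockwise from +x.
        const float axisPhi = std::atan2(axisTo.y - axisFrom.y, axisTo.x - axisFrom.x);
        return {axisTo.x + r * std::cos(axisPhi + phi), axisTo.y + r * std::sin(axisPhi + phi)};
    }

    int main() {
        const float halfPi = std::acos(-1.0f) / 2.0f;
        // The last segment points along +x, so phi == 0 continues straight ahead.
        const Point p = convert({0, 0}, {10, 0}, /*r=*/5, /*phi=*/0);
        std::cout << p.x << ", " << p.y << '\n'; // 15, 0
        // phi == +pi/2 turns 90 degrees counter-clockwise from the segment direction.
        const Point q = convert({0, 0}, {10, 0}, /*r=*/5, /*phi=*/halfPi);
        std::cout << q.x << ", " << q.y << '\n'; // ~10, 5
    }
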
diff --git a/libs/input/PropertyMap.cpp b/libs/input/PropertyMap.cpp
index fc020ca..315f5a6 100644
--- a/libs/input/PropertyMap.cpp
+++ b/libs/input/PropertyMap.cpp
@@ -16,7 +16,10 @@
 
 #define LOG_TAG "PropertyMap"
 
+#include <cstdlib>
+
 #include <input/PropertyMap.h>
+#include <log/log.h>
 
 // Enables debug output for the parser.
 #define DEBUG_PARSER 0
@@ -39,71 +42,85 @@
     mProperties.clear();
 }
 
-void PropertyMap::addProperty(const String8& key, const String8& value) {
-    mProperties.add(key, value);
+void PropertyMap::addProperty(const std::string& key, const std::string& value) {
+    mProperties.emplace(key, value);
 }
 
-bool PropertyMap::hasProperty(const String8& key) const {
-    return mProperties.indexOfKey(key) >= 0;
-}
-
-bool PropertyMap::tryGetProperty(const String8& key, String8& outValue) const {
-    ssize_t index = mProperties.indexOfKey(key);
-    if (index < 0) {
-        return false;
+std::unordered_set<std::string> PropertyMap::getKeysWithPrefix(const std::string& prefix) const {
+    std::unordered_set<std::string> keys;
+    for (const auto& [key, _] : mProperties) {
+        if (key.starts_with(prefix)) {
+            keys.insert(key);
+        }
     }
-
-    outValue = mProperties.valueAt(index);
-    return true;
+    return keys;
 }
 
-bool PropertyMap::tryGetProperty(const String8& key, bool& outValue) const {
-    int32_t intValue;
-    if (!tryGetProperty(key, intValue)) {
-        return false;
-    }
-
-    outValue = intValue;
-    return true;
+bool PropertyMap::hasProperty(const std::string& key) const {
+    return mProperties.find(key) != mProperties.end();
 }
 
-bool PropertyMap::tryGetProperty(const String8& key, int32_t& outValue) const {
-    String8 stringValue;
-    if (!tryGetProperty(key, stringValue) || stringValue.length() == 0) {
-        return false;
+std::optional<std::string> PropertyMap::getString(const std::string& key) const {
+    auto it = mProperties.find(key);
+    return it != mProperties.end() ? std::make_optional(it->second) : std::nullopt;
+}
+
+std::optional<bool> PropertyMap::getBool(const std::string& key) const {
+    std::optional<int32_t> intValue = getInt(key);
+    return intValue.has_value() ? std::make_optional(*intValue != 0) : std::nullopt;
+}
+
+std::optional<int32_t> PropertyMap::getInt(const std::string& key) const {
+    std::optional<std::string> stringValue = getString(key);
+    if (!stringValue.has_value() || stringValue->length() == 0) {
+        return std::nullopt;
     }
 
     char* end;
-    int value = strtol(stringValue.c_str(), &end, 10);
+    int32_t value = static_cast<int32_t>(strtol(stringValue->c_str(), &end, 10));
     if (*end != '\0') {
         ALOGW("Property key '%s' has invalid value '%s'.  Expected an integer.", key.c_str(),
-              stringValue.c_str());
-        return false;
+              stringValue->c_str());
+        return std::nullopt;
     }
-    outValue = value;
-    return true;
+    return value;
 }
 
-bool PropertyMap::tryGetProperty(const String8& key, float& outValue) const {
-    String8 stringValue;
-    if (!tryGetProperty(key, stringValue) || stringValue.length() == 0) {
-        return false;
+std::optional<float> PropertyMap::getFloat(const std::string& key) const {
+    std::optional<std::string> stringValue = getString(key);
+    if (!stringValue.has_value() || stringValue->length() == 0) {
+        return std::nullopt;
     }
 
     char* end;
-    float value = strtof(stringValue.c_str(), &end);
+    float value = strtof(stringValue->c_str(), &end);
     if (*end != '\0') {
         ALOGW("Property key '%s' has invalid value '%s'.  Expected a float.", key.c_str(),
-              stringValue.c_str());
-        return false;
+              stringValue->c_str());
+        return std::nullopt;
     }
-    outValue = value;
-    return true;
+    return value;
+}
+
+std::optional<double> PropertyMap::getDouble(const std::string& key) const {
+    std::optional<std::string> stringValue = getString(key);
+    if (!stringValue.has_value() || stringValue->length() == 0) {
+        return std::nullopt;
+    }
+
+    char* end;
+    double value = strtod(stringValue->c_str(), &end);
+    if (*end != '\0') {
+        ALOGW("Property key '%s' has invalid value '%s'.  Expected a double.", key.c_str(),
+              stringValue->c_str());
+        return std::nullopt;
+    }
+    return value;
 }
 
 void PropertyMap::addAll(const PropertyMap* map) {
-    for (size_t i = 0; i < map->mProperties.size(); i++) {
-        mProperties.add(map->mProperties.keyAt(i), map->mProperties.valueAt(i));
+    for (const auto& [key, value] : map->mProperties) {
+        mProperties.emplace(key, value);
     }
 }
 
@@ -115,25 +132,24 @@
 
     Tokenizer* rawTokenizer;
     status_t status = Tokenizer::open(String8(filename), &rawTokenizer);
-    std::unique_ptr<Tokenizer> tokenizer(rawTokenizer);
     if (status) {
-        ALOGE("Error %d opening property file %s.", status, filename);
-    } else {
-#if DEBUG_PARSER_PERFORMANCE
-            nsecs_t startTime = systemTime(SYSTEM_TIME_MONOTONIC);
-#endif
-            Parser parser(outMap.get(), tokenizer.get());
-            status = parser.parse();
-#if DEBUG_PARSER_PERFORMANCE
-            nsecs_t elapsedTime = systemTime(SYSTEM_TIME_MONOTONIC) - startTime;
-            ALOGD("Parsed property file '%s' %d lines in %0.3fms.",
-                  tokenizer->getFilename().c_str(), tokenizer->getLineNumber(),
-                  elapsedTime / 1000000.0);
-#endif
-            if (status) {
-                return android::base::Error(BAD_VALUE) << "Could not parse " << filename;
-            }
+        return android::base::Error(-status) << "Could not open file: " << filename;
     }
+#if DEBUG_PARSER_PERFORMANCE
+    nsecs_t startTime = systemTime(SYSTEM_TIME_MONOTONIC);
+#endif
+    std::unique_ptr<Tokenizer> tokenizer(rawTokenizer);
+    Parser parser(outMap.get(), tokenizer.get());
+    status = parser.parse();
+#if DEBUG_PARSER_PERFORMANCE
+    nsecs_t elapsedTime = systemTime(SYSTEM_TIME_MONOTONIC) - startTime;
+    ALOGD("Parsed property file '%s' %d lines in %0.3fms.", tokenizer->getFilename().c_str(),
+          tokenizer->getLineNumber(), elapsedTime / 1000000.0);
+#endif
+    if (status) {
+        return android::base::Error(BAD_VALUE) << "Could not parse " << filename;
+    }
+
     return std::move(outMap);
 }
 
@@ -184,13 +200,13 @@
                 return BAD_VALUE;
             }
 
-            if (mMap->hasProperty(keyToken)) {
+            if (mMap->hasProperty(keyToken.c_str())) {
                 ALOGE("%s: Duplicate property value for key '%s'.",
                       mTokenizer->getLocation().c_str(), keyToken.c_str());
                 return BAD_VALUE;
             }
 
-            mMap->addProperty(keyToken, valueToken);
+            mMap->addProperty(keyToken.c_str(), valueToken.c_str());
         }
 
         mTokenizer->nextLine();
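
PropertyMap::getInt()/getFloat()/getDouble() above keep the strict validation from the old
tryGetProperty() overloads: a value is accepted only if the numeric conversion consumes the whole
string (checked via *end == '\0'), unlike the looser parseInt() helper in KeyLayoutMap.cpp, which
tolerates trailing characters. A toy version of that strict check follows; strictInt is a made-up
name.

    #include <cstdint>
    #include <cstdlib>
    #include <iostream>
    #include <optional>
    #include <string>

    std::optional<int32_t> strictInt(const std::string& s) {
        if (s.empty()) return std::nullopt;
        char* end = nullptr;
        const long v = strtol(s.c_str(), &end, 10); // base 10, matching getInt()
        if (*end != '\0') return std::nullopt;      // trailing characters invalidate the value
        return static_cast<int32_t>(v);
    }

    int main() {
        for (const std::string s : {"42", "42px", ""}) {
            const auto v = strictInt(s);
            std::cout << '"' << s << "\" -> " << (v ? std::to_string(*v) : "nullopt") << '\n';
        }
    }
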
diff --git a/libs/input/PropertyMap_fuzz.cpp b/libs/input/PropertyMap_fuzz.cpp
old mode 100755
new mode 100644
index afb97a1..6299ca8
--- a/libs/input/PropertyMap_fuzz.cpp
+++ b/libs/input/PropertyMap_fuzz.cpp
@@ -17,32 +17,21 @@
 #include "android-base/file.h"
 #include "fuzzer/FuzzedDataProvider.h"
 #include "input/PropertyMap.h"
-#include "utils/String8.h"
 
 static constexpr int MAX_FILE_SIZE = 256;
 static constexpr int MAX_STR_LEN = 2048;
 static constexpr int MAX_OPERATIONS = 1000;
 
-static const std::vector<std::function<void(FuzzedDataProvider*, android::PropertyMap)>>
+static const std::vector<std::function<void(FuzzedDataProvider*, android::PropertyMap&)>>
         operations = {
-                [](FuzzedDataProvider*, android::PropertyMap propertyMap) -> void {
-                    propertyMap.getProperties();
-                },
-                [](FuzzedDataProvider*, android::PropertyMap propertyMap) -> void {
+                [](FuzzedDataProvider*, android::PropertyMap& propertyMap) -> void {
                     propertyMap.clear();
                 },
-                [](FuzzedDataProvider* dataProvider, android::PropertyMap propertyMap) -> void {
-                    std::string keyStr = dataProvider->ConsumeRandomLengthString(MAX_STR_LEN);
-                    android::String8 key = android::String8(keyStr.c_str());
-                    propertyMap.hasProperty(key);
+                [](FuzzedDataProvider* dataProvider, android::PropertyMap& propertyMap) -> void {
+                    std::string key = dataProvider->ConsumeRandomLengthString(MAX_STR_LEN);
+                    propertyMap.getString(key);
                 },
-                [](FuzzedDataProvider* dataProvider, android::PropertyMap propertyMap) -> void {
-                    std::string keyStr = dataProvider->ConsumeRandomLengthString(MAX_STR_LEN);
-                    android::String8 key = android::String8(keyStr.c_str());
-                    android::String8 out;
-                    propertyMap.tryGetProperty(key, out);
-                },
-                [](FuzzedDataProvider* dataProvider, android::PropertyMap /*unused*/) -> void {
+                [](FuzzedDataProvider* dataProvider, android::PropertyMap& /*unused*/) -> void {
                     TemporaryFile tf;
                     // Generate file contents
                     std::string contents = dataProvider->ConsumeRandomLengthString(MAX_FILE_SIZE);
@@ -54,17 +43,15 @@
                     }
                     android::PropertyMap::load(tf.path);
                 },
-                [](FuzzedDataProvider* dataProvider, android::PropertyMap propertyMap) -> void {
-                    std::string keyStr = dataProvider->ConsumeRandomLengthString(MAX_STR_LEN);
-                    std::string valStr = dataProvider->ConsumeRandomLengthString(MAX_STR_LEN);
-                    android::String8 key = android::String8(keyStr.c_str());
-                    android::String8 val = android::String8(valStr.c_str());
+                [](FuzzedDataProvider* dataProvider, android::PropertyMap& propertyMap) -> void {
+                    std::string key = dataProvider->ConsumeRandomLengthString(MAX_STR_LEN);
+                    std::string val = dataProvider->ConsumeRandomLengthString(MAX_STR_LEN);
                     propertyMap.addProperty(key, val);
                 },
 };
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
     FuzzedDataProvider dataProvider(data, size);
-    android::PropertyMap propertyMap = android::PropertyMap();
+    android::PropertyMap propertyMap;
 
     int opsRun = 0;
     while (dataProvider.remaining_bytes() > 0 && opsRun++ < MAX_OPERATIONS) {
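
The fuzzer fix above changes the operation lambdas to take the PropertyMap by reference; with the
old by-value signature each operation mutated its own copy, so state never accumulated across
iterations. A minimal illustration of that pitfall, where Map is just a stand-in for PropertyMap:

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    using Map = std::map<std::string, std::string>;

    int main() {
        Map shared;
        std::function<void(Map)> byValue = [](Map m) { m.emplace("k", "v"); };   // mutates a copy
        std::function<void(Map&)> byRef = [](Map& m) { m.emplace("k", "v"); };   // mutates the original

        byValue(shared);
        std::cout << "after by-value op: " << shared.size() << " entries\n"; // 0
        byRef(shared);
        std::cout << "after by-ref op:   " << shared.size() << " entries\n"; // 1
    }
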
diff --git a/libs/input/TfLiteMotionPredictor.cpp b/libs/input/TfLiteMotionPredictor.cpp
new file mode 100644
index 0000000..85fa176
--- /dev/null
+++ b/libs/input/TfLiteMotionPredictor.cpp
@@ -0,0 +1,385 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TfLiteMotionPredictor"
+#include <input/TfLiteMotionPredictor.h>
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <span>
+#include <type_traits>
+#include <utility>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/mapped_file.h>
+#define ATRACE_TAG ATRACE_TAG_INPUT
+#include <cutils/trace.h>
+#include <log/log.h>
+
+#include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/core/api/op_resolver.h"
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/builtin_op_kernels.h"
+#include "tensorflow/lite/model.h"
+#include "tensorflow/lite/mutable_op_resolver.h"
+
+namespace android {
+namespace {
+
+constexpr char SIGNATURE_KEY[] = "serving_default";
+
+// Input tensor names.
+constexpr char INPUT_R[] = "r";
+constexpr char INPUT_PHI[] = "phi";
+constexpr char INPUT_PRESSURE[] = "pressure";
+constexpr char INPUT_TILT[] = "tilt";
+constexpr char INPUT_ORIENTATION[] = "orientation";
+
+// Output tensor names.
+constexpr char OUTPUT_R[] = "r";
+constexpr char OUTPUT_PHI[] = "phi";
+constexpr char OUTPUT_PRESSURE[] = "pressure";
+
+// Ideally, we would just use std::filesystem::exists here, but it requires libc++fs, which causes
+// build issues in other parts of the system.
+#if defined(__ANDROID__)
+bool fileExists(const char* filename) {
+    struct stat buffer;
+    return stat(filename, &buffer) == 0;
+}
+#endif
+
+std::string getModelPath() {
+#if defined(__ANDROID__)
+    static const char* oemModel = "/vendor/etc/motion_predictor_model.fb";
+    if (fileExists(oemModel)) {
+        return oemModel;
+    }
+    return "/system/etc/motion_predictor_model.fb";
+#else
+    return base::GetExecutableDirectory() + "/motion_predictor_model.fb";
+#endif
+}
+
+// A TFLite ErrorReporter that logs to logcat.
+class LoggingErrorReporter : public tflite::ErrorReporter {
+public:
+    int Report(const char* format, va_list args) override {
+        return LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+    }
+};
+
+// Searches a runner for an input tensor.
+TfLiteTensor* findInputTensor(const char* name, tflite::SignatureRunner* runner) {
+    TfLiteTensor* tensor = runner->input_tensor(name);
+    LOG_ALWAYS_FATAL_IF(!tensor, "Failed to find input tensor '%s'", name);
+    return tensor;
+}
+
+// Searches a runner for an output tensor.
+const TfLiteTensor* findOutputTensor(const char* name, tflite::SignatureRunner* runner) {
+    const TfLiteTensor* tensor = runner->output_tensor(name);
+    LOG_ALWAYS_FATAL_IF(!tensor, "Failed to find output tensor '%s'", name);
+    return tensor;
+}
+
+// Returns the buffer for a tensor of type T.
+template <typename T>
+std::span<T> getTensorBuffer(typename std::conditional<std::is_const<T>::value, const TfLiteTensor*,
+                                                       TfLiteTensor*>::type tensor) {
+    LOG_ALWAYS_FATAL_IF(!tensor);
+
+    const TfLiteType type = tflite::typeToTfLiteType<typename std::remove_cv<T>::type>();
+    LOG_ALWAYS_FATAL_IF(tensor->type != type, "Unexpected type for '%s' tensor: %s (expected %s)",
+                        tensor->name, TfLiteTypeGetName(tensor->type), TfLiteTypeGetName(type));
+
+    LOG_ALWAYS_FATAL_IF(!tensor->data.data);
+    return {reinterpret_cast<T*>(tensor->data.data),
+            static_cast<typename std::span<T>::index_type>(tensor->bytes / sizeof(T))};
+}
+
+// Verifies that a tensor exists and has an underlying buffer of type T.
+template <typename T>
+void checkTensor(const TfLiteTensor* tensor) {
+    LOG_ALWAYS_FATAL_IF(!tensor);
+
+    const auto buffer = getTensorBuffer<const T>(tensor);
+    LOG_ALWAYS_FATAL_IF(buffer.empty(), "No buffer for tensor '%s'", tensor->name);
+}
+
+std::unique_ptr<tflite::OpResolver> createOpResolver() {
+    auto resolver = std::make_unique<tflite::MutableOpResolver>();
+    resolver->AddBuiltin(::tflite::BuiltinOperator_CONCATENATION,
+                         ::tflite::ops::builtin::Register_CONCATENATION());
+    resolver->AddBuiltin(::tflite::BuiltinOperator_FULLY_CONNECTED,
+                         ::tflite::ops::builtin::Register_FULLY_CONNECTED());
+    return resolver;
+}
+
+} // namespace
+
+TfLiteMotionPredictorBuffers::TfLiteMotionPredictorBuffers(size_t inputLength)
+      : mInputR(inputLength, 0),
+        mInputPhi(inputLength, 0),
+        mInputPressure(inputLength, 0),
+        mInputTilt(inputLength, 0),
+        mInputOrientation(inputLength, 0) {
+    LOG_ALWAYS_FATAL_IF(inputLength == 0, "Buffer input size must be greater than 0");
+}
+
+void TfLiteMotionPredictorBuffers::reset() {
+    std::fill(mInputR.begin(), mInputR.end(), 0);
+    std::fill(mInputPhi.begin(), mInputPhi.end(), 0);
+    std::fill(mInputPressure.begin(), mInputPressure.end(), 0);
+    std::fill(mInputTilt.begin(), mInputTilt.end(), 0);
+    std::fill(mInputOrientation.begin(), mInputOrientation.end(), 0);
+    mAxisFrom.reset();
+    mAxisTo.reset();
+}
+
+void TfLiteMotionPredictorBuffers::copyTo(TfLiteMotionPredictorModel& model) const {
+    LOG_ALWAYS_FATAL_IF(mInputR.size() != model.inputLength(),
+                        "Buffer length %zu doesn't match model input length %zu", mInputR.size(),
+                        model.inputLength());
+    LOG_ALWAYS_FATAL_IF(!isReady(), "Buffers are incomplete");
+
+    std::copy(mInputR.begin(), mInputR.end(), model.inputR().begin());
+    std::copy(mInputPhi.begin(), mInputPhi.end(), model.inputPhi().begin());
+    std::copy(mInputPressure.begin(), mInputPressure.end(), model.inputPressure().begin());
+    std::copy(mInputTilt.begin(), mInputTilt.end(), model.inputTilt().begin());
+    std::copy(mInputOrientation.begin(), mInputOrientation.end(), model.inputOrientation().begin());
+}
+
+void TfLiteMotionPredictorBuffers::pushSample(int64_t timestamp,
+                                              const TfLiteMotionPredictorSample sample) {
+    // Convert the sample (x, y) into polar (r, φ) based on a reference axis
+    // from the preceding two points (mAxisFrom/mAxisTo).
+
+    mTimestamp = timestamp;
+
+    if (!mAxisTo) { // First point.
+        mAxisTo = sample;
+        return;
+    }
+
+    // Vector from the last point to the current sample point.
+    const TfLiteMotionPredictorSample::Point v = sample.position - mAxisTo->position;
+
+    const float r = std::hypot(v.x, v.y);
+    float phi = 0;
+    float orientation = 0;
+
+    // Ignore the sample if there is no movement. These samples can occur when there's a change
+    // to a property other than the coordinates, and they pollute the input to the model.
+    if (r == 0) {
+        return;
+    }
+
+    if (!mAxisFrom) { // Second point.
+        // We can only determine the distance from the first point, and not any
+        // angle. However, if the second point forms an axis, the orientation can
+        // be transformed relative to that axis.
+        const float axisPhi = std::atan2(v.y, v.x);
+        // A MotionEvent's orientation is measured clockwise from the vertical
+        // axis, but axisPhi is measured counter-clockwise from the horizontal
+        // axis.
+        orientation = M_PI_2 - sample.orientation - axisPhi;
+    } else {
+        const TfLiteMotionPredictorSample::Point axis = mAxisTo->position - mAxisFrom->position;
+        const float axisPhi = std::atan2(axis.y, axis.x);
+        phi = std::atan2(v.y, v.x) - axisPhi;
+
+        if (std::hypot(axis.x, axis.y) > 0) {
+            // See note above.
+            orientation = M_PI_2 - sample.orientation - axisPhi;
+        }
+    }
+
+    // Update the axis for the next point.
+    mAxisFrom = mAxisTo;
+    mAxisTo = sample;
+
+    // Push the current sample onto the end of the input buffers.
+    mInputR.pushBack(r);
+    mInputPhi.pushBack(phi);
+    mInputPressure.pushBack(sample.pressure);
+    mInputTilt.pushBack(sample.tilt);
+    mInputOrientation.pushBack(orientation);
+}
+
+std::unique_ptr<TfLiteMotionPredictorModel> TfLiteMotionPredictorModel::create() {
+    const std::string modelPath = getModelPath();
+    android::base::unique_fd fd(open(modelPath.c_str(), O_RDONLY));
+    if (fd == -1) {
+        PLOG(FATAL) << "Could not read model from " << modelPath;
+    }
+
+    const off_t fdSize = lseek(fd, 0, SEEK_END);
+    if (fdSize == -1) {
+        PLOG(FATAL) << "Failed to determine file size";
+    }
+
+    std::unique_ptr<android::base::MappedFile> modelBuffer =
+            android::base::MappedFile::FromFd(fd, /*offset=*/0, fdSize, PROT_READ);
+    if (!modelBuffer) {
+        PLOG(FATAL) << "Failed to mmap model";
+    }
+
+    return std::unique_ptr<TfLiteMotionPredictorModel>(
+            new TfLiteMotionPredictorModel(std::move(modelBuffer)));
+}
+
+TfLiteMotionPredictorModel::TfLiteMotionPredictorModel(
+        std::unique_ptr<android::base::MappedFile> model)
+      : mFlatBuffer(std::move(model)) {
+    CHECK(mFlatBuffer);
+    mErrorReporter = std::make_unique<LoggingErrorReporter>();
+    mModel = tflite::FlatBufferModel::VerifyAndBuildFromBuffer(mFlatBuffer->data(),
+                                                               mFlatBuffer->size(),
+                                                               /*extra_verifier=*/nullptr,
+                                                               mErrorReporter.get());
+    LOG_ALWAYS_FATAL_IF(!mModel);
+
+    auto resolver = createOpResolver();
+    tflite::InterpreterBuilder builder(*mModel, *resolver);
+
+    if (builder(&mInterpreter) != kTfLiteOk || !mInterpreter) {
+        LOG_ALWAYS_FATAL("Failed to build interpreter");
+    }
+
+    mRunner = mInterpreter->GetSignatureRunner(SIGNATURE_KEY);
+    LOG_ALWAYS_FATAL_IF(!mRunner, "Failed to find runner for signature '%s'", SIGNATURE_KEY);
+
+    allocateTensors();
+}
+
+TfLiteMotionPredictorModel::~TfLiteMotionPredictorModel() {}
+
+void TfLiteMotionPredictorModel::allocateTensors() {
+    if (mRunner->AllocateTensors() != kTfLiteOk) {
+        LOG_ALWAYS_FATAL("Failed to allocate tensors");
+    }
+
+    attachInputTensors();
+    attachOutputTensors();
+
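+    // All model inputs and outputs are expected to be float tensors, and every input is
+    // expected to have the same length as mInputR (see checkInputTensorSize below).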
+    checkTensor<float>(mInputR);
+    checkTensor<float>(mInputPhi);
+    checkTensor<float>(mInputPressure);
+    checkTensor<float>(mInputTilt);
+    checkTensor<float>(mInputOrientation);
+    checkTensor<float>(mOutputR);
+    checkTensor<float>(mOutputPhi);
+    checkTensor<float>(mOutputPressure);
+
+    const auto checkInputTensorSize = [this](const TfLiteTensor* tensor) {
+        const size_t size = getTensorBuffer<const float>(tensor).size();
+        LOG_ALWAYS_FATAL_IF(size != inputLength(),
+                            "Tensor '%s' length %zu does not match input length %zu", tensor->name,
+                            size, inputLength());
+    };
+
+    checkInputTensorSize(mInputR);
+    checkInputTensorSize(mInputPhi);
+    checkInputTensorSize(mInputPressure);
+    checkInputTensorSize(mInputTilt);
+    checkInputTensorSize(mInputOrientation);
+}
+
+void TfLiteMotionPredictorModel::attachInputTensors() {
+    mInputR = findInputTensor(INPUT_R, mRunner);
+    mInputPhi = findInputTensor(INPUT_PHI, mRunner);
+    mInputPressure = findInputTensor(INPUT_PRESSURE, mRunner);
+    mInputTilt = findInputTensor(INPUT_TILT, mRunner);
+    mInputOrientation = findInputTensor(INPUT_ORIENTATION, mRunner);
+}
+
+void TfLiteMotionPredictorModel::attachOutputTensors() {
+    mOutputR = findOutputTensor(OUTPUT_R, mRunner);
+    mOutputPhi = findOutputTensor(OUTPUT_PHI, mRunner);
+    mOutputPressure = findOutputTensor(OUTPUT_PRESSURE, mRunner);
+}
+
+bool TfLiteMotionPredictorModel::invoke() {
+    ATRACE_BEGIN("TfLiteMotionPredictorModel::invoke");
+    TfLiteStatus result = mRunner->Invoke();
+    ATRACE_END();
+
+    if (result != kTfLiteOk) {
+        return false;
+    }
+
+    // Invoke() might reallocate tensors, so they need to be reattached.
+    attachInputTensors();
+    attachOutputTensors();
+
+    if (outputR().size() != outputPhi().size() || outputR().size() != outputPressure().size()) {
+        LOG_ALWAYS_FATAL("Output size mismatch: (r: %zu, phi: %zu, pressure: %zu)",
+                         outputR().size(), outputPhi().size(), outputPressure().size());
+    }
+
+    return true;
+}
+
+size_t TfLiteMotionPredictorModel::inputLength() const {
+    return getTensorBuffer<const float>(mInputR).size();
+}
+
+size_t TfLiteMotionPredictorModel::outputLength() const {
+    return getTensorBuffer<const float>(mOutputR).size();
+}
+
+std::span<float> TfLiteMotionPredictorModel::inputR() {
+    return getTensorBuffer<float>(mInputR);
+}
+
+std::span<float> TfLiteMotionPredictorModel::inputPhi() {
+    return getTensorBuffer<float>(mInputPhi);
+}
+
+std::span<float> TfLiteMotionPredictorModel::inputPressure() {
+    return getTensorBuffer<float>(mInputPressure);
+}
+
+std::span<float> TfLiteMotionPredictorModel::inputTilt() {
+    return getTensorBuffer<float>(mInputTilt);
+}
+
+std::span<float> TfLiteMotionPredictorModel::inputOrientation() {
+    return getTensorBuffer<float>(mInputOrientation);
+}
+
+std::span<const float> TfLiteMotionPredictorModel::outputR() const {
+    return getTensorBuffer<const float>(mOutputR);
+}
+
+std::span<const float> TfLiteMotionPredictorModel::outputPhi() const {
+    return getTensorBuffer<const float>(mOutputPhi);
+}
+
+std::span<const float> TfLiteMotionPredictorModel::outputPressure() const {
+    return getTensorBuffer<const float>(mOutputPressure);
+}
+
+} // namespace android
diff --git a/libs/input/TouchVideoFrame.cpp b/libs/input/TouchVideoFrame.cpp
index c62e098..6d7d561 100644
--- a/libs/input/TouchVideoFrame.cpp
+++ b/libs/input/TouchVideoFrame.cpp
@@ -40,16 +40,19 @@
 
 const struct timeval& TouchVideoFrame::getTimestamp() const { return mTimestamp; }
 
-void TouchVideoFrame::rotate(int32_t orientation) {
+void TouchVideoFrame::rotate(ui::Rotation orientation) {
     switch (orientation) {
-        case DISPLAY_ORIENTATION_90:
-            rotateQuarterTurn(false /*clockwise*/);
+        case ui::ROTATION_90:
+            rotateQuarterTurn(/*clockwise=*/false);
             break;
-        case DISPLAY_ORIENTATION_180:
+        case ui::ROTATION_180:
             rotate180();
             break;
-        case DISPLAY_ORIENTATION_270:
-            rotateQuarterTurn(true /*clockwise*/);
+        case ui::ROTATION_270:
+            rotateQuarterTurn(/*clockwise=*/true);
+            break;
+        case ui::ROTATION_0:
+            // No need to rotate if there's no rotation.
             break;
     }
 }
diff --git a/libs/input/VelocityControl.cpp b/libs/input/VelocityControl.cpp
index 6e991e9..5720099 100644
--- a/libs/input/VelocityControl.cpp
+++ b/libs/input/VelocityControl.cpp
@@ -37,6 +37,10 @@
     reset();
 }
 
+VelocityControlParameters& VelocityControl::getParameters() {
+    return mParameters;
+}
+
 void VelocityControl::setParameters(const VelocityControlParameters& parameters) {
     mParameters = parameters;
     reset();
@@ -44,8 +48,8 @@
 
 void VelocityControl::reset() {
     mLastMovementTime = LLONG_MIN;
-    mRawPosition.x = 0;
-    mRawPosition.y = 0;
+    mRawPositionX = 0;
+    mRawPositionY = 0;
     mVelocityTracker.clear();
 }
 
@@ -61,17 +65,21 @@
 
         mLastMovementTime = eventTime;
         if (deltaX) {
-            mRawPosition.x += *deltaX;
+            mRawPositionX += *deltaX;
         }
         if (deltaY) {
-            mRawPosition.y += *deltaY;
+            mRawPositionY += *deltaY;
         }
-        mVelocityTracker.addMovement(eventTime, BitSet32(BitSet32::valueForBit(0)), {mRawPosition});
+        mVelocityTracker.addMovement(eventTime, /*pointerId=*/0, AMOTION_EVENT_AXIS_X,
+                                     mRawPositionX);
+        mVelocityTracker.addMovement(eventTime, /*pointerId=*/0, AMOTION_EVENT_AXIS_Y,
+                                     mRawPositionY);
 
-        float vx, vy;
+        std::optional<float> vx = mVelocityTracker.getVelocity(AMOTION_EVENT_AXIS_X, 0);
+        std::optional<float> vy = mVelocityTracker.getVelocity(AMOTION_EVENT_AXIS_Y, 0);
         float scale = mParameters.scale;
-        if (mVelocityTracker.getVelocity(0, &vx, &vy)) {
-            float speed = hypotf(vx, vy) * scale;
+        if (vx && vy) {
+            float speed = hypotf(*vx, *vy) * scale;
             if (speed >= mParameters.highThreshold) {
                 // Apply full acceleration above the high speed threshold.
                 scale *= mParameters.acceleration;
@@ -85,10 +93,9 @@
 
             if (DEBUG_ACCELERATION) {
                 ALOGD("VelocityControl(%0.3f, %0.3f, %0.3f, %0.3f): "
-                        "vx=%0.3f, vy=%0.3f, speed=%0.3f, accel=%0.3f",
-                        mParameters.scale, mParameters.lowThreshold, mParameters.highThreshold,
-                        mParameters.acceleration,
-                        vx, vy, speed, scale / mParameters.scale);
+                      "vx=%0.3f, vy=%0.3f, speed=%0.3f, accel=%0.3f",
+                      mParameters.scale, mParameters.lowThreshold, mParameters.highThreshold,
+                      mParameters.acceleration, *vx, *vy, speed, scale / mParameters.scale);
             }
 
         } else {
diff --git a/libs/input/VelocityTracker.cpp b/libs/input/VelocityTracker.cpp
index 7f427f2..8551e5f 100644
--- a/libs/input/VelocityTracker.cpp
+++ b/libs/input/VelocityTracker.cpp
@@ -23,10 +23,13 @@
 #include <optional>
 
 #include <android-base/stringprintf.h>
+#include <input/PrintTools.h>
 #include <input/VelocityTracker.h>
 #include <utils/BitSet.h>
 #include <utils/Timers.h>
 
+using std::literals::chrono_literals::operator""ms;
+
 namespace android {
 
 /**
@@ -53,12 +56,34 @@
 // Nanoseconds per milliseconds.
 static const nsecs_t NANOS_PER_MS = 1000000;
 
+// All axes supported for velocity tracking, mapped to their default strategies.
+// Although other strategies are available for testing and comparison purposes,
+// the default strategy is the one that applications will actually use.  Be very careful
+// when adjusting the default strategy because it can dramatically affect
+// (often in a bad way) the user experience.
+static const std::map<int32_t, VelocityTracker::Strategy> DEFAULT_STRATEGY_BY_AXIS =
+        {{AMOTION_EVENT_AXIS_X, VelocityTracker::Strategy::LSQ2},
+         {AMOTION_EVENT_AXIS_Y, VelocityTracker::Strategy::LSQ2},
+         {AMOTION_EVENT_AXIS_SCROLL, VelocityTracker::Strategy::IMPULSE}};
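+// Illustrative use of the per-axis API that this table backs:
+//   VelocityTracker vt(VelocityTracker::Strategy::DEFAULT);
+//   vt.addMovement(eventTime, /*pointerId=*/0, AMOTION_EVENT_AXIS_X, position);
+//   std::optional<float> vx = vt.getVelocity(AMOTION_EVENT_AXIS_X, /*pointerId=*/0);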
+
+// Axes specifying location on a 2D plane (i.e. X and Y).
+static const std::set<int32_t> PLANAR_AXES = {AMOTION_EVENT_AXIS_X, AMOTION_EVENT_AXIS_Y};
+
+// Axes whose motion values are differential values (i.e. deltas).
+static const std::set<int32_t> DIFFERENTIAL_AXES = {AMOTION_EVENT_AXIS_SCROLL};
+
 // Threshold for determining that a pointer has stopped moving.
 // Some input devices do not send ACTION_MOVE events in the case where a pointer has
 // stopped.  We need to detect this case so that we can accurately predict the
 // velocity after the pointer starts moving again.
-static const nsecs_t ASSUME_POINTER_STOPPED_TIME = 40 * NANOS_PER_MS;
+static const std::chrono::duration ASSUME_POINTER_STOPPED_TIME = 40ms;
 
+static std::string toString(std::chrono::nanoseconds t) {
+    std::stringstream stream;
+    stream.precision(1);
+    stream << std::fixed << std::chrono::duration<float, std::milli>(t).count() << " ms";
+    return stream.str();
+}
 
 static float vectorDot(const float* a, const float* b, uint32_t m) {
     float r = 0;
@@ -118,46 +143,43 @@
 // --- VelocityTracker ---
 
 VelocityTracker::VelocityTracker(const Strategy strategy)
-      : mLastEventTime(0), mCurrentPointerIdBits(0), mActivePointerId(-1) {
-    // Configure the strategy.
-    if (!configureStrategy(strategy)) {
-        ALOGE("Unrecognized velocity tracker strategy %" PRId32 ".", strategy);
-        if (!configureStrategy(VelocityTracker::DEFAULT_STRATEGY)) {
-            LOG_ALWAYS_FATAL("Could not create the default velocity tracker strategy '%" PRId32
-                             "'!",
-                             strategy);
-        }
-    }
-}
+      : mLastEventTime(0), mCurrentPointerIdBits(0), mOverrideStrategy(strategy) {}
 
 VelocityTracker::~VelocityTracker() {
 }
 
-bool VelocityTracker::configureStrategy(Strategy strategy) {
-    if (strategy == VelocityTracker::Strategy::DEFAULT) {
-        mStrategy = createStrategy(VelocityTracker::DEFAULT_STRATEGY);
+bool VelocityTracker::isAxisSupported(int32_t axis) {
+    return DEFAULT_STRATEGY_BY_AXIS.find(axis) != DEFAULT_STRATEGY_BY_AXIS.end();
+}
+
+void VelocityTracker::configureStrategy(int32_t axis) {
+    const bool isDifferentialAxis = DIFFERENTIAL_AXES.find(axis) != DIFFERENTIAL_AXES.end();
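+    // A differential axis (e.g. AXIS_SCROLL) reports deltas rather than absolute positions, so
+    // its strategy is configured to interpret incoming values as deltas.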
+
+    std::unique_ptr<VelocityTrackerStrategy> createdStrategy;
+    if (mOverrideStrategy != VelocityTracker::Strategy::DEFAULT) {
+        createdStrategy = createStrategy(mOverrideStrategy, /*deltaValues=*/isDifferentialAxis);
     } else {
-        mStrategy = createStrategy(strategy);
+        createdStrategy = createStrategy(DEFAULT_STRATEGY_BY_AXIS.at(axis),
+                                         /*deltaValues=*/isDifferentialAxis);
     }
-    return mStrategy != nullptr;
+
+    LOG_ALWAYS_FATAL_IF(createdStrategy == nullptr,
+                        "Could not create velocity tracker strategy for axis '%" PRId32 "'!", axis);
+    mConfiguredStrategies[axis] = std::move(createdStrategy);
 }
 
 std::unique_ptr<VelocityTrackerStrategy> VelocityTracker::createStrategy(
-        VelocityTracker::Strategy strategy) {
+        VelocityTracker::Strategy strategy, bool deltaValues) {
     switch (strategy) {
         case VelocityTracker::Strategy::IMPULSE:
-            if (DEBUG_STRATEGY) {
-                ALOGI("Initializing impulse strategy");
-            }
-            return std::make_unique<ImpulseVelocityTrackerStrategy>();
+            ALOGI_IF(DEBUG_STRATEGY, "Initializing impulse strategy");
+            return std::make_unique<ImpulseVelocityTrackerStrategy>(deltaValues);
 
         case VelocityTracker::Strategy::LSQ1:
             return std::make_unique<LeastSquaresVelocityTrackerStrategy>(1);
 
         case VelocityTracker::Strategy::LSQ2:
-            if (DEBUG_STRATEGY && !DEBUG_IMPULSE) {
-                ALOGI("Initializing lsq2 strategy");
-            }
+            ALOGI_IF(DEBUG_STRATEGY && !DEBUG_IMPULSE, "Initializing lsq2 strategy");
             return std::make_unique<LeastSquaresVelocityTrackerStrategy>(2);
 
         case VelocityTracker::Strategy::LSQ3:
@@ -167,17 +189,17 @@
             return std::make_unique<
                     LeastSquaresVelocityTrackerStrategy>(2,
                                                          LeastSquaresVelocityTrackerStrategy::
-                                                                 WEIGHTING_DELTA);
+                                                                 Weighting::DELTA);
         case VelocityTracker::Strategy::WLSQ2_CENTRAL:
             return std::make_unique<
                     LeastSquaresVelocityTrackerStrategy>(2,
                                                          LeastSquaresVelocityTrackerStrategy::
-                                                                 WEIGHTING_CENTRAL);
+                                                                 Weighting::CENTRAL);
         case VelocityTracker::Strategy::WLSQ2_RECENT:
             return std::make_unique<
                     LeastSquaresVelocityTrackerStrategy>(2,
                                                          LeastSquaresVelocityTrackerStrategy::
-                                                                 WEIGHTING_RECENT);
+                                                                 Weighting::RECENT);
 
         case VelocityTracker::Strategy::INT1:
             return std::make_unique<IntegratingVelocityTrackerStrategy>(1);
@@ -196,194 +218,211 @@
 
 void VelocityTracker::clear() {
     mCurrentPointerIdBits.clear();
-    mActivePointerId = -1;
-
-    mStrategy->clear();
+    mActivePointerId = std::nullopt;
+    mConfiguredStrategies.clear();
 }
 
-void VelocityTracker::clearPointers(BitSet32 idBits) {
-    BitSet32 remainingIdBits(mCurrentPointerIdBits.value & ~idBits.value);
-    mCurrentPointerIdBits = remainingIdBits;
+void VelocityTracker::clearPointer(int32_t pointerId) {
+    mCurrentPointerIdBits.clearBit(pointerId);
 
-    if (mActivePointerId >= 0 && idBits.hasBit(mActivePointerId)) {
-        mActivePointerId = !remainingIdBits.isEmpty() ? remainingIdBits.firstMarkedBit() : -1;
-    }
-
-    mStrategy->clearPointers(idBits);
-}
-
-void VelocityTracker::addMovement(nsecs_t eventTime, BitSet32 idBits,
-                                  const std::vector<VelocityTracker::Position>& positions) {
-    LOG_ALWAYS_FATAL_IF(idBits.count() != positions.size(),
-                        "Mismatching number of pointers, idBits=%" PRIu32 ", positions=%zu",
-                        idBits.count(), positions.size());
-    while (idBits.count() > MAX_POINTERS) {
-        idBits.clearLastMarkedBit();
-    }
-
-    if ((mCurrentPointerIdBits.value & idBits.value)
-            && eventTime >= mLastEventTime + ASSUME_POINTER_STOPPED_TIME) {
-        if (DEBUG_VELOCITY) {
-            ALOGD("VelocityTracker: stopped for %0.3f ms, clearing state.",
-                  (eventTime - mLastEventTime) * 0.000001f);
+    if (mActivePointerId && *mActivePointerId == pointerId) {
+        // The active pointer id is being removed. Mark it invalid and try to find a new one
+        // from the remaining pointers.
+        mActivePointerId = std::nullopt;
+        if (!mCurrentPointerIdBits.isEmpty()) {
+            mActivePointerId = mCurrentPointerIdBits.firstMarkedBit();
         }
+    }
+
+    for (const auto& [_, strategy] : mConfiguredStrategies) {
+        strategy->clearPointer(pointerId);
+    }
+}
+
+void VelocityTracker::addMovement(nsecs_t eventTime, int32_t pointerId, int32_t axis,
+                                  float position) {
+    if (mCurrentPointerIdBits.hasBit(pointerId) &&
+        std::chrono::nanoseconds(eventTime - mLastEventTime) > ASSUME_POINTER_STOPPED_TIME) {
+        ALOGD_IF(DEBUG_VELOCITY, "VelocityTracker: stopped for %s, clearing state.",
+                 toString(std::chrono::nanoseconds(eventTime - mLastEventTime)).c_str());
+
         // We have not received any movements for too long.  Assume that all pointers
         // have stopped.
-        mStrategy->clear();
+        mConfiguredStrategies.clear();
     }
     mLastEventTime = eventTime;
 
-    mCurrentPointerIdBits = idBits;
-    if (mActivePointerId < 0 || !idBits.hasBit(mActivePointerId)) {
-        mActivePointerId = idBits.isEmpty() ? -1 : idBits.firstMarkedBit();
+    mCurrentPointerIdBits.markBit(pointerId);
+    if (!mActivePointerId) {
+        // Let this be the new active pointer if no active pointer is currently set
+        mActivePointerId = pointerId;
     }
 
-    mStrategy->addMovement(eventTime, idBits, positions);
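+    // Strategies are created lazily, one per axis, the first time that axis is seen.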
+    if (mConfiguredStrategies.find(axis) == mConfiguredStrategies.end()) {
+        configureStrategy(axis);
+    }
+    mConfiguredStrategies[axis]->addMovement(eventTime, pointerId, position);
 
     if (DEBUG_VELOCITY) {
-        ALOGD("VelocityTracker: addMovement eventTime=%" PRId64
-              ", idBits=0x%08x, activePointerId=%d",
-              eventTime, idBits.value, mActivePointerId);
-        for (BitSet32 iterBits(idBits); !iterBits.isEmpty();) {
-            uint32_t id = iterBits.firstMarkedBit();
-            uint32_t index = idBits.getIndexOfBit(id);
-            iterBits.clearBit(id);
-            Estimator estimator;
-            getEstimator(id, &estimator);
-            ALOGD("  %d: position (%0.3f, %0.3f), "
-                  "estimator (degree=%d, xCoeff=%s, yCoeff=%s, confidence=%f)",
-                  id, positions[index].x, positions[index].y, int(estimator.degree),
-                  vectorToString(estimator.xCoeff, estimator.degree + 1).c_str(),
-                  vectorToString(estimator.yCoeff, estimator.degree + 1).c_str(),
-                  estimator.confidence);
-        }
+        ALOGD("VelocityTracker: addMovement eventTime=%" PRId64 ", pointerId=%" PRId32
+              ", activePointerId=%s",
+              eventTime, pointerId, toString(mActivePointerId).c_str());
+
+        std::optional<Estimator> estimator = getEstimator(axis, pointerId);
+        ALOGD("  %d: axis=%d, position=%0.3f, "
+              "estimator (degree=%d, coeff=%s, confidence=%f)",
+              pointerId, axis, position, int((*estimator).degree),
+              vectorToString((*estimator).coeff.data(), (*estimator).degree + 1).c_str(),
+              (*estimator).confidence);
     }
 }
 
 void VelocityTracker::addMovement(const MotionEvent* event) {
+    // Stores data about which axes to process based on the incoming motion event.
+    std::set<int32_t> axesToProcess;
     int32_t actionMasked = event->getActionMasked();
 
     switch (actionMasked) {
-    case AMOTION_EVENT_ACTION_DOWN:
-    case AMOTION_EVENT_ACTION_HOVER_ENTER:
-        // Clear all pointers on down before adding the new movement.
-        clear();
-        break;
-    case AMOTION_EVENT_ACTION_POINTER_DOWN: {
-        // Start a new movement trace for a pointer that just went down.
-        // We do this on down instead of on up because the client may want to query the
-        // final velocity for a pointer that just went up.
-        BitSet32 downIdBits;
-        downIdBits.markBit(event->getPointerId(event->getActionIndex()));
-        clearPointers(downIdBits);
-        break;
-    }
-    case AMOTION_EVENT_ACTION_MOVE:
-    case AMOTION_EVENT_ACTION_HOVER_MOVE:
-        break;
-    default:
-        // Ignore all other actions because they do not convey any new information about
-        // pointer movement.  We also want to preserve the last known velocity of the pointers.
-        // Note that ACTION_UP and ACTION_POINTER_UP always report the last known position
-        // of the pointers that went up.  ACTION_POINTER_UP does include the new position of
-        // pointers that remained down but we will also receive an ACTION_MOVE with this
-        // information if any of them actually moved.  Since we don't know how many pointers
-        // will be going up at once it makes sense to just wait for the following ACTION_MOVE
-        // before adding the movement.
-        return;
-    }
-
-    size_t pointerCount = event->getPointerCount();
-    if (pointerCount > MAX_POINTERS) {
-        pointerCount = MAX_POINTERS;
-    }
-
-    BitSet32 idBits;
-    for (size_t i = 0; i < pointerCount; i++) {
-        idBits.markBit(event->getPointerId(i));
-    }
-
-    uint32_t pointerIndex[MAX_POINTERS];
-    for (size_t i = 0; i < pointerCount; i++) {
-        pointerIndex[i] = idBits.getIndexOfBit(event->getPointerId(i));
-    }
-
-    std::vector<Position> positions;
-    positions.resize(pointerCount);
-
-    size_t historySize = event->getHistorySize();
-    for (size_t h = 0; h <= historySize; h++) {
-        nsecs_t eventTime = event->getHistoricalEventTime(h);
-        for (size_t i = 0; i < pointerCount; i++) {
-            uint32_t index = pointerIndex[i];
-            positions[index].x = event->getHistoricalX(i, h);
-            positions[index].y = event->getHistoricalY(i, h);
+        case AMOTION_EVENT_ACTION_DOWN:
+        case AMOTION_EVENT_ACTION_HOVER_ENTER:
+            // Clear all pointers on down before adding the new movement.
+            clear();
+            axesToProcess.insert(PLANAR_AXES.begin(), PLANAR_AXES.end());
+            break;
+        case AMOTION_EVENT_ACTION_POINTER_DOWN: {
+            // Start a new movement trace for a pointer that just went down.
+            // We do this on down instead of on up because the client may want to query the
+            // final velocity for a pointer that just went up.
+            clearPointer(event->getPointerId(event->getActionIndex()));
+            axesToProcess.insert(PLANAR_AXES.begin(), PLANAR_AXES.end());
+            break;
         }
-        addMovement(eventTime, idBits, positions);
+        case AMOTION_EVENT_ACTION_MOVE:
+        case AMOTION_EVENT_ACTION_HOVER_MOVE:
+            axesToProcess.insert(PLANAR_AXES.begin(), PLANAR_AXES.end());
+            break;
+        case AMOTION_EVENT_ACTION_POINTER_UP:
+        case AMOTION_EVENT_ACTION_UP: {
+            std::chrono::nanoseconds delaySinceLastEvent(event->getEventTime() - mLastEventTime);
+            if (delaySinceLastEvent > ASSUME_POINTER_STOPPED_TIME) {
+                ALOGD_IF(DEBUG_VELOCITY,
+                         "VelocityTracker: stopped for %s, clearing state upon pointer liftoff.",
+                         toString(delaySinceLastEvent).c_str());
+                // We have not received any movements for too long.  Assume that all pointers
+                // have stopped.
+                for (int32_t axis : PLANAR_AXES) {
+                    mConfiguredStrategies.erase(axis);
+                }
+            }
+            // These actions are ignored because they do not convey any new information about
+            // pointer movement.  We also want to preserve the last known velocity of the pointers.
+            // Note that ACTION_UP and ACTION_POINTER_UP always report the last known position
+            // of the pointers that went up.  ACTION_POINTER_UP does include the new position of
+            // pointers that remained down but we will also receive an ACTION_MOVE with this
+            // information if any of them actually moved.  Since we don't know how many pointers
+            // will be going up at once it makes sense to just wait for the following ACTION_MOVE
+            // before adding the movement.
+            return;
+        }
+        case AMOTION_EVENT_ACTION_SCROLL:
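+            // Scroll events report their movement as a delta on AXIS_SCROLL only.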
+            axesToProcess.insert(AMOTION_EVENT_AXIS_SCROLL);
+            break;
+        default:
+            // Ignore all other actions.
+            return;
+    }
+
+    const size_t historySize = event->getHistorySize();
+    for (size_t h = 0; h <= historySize; h++) {
+        const nsecs_t eventTime = event->getHistoricalEventTime(h);
+        for (size_t i = 0; i < event->getPointerCount(); i++) {
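+            // Resampled samples are synthesized by the input pipeline rather than reported by
+            // the device, so they are skipped to avoid biasing the velocity estimate.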
+            if (event->isResampled(i, h)) {
+                continue; // skip resampled samples
+            }
+            const int32_t pointerId = event->getPointerId(i);
+            for (int32_t axis : axesToProcess) {
+                const float position = event->getHistoricalAxisValue(axis, i, h);
+                addMovement(eventTime, pointerId, axis, position);
+            }
+        }
     }
 }
 
-bool VelocityTracker::getVelocity(uint32_t id, float* outVx, float* outVy) const {
-    Estimator estimator;
-    if (getEstimator(id, &estimator) && estimator.degree >= 1) {
-        *outVx = estimator.xCoeff[1];
-        *outVy = estimator.yCoeff[1];
-        return true;
+std::optional<float> VelocityTracker::getVelocity(int32_t axis, int32_t pointerId) const {
+    std::optional<Estimator> estimator = getEstimator(axis, pointerId);
+    if (estimator && (*estimator).degree >= 1) {
+        return (*estimator).coeff[1];
     }
-    *outVx = 0;
-    *outVy = 0;
-    return false;
+    return {};
 }
 
-bool VelocityTracker::getEstimator(uint32_t id, Estimator* outEstimator) const {
-    return mStrategy->getEstimator(id, outEstimator);
+VelocityTracker::ComputedVelocity VelocityTracker::getComputedVelocity(int32_t units,
+                                                                       float maxVelocity) {
+    ComputedVelocity computedVelocity;
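+    // Estimators report velocity in position units per second; `units` rescales the result
+    // (1000 keeps units/second, 1 yields units/millisecond), clamped to +/-maxVelocity.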
+    for (const auto& [axis, _] : mConfiguredStrategies) {
+        BitSet32 copyIdBits = BitSet32(mCurrentPointerIdBits);
+        while (!copyIdBits.isEmpty()) {
+            uint32_t id = copyIdBits.clearFirstMarkedBit();
+            std::optional<float> velocity = getVelocity(axis, id);
+            if (velocity) {
+                float adjustedVelocity =
+                        std::clamp(*velocity * units / 1000, -maxVelocity, maxVelocity);
+                computedVelocity.addVelocity(axis, id, adjustedVelocity);
+            }
+        }
+    }
+    return computedVelocity;
 }
 
+std::optional<VelocityTracker::Estimator> VelocityTracker::getEstimator(int32_t axis,
+                                                                        int32_t pointerId) const {
+    const auto& it = mConfiguredStrategies.find(axis);
+    if (it == mConfiguredStrategies.end()) {
+        return std::nullopt;
+    }
+    return it->second->getEstimator(pointerId);
+}
 
 // --- LeastSquaresVelocityTrackerStrategy ---
 
-LeastSquaresVelocityTrackerStrategy::LeastSquaresVelocityTrackerStrategy(
-        uint32_t degree, Weighting weighting) :
-        mDegree(degree), mWeighting(weighting) {
-    clear();
-}
+LeastSquaresVelocityTrackerStrategy::LeastSquaresVelocityTrackerStrategy(uint32_t degree,
+                                                                         Weighting weighting)
+      : mDegree(degree), mWeighting(weighting) {}
 
 LeastSquaresVelocityTrackerStrategy::~LeastSquaresVelocityTrackerStrategy() {
 }
 
-void LeastSquaresVelocityTrackerStrategy::clear() {
-    mIndex = 0;
-    mMovements[0].idBits.clear();
+void LeastSquaresVelocityTrackerStrategy::clearPointer(int32_t pointerId) {
+    mIndex.erase(pointerId);
+    mMovements.erase(pointerId);
 }
 
-void LeastSquaresVelocityTrackerStrategy::clearPointers(BitSet32 idBits) {
-    BitSet32 remainingIdBits(mMovements[mIndex].idBits.value & ~idBits.value);
-    mMovements[mIndex].idBits = remainingIdBits;
-}
-
-void LeastSquaresVelocityTrackerStrategy::addMovement(
-        nsecs_t eventTime, BitSet32 idBits,
-        const std::vector<VelocityTracker::Position>& positions) {
-    if (mMovements[mIndex].eventTime != eventTime) {
+void LeastSquaresVelocityTrackerStrategy::addMovement(nsecs_t eventTime, int32_t pointerId,
+                                                      float position) {
+    // If data for this pointer already exists, we have a valid entry at the position of
+    // mIndex[pointerId] and mMovements[pointerId]. In that case, we need to advance the index
+    // to the next position in the circular buffer and write the new Movement there. Otherwise,
+    // if this is a first movement for this pointer, we initialize the maps mIndex and mMovements
+    // for this pointer and write to the first position.
+    auto [movementIt, inserted] = mMovements.insert({pointerId, {}});
+    auto [indexIt, _] = mIndex.insert({pointerId, 0});
+    size_t& index = indexIt->second;
+    if (!inserted && movementIt->second[index].eventTime != eventTime) {
         // When ACTION_POINTER_DOWN happens, we will first receive ACTION_MOVE with the coordinates
         // of the existing pointers, and then ACTION_POINTER_DOWN with the coordinates that include
         // the new pointer. If the eventtimes for both events are identical, just update the data
         // for this time.
         // We only compare against the last value, as it is likely that addMovement is called
         // in chronological order as events occur.
-        mIndex++;
+        index++;
     }
-    if (mIndex == HISTORY_SIZE) {
-        mIndex = 0;
+    if (index == HISTORY_SIZE) {
+        index = 0;
     }
 
-    Movement& movement = mMovements[mIndex];
+    Movement& movement = movementIt->second[index];
     movement.eventTime = eventTime;
-    movement.idBits = idBits;
-    uint32_t count = idBits.count();
-    for (uint32_t i = 0; i < count; i++) {
-        movement.positions[i] = positions[i];
-    }
+    movement.position = position;
 }
 
 /**
@@ -436,12 +475,14 @@
  * http://en.wikipedia.org/wiki/Gram-Schmidt
  */
 static bool solveLeastSquares(const std::vector<float>& x, const std::vector<float>& y,
-                              const std::vector<float>& w, uint32_t n, float* outB, float* outDet) {
+                              const std::vector<float>& w, uint32_t n,
+                              std::array<float, VelocityTracker::Estimator::MAX_DEGREE + 1>& outB,
+                              float* outDet) {
     const size_t m = x.size();
-    if (DEBUG_STRATEGY) {
-        ALOGD("solveLeastSquares: m=%d, n=%d, x=%s, y=%s, w=%s", int(m), int(n),
-              vectorToString(x).c_str(), vectorToString(y).c_str(), vectorToString(w).c_str());
-    }
+
+    ALOGD_IF(DEBUG_STRATEGY, "solveLeastSquares: m=%d, n=%d, x=%s, y=%s, w=%s", int(m), int(n),
+             vectorToString(x).c_str(), vectorToString(y).c_str(), vectorToString(w).c_str());
+
     LOG_ALWAYS_FATAL_IF(m != y.size() || m != w.size(), "Mismatched vector sizes");
 
     // Expand the X vector to a matrix A, pre-multiplied by the weights.
@@ -452,9 +493,9 @@
             a[i][h] = a[i - 1][h] * x[h];
         }
     }
-    if (DEBUG_STRATEGY) {
-        ALOGD("  - a=%s", matrixToString(&a[0][0], m, n, false /*rowMajor*/).c_str());
-    }
+
+    ALOGD_IF(DEBUG_STRATEGY, "  - a=%s",
+             matrixToString(&a[0][0], m, n, /*rowMajor=*/false).c_str());
 
     // Apply the Gram-Schmidt process to A to obtain its QR decomposition.
     float q[n][m]; // orthonormal basis, column-major order
@@ -473,9 +514,7 @@
         float norm = vectorNorm(&q[j][0], m);
         if (norm < 0.000001f) {
             // vectors are linearly dependent or zero so no solution
-            if (DEBUG_STRATEGY) {
-                ALOGD("  - no solution, norm=%f", norm);
-            }
+            ALOGD_IF(DEBUG_STRATEGY, "  - no solution, norm=%f", norm);
             return false;
         }
 
@@ -488,8 +527,8 @@
         }
     }
     if (DEBUG_STRATEGY) {
-        ALOGD("  - q=%s", matrixToString(&q[0][0], m, n, false /*rowMajor*/).c_str());
-        ALOGD("  - r=%s", matrixToString(&r[0][0], n, n, true /*rowMajor*/).c_str());
+        ALOGD("  - q=%s", matrixToString(&q[0][0], m, n, /*rowMajor=*/false).c_str());
+        ALOGD("  - r=%s", matrixToString(&r[0][0], n, n, /*rowMajor=*/true).c_str());
 
         // calculate QR, if we factored A correctly then QR should equal A
         float qr[n][m];
@@ -501,7 +540,7 @@
                 }
             }
         }
-        ALOGD("  - qr=%s", matrixToString(&qr[0][0], m, n, false /*rowMajor*/).c_str());
+        ALOGD("  - qr=%s", matrixToString(&qr[0][0], m, n, /*rowMajor=*/false).c_str());
     }
 
     // Solve R B = Qt W Y to find B.  This is easy because R is upper triangular.
@@ -518,9 +557,8 @@
         }
         outB[i] /= r[i][i];
     }
-    if (DEBUG_STRATEGY) {
-        ALOGD("  - b=%s", vectorToString(outB, n).c_str());
-    }
+
+    ALOGD_IF(DEBUG_STRATEGY, "  - b=%s", vectorToString(outB.data(), n).c_str());
 
     // Calculate the coefficient of determination as 1 - (SSerr / SStot) where
     // SSerr is the residual sum of squares (variance of the error),
@@ -546,11 +584,11 @@
         sstot += w[h] * w[h] * var * var;
     }
     *outDet = sstot > 0.000001f ? 1.0f - (sserr / sstot) : 1;
-    if (DEBUG_STRATEGY) {
-        ALOGD("  - sserr=%f", sserr);
-        ALOGD("  - sstot=%f", sstot);
-        ALOGD("  - det=%f", *outDet);
-    }
+
+    ALOGD_IF(DEBUG_STRATEGY, "  - sserr=%f", sserr);
+    ALOGD_IF(DEBUG_STRATEGY, "  - sstot=%f", sstot);
+    ALOGD_IF(DEBUG_STRATEGY, "  - det=%f", *outDet);
+
     return true;
 }
 
@@ -608,40 +646,47 @@
     return std::make_optional(std::array<float, 3>({c, b, a}));
 }
 
-bool LeastSquaresVelocityTrackerStrategy::getEstimator(uint32_t id,
-        VelocityTracker::Estimator* outEstimator) const {
-    outEstimator->clear();
-
+std::optional<VelocityTracker::Estimator> LeastSquaresVelocityTrackerStrategy::getEstimator(
+        int32_t pointerId) const {
+    const auto movementIt = mMovements.find(pointerId);
+    if (movementIt == mMovements.end()) {
+        return std::nullopt; // no data
+    }
     // Iterate over movement samples in reverse time order and collect samples.
-    std::vector<float> x;
-    std::vector<float> y;
+    std::vector<float> positions;
     std::vector<float> w;
     std::vector<float> time;
 
-    uint32_t index = mIndex;
-    const Movement& newestMovement = mMovements[mIndex];
+    uint32_t index = mIndex.at(pointerId);
+    const Movement& newestMovement = movementIt->second[index];
     do {
-        const Movement& movement = mMovements[index];
-        if (!movement.idBits.hasBit(id)) {
-            break;
-        }
+        const Movement& movement = movementIt->second[index];
 
         nsecs_t age = newestMovement.eventTime - movement.eventTime;
         if (age > HORIZON) {
             break;
         }
-
-        const VelocityTracker::Position& position = movement.getPosition(id);
-        x.push_back(position.x);
-        y.push_back(position.y);
-        w.push_back(chooseWeight(index));
+        if (movement.eventTime == 0 && index != 0) {
+            // All eventTime values are initialized to 0. In this fixed-width circular buffer,
+            // it's possible that not all entries are valid. We use a time of 0 as a signal for
+            // those uninitialized values. If we encounter a time of 0 at an index > 0, it means
+            // we hit the block where the data wasn't initialized.
+            // We still don't know whether the value at index=0, with eventTime=0, is valid.
+            // However, that's only possible when the value is by itself. So there's no harm in
+            // processing it anyway, since the velocity for a single point is zero, and this
+            // situation will only be encountered in artificial circumstances (in tests).
+            // In practice, time will never be 0.
+            break;
+        }
+        positions.push_back(movement.position);
+        w.push_back(chooseWeight(pointerId, index));
         time.push_back(-age * 0.000000001f);
         index = (index == 0 ? HISTORY_SIZE : index) - 1;
-    } while (x.size() < HISTORY_SIZE);
+    } while (positions.size() < HISTORY_SIZE);
 
-    const size_t m = x.size();
+    const size_t m = positions.size();
     if (m == 0) {
-        return false; // no data
+        return std::nullopt; // no data
     }
 
     // Calculate a least squares polynomial fit.
@@ -650,115 +695,116 @@
         degree = m - 1;
     }
 
-    if (degree == 2 && mWeighting == WEIGHTING_NONE) {
+    if (degree == 2 && mWeighting == Weighting::NONE) {
         // Optimize unweighted, quadratic polynomial fit
-        std::optional<std::array<float, 3>> xCoeff = solveUnweightedLeastSquaresDeg2(time, x);
-        std::optional<std::array<float, 3>> yCoeff = solveUnweightedLeastSquaresDeg2(time, y);
-        if (xCoeff && yCoeff) {
-            outEstimator->time = newestMovement.eventTime;
-            outEstimator->degree = 2;
-            outEstimator->confidence = 1;
-            for (size_t i = 0; i <= outEstimator->degree; i++) {
-                outEstimator->xCoeff[i] = (*xCoeff)[i];
-                outEstimator->yCoeff[i] = (*yCoeff)[i];
+        std::optional<std::array<float, 3>> coeff =
+                solveUnweightedLeastSquaresDeg2(time, positions);
+        if (coeff) {
+            VelocityTracker::Estimator estimator;
+            estimator.time = newestMovement.eventTime;
+            estimator.degree = 2;
+            estimator.confidence = 1;
+            for (size_t i = 0; i <= estimator.degree; i++) {
+                estimator.coeff[i] = (*coeff)[i];
             }
-            return true;
+            return estimator;
         }
     } else if (degree >= 1) {
         // General case for an Nth degree polynomial fit
-        float xdet, ydet;
+        float det;
         uint32_t n = degree + 1;
-        if (solveLeastSquares(time, x, w, n, outEstimator->xCoeff, &xdet) &&
-            solveLeastSquares(time, y, w, n, outEstimator->yCoeff, &ydet)) {
-            outEstimator->time = newestMovement.eventTime;
-            outEstimator->degree = degree;
-            outEstimator->confidence = xdet * ydet;
-            if (DEBUG_STRATEGY) {
-                ALOGD("estimate: degree=%d, xCoeff=%s, yCoeff=%s, confidence=%f",
-                      int(outEstimator->degree), vectorToString(outEstimator->xCoeff, n).c_str(),
-                      vectorToString(outEstimator->yCoeff, n).c_str(), outEstimator->confidence);
-            }
-            return true;
+        VelocityTracker::Estimator estimator;
+        if (solveLeastSquares(time, positions, w, n, estimator.coeff, &det)) {
+            estimator.time = newestMovement.eventTime;
+            estimator.degree = degree;
+            estimator.confidence = det;
+
+            ALOGD_IF(DEBUG_STRATEGY, "estimate: degree=%d, coeff=%s, confidence=%f",
+                     int(estimator.degree), vectorToString(estimator.coeff.data(), n).c_str(),
+                     estimator.confidence);
+
+            return estimator;
         }
     }
 
     // No velocity data available for this pointer, but we do have its current position.
-    outEstimator->xCoeff[0] = x[0];
-    outEstimator->yCoeff[0] = y[0];
-    outEstimator->time = newestMovement.eventTime;
-    outEstimator->degree = 0;
-    outEstimator->confidence = 1;
-    return true;
+    VelocityTracker::Estimator estimator;
+    estimator.coeff[0] = positions[0];
+    estimator.time = newestMovement.eventTime;
+    estimator.degree = 0;
+    estimator.confidence = 1;
+    return estimator;
 }
 
-float LeastSquaresVelocityTrackerStrategy::chooseWeight(uint32_t index) const {
+float LeastSquaresVelocityTrackerStrategy::chooseWeight(int32_t pointerId, uint32_t index) const {
+    const std::array<Movement, HISTORY_SIZE>& movements = mMovements.at(pointerId);
     switch (mWeighting) {
-    case WEIGHTING_DELTA: {
-        // Weight points based on how much time elapsed between them and the next
-        // point so that points that "cover" a shorter time span are weighed less.
-        //   delta  0ms: 0.5
-        //   delta 10ms: 1.0
-        if (index == mIndex) {
+        case Weighting::DELTA: {
+            // Weight points based on how much time elapsed between them and the next
+            // point so that points that "cover" a shorter time span are weighed less.
+            //   delta  0ms: 0.5
+            //   delta 10ms: 1.0
+            if (index == mIndex.at(pointerId)) {
+                return 1.0f;
+            }
+            uint32_t nextIndex = (index + 1) % HISTORY_SIZE;
+            float deltaMillis =
+                    (movements[nextIndex].eventTime - movements[index].eventTime) * 0.000001f;
+            if (deltaMillis < 0) {
+                return 0.5f;
+            }
+            if (deltaMillis < 10) {
+                return 0.5f + deltaMillis * 0.05;
+            }
             return 1.0f;
         }
-        uint32_t nextIndex = (index + 1) % HISTORY_SIZE;
-        float deltaMillis = (mMovements[nextIndex].eventTime- mMovements[index].eventTime)
-                * 0.000001f;
-        if (deltaMillis < 0) {
+
+        case Weighting::CENTRAL: {
+            // Weight points based on their age, weighing very recent and very old points less.
+            //   age  0ms: 0.5
+            //   age 10ms: 1.0
+            //   age 50ms: 1.0
+            //   age 60ms: 0.5
+            float ageMillis =
+                    (movements[mIndex.at(pointerId)].eventTime - movements[index].eventTime) *
+                    0.000001f;
+            if (ageMillis < 0) {
+                return 0.5f;
+            }
+            if (ageMillis < 10) {
+                return 0.5f + ageMillis * 0.05;
+            }
+            if (ageMillis < 50) {
+                return 1.0f;
+            }
+            if (ageMillis < 60) {
+                return 0.5f + (60 - ageMillis) * 0.05;
+            }
             return 0.5f;
         }
-        if (deltaMillis < 10) {
-            return 0.5f + deltaMillis * 0.05;
-        }
-        return 1.0f;
-    }
 
-    case WEIGHTING_CENTRAL: {
-        // Weight points based on their age, weighing very recent and very old points less.
-        //   age  0ms: 0.5
-        //   age 10ms: 1.0
-        //   age 50ms: 1.0
-        //   age 60ms: 0.5
-        float ageMillis = (mMovements[mIndex].eventTime - mMovements[index].eventTime)
-                * 0.000001f;
-        if (ageMillis < 0) {
+        case Weighting::RECENT: {
+            // Weight points based on their age, weighing older points less.
+            //   age   0ms: 1.0
+            //   age  50ms: 1.0
+            //   age 100ms: 0.5
+            float ageMillis =
+                    (movements[mIndex.at(pointerId)].eventTime - movements[index].eventTime) *
+                    0.000001f;
+            if (ageMillis < 50) {
+                return 1.0f;
+            }
+            if (ageMillis < 100) {
+                return 0.5f + (100 - ageMillis) * 0.01f;
+            }
             return 0.5f;
         }
-        if (ageMillis < 10) {
-            return 0.5f + ageMillis * 0.05;
-        }
-        if (ageMillis < 50) {
-            return 1.0f;
-        }
-        if (ageMillis < 60) {
-            return 0.5f + (60 - ageMillis) * 0.05;
-        }
-        return 0.5f;
-    }
 
-    case WEIGHTING_RECENT: {
-        // Weight points based on their age, weighing older points less.
-        //   age   0ms: 1.0
-        //   age  50ms: 1.0
-        //   age 100ms: 0.5
-        float ageMillis = (mMovements[mIndex].eventTime - mMovements[index].eventTime)
-                * 0.000001f;
-        if (ageMillis < 50) {
+        case Weighting::NONE:
             return 1.0f;
-        }
-        if (ageMillis < 100) {
-            return 0.5f + (100 - ageMillis) * 0.01f;
-        }
-        return 0.5f;
-    }
-
-    case WEIGHTING_NONE:
-    default:
-        return 1.0f;
     }
 }
 
-
 // --- IntegratingVelocityTrackerStrategy ---
 
 IntegratingVelocityTrackerStrategy::IntegratingVelocityTrackerStrategy(uint32_t degree) :
@@ -768,60 +814,46 @@
 IntegratingVelocityTrackerStrategy::~IntegratingVelocityTrackerStrategy() {
 }
 
-void IntegratingVelocityTrackerStrategy::clear() {
-    mPointerIdBits.clear();
+void IntegratingVelocityTrackerStrategy::clearPointer(int32_t pointerId) {
+    mPointerIdBits.clearBit(pointerId);
 }
 
-void IntegratingVelocityTrackerStrategy::clearPointers(BitSet32 idBits) {
-    mPointerIdBits.value &= ~idBits.value;
-}
-
-void IntegratingVelocityTrackerStrategy::addMovement(
-        nsecs_t eventTime, BitSet32 idBits,
-        const std::vector<VelocityTracker::Position>& positions) {
-    uint32_t index = 0;
-    for (BitSet32 iterIdBits(idBits); !iterIdBits.isEmpty();) {
-        uint32_t id = iterIdBits.clearFirstMarkedBit();
-        State& state = mPointerState[id];
-        const VelocityTracker::Position& position = positions[index++];
-        if (mPointerIdBits.hasBit(id)) {
-            updateState(state, eventTime, position.x, position.y);
-        } else {
-            initState(state, eventTime, position.x, position.y);
-        }
+void IntegratingVelocityTrackerStrategy::addMovement(nsecs_t eventTime, int32_t pointerId,
+                                                     float position) {
+    State& state = mPointerState[pointerId];
+    if (mPointerIdBits.hasBit(pointerId)) {
+        updateState(state, eventTime, position);
+    } else {
+        initState(state, eventTime, position);
     }
 
-    mPointerIdBits = idBits;
+    mPointerIdBits.markBit(pointerId);
 }
 
-bool IntegratingVelocityTrackerStrategy::getEstimator(uint32_t id,
-        VelocityTracker::Estimator* outEstimator) const {
-    outEstimator->clear();
-
-    if (mPointerIdBits.hasBit(id)) {
-        const State& state = mPointerState[id];
-        populateEstimator(state, outEstimator);
-        return true;
+std::optional<VelocityTracker::Estimator> IntegratingVelocityTrackerStrategy::getEstimator(
+        int32_t pointerId) const {
+    if (mPointerIdBits.hasBit(pointerId)) {
+        const State& state = mPointerState[pointerId];
+        VelocityTracker::Estimator estimator;
+        populateEstimator(state, &estimator);
+        return estimator;
     }
 
-    return false;
+    return std::nullopt;
 }
 
-void IntegratingVelocityTrackerStrategy::initState(State& state,
-        nsecs_t eventTime, float xpos, float ypos) const {
+void IntegratingVelocityTrackerStrategy::initState(State& state, nsecs_t eventTime,
+                                                   float pos) const {
     state.updateTime = eventTime;
     state.degree = 0;
 
-    state.xpos = xpos;
-    state.xvel = 0;
-    state.xaccel = 0;
-    state.ypos = ypos;
-    state.yvel = 0;
-    state.yaccel = 0;
+    state.pos = pos;
+    state.accel = 0;
+    state.vel = 0;
 }
 
-void IntegratingVelocityTrackerStrategy::updateState(State& state,
-        nsecs_t eventTime, float xpos, float ypos) const {
+void IntegratingVelocityTrackerStrategy::updateState(State& state, nsecs_t eventTime,
+                                                     float pos) const {
     const nsecs_t MIN_TIME_DELTA = 2 * NANOS_PER_MS;
     const float FILTER_TIME_CONSTANT = 0.010f; // 10 milliseconds
 
@@ -832,34 +864,26 @@
     float dt = (eventTime - state.updateTime) * 0.000000001f;
     state.updateTime = eventTime;
 
-    float xvel = (xpos - state.xpos) / dt;
-    float yvel = (ypos - state.ypos) / dt;
+    float vel = (pos - state.pos) / dt;
     if (state.degree == 0) {
-        state.xvel = xvel;
-        state.yvel = yvel;
+        state.vel = vel;
         state.degree = 1;
     } else {
         float alpha = dt / (FILTER_TIME_CONSTANT + dt);
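+        // alpha acts as a first-order low-pass filter coefficient: larger time deltas weight
+        // the newest measurement more heavily.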
         if (mDegree == 1) {
-            state.xvel += (xvel - state.xvel) * alpha;
-            state.yvel += (yvel - state.yvel) * alpha;
+            state.vel += (vel - state.vel) * alpha;
         } else {
-            float xaccel = (xvel - state.xvel) / dt;
-            float yaccel = (yvel - state.yvel) / dt;
+            float accel = (vel - state.vel) / dt;
             if (state.degree == 1) {
-                state.xaccel = xaccel;
-                state.yaccel = yaccel;
+                state.accel = accel;
                 state.degree = 2;
             } else {
-                state.xaccel += (xaccel - state.xaccel) * alpha;
-                state.yaccel += (yaccel - state.yaccel) * alpha;
+                state.accel += (accel - state.accel) * alpha;
             }
-            state.xvel += (state.xaccel * dt) * alpha;
-            state.yvel += (state.yaccel * dt) * alpha;
+            state.vel += (state.accel * dt) * alpha;
         }
     }
-    state.xpos = xpos;
-    state.ypos = ypos;
+    state.pos = pos;
 }
 
 void IntegratingVelocityTrackerStrategy::populateEstimator(const State& state,
@@ -867,68 +891,68 @@
     outEstimator->time = state.updateTime;
     outEstimator->confidence = 1.0f;
     outEstimator->degree = state.degree;
-    outEstimator->xCoeff[0] = state.xpos;
-    outEstimator->xCoeff[1] = state.xvel;
-    outEstimator->xCoeff[2] = state.xaccel / 2;
-    outEstimator->yCoeff[0] = state.ypos;
-    outEstimator->yCoeff[1] = state.yvel;
-    outEstimator->yCoeff[2] = state.yaccel / 2;
+    outEstimator->coeff[0] = state.pos;
+    outEstimator->coeff[1] = state.vel;
+    outEstimator->coeff[2] = state.accel / 2;
 }
 
 
 // --- LegacyVelocityTrackerStrategy ---
 
-LegacyVelocityTrackerStrategy::LegacyVelocityTrackerStrategy() {
-    clear();
-}
+LegacyVelocityTrackerStrategy::LegacyVelocityTrackerStrategy() {}
 
 LegacyVelocityTrackerStrategy::~LegacyVelocityTrackerStrategy() {
 }
 
-void LegacyVelocityTrackerStrategy::clear() {
-    mIndex = 0;
-    mMovements[0].idBits.clear();
+void LegacyVelocityTrackerStrategy::clearPointer(int32_t pointerId) {
+    mIndex.erase(pointerId);
+    mMovements.erase(pointerId);
 }
 
-void LegacyVelocityTrackerStrategy::clearPointers(BitSet32 idBits) {
-    BitSet32 remainingIdBits(mMovements[mIndex].idBits.value & ~idBits.value);
-    mMovements[mIndex].idBits = remainingIdBits;
-}
-
-void LegacyVelocityTrackerStrategy::addMovement(
-        nsecs_t eventTime, BitSet32 idBits,
-        const std::vector<VelocityTracker::Position>& positions) {
-    if (++mIndex == HISTORY_SIZE) {
-        mIndex = 0;
+void LegacyVelocityTrackerStrategy::addMovement(nsecs_t eventTime, int32_t pointerId,
+                                                float position) {
+    // If data for this pointer already exists, we have a valid entry at the position of
+    // mIndex[pointerId] and mMovements[pointerId]. In that case, we need to advance the index
+    // to the next position in the circular buffer and write the new Movement there. Otherwise,
+    // if this is a first movement for this pointer, we initialize the maps mIndex and mMovements
+    // for this pointer and write to the first position.
+    auto [movementIt, inserted] = mMovements.insert({pointerId, {}});
+    auto [indexIt, _] = mIndex.insert({pointerId, 0});
+    size_t& index = indexIt->second;
+    if (!inserted && movementIt->second[index].eventTime != eventTime) {
+        // When ACTION_POINTER_DOWN happens, we will first receive ACTION_MOVE with the coordinates
+        // of the existing pointers, and then ACTION_POINTER_DOWN with the coordinates that include
+        // the new pointer. If the eventtimes for both events are identical, just update the data
+        // for this time.
+        // We only compare against the last value, as it is likely that addMovement is called
+        // in chronological order as events occur.
+        index++;
+    }
+    if (index == HISTORY_SIZE) {
+        index = 0;
     }
 
-    Movement& movement = mMovements[mIndex];
+    Movement& movement = movementIt->second[index];
     movement.eventTime = eventTime;
-    movement.idBits = idBits;
-    uint32_t count = idBits.count();
-    for (uint32_t i = 0; i < count; i++) {
-        movement.positions[i] = positions[i];
-    }
+    movement.position = position;
 }
 
-bool LegacyVelocityTrackerStrategy::getEstimator(uint32_t id,
-        VelocityTracker::Estimator* outEstimator) const {
-    outEstimator->clear();
-
-    const Movement& newestMovement = mMovements[mIndex];
-    if (!newestMovement.idBits.hasBit(id)) {
-        return false; // no data
+std::optional<VelocityTracker::Estimator> LegacyVelocityTrackerStrategy::getEstimator(
+        int32_t pointerId) const {
+    const auto movementIt = mMovements.find(pointerId);
+    if (movementIt == mMovements.end()) {
+        return std::nullopt; // no data
     }
+    const Movement& newestMovement = movementIt->second[mIndex.at(pointerId)];
 
     // Find the oldest sample that contains the pointer and that is not older than HORIZON.
     nsecs_t minTime = newestMovement.eventTime - HORIZON;
-    uint32_t oldestIndex = mIndex;
+    uint32_t oldestIndex = mIndex.at(pointerId);
     uint32_t numTouches = 1;
     do {
         uint32_t nextOldestIndex = (oldestIndex == 0 ? HISTORY_SIZE : oldestIndex) - 1;
-        const Movement& nextOldestMovement = mMovements[nextOldestIndex];
-        if (!nextOldestMovement.idBits.hasBit(id)
-                || nextOldestMovement.eventTime < minTime) {
+        const Movement& nextOldestMovement = mMovements.at(pointerId)[nextOldestIndex];
+        if (nextOldestMovement.eventTime < minTime) {
             break;
         }
         oldestIndex = nextOldestIndex;
@@ -945,94 +969,87 @@
     // overestimate the velocity at that time point.  Most samples might be measured
     // 16ms apart but some consecutive samples could be only 0.5ms apart because
     // the hardware or driver reports them irregularly or in bursts.
-    float accumVx = 0;
-    float accumVy = 0;
+    float accumV = 0;
     uint32_t index = oldestIndex;
     uint32_t samplesUsed = 0;
-    const Movement& oldestMovement = mMovements[oldestIndex];
-    const VelocityTracker::Position& oldestPosition = oldestMovement.getPosition(id);
+    const Movement& oldestMovement = mMovements.at(pointerId)[oldestIndex];
+    float oldestPosition = oldestMovement.position;
     nsecs_t lastDuration = 0;
 
     while (numTouches-- > 1) {
         if (++index == HISTORY_SIZE) {
             index = 0;
         }
-        const Movement& movement = mMovements[index];
+        const Movement& movement = mMovements.at(pointerId)[index];
         nsecs_t duration = movement.eventTime - oldestMovement.eventTime;
 
         // If the duration between samples is small, we may significantly overestimate
         // the velocity.  Consequently, we impose a minimum duration constraint on the
         // samples that we include in the calculation.
         if (duration >= MIN_DURATION) {
-            const VelocityTracker::Position& position = movement.getPosition(id);
+            float position = movement.position;
             float scale = 1000000000.0f / duration; // one over time delta in seconds
-            float vx = (position.x - oldestPosition.x) * scale;
-            float vy = (position.y - oldestPosition.y) * scale;
-            accumVx = (accumVx * lastDuration + vx * duration) / (duration + lastDuration);
-            accumVy = (accumVy * lastDuration + vy * duration) / (duration + lastDuration);
+            float v = (position - oldestPosition) * scale;
+            accumV = (accumV * lastDuration + v * duration) / (duration + lastDuration);
             lastDuration = duration;
             samplesUsed += 1;
         }
     }
 
     // Report velocity.
-    const VelocityTracker::Position& newestPosition = newestMovement.getPosition(id);
-    outEstimator->time = newestMovement.eventTime;
-    outEstimator->confidence = 1;
-    outEstimator->xCoeff[0] = newestPosition.x;
-    outEstimator->yCoeff[0] = newestPosition.y;
+    float newestPosition = newestMovement.position;
+    VelocityTracker::Estimator estimator;
+    estimator.time = newestMovement.eventTime;
+    estimator.confidence = 1;
+    estimator.coeff[0] = newestPosition;
     if (samplesUsed) {
-        outEstimator->xCoeff[1] = accumVx;
-        outEstimator->yCoeff[1] = accumVy;
-        outEstimator->degree = 1;
+        estimator.coeff[1] = accumV;
+        estimator.degree = 1;
     } else {
-        outEstimator->degree = 0;
+        estimator.degree = 0;
     }
-    return true;
+    return estimator;
 }
 
 // --- ImpulseVelocityTrackerStrategy ---
 
-ImpulseVelocityTrackerStrategy::ImpulseVelocityTrackerStrategy() {
-    clear();
-}
+ImpulseVelocityTrackerStrategy::ImpulseVelocityTrackerStrategy(bool deltaValues)
+      : mDeltaValues(deltaValues) {}
 
 ImpulseVelocityTrackerStrategy::~ImpulseVelocityTrackerStrategy() {
 }
 
-void ImpulseVelocityTrackerStrategy::clear() {
-    mIndex = 0;
-    mMovements[0].idBits.clear();
+void ImpulseVelocityTrackerStrategy::clearPointer(int32_t pointerId) {
+    mIndex.erase(pointerId);
+    mMovements.erase(pointerId);
 }
 
-void ImpulseVelocityTrackerStrategy::clearPointers(BitSet32 idBits) {
-    BitSet32 remainingIdBits(mMovements[mIndex].idBits.value & ~idBits.value);
-    mMovements[mIndex].idBits = remainingIdBits;
-}
-
-void ImpulseVelocityTrackerStrategy::addMovement(
-        nsecs_t eventTime, BitSet32 idBits,
-        const std::vector<VelocityTracker::Position>& positions) {
-    if (mMovements[mIndex].eventTime != eventTime) {
+void ImpulseVelocityTrackerStrategy::addMovement(nsecs_t eventTime, int32_t pointerId,
+                                                 float position) {
+    // If data for this pointer already exists, we have a valid entry at the position of
+    // mIndex[pointerId] and mMovements[pointerId]. In that case, we need to advance the index
+    // to the next position in the circular buffer and write the new Movement there. Otherwise,
+    // if this is the first movement for this pointer, we initialize the maps mIndex and mMovements
+    // for this pointer and write to the first position.
+    auto [movementIt, inserted] = mMovements.insert({pointerId, {}});
+    auto [indexIt, _] = mIndex.insert({pointerId, 0});
+    size_t& index = indexIt->second;
+    if (!inserted && movementIt->second[index].eventTime != eventTime) {
         // When ACTION_POINTER_DOWN happens, we will first receive ACTION_MOVE with the coordinates
         // of the existing pointers, and then ACTION_POINTER_DOWN with the coordinates that include
         // the new pointer. If the event times for both events are identical, just update the data
         // for this time.
         // We only compare against the last value, as it is likely that addMovement is called
         // in chronological order as events occur.
-        mIndex++;
+        index++;
     }
-    if (mIndex == HISTORY_SIZE) {
-        mIndex = 0;
+    if (index == HISTORY_SIZE) {
+        index = 0;
     }
 
-    Movement& movement = mMovements[mIndex];
+    Movement& movement = movementIt->second[index];
     movement.eventTime = eventTime;
-    movement.idBits = idBits;
-    uint32_t count = idBits.count();
-    for (uint32_t i = 0; i < count; i++) {
-        movement.positions[i] = positions[i];
-    }
+    movement.position = position;
 }
 
 /**
@@ -1109,7 +1126,8 @@
     return (work < 0 ? -1.0 : 1.0) * sqrtf(fabsf(work)) * sqrt2;
 }
 
-static float calculateImpulseVelocity(const nsecs_t* t, const float* x, size_t count) {
+static float calculateImpulseVelocity(const nsecs_t* t, const float* x, size_t count,
+                                      bool deltaValues) {
     // The input should be in reversed time order (most recent sample at index i=0)
     // t[i] is in nanoseconds, but due to FP arithmetic, convert to seconds inside this function
     static constexpr float SECONDS_PER_NANO = 1E-9;
@@ -1120,12 +1138,26 @@
     if (t[1] > t[0]) { // Algorithm will still work, but not perfectly
         ALOGE("Samples provided to calculateImpulseVelocity in the wrong order");
     }
+
+    // If the data values are delta values, we do not have to calculate deltas here.
+    // We can use the delta values directly, along with the calculated time deltas.
+    // Since the data value input is in reversed time order:
+    //      [a] for non-delta inputs, instantaneous velocity = (x[i] - x[i-1])/(t[i] - t[i-1])
+    //      [b] for delta inputs, instantaneous velocity = -x[i-1]/(t[i] - t[i-1])
+    // e.g., suppose the non-delta values are V = [2, 3, 7]; the equivalent deltas are D = [2, 1, 4].
+    // Since the input is in reversed time order, the input values for this function would be
+    // V'=[7, 3, 2] and D'=[4, 1, 2] for the non-delta and delta values, respectively.
+    //
+    // The equivalent of {(V'[2] - V'[1]) = 2 - 3 = -1} would be {-D'[1] = -1}
+    // Similarly, the equivalent of {(V'[1] - V'[0]) = 3 - 7 = -4} would be {-D'[0] = -4}
+
     if (count == 2) { // if 2 points, basic linear calculation
         if (t[1] == t[0]) {
             ALOGE("Events have identical time stamps t=%" PRId64 ", setting velocity = 0", t[0]);
             return 0;
         }
-        return (x[1] - x[0]) / (SECONDS_PER_NANO * (t[1] - t[0]));
+        const float deltaX = deltaValues ? -x[0] : x[1] - x[0];
+        return deltaX / (SECONDS_PER_NANO * (t[1] - t[0]));
     }
     // Guaranteed to have at least 3 points here
     float work = 0;
@@ -1135,7 +1167,8 @@
             continue;
         }
         float vprev = kineticEnergyToVelocity(work); // v[i-1]
-        float vcurr = (x[i] - x[i-1]) / (SECONDS_PER_NANO * (t[i] - t[i-1])); // v[i]
+        const float deltaX = deltaValues ? -x[i-1] : x[i] - x[i-1];
+        float vcurr = deltaX / (SECONDS_PER_NANO * (t[i] - t[i-1])); // v[i]
         work += (vcurr - vprev) * fabsf(vcurr);
         if (i == count - 1) {
             work *= 0.5; // initial condition, case 2) above
@@ -1144,69 +1177,70 @@
     return kineticEnergyToVelocity(work);
 }
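A minimal sketch of the two-sample case handled by calculateImpulseVelocity above, assuming the arrays are in reversed time order (index 0 is the most recent sample) and t[] is in nanoseconds; for delta inputs the displacement is simply -x[0], for absolute positions it is x[1] - x[0]:

    #include <cstdint>

    float twoPointVelocity(const int64_t t[2], const float x[2], bool deltaValues) {
        constexpr float kSecondsPerNano = 1e-9f;
        if (t[1] == t[0]) {
            return 0.0f; // identical timestamps: velocity is undefined, report 0
        }
        // Both the displacement and the time delta are "older minus newer", so the
        // signs cancel and the result keeps the conventional orientation.
        const float deltaX = deltaValues ? -x[0] : x[1] - x[0];
        return deltaX / (kSecondsPerNano * (t[1] - t[0]));
    }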
 
-bool ImpulseVelocityTrackerStrategy::getEstimator(uint32_t id,
-        VelocityTracker::Estimator* outEstimator) const {
-    outEstimator->clear();
+std::optional<VelocityTracker::Estimator> ImpulseVelocityTrackerStrategy::getEstimator(
+        int32_t pointerId) const {
+    const auto movementIt = mMovements.find(pointerId);
+    if (movementIt == mMovements.end()) {
+        return std::nullopt; // no data
+    }
 
     // Iterate over movement samples in reverse time order and collect samples.
-    float x[HISTORY_SIZE];
-    float y[HISTORY_SIZE];
+    float positions[HISTORY_SIZE];
     nsecs_t time[HISTORY_SIZE];
     size_t m = 0; // number of points that will be used for fitting
-    size_t index = mIndex;
-    const Movement& newestMovement = mMovements[mIndex];
+    size_t index = mIndex.at(pointerId);
+    const Movement& newestMovement = movementIt->second[index];
     do {
-        const Movement& movement = mMovements[index];
-        if (!movement.idBits.hasBit(id)) {
-            break;
-        }
+        const Movement& movement = movementIt->second[index];
 
         nsecs_t age = newestMovement.eventTime - movement.eventTime;
         if (age > HORIZON) {
             break;
         }
+        if (movement.eventTime == 0 && index != 0) {
+            // All eventTime values are initialized to 0. If we encounter a time of 0 at an index
+            // that's > 0, it means that we hit the block where the data wasn't initialized.
+            // It's also possible that the sample at 0 would be invalid, but there's no harm in
+            // processing it, since it would be just a single point, and will only be encountered
+            // in artificial circumstances (in tests).
+            break;
+        }
 
-        const VelocityTracker::Position& position = movement.getPosition(id);
-        x[m] = position.x;
-        y[m] = position.y;
+        positions[m] = movement.position;
         time[m] = movement.eventTime;
         index = (index == 0 ? HISTORY_SIZE : index) - 1;
     } while (++m < HISTORY_SIZE);
 
     if (m == 0) {
-        return false; // no data
+        return std::nullopt; // no data
     }
-    outEstimator->xCoeff[0] = 0;
-    outEstimator->yCoeff[0] = 0;
-    outEstimator->xCoeff[1] = calculateImpulseVelocity(time, x, m);
-    outEstimator->yCoeff[1] = calculateImpulseVelocity(time, y, m);
-    outEstimator->xCoeff[2] = 0;
-    outEstimator->yCoeff[2] = 0;
-    outEstimator->time = newestMovement.eventTime;
-    outEstimator->degree = 2; // similar results to 2nd degree fit
-    outEstimator->confidence = 1;
-    if (DEBUG_STRATEGY) {
-        ALOGD("velocity: (%.1f, %.1f)", outEstimator->xCoeff[1], outEstimator->yCoeff[1]);
-    }
+    VelocityTracker::Estimator estimator;
+    estimator.coeff[0] = 0;
+    estimator.coeff[1] = calculateImpulseVelocity(time, positions, m, mDeltaValues);
+    estimator.coeff[2] = 0;
+
+    estimator.time = newestMovement.eventTime;
+    estimator.degree = 2; // similar results to 2nd degree fit
+    estimator.confidence = 1;
+
+    ALOGD_IF(DEBUG_STRATEGY, "velocity: %.1f", estimator.coeff[1]);
+
     if (DEBUG_IMPULSE) {
         // TODO(b/134179997): delete this block once the switch to 'impulse' is complete.
-        // Calculate the lsq2 velocity for the same inputs to allow runtime comparisons
+        // Calculate the lsq2 velocity for the same inputs to allow runtime comparisons.
+        // X axis chosen arbitrarily for velocity comparisons.
         VelocityTracker lsq2(VelocityTracker::Strategy::LSQ2);
-        BitSet32 idBits;
-        const uint32_t pointerId = 0;
-        idBits.markBit(pointerId);
         for (ssize_t i = m - 1; i >= 0; i--) {
-            lsq2.addMovement(time[i], idBits, {{x[i], y[i]}});
+            lsq2.addMovement(time[i], pointerId, AMOTION_EVENT_AXIS_X, positions[i]);
         }
-        float outVx = 0, outVy = 0;
-        const bool computed = lsq2.getVelocity(pointerId, &outVx, &outVy);
-        if (computed) {
-            ALOGD("lsq2 velocity: (%.1f, %.1f)", outVx, outVy);
+        std::optional<float> v = lsq2.getVelocity(AMOTION_EVENT_AXIS_X, pointerId);
+        if (v) {
+            ALOGD("lsq2 velocity: %.1f", *v);
         } else {
             ALOGD("lsq2 velocity: could not compute velocity");
         }
     }
-    return true;
+    return estimator;
 }
 
 } // namespace android
diff --git a/libs/input/VirtualInputDevice.cpp b/libs/input/VirtualInputDevice.cpp
new file mode 100644
index 0000000..9a459b1
--- /dev/null
+++ b/libs/input/VirtualInputDevice.cpp
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "VirtualInputDevice"
+
+#include <android/input.h>
+#include <android/keycodes.h>
+#include <fcntl.h>
+#include <input/Input.h>
+#include <input/VirtualInputDevice.h>
+#include <linux/uinput.h>
+#include <math.h>
+#include <utils/Log.h>
+
+#include <map>
+#include <string>
+
+using android::base::unique_fd;
+
+/**
+ * Log debug messages about native virtual input devices.
+ * Enable this via "adb shell setprop log.tag.VirtualInputDevice DEBUG"
+ */
+static bool isDebug() {
+    return __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG, ANDROID_LOG_INFO);
+}
+
+namespace android {
+VirtualInputDevice::VirtualInputDevice(unique_fd fd) : mFd(std::move(fd)) {}
+VirtualInputDevice::~VirtualInputDevice() {
+    ioctl(mFd, UI_DEV_DESTROY);
+}
+
+bool VirtualInputDevice::writeInputEvent(uint16_t type, uint16_t code, int32_t value,
+                                         std::chrono::nanoseconds eventTime) {
+    std::chrono::seconds seconds = std::chrono::duration_cast<std::chrono::seconds>(eventTime);
+    std::chrono::microseconds microseconds =
+            std::chrono::duration_cast<std::chrono::microseconds>(eventTime - seconds);
+    struct input_event ev = {.type = type, .code = code, .value = value};
+    ev.input_event_sec = static_cast<decltype(ev.input_event_sec)>(seconds.count());
+    ev.input_event_usec = static_cast<decltype(ev.input_event_usec)>(microseconds.count());
+
+    return TEMP_FAILURE_RETRY(write(mFd, &ev, sizeof(struct input_event))) == sizeof(ev);
+}
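A standalone sketch of the timestamp split performed above: the nanosecond event time is decomposed into whole seconds plus the leftover microseconds that struct input_event carries. The values are for illustration only:

    #include <chrono>
    #include <cstdio>

    int main() {
        const std::chrono::nanoseconds eventTime{1'234'567'890}; // 1.234567890 s
        const auto seconds = std::chrono::duration_cast<std::chrono::seconds>(eventTime);
        const auto microseconds =
                std::chrono::duration_cast<std::chrono::microseconds>(eventTime - seconds);
        // Prints: sec=1 usec=234567 (the sub-microsecond remainder is truncated).
        std::printf("sec=%lld usec=%lld\n", static_cast<long long>(seconds.count()),
                    static_cast<long long>(microseconds.count()));
        return 0;
    }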
+
+/** Utility method to write keyboard key events or mouse button events. */
+bool VirtualInputDevice::writeEvKeyEvent(int32_t androidCode, int32_t androidAction,
+                                         const std::map<int, int>& evKeyCodeMapping,
+                                         const std::map<int, UinputAction>& actionMapping,
+                                         std::chrono::nanoseconds eventTime) {
+    auto evKeyCodeIterator = evKeyCodeMapping.find(androidCode);
+    if (evKeyCodeIterator == evKeyCodeMapping.end()) {
+        ALOGE("Unsupported native EV keycode for android code %d", androidCode);
+        return false;
+    }
+    auto actionIterator = actionMapping.find(androidAction);
+    if (actionIterator == actionMapping.end()) {
+        return false;
+    }
+    if (!writeInputEvent(EV_KEY, static_cast<uint16_t>(evKeyCodeIterator->second),
+                         static_cast<int32_t>(actionIterator->second), eventTime)) {
+        return false;
+    }
+    if (!writeInputEvent(EV_SYN, SYN_REPORT, 0, eventTime)) {
+        return false;
+    }
+    return true;
+}
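A minimal sketch of the lookup-then-emit pattern writeEvKeyEvent follows: translate the Android code and action through the two maps, then send the EV_KEY event followed by a SYN_REPORT so the kernel sees one complete report. The emit callback is hypothetical; the real class writes directly to the uinput fd.

    #include <cstdint>
    #include <functional>
    #include <map>

    enum class Action { RELEASE = 0, PRESS = 1 };

    bool emitKeyEvent(int androidCode, int androidAction, const std::map<int, int>& codeMap,
                      const std::map<int, Action>& actionMap,
                      const std::function<bool(uint16_t type, uint16_t code, int32_t value)>& emit) {
        const auto code = codeMap.find(androidCode);
        const auto action = actionMap.find(androidAction);
        if (code == codeMap.end() || action == actionMap.end()) {
            return false; // unsupported key code or action
        }
        constexpr uint16_t kEvKey = 0x01;     // EV_KEY
        constexpr uint16_t kEvSyn = 0x00;     // EV_SYN
        constexpr uint16_t kSynReport = 0x00; // SYN_REPORT
        return emit(kEvKey, static_cast<uint16_t>(code->second),
                    static_cast<int32_t>(action->second)) &&
                emit(kEvSyn, kSynReport, 0);
    }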
+
+// --- VirtualKeyboard ---
+const std::map<int, UinputAction> VirtualKeyboard::KEY_ACTION_MAPPING = {
+        {AKEY_EVENT_ACTION_DOWN, UinputAction::PRESS},
+        {AKEY_EVENT_ACTION_UP, UinputAction::RELEASE},
+};
+// Keycode mapping from https://source.android.com/devices/input/keyboard-devices
+const std::map<int, int> VirtualKeyboard::KEY_CODE_MAPPING = {
+        {AKEYCODE_0, KEY_0},
+        {AKEYCODE_1, KEY_1},
+        {AKEYCODE_2, KEY_2},
+        {AKEYCODE_3, KEY_3},
+        {AKEYCODE_4, KEY_4},
+        {AKEYCODE_5, KEY_5},
+        {AKEYCODE_6, KEY_6},
+        {AKEYCODE_7, KEY_7},
+        {AKEYCODE_8, KEY_8},
+        {AKEYCODE_9, KEY_9},
+        {AKEYCODE_A, KEY_A},
+        {AKEYCODE_B, KEY_B},
+        {AKEYCODE_C, KEY_C},
+        {AKEYCODE_D, KEY_D},
+        {AKEYCODE_E, KEY_E},
+        {AKEYCODE_F, KEY_F},
+        {AKEYCODE_G, KEY_G},
+        {AKEYCODE_H, KEY_H},
+        {AKEYCODE_I, KEY_I},
+        {AKEYCODE_J, KEY_J},
+        {AKEYCODE_K, KEY_K},
+        {AKEYCODE_L, KEY_L},
+        {AKEYCODE_M, KEY_M},
+        {AKEYCODE_N, KEY_N},
+        {AKEYCODE_O, KEY_O},
+        {AKEYCODE_P, KEY_P},
+        {AKEYCODE_Q, KEY_Q},
+        {AKEYCODE_R, KEY_R},
+        {AKEYCODE_S, KEY_S},
+        {AKEYCODE_T, KEY_T},
+        {AKEYCODE_U, KEY_U},
+        {AKEYCODE_V, KEY_V},
+        {AKEYCODE_W, KEY_W},
+        {AKEYCODE_X, KEY_X},
+        {AKEYCODE_Y, KEY_Y},
+        {AKEYCODE_Z, KEY_Z},
+        {AKEYCODE_GRAVE, KEY_GRAVE},
+        {AKEYCODE_MINUS, KEY_MINUS},
+        {AKEYCODE_EQUALS, KEY_EQUAL},
+        {AKEYCODE_LEFT_BRACKET, KEY_LEFTBRACE},
+        {AKEYCODE_RIGHT_BRACKET, KEY_RIGHTBRACE},
+        {AKEYCODE_BACKSLASH, KEY_BACKSLASH},
+        {AKEYCODE_SEMICOLON, KEY_SEMICOLON},
+        {AKEYCODE_APOSTROPHE, KEY_APOSTROPHE},
+        {AKEYCODE_COMMA, KEY_COMMA},
+        {AKEYCODE_PERIOD, KEY_DOT},
+        {AKEYCODE_SLASH, KEY_SLASH},
+        {AKEYCODE_ALT_LEFT, KEY_LEFTALT},
+        {AKEYCODE_ALT_RIGHT, KEY_RIGHTALT},
+        {AKEYCODE_CTRL_LEFT, KEY_LEFTCTRL},
+        {AKEYCODE_CTRL_RIGHT, KEY_RIGHTCTRL},
+        {AKEYCODE_SHIFT_LEFT, KEY_LEFTSHIFT},
+        {AKEYCODE_SHIFT_RIGHT, KEY_RIGHTSHIFT},
+        {AKEYCODE_META_LEFT, KEY_LEFTMETA},
+        {AKEYCODE_META_RIGHT, KEY_RIGHTMETA},
+        {AKEYCODE_CAPS_LOCK, KEY_CAPSLOCK},
+        {AKEYCODE_SCROLL_LOCK, KEY_SCROLLLOCK},
+        {AKEYCODE_NUM_LOCK, KEY_NUMLOCK},
+        {AKEYCODE_ENTER, KEY_ENTER},
+        {AKEYCODE_TAB, KEY_TAB},
+        {AKEYCODE_SPACE, KEY_SPACE},
+        {AKEYCODE_DPAD_DOWN, KEY_DOWN},
+        {AKEYCODE_DPAD_UP, KEY_UP},
+        {AKEYCODE_DPAD_LEFT, KEY_LEFT},
+        {AKEYCODE_DPAD_RIGHT, KEY_RIGHT},
+        {AKEYCODE_MOVE_END, KEY_END},
+        {AKEYCODE_MOVE_HOME, KEY_HOME},
+        {AKEYCODE_PAGE_DOWN, KEY_PAGEDOWN},
+        {AKEYCODE_PAGE_UP, KEY_PAGEUP},
+        {AKEYCODE_DEL, KEY_BACKSPACE},
+        {AKEYCODE_FORWARD_DEL, KEY_DELETE},
+        {AKEYCODE_INSERT, KEY_INSERT},
+        {AKEYCODE_ESCAPE, KEY_ESC},
+        {AKEYCODE_BREAK, KEY_PAUSE},
+        {AKEYCODE_F1, KEY_F1},
+        {AKEYCODE_F2, KEY_F2},
+        {AKEYCODE_F3, KEY_F3},
+        {AKEYCODE_F4, KEY_F4},
+        {AKEYCODE_F5, KEY_F5},
+        {AKEYCODE_F6, KEY_F6},
+        {AKEYCODE_F7, KEY_F7},
+        {AKEYCODE_F8, KEY_F8},
+        {AKEYCODE_F9, KEY_F9},
+        {AKEYCODE_F10, KEY_F10},
+        {AKEYCODE_F11, KEY_F11},
+        {AKEYCODE_F12, KEY_F12},
+        {AKEYCODE_BACK, KEY_BACK},
+        {AKEYCODE_FORWARD, KEY_FORWARD},
+        {AKEYCODE_NUMPAD_1, KEY_KP1},
+        {AKEYCODE_NUMPAD_2, KEY_KP2},
+        {AKEYCODE_NUMPAD_3, KEY_KP3},
+        {AKEYCODE_NUMPAD_4, KEY_KP4},
+        {AKEYCODE_NUMPAD_5, KEY_KP5},
+        {AKEYCODE_NUMPAD_6, KEY_KP6},
+        {AKEYCODE_NUMPAD_7, KEY_KP7},
+        {AKEYCODE_NUMPAD_8, KEY_KP8},
+        {AKEYCODE_NUMPAD_9, KEY_KP9},
+        {AKEYCODE_NUMPAD_0, KEY_KP0},
+        {AKEYCODE_NUMPAD_ADD, KEY_KPPLUS},
+        {AKEYCODE_NUMPAD_SUBTRACT, KEY_KPMINUS},
+        {AKEYCODE_NUMPAD_MULTIPLY, KEY_KPASTERISK},
+        {AKEYCODE_NUMPAD_DIVIDE, KEY_KPSLASH},
+        {AKEYCODE_NUMPAD_DOT, KEY_KPDOT},
+        {AKEYCODE_NUMPAD_ENTER, KEY_KPENTER},
+        {AKEYCODE_NUMPAD_EQUALS, KEY_KPEQUAL},
+        {AKEYCODE_NUMPAD_COMMA, KEY_KPCOMMA},
+};
+VirtualKeyboard::VirtualKeyboard(unique_fd fd) : VirtualInputDevice(std::move(fd)) {}
+VirtualKeyboard::~VirtualKeyboard() {}
+
+bool VirtualKeyboard::writeKeyEvent(int32_t androidKeyCode, int32_t androidAction,
+                                    std::chrono::nanoseconds eventTime) {
+    return writeEvKeyEvent(androidKeyCode, androidAction, KEY_CODE_MAPPING, KEY_ACTION_MAPPING,
+                           eventTime);
+}
+
+// --- VirtualDpad ---
+// Dpad keycode mapping from https://source.android.com/devices/input/keyboard-devices
+const std::map<int, int> VirtualDpad::DPAD_KEY_CODE_MAPPING = {
+        // clang-format off
+        {AKEYCODE_DPAD_DOWN, KEY_DOWN},
+        {AKEYCODE_DPAD_UP, KEY_UP},
+        {AKEYCODE_DPAD_LEFT, KEY_LEFT},
+        {AKEYCODE_DPAD_RIGHT, KEY_RIGHT},
+        {AKEYCODE_DPAD_CENTER, KEY_SELECT},
+        {AKEYCODE_BACK, KEY_BACK},
+        // clang-format on
+};
+
+VirtualDpad::VirtualDpad(unique_fd fd) : VirtualInputDevice(std::move(fd)) {}
+
+VirtualDpad::~VirtualDpad() {}
+
+bool VirtualDpad::writeDpadKeyEvent(int32_t androidKeyCode, int32_t androidAction,
+                                    std::chrono::nanoseconds eventTime) {
+    return writeEvKeyEvent(androidKeyCode, androidAction, DPAD_KEY_CODE_MAPPING,
+                           VirtualKeyboard::KEY_ACTION_MAPPING, eventTime);
+}
+
+// --- VirtualMouse ---
+const std::map<int, UinputAction> VirtualMouse::BUTTON_ACTION_MAPPING = {
+        {AMOTION_EVENT_ACTION_BUTTON_PRESS, UinputAction::PRESS},
+        {AMOTION_EVENT_ACTION_BUTTON_RELEASE, UinputAction::RELEASE},
+};
+
+// Button code mapping from https://source.android.com/devices/input/touch-devices
+const std::map<int, int> VirtualMouse::BUTTON_CODE_MAPPING = {
+        // clang-format off
+        {AMOTION_EVENT_BUTTON_PRIMARY, BTN_LEFT},
+        {AMOTION_EVENT_BUTTON_SECONDARY, BTN_RIGHT},
+        {AMOTION_EVENT_BUTTON_TERTIARY, BTN_MIDDLE},
+        {AMOTION_EVENT_BUTTON_BACK, BTN_BACK},
+        {AMOTION_EVENT_BUTTON_FORWARD, BTN_FORWARD},
+        // clang-format on
+};
+
+VirtualMouse::VirtualMouse(unique_fd fd) : VirtualInputDevice(std::move(fd)) {}
+
+VirtualMouse::~VirtualMouse() {}
+
+bool VirtualMouse::writeButtonEvent(int32_t androidButtonCode, int32_t androidAction,
+                                    std::chrono::nanoseconds eventTime) {
+    return writeEvKeyEvent(androidButtonCode, androidAction, BUTTON_CODE_MAPPING,
+                           BUTTON_ACTION_MAPPING, eventTime);
+}
+
+bool VirtualMouse::writeRelativeEvent(float relativeX, float relativeY,
+                                      std::chrono::nanoseconds eventTime) {
+    return writeInputEvent(EV_REL, REL_X, relativeX, eventTime) &&
+            writeInputEvent(EV_REL, REL_Y, relativeY, eventTime) &&
+            writeInputEvent(EV_SYN, SYN_REPORT, 0, eventTime);
+}
+
+bool VirtualMouse::writeScrollEvent(float xAxisMovement, float yAxisMovement,
+                                    std::chrono::nanoseconds eventTime) {
+    return writeInputEvent(EV_REL, REL_HWHEEL, xAxisMovement, eventTime) &&
+            writeInputEvent(EV_REL, REL_WHEEL, yAxisMovement, eventTime) &&
+            writeInputEvent(EV_SYN, SYN_REPORT, 0, eventTime);
+}
+
+// --- VirtualTouchscreen ---
+const std::map<int, UinputAction> VirtualTouchscreen::TOUCH_ACTION_MAPPING = {
+        {AMOTION_EVENT_ACTION_DOWN, UinputAction::PRESS},
+        {AMOTION_EVENT_ACTION_UP, UinputAction::RELEASE},
+        {AMOTION_EVENT_ACTION_MOVE, UinputAction::MOVE},
+        {AMOTION_EVENT_ACTION_CANCEL, UinputAction::CANCEL},
+};
+// Tool type mapping from https://source.android.com/devices/input/touch-devices
+const std::map<int, int> VirtualTouchscreen::TOOL_TYPE_MAPPING = {
+        {AMOTION_EVENT_TOOL_TYPE_FINGER, MT_TOOL_FINGER},
+        {AMOTION_EVENT_TOOL_TYPE_PALM, MT_TOOL_PALM},
+};
+
+VirtualTouchscreen::VirtualTouchscreen(unique_fd fd) : VirtualInputDevice(std::move(fd)) {}
+
+VirtualTouchscreen::~VirtualTouchscreen() {}
+
+bool VirtualTouchscreen::isValidPointerId(int32_t pointerId, UinputAction uinputAction) {
+    if (pointerId < -1 || pointerId >= (int)MAX_POINTERS) {
+        ALOGE("Virtual touch event has invalid pointer id %d; value must be between -1 and %zu",
+              pointerId, MAX_POINTERS - 1);
+        return false;
+    }
+
+    if (uinputAction == UinputAction::PRESS && mActivePointers.test(pointerId)) {
+        ALOGE("Repetitive action DOWN event received on a pointer %d that is already down.",
+              pointerId);
+        return false;
+    }
+    if (uinputAction == UinputAction::RELEASE && !mActivePointers.test(pointerId)) {
+        ALOGE("PointerId %d action UP received with no prior action DOWN on touchscreen %d.",
+              pointerId, mFd.get());
+        return false;
+    }
+    return true;
+}
+
+bool VirtualTouchscreen::writeTouchEvent(int32_t pointerId, int32_t toolType, int32_t action,
+                                         float locationX, float locationY, float pressure,
+                                         float majorAxisSize, std::chrono::nanoseconds eventTime) {
+    auto actionIterator = TOUCH_ACTION_MAPPING.find(action);
+    if (actionIterator == TOUCH_ACTION_MAPPING.end()) {
+        return false;
+    }
+    UinputAction uinputAction = actionIterator->second;
+    if (!isValidPointerId(pointerId, uinputAction)) {
+        return false;
+    }
+    if (!writeInputEvent(EV_ABS, ABS_MT_SLOT, pointerId, eventTime)) {
+        return false;
+    }
+    auto toolTypeIterator = TOOL_TYPE_MAPPING.find(toolType);
+    if (toolTypeIterator == TOOL_TYPE_MAPPING.end()) {
+        return false;
+    }
+    if (!writeInputEvent(EV_ABS, ABS_MT_TOOL_TYPE, static_cast<int32_t>(toolTypeIterator->second),
+                         eventTime)) {
+        return false;
+    }
+    if (uinputAction == UinputAction::PRESS && !handleTouchDown(pointerId, eventTime)) {
+        return false;
+    }
+    if (uinputAction == UinputAction::RELEASE && !handleTouchUp(pointerId, eventTime)) {
+        return false;
+    }
+    if (!writeInputEvent(EV_ABS, ABS_MT_POSITION_X, locationX, eventTime)) {
+        return false;
+    }
+    if (!writeInputEvent(EV_ABS, ABS_MT_POSITION_Y, locationY, eventTime)) {
+        return false;
+    }
+    if (!isnan(pressure)) {
+        if (!writeInputEvent(EV_ABS, ABS_MT_PRESSURE, pressure, eventTime)) {
+            return false;
+        }
+    }
+    if (!isnan(majorAxisSize)) {
+        if (!writeInputEvent(EV_ABS, ABS_MT_TOUCH_MAJOR, majorAxisSize, eventTime)) {
+            return false;
+        }
+    }
+    return writeInputEvent(EV_SYN, SYN_REPORT, 0, eventTime);
+}
+
+bool VirtualTouchscreen::handleTouchUp(int32_t pointerId, std::chrono::nanoseconds eventTime) {
+    if (!writeInputEvent(EV_ABS, ABS_MT_TRACKING_ID, static_cast<int32_t>(-1), eventTime)) {
+        return false;
+    }
+    // When a pointer is no longer touching the screen, clear its bit in the set of
+    // active pointers.
+    mActivePointers.reset(pointerId);
+    ALOGD_IF(isDebug(), "Pointer %d erased from the touchscreen %d", pointerId, mFd.get());
+
+    // Only send the BTN_TOUCH release event when there are no pointers left on the touchscreen.
+    if (mActivePointers.none()) {
+        if (!writeInputEvent(EV_KEY, BTN_TOUCH, static_cast<int32_t>(UinputAction::RELEASE),
+                             eventTime)) {
+            return false;
+        }
+        ALOGD_IF(isDebug(), "No pointers on touchscreen %d, BTN UP event sent.", mFd.get());
+    }
+    return true;
+}
+
+bool VirtualTouchscreen::handleTouchDown(int32_t pointerId, std::chrono::nanoseconds eventTime) {
+    // When a new pointer goes down on the touchscreen, mark its bit in the set of
+    // active pointers.
+    if (mActivePointers.none()) {
+        // Only send the BTN_TOUCH press event when the first pointer goes down on the touchscreen.
+        if (!writeInputEvent(EV_KEY, BTN_TOUCH, static_cast<int32_t>(UinputAction::PRESS),
+                             eventTime)) {
+            return false;
+        }
+        ALOGD_IF(isDebug(), "First pointer %d down under touchscreen %d, BTN DOWN event sent",
+                 pointerId, mFd.get());
+    }
+
+    mActivePointers.set(pointerId);
+    ALOGD_IF(isDebug(), "Added pointer %d under touchscreen %d in the map", pointerId, mFd.get());
+    if (!writeInputEvent(EV_ABS, ABS_MT_TRACKING_ID, static_cast<int32_t>(pointerId), eventTime)) {
+        return false;
+    }
+    return true;
+}
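A minimal sketch of the BTN_TOUCH bookkeeping implemented by handleTouchDown/handleTouchUp above: the button press is emitted only when the first pointer lands and the release only when the last pointer lifts, tracked with a bitset of active pointer ids. This is an illustrative class, not the production one:

    #include <bitset>
    #include <cstdio>

    class TouchButtonState {
    public:
        // Returns true when this down transition should also emit a BTN_TOUCH press.
        bool pointerDown(int pointerId) {
            const bool firstPointer = mActivePointers.none();
            mActivePointers.set(pointerId);
            return firstPointer;
        }

        // Returns true when this up transition should also emit a BTN_TOUCH release.
        bool pointerUp(int pointerId) {
            mActivePointers.reset(pointerId);
            return mActivePointers.none();
        }

    private:
        std::bitset<16> mActivePointers; // sized to MAX_POINTERS in the real code
    };

    int main() {
        TouchButtonState state;
        std::printf("%d\n", state.pointerDown(0)); // 1: first finger -> BTN_TOUCH press
        std::printf("%d\n", state.pointerDown(1)); // 0: second finger, no extra press
        std::printf("%d\n", state.pointerUp(0));   // 0: one finger still down
        std::printf("%d\n", state.pointerUp(1));   // 1: last finger -> BTN_TOUCH release
        return 0;
    }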
+
+} // namespace android
diff --git a/libs/input/android/os/IInputConstants.aidl b/libs/input/android/os/IInputConstants.aidl
index 5ce10a4..dab843b 100644
--- a/libs/input/android/os/IInputConstants.aidl
+++ b/libs/input/android/os/IInputConstants.aidl
@@ -35,6 +35,14 @@
     const int INVALID_INPUT_EVENT_ID = 0;
 
     /**
+     * Every input device has an id. This constant value is used when a valid input device id is not
+     * available.
+     * The virtual keyboard uses -1 as the input device id. Therefore, we use -2 as the value for
+     * an invalid input device.
+     */
+    const int INVALID_INPUT_DEVICE_ID = -2;
+
+    /**
      * The input event was injected from accessibility. Used in policyFlags for input event
      * injection.
      */
diff --git a/libs/input/android/os/InputConfig.aidl b/libs/input/android/os/InputConfig.aidl
index 6d1b396..4e644ff 100644
--- a/libs/input/android/os/InputConfig.aidl
+++ b/libs/input/android/os/InputConfig.aidl
@@ -144,4 +144,10 @@
      * It is not valid to set this configuration if {@link #TRUSTED_OVERLAY} is not set.
      */
     INTERCEPTS_STYLUS            = 1 << 15,
+
+    /**
+     * The window is a clone of another window. This may be treated differently since there's
+     * likely a duplicate window with the same client token, but different bounds.
+     */
+    CLONE                        = 1 << 16,
 }
diff --git a/libs/input/android/os/InputEventInjectionResult.aidl b/libs/input/android/os/InputEventInjectionResult.aidl
index 3bc7068..e80c2a5 100644
--- a/libs/input/android/os/InputEventInjectionResult.aidl
+++ b/libs/input/android/os/InputEventInjectionResult.aidl
@@ -37,4 +37,7 @@
 
     /* Injection failed due to a timeout. */
     TIMED_OUT = 3,
+
+    ftl_first = PENDING,
+    ftl_last = TIMED_OUT,
 }
diff --git a/libs/input/android/os/InputEventInjectionSync.aidl b/libs/input/android/os/InputEventInjectionSync.aidl
index 95d24cb..2d225fa 100644
--- a/libs/input/android/os/InputEventInjectionSync.aidl
+++ b/libs/input/android/os/InputEventInjectionSync.aidl
@@ -33,4 +33,7 @@
 
     /* Waits for the input event to be completely processed. */
     WAIT_FOR_FINISHED = 2,
+
+    ftl_first = NONE,
+    ftl_last = WAIT_FOR_FINISHED,
 }
diff --git a/libs/input/tests/Android.bp b/libs/input/tests/Android.bp
index d947cd9..42bdf57 100644
--- a/libs/input/tests/Android.bp
+++ b/libs/input/tests/Android.bp
@@ -10,35 +10,62 @@
 
 cc_test {
     name: "libinput_tests",
+    cpp_std: "c++20",
+    host_supported: true,
     srcs: [
         "IdGenerator_test.cpp",
         "InputChannel_test.cpp",
         "InputDevice_test.cpp",
         "InputEvent_test.cpp",
         "InputPublisherAndConsumer_test.cpp",
+        "MotionPredictor_test.cpp",
+        "RingBuffer_test.cpp",
+        "TfLiteMotionPredictor_test.cpp",
+        "TouchResampling_test.cpp",
         "TouchVideoFrame_test.cpp",
         "VelocityTracker_test.cpp",
         "VerifiedInputEvent_test.cpp",
     ],
+    header_libs: [
+        "flatbuffer_headers",
+        "tensorflow_headers",
+    ],
     static_libs: [
+        "libgmock",
         "libgui_window_info_static",
         "libinput",
+        "libtflite_static",
+        "libui-types",
     ],
     cflags: [
         "-Wall",
         "-Wextra",
         "-Werror",
+        "-Wno-unused-parameter",
     ],
+    sanitize: {
+        undefined: true,
+        all_undefined: true,
+        diag: {
+            undefined: true,
+        },
+    },
     shared_libs: [
         "libbase",
         "libbinder",
         "libcutils",
         "liblog",
-        "libui",
+        "libPlatformProperties",
         "libutils",
         "libvintf",
     ],
-    data: ["data/*"],
+    data: [
+        "data/*",
+        ":motion_predictor_model.fb",
+    ],
+    test_options: {
+        unit_test: true,
+    },
     test_suites: ["device-tests"],
 }
 
@@ -59,7 +86,6 @@
         "libcutils",
         "libutils",
         "libbinder",
-        "libui",
         "libbase",
     ],
 }
diff --git a/libs/input/tests/InputDevice_test.cpp b/libs/input/tests/InputDevice_test.cpp
index e872fa4..ee961f0 100644
--- a/libs/input/tests/InputDevice_test.cpp
+++ b/libs/input/tests/InputDevice_test.cpp
@@ -65,6 +65,9 @@
     }
 
     void SetUp() override {
+#if !defined(__ANDROID__)
+        GTEST_SKIP() << "b/253299089 Generic files are currently read directly from device.";
+#endif
         loadKeyLayout("Generic");
         loadKeyCharacterMap("Generic");
     }
@@ -130,7 +133,24 @@
     ASSERT_EQ(*mKeyMap.keyCharacterMap, *frenchOverlaidKeyCharacterMap);
 }
 
+TEST_F(InputDeviceKeyMapTest, keyCharacterMapBadAxisLabel) {
+    std::string klPath = base::GetExecutableDirectory() + "/data/bad_axis_label.kl";
+
+    base::Result<std::shared_ptr<KeyLayoutMap>> ret = KeyLayoutMap::load(klPath);
+    ASSERT_FALSE(ret.ok()) << "Should not be able to load KeyLayout at " << klPath;
+}
+
+TEST_F(InputDeviceKeyMapTest, keyCharacterMapBadLedLabel) {
+    std::string klPath = base::GetExecutableDirectory() + "/data/bad_led_label.kl";
+
+    base::Result<std::shared_ptr<KeyLayoutMap>> ret = KeyLayoutMap::load(klPath);
+    ASSERT_FALSE(ret.ok()) << "Should not be able to load KeyLayout at " << klPath;
+}
+
 TEST(InputDeviceKeyLayoutTest, DoesNotLoadWhenRequiredKernelConfigIsMissing) {
+#if !defined(__ANDROID__)
+    GTEST_SKIP() << "Can't check kernel configs on host";
+#endif
     std::string klPath = base::GetExecutableDirectory() + "/data/kl_with_required_fake_config.kl";
     base::Result<std::shared_ptr<KeyLayoutMap>> ret = KeyLayoutMap::load(klPath);
     ASSERT_FALSE(ret.ok()) << "Should not be able to load KeyLayout at " << klPath;
@@ -139,6 +159,9 @@
 }
 
 TEST(InputDeviceKeyLayoutTest, LoadsWhenRequiredKernelConfigIsPresent) {
+#if !defined(__ANDROID__)
+    GTEST_SKIP() << "Can't check kernel configs on host";
+#endif
     std::string klPath = base::GetExecutableDirectory() + "/data/kl_with_required_real_config.kl";
     base::Result<std::shared_ptr<KeyLayoutMap>> ret = KeyLayoutMap::load(klPath);
     ASSERT_TRUE(ret.ok()) << "Cannot load KeyLayout at " << klPath;
diff --git a/libs/input/tests/InputEvent_test.cpp b/libs/input/tests/InputEvent_test.cpp
index 597b389..a965573 100644
--- a/libs/input/tests/InputEvent_test.cpp
+++ b/libs/input/tests/InputEvent_test.cpp
@@ -48,6 +48,7 @@
     coords.clear();
 
     ASSERT_EQ(0ULL, coords.bits);
+    ASSERT_FALSE(coords.isResampled);
 }
 
 TEST_F(PointerCoordsTest, AxisValues) {
@@ -160,11 +161,13 @@
     outCoords.readFromParcel(&parcel);
 
     ASSERT_EQ(0ULL, outCoords.bits);
+    ASSERT_FALSE(outCoords.isResampled);
 
     // Round trip with some values.
     parcel.freeData();
     inCoords.setAxisValue(2, 5);
     inCoords.setAxisValue(5, 8);
+    inCoords.isResampled = true;
 
     inCoords.writeToParcel(&parcel);
     parcel.setDataPosition(0);
@@ -173,6 +176,7 @@
     ASSERT_EQ(outCoords.bits, inCoords.bits);
     ASSERT_EQ(outCoords.values[0], inCoords.values[0]);
     ASSERT_EQ(outCoords.values[1], inCoords.values[1]);
+    ASSERT_TRUE(outCoords.isResampled);
 }
 
 
@@ -193,7 +197,7 @@
                      ARBITRARY_DOWN_TIME, ARBITRARY_EVENT_TIME);
 
     ASSERT_EQ(id, event.getId());
-    ASSERT_EQ(AINPUT_EVENT_TYPE_KEY, event.getType());
+    ASSERT_EQ(InputEventType::KEY, event.getType());
     ASSERT_EQ(2, event.getDeviceId());
     ASSERT_EQ(AINPUT_SOURCE_GAMEPAD, event.getSource());
     ASSERT_EQ(DISPLAY_ID, event.getDisplayId());
@@ -255,10 +259,10 @@
 
     mPointerProperties[0].clear();
     mPointerProperties[0].id = 1;
-    mPointerProperties[0].toolType = AMOTION_EVENT_TOOL_TYPE_FINGER;
+    mPointerProperties[0].toolType = ToolType::FINGER;
     mPointerProperties[1].clear();
     mPointerProperties[1].id = 2;
-    mPointerProperties[1].toolType = AMOTION_EVENT_TOOL_TYPE_STYLUS;
+    mPointerProperties[1].toolType = ToolType::STYLUS;
 
     mSamples[0].pointerCoords[0].clear();
     mSamples[0].pointerCoords[0].setAxisValue(AMOTION_EVENT_AXIS_X, 10);
@@ -270,6 +274,7 @@
     mSamples[0].pointerCoords[0].setAxisValue(AMOTION_EVENT_AXIS_TOOL_MAJOR, 16);
     mSamples[0].pointerCoords[0].setAxisValue(AMOTION_EVENT_AXIS_TOOL_MINOR, 17);
     mSamples[0].pointerCoords[0].setAxisValue(AMOTION_EVENT_AXIS_ORIENTATION, 18);
+    mSamples[0].pointerCoords[0].isResampled = true;
     mSamples[0].pointerCoords[1].clear();
     mSamples[0].pointerCoords[1].setAxisValue(AMOTION_EVENT_AXIS_X, 20);
     mSamples[0].pointerCoords[1].setAxisValue(AMOTION_EVENT_AXIS_Y, 21);
@@ -291,6 +296,7 @@
     mSamples[1].pointerCoords[0].setAxisValue(AMOTION_EVENT_AXIS_TOOL_MAJOR, 116);
     mSamples[1].pointerCoords[0].setAxisValue(AMOTION_EVENT_AXIS_TOOL_MINOR, 117);
     mSamples[1].pointerCoords[0].setAxisValue(AMOTION_EVENT_AXIS_ORIENTATION, 118);
+    mSamples[1].pointerCoords[0].isResampled = true;
     mSamples[1].pointerCoords[1].clear();
     mSamples[1].pointerCoords[1].setAxisValue(AMOTION_EVENT_AXIS_X, 120);
     mSamples[1].pointerCoords[1].setAxisValue(AMOTION_EVENT_AXIS_Y, 121);
@@ -301,6 +307,7 @@
     mSamples[1].pointerCoords[1].setAxisValue(AMOTION_EVENT_AXIS_TOOL_MAJOR, 126);
     mSamples[1].pointerCoords[1].setAxisValue(AMOTION_EVENT_AXIS_TOOL_MINOR, 127);
     mSamples[1].pointerCoords[1].setAxisValue(AMOTION_EVENT_AXIS_ORIENTATION, 128);
+    mSamples[1].pointerCoords[1].isResampled = true;
 
     mSamples[2].pointerCoords[0].clear();
     mSamples[2].pointerCoords[0].setAxisValue(AMOTION_EVENT_AXIS_X, 210);
@@ -339,7 +346,7 @@
 void MotionEventTest::assertEqualsEventWithHistory(const MotionEvent* event) {
     // Check properties.
     ASSERT_EQ(mId, event->getId());
-    ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, event->getType());
+    ASSERT_EQ(InputEventType::MOTION, event->getType());
     ASSERT_EQ(2, event->getDeviceId());
     ASSERT_EQ(AINPUT_SOURCE_TOUCHSCREEN, event->getSource());
     ASSERT_EQ(DISPLAY_ID, event->getDisplayId());
@@ -359,9 +366,9 @@
 
     ASSERT_EQ(2U, event->getPointerCount());
     ASSERT_EQ(1, event->getPointerId(0));
-    ASSERT_EQ(AMOTION_EVENT_TOOL_TYPE_FINGER, event->getToolType(0));
+    ASSERT_EQ(ToolType::FINGER, event->getToolType(0));
     ASSERT_EQ(2, event->getPointerId(1));
-    ASSERT_EQ(AMOTION_EVENT_TOOL_TYPE_STYLUS, event->getToolType(1));
+    ASSERT_EQ(ToolType::STYLUS, event->getToolType(1));
 
     ASSERT_EQ(2U, event->getHistorySize());
 
@@ -485,6 +492,13 @@
     ASSERT_EQ(toScaledOrientation(128), event->getHistoricalOrientation(1, 1));
     ASSERT_EQ(toScaledOrientation(218), event->getOrientation(0));
     ASSERT_EQ(toScaledOrientation(228), event->getOrientation(1));
+
+    ASSERT_TRUE(event->isResampled(0, 0));
+    ASSERT_FALSE(event->isResampled(1, 0));
+    ASSERT_TRUE(event->isResampled(0, 1));
+    ASSERT_TRUE(event->isResampled(1, 1));
+    ASSERT_FALSE(event->isResampled(0, 2));
+    ASSERT_FALSE(event->isResampled(1, 2));
 }
 
 TEST_F(MotionEventTest, Properties) {
@@ -517,7 +531,7 @@
     initializeEventWithHistory(&event);
 
     MotionEvent copy;
-    copy.copyFrom(&event, true /*keepHistory*/);
+    copy.copyFrom(&event, /*keepHistory=*/true);
 
     ASSERT_NO_FATAL_FAILURE(assertEqualsEventWithHistory(&event));
 }
@@ -527,7 +541,7 @@
     initializeEventWithHistory(&event);
 
     MotionEvent copy;
-    copy.copyFrom(&event, false /*keepHistory*/);
+    copy.copyFrom(&event, /*keepHistory=*/false);
 
     ASSERT_EQ(event.getPointerCount(), copy.getPointerCount());
     ASSERT_EQ(0U, copy.getHistorySize());
@@ -628,12 +642,12 @@
     }
     MotionEvent event;
     ui::Transform identityTransform;
-    event.initialize(InputEvent::nextId(), 0 /*deviceId*/, AINPUT_SOURCE_TOUCHSCREEN, DISPLAY_ID,
-                     INVALID_HMAC, AMOTION_EVENT_ACTION_MOVE, 0 /*actionButton*/, 0 /*flags*/,
-                     AMOTION_EVENT_EDGE_FLAG_NONE, AMETA_NONE, 0 /*buttonState*/,
-                     MotionClassification::NONE, identityTransform, 0 /*xPrecision*/,
-                     0 /*yPrecision*/, 3 + RADIUS /*xCursorPosition*/, 2 /*yCursorPosition*/,
-                     identityTransform, 0 /*downTime*/, 0 /*eventTime*/, pointerCount,
+    event.initialize(InputEvent::nextId(), /*deviceId=*/0, AINPUT_SOURCE_TOUCHSCREEN, DISPLAY_ID,
+                     INVALID_HMAC, AMOTION_EVENT_ACTION_MOVE, /*actionButton=*/0, /*flags=*/0,
+                     AMOTION_EVENT_EDGE_FLAG_NONE, AMETA_NONE, /*buttonState=*/0,
+                     MotionClassification::NONE, identityTransform, /*xPrecision=*/0,
+                     /*yPrecision=*/0, /*xCursorPosition=*/3 + RADIUS, /*yCursorPosition=*/2,
+                     identityTransform, /*downTime=*/0, /*eventTime=*/0, pointerCount,
                      pointerProperties, pointerCoords);
     float originalRawX = 0 + 3;
     float originalRawY = -RADIUS + 2;
@@ -678,7 +692,7 @@
 MotionEvent createMotionEvent(int32_t source, uint32_t action, float x, float y, float dx, float dy,
                               const ui::Transform& transform, const ui::Transform& rawTransform) {
     std::vector<PointerProperties> pointerProperties;
-    pointerProperties.push_back(PointerProperties{/* id */ 0, AMOTION_EVENT_TOOL_TYPE_FINGER});
+    pointerProperties.push_back(PointerProperties{/*id=*/0, ToolType::FINGER});
     std::vector<PointerCoords> pointerCoords;
     pointerCoords.emplace_back().clear();
     pointerCoords.back().setAxisValue(AMOTION_EVENT_AXIS_X, x);
@@ -834,12 +848,12 @@
 
     ui::Transform identityTransform;
     for (MotionClassification classification : classifications) {
-        event.initialize(InputEvent::nextId(), 0 /*deviceId*/, AINPUT_SOURCE_TOUCHSCREEN,
+        event.initialize(InputEvent::nextId(), /*deviceId=*/0, AINPUT_SOURCE_TOUCHSCREEN,
                          DISPLAY_ID, INVALID_HMAC, AMOTION_EVENT_ACTION_DOWN, 0, 0,
                          AMOTION_EVENT_EDGE_FLAG_NONE, AMETA_NONE, 0, classification,
                          identityTransform, 0, 0, AMOTION_EVENT_INVALID_CURSOR_POSITION,
-                         AMOTION_EVENT_INVALID_CURSOR_POSITION, identityTransform, 0 /*downTime*/,
-                         0 /*eventTime*/, pointerCount, pointerProperties, pointerCoords);
+                         AMOTION_EVENT_INVALID_CURSOR_POSITION, identityTransform, /*downTime=*/0,
+                         /*eventTime=*/0, pointerCount, pointerProperties, pointerCoords);
         ASSERT_EQ(classification, event.getClassification());
     }
 }
@@ -856,11 +870,11 @@
     }
 
     ui::Transform identityTransform;
-    event.initialize(InputEvent::nextId(), 0 /*deviceId*/, AINPUT_SOURCE_MOUSE, DISPLAY_ID,
+    event.initialize(InputEvent::nextId(), /*deviceId=*/0, AINPUT_SOURCE_MOUSE, DISPLAY_ID,
                      INVALID_HMAC, AMOTION_EVENT_ACTION_DOWN, 0, 0, AMOTION_EVENT_EDGE_FLAG_NONE,
                      AMETA_NONE, 0, MotionClassification::NONE, identityTransform, 0, 0,
-                     280 /*xCursorPosition*/, 540 /*yCursorPosition*/, identityTransform,
-                     0 /*downTime*/, 0 /*eventTime*/, pointerCount, pointerProperties,
+                     /*xCursorPosition=*/280, /*yCursorPosition=*/540, identityTransform,
+                     /*downTime=*/0, /*eventTime=*/0, pointerCount, pointerProperties,
                      pointerCoords);
     event.offsetLocation(20, 60);
     ASSERT_EQ(280, event.getRawXCursorPosition());
diff --git a/libs/input/tests/InputPublisherAndConsumer_test.cpp b/libs/input/tests/InputPublisherAndConsumer_test.cpp
index 8393e99..3ecf8ee 100644
--- a/libs/input/tests/InputPublisherAndConsumer_test.cpp
+++ b/libs/input/tests/InputPublisherAndConsumer_test.cpp
@@ -16,17 +16,10 @@
 
 #include "TestHelpers.h"
 
-#include <unistd.h>
-#include <sys/mman.h>
-#include <time.h>
-
 #include <attestation/HmacKeyManager.h>
-#include <cutils/ashmem.h>
 #include <gtest/gtest.h>
 #include <gui/constants.h>
 #include <input/InputTransport.h>
-#include <utils/StopWatch.h>
-#include <utils/Timers.h>
 
 using android::base::Result;
 
@@ -99,14 +92,13 @@
 
     uint32_t consumeSeq;
     InputEvent* event;
-    status = mConsumer->consume(&mEventFactory, true /*consumeBatches*/, -1, &consumeSeq, &event);
+    status = mConsumer->consume(&mEventFactory, /*consumeBatches=*/true, -1, &consumeSeq, &event);
     ASSERT_EQ(OK, status)
             << "consumer consume should return OK";
 
     ASSERT_TRUE(event != nullptr)
             << "consumer should have returned non-NULL event";
-    ASSERT_EQ(AINPUT_EVENT_TYPE_KEY, event->getType())
-            << "consumer should have returned a key event";
+    ASSERT_EQ(InputEventType::KEY, event->getType()) << "consumer should have returned a key event";
 
     KeyEvent* keyEvent = static_cast<KeyEvent*>(event);
     EXPECT_EQ(seq, consumeSeq);
@@ -179,7 +171,7 @@
     for (size_t i = 0; i < pointerCount; i++) {
         pointerProperties[i].clear();
         pointerProperties[i].id = (i + 2) % pointerCount;
-        pointerProperties[i].toolType = AMOTION_EVENT_TOOL_TYPE_FINGER;
+        pointerProperties[i].toolType = ToolType::FINGER;
 
         pointerCoords[i].clear();
         pointerCoords[i].setAxisValue(AMOTION_EVENT_AXIS_X, 100 * i);
@@ -208,13 +200,13 @@
 
     uint32_t consumeSeq;
     InputEvent* event;
-    status = mConsumer->consume(&mEventFactory, true /*consumeBatches*/, -1, &consumeSeq, &event);
+    status = mConsumer->consume(&mEventFactory, /*consumeBatches=*/true, -1, &consumeSeq, &event);
     ASSERT_EQ(OK, status)
             << "consumer consume should return OK";
 
     ASSERT_TRUE(event != nullptr)
             << "consumer should have returned non-NULL event";
-    ASSERT_EQ(AINPUT_EVENT_TYPE_MOTION, event->getType())
+    ASSERT_EQ(InputEventType::MOTION, event->getType())
             << "consumer should have returned a motion event";
 
     MotionEvent* motionEvent = static_cast<MotionEvent*>(event);
@@ -301,11 +293,11 @@
 
     uint32_t consumeSeq;
     InputEvent* event;
-    status = mConsumer->consume(&mEventFactory, true /*consumeBatches*/, -1, &consumeSeq, &event);
+    status = mConsumer->consume(&mEventFactory, /*consumeBatches=*/true, -1, &consumeSeq, &event);
     ASSERT_EQ(OK, status) << "consumer consume should return OK";
 
     ASSERT_TRUE(event != nullptr) << "consumer should have returned non-NULL event";
-    ASSERT_EQ(AINPUT_EVENT_TYPE_FOCUS, event->getType())
+    ASSERT_EQ(InputEventType::FOCUS, event->getType())
             << "consumer should have returned a focus event";
 
     FocusEvent* focusEvent = static_cast<FocusEvent*>(event);
@@ -342,11 +334,11 @@
 
     uint32_t consumeSeq;
     InputEvent* event;
-    status = mConsumer->consume(&mEventFactory, true /*consumeBatches*/, -1, &consumeSeq, &event);
+    status = mConsumer->consume(&mEventFactory, /*consumeBatches=*/true, -1, &consumeSeq, &event);
     ASSERT_EQ(OK, status) << "consumer consume should return OK";
 
     ASSERT_TRUE(event != nullptr) << "consumer should have returned non-NULL event";
-    ASSERT_EQ(AINPUT_EVENT_TYPE_CAPTURE, event->getType())
+    ASSERT_EQ(InputEventType::CAPTURE, event->getType())
             << "consumer should have returned a capture event";
 
     const CaptureEvent* captureEvent = static_cast<CaptureEvent*>(event);
@@ -384,11 +376,11 @@
 
     uint32_t consumeSeq;
     InputEvent* event;
-    status = mConsumer->consume(&mEventFactory, true /*consumeBatches*/, -1, &consumeSeq, &event);
+    status = mConsumer->consume(&mEventFactory, /*consumeBatches=*/true, -1, &consumeSeq, &event);
     ASSERT_EQ(OK, status) << "consumer consume should return OK";
 
     ASSERT_TRUE(event != nullptr) << "consumer should have returned non-NULL event";
-    ASSERT_EQ(AINPUT_EVENT_TYPE_DRAG, event->getType())
+    ASSERT_EQ(InputEventType::DRAG, event->getType())
             << "consumer should have returned a drag event";
 
     const DragEvent& dragEvent = static_cast<const DragEvent&>(*event);
@@ -426,11 +418,11 @@
 
     uint32_t consumeSeq;
     InputEvent* event;
-    status = mConsumer->consume(&mEventFactory, true /*consumeBatches*/, -1, &consumeSeq, &event);
+    status = mConsumer->consume(&mEventFactory, /*consumeBatches=*/true, -1, &consumeSeq, &event);
     ASSERT_EQ(OK, status) << "consumer consume should return OK";
 
     ASSERT_TRUE(event != nullptr) << "consumer should have returned non-NULL event";
-    ASSERT_EQ(AINPUT_EVENT_TYPE_TOUCH_MODE, event->getType())
+    ASSERT_EQ(InputEventType::TOUCH_MODE, event->getType())
             << "consumer should have returned a touch mode event";
 
     const TouchModeEvent& touchModeEvent = static_cast<const TouchModeEvent&>(*event);
diff --git a/libs/input/tests/MotionPredictor_test.cpp b/libs/input/tests/MotionPredictor_test.cpp
new file mode 100644
index 0000000..7a62f5e
--- /dev/null
+++ b/libs/input/tests/MotionPredictor_test.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <chrono>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <gui/constants.h>
+#include <input/Input.h>
+#include <input/MotionPredictor.h>
+
+using namespace std::literals::chrono_literals;
+
+namespace android {
+
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+using ::testing::UnorderedElementsAre;
+
+constexpr int32_t DOWN = AMOTION_EVENT_ACTION_DOWN;
+constexpr int32_t MOVE = AMOTION_EVENT_ACTION_MOVE;
+constexpr int32_t UP = AMOTION_EVENT_ACTION_UP;
+constexpr nsecs_t NSEC_PER_MSEC = 1'000'000;
+
+static MotionEvent getMotionEvent(int32_t action, float x, float y,
+                                  std::chrono::nanoseconds eventTime, int32_t deviceId = 0) {
+    MotionEvent event;
+    constexpr size_t pointerCount = 1;
+    std::vector<PointerProperties> pointerProperties;
+    std::vector<PointerCoords> pointerCoords;
+    for (size_t i = 0; i < pointerCount; i++) {
+        PointerProperties properties;
+        properties.clear();
+        properties.id = i;
+        properties.toolType = ToolType::STYLUS;
+        pointerProperties.push_back(properties);
+        PointerCoords coords;
+        coords.clear();
+        coords.setAxisValue(AMOTION_EVENT_AXIS_X, x);
+        coords.setAxisValue(AMOTION_EVENT_AXIS_Y, y);
+        pointerCoords.push_back(coords);
+    }
+
+    ui::Transform identityTransform;
+    event.initialize(InputEvent::nextId(), deviceId, AINPUT_SOURCE_STYLUS, ADISPLAY_ID_DEFAULT, {0},
+                     action, /*actionButton=*/0, /*flags=*/0, AMOTION_EVENT_EDGE_FLAG_NONE,
+                     AMETA_NONE, /*buttonState=*/0, MotionClassification::NONE, identityTransform,
+                     /*xPrecision=*/0.1,
+                     /*yPrecision=*/0.2, /*xCursorPosition=*/280, /*yCursorPosition=*/540,
+                     identityTransform, /*downTime=*/100, eventTime.count(), pointerCount,
+                     pointerProperties.data(), pointerCoords.data());
+    return event;
+}
+
+TEST(MotionPredictorTest, IsPredictionAvailable) {
+    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/0,
+                              []() { return true /*enable prediction*/; });
+    ASSERT_TRUE(predictor.isPredictionAvailable(/*deviceId=*/1, AINPUT_SOURCE_STYLUS));
+    ASSERT_FALSE(predictor.isPredictionAvailable(/*deviceId=*/1, AINPUT_SOURCE_TOUCHSCREEN));
+}
+
+TEST(MotionPredictorTest, Offset) {
+    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/1,
+                              []() { return true /*enable prediction*/; });
+    predictor.record(getMotionEvent(DOWN, 0, 1, 30ms));
+    predictor.record(getMotionEvent(MOVE, 0, 2, 35ms));
+    std::unique_ptr<MotionEvent> predicted = predictor.predict(40 * NSEC_PER_MSEC);
+    ASSERT_NE(nullptr, predicted);
+    ASSERT_GE(predicted->getEventTime(), 41);
+}
+
+TEST(MotionPredictorTest, FollowsGesture) {
+    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/0,
+                              []() { return true /*enable prediction*/; });
+
+    // MOVE without a DOWN is ignored.
+    predictor.record(getMotionEvent(MOVE, 1, 3, 10ms));
+    EXPECT_EQ(nullptr, predictor.predict(20 * NSEC_PER_MSEC));
+
+    predictor.record(getMotionEvent(DOWN, 2, 5, 20ms));
+    predictor.record(getMotionEvent(MOVE, 2, 7, 30ms));
+    predictor.record(getMotionEvent(MOVE, 3, 9, 40ms));
+    EXPECT_NE(nullptr, predictor.predict(50 * NSEC_PER_MSEC));
+
+    predictor.record(getMotionEvent(UP, 4, 11, 50ms));
+    EXPECT_EQ(nullptr, predictor.predict(20 * NSEC_PER_MSEC));
+}
+
+TEST(MotionPredictorTest, MultipleDevicesNotSupported) {
+    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/0,
+                              []() { return true /*enable prediction*/; });
+
+    ASSERT_TRUE(predictor.record(getMotionEvent(DOWN, 1, 3, 0ms, /*deviceId=*/0)).ok());
+    ASSERT_TRUE(predictor.record(getMotionEvent(MOVE, 1, 3, 10ms, /*deviceId=*/0)).ok());
+    ASSERT_TRUE(predictor.record(getMotionEvent(MOVE, 2, 5, 20ms, /*deviceId=*/0)).ok());
+    ASSERT_TRUE(predictor.record(getMotionEvent(MOVE, 3, 7, 30ms, /*deviceId=*/0)).ok());
+
+    ASSERT_FALSE(predictor.record(getMotionEvent(DOWN, 100, 300, 40ms, /*deviceId=*/1)).ok());
+    ASSERT_FALSE(predictor.record(getMotionEvent(MOVE, 100, 300, 50ms, /*deviceId=*/1)).ok());
+}
+
+TEST(MotionPredictorTest, IndividualGesturesFromDifferentDevicesAreSupported) {
+    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/0,
+                              []() { return true /*enable prediction*/; });
+
+    ASSERT_TRUE(predictor.record(getMotionEvent(DOWN, 1, 3, 0ms, /*deviceId=*/0)).ok());
+    ASSERT_TRUE(predictor.record(getMotionEvent(MOVE, 1, 3, 10ms, /*deviceId=*/0)).ok());
+    ASSERT_TRUE(predictor.record(getMotionEvent(MOVE, 2, 5, 20ms, /*deviceId=*/0)).ok());
+    ASSERT_TRUE(predictor.record(getMotionEvent(UP, 2, 5, 30ms, /*deviceId=*/0)).ok());
+
+    // Now, send a gesture from a different device. Since we have no active gesture, the new gesture
+    // should be processed correctly.
+    ASSERT_TRUE(predictor.record(getMotionEvent(DOWN, 100, 300, 40ms, /*deviceId=*/1)).ok());
+    ASSERT_TRUE(predictor.record(getMotionEvent(MOVE, 100, 300, 50ms, /*deviceId=*/1)).ok());
+}
+
+TEST(MotionPredictorTest, FlagDisablesPrediction) {
+    MotionPredictor predictor(/*predictionTimestampOffsetNanos=*/0,
+                              []() { return false /*disable prediction*/; });
+    predictor.record(getMotionEvent(DOWN, 0, 1, 30ms));
+    predictor.record(getMotionEvent(MOVE, 0, 1, 35ms));
+    std::unique_ptr<MotionEvent> predicted = predictor.predict(40 * NSEC_PER_MSEC);
+    ASSERT_EQ(nullptr, predicted);
+    ASSERT_FALSE(predictor.isPredictionAvailable(/*deviceId=*/1, AINPUT_SOURCE_STYLUS));
+    ASSERT_FALSE(predictor.isPredictionAvailable(/*deviceId=*/1, AINPUT_SOURCE_TOUCHSCREEN));
+}
+
+} // namespace android
diff --git a/libs/input/tests/RingBuffer_test.cpp b/libs/input/tests/RingBuffer_test.cpp
new file mode 100644
index 0000000..a2ef658
--- /dev/null
+++ b/libs/input/tests/RingBuffer_test.cpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <vector>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <input/RingBuffer.h>
+
+namespace android {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::Not;
+using ::testing::SizeIs;
+
+TEST(RingBufferTest, PushPop) {
+    RingBuffer<int> buffer(/*capacity=*/3);
+
+    buffer.pushBack(1);
+    buffer.pushBack(2);
+    buffer.pushBack(3);
+    EXPECT_THAT(buffer, ElementsAre(1, 2, 3));
+
+    buffer.pushBack(4);
+    EXPECT_THAT(buffer, ElementsAre(2, 3, 4));
+
+    buffer.pushFront(1);
+    EXPECT_THAT(buffer, ElementsAre(1, 2, 3));
+
+    EXPECT_EQ(1, buffer.popFront());
+    EXPECT_THAT(buffer, ElementsAre(2, 3));
+
+    buffer.pushBack(4);
+    EXPECT_THAT(buffer, ElementsAre(2, 3, 4));
+
+    buffer.pushBack(5);
+    EXPECT_THAT(buffer, ElementsAre(3, 4, 5));
+
+    EXPECT_EQ(5, buffer.popBack());
+    EXPECT_THAT(buffer, ElementsAre(3, 4));
+
+    EXPECT_EQ(4, buffer.popBack());
+    EXPECT_THAT(buffer, ElementsAre(3));
+
+    EXPECT_EQ(3, buffer.popBack());
+    EXPECT_THAT(buffer, ElementsAre());
+
+    buffer.pushBack(1);
+    EXPECT_THAT(buffer, ElementsAre(1));
+
+    EXPECT_EQ(1, buffer.popFront());
+    EXPECT_THAT(buffer, ElementsAre());
+}
+
+TEST(RingBufferTest, ObjectType) {
+    RingBuffer<std::unique_ptr<int>> buffer(/*capacity=*/2);
+    buffer.pushBack(std::make_unique<int>(1));
+    buffer.pushBack(std::make_unique<int>(2));
+    buffer.pushBack(std::make_unique<int>(3));
+
+    EXPECT_EQ(2, *buffer[0]);
+    EXPECT_EQ(3, *buffer[1]);
+}
+
+TEST(RingBufferTest, ConstructConstantValue) {
+    RingBuffer<int> buffer(/*count=*/3, /*value=*/10);
+    EXPECT_THAT(buffer, ElementsAre(10, 10, 10));
+    EXPECT_EQ(3u, buffer.capacity());
+}
+
+TEST(RingBufferTest, Assignment) {
+    RingBuffer<int> a(/*capacity=*/2);
+    a.pushBack(1);
+    a.pushBack(2);
+
+    RingBuffer<int> b(/*capacity=*/3);
+    b.pushBack(10);
+    b.pushBack(20);
+    b.pushBack(30);
+
+    std::swap(a, b);
+    EXPECT_THAT(a, ElementsAre(10, 20, 30));
+    EXPECT_THAT(b, ElementsAre(1, 2));
+
+    a = b;
+    EXPECT_THAT(a, ElementsAreArray(b));
+
+    RingBuffer<int> c(b);
+    EXPECT_THAT(c, ElementsAreArray(b));
+
+    RingBuffer<int> d(std::move(b));
+    EXPECT_EQ(0u, b.capacity());
+    EXPECT_THAT(b, ElementsAre());
+    EXPECT_THAT(d, ElementsAre(1, 2));
+
+    b = std::move(d);
+    EXPECT_THAT(b, ElementsAre(1, 2));
+    EXPECT_THAT(d, ElementsAre());
+    EXPECT_EQ(0u, d.capacity());
+}
+
+TEST(RingBufferTest, FrontBackAccess) {
+    RingBuffer<int> buffer(/*capacity=*/2);
+    buffer.pushBack(1);
+    EXPECT_EQ(1, buffer.front());
+    EXPECT_EQ(1, buffer.back());
+
+    buffer.pushFront(0);
+    EXPECT_EQ(0, buffer.front());
+    EXPECT_EQ(1, buffer.back());
+
+    buffer.pushFront(-1);
+    EXPECT_EQ(-1, buffer.front());
+    EXPECT_EQ(0, buffer.back());
+}
+
+TEST(RingBufferTest, Subscripting) {
+    RingBuffer<int> buffer(/*capacity=*/2);
+    buffer.pushBack(1);
+    EXPECT_EQ(1, buffer[0]);
+
+    buffer.pushFront(0);
+    EXPECT_EQ(0, buffer[0]);
+    EXPECT_EQ(1, buffer[1]);
+
+    buffer.pushFront(-1);
+    EXPECT_EQ(-1, buffer[0]);
+    EXPECT_EQ(0, buffer[1]);
+}
+
+TEST(RingBufferTest, Iterator) {
+    RingBuffer<int> buffer(/*capacity=*/3);
+    buffer.pushFront(2);
+    buffer.pushBack(3);
+
+    auto begin = buffer.begin();
+    auto end = buffer.end();
+
+    EXPECT_NE(begin, end);
+    EXPECT_LE(begin, end);
+    EXPECT_GT(end, begin);
+    EXPECT_EQ(end, begin + 2);
+    EXPECT_EQ(begin, end - 2);
+
+    EXPECT_EQ(2, end - begin);
+    EXPECT_EQ(1, end - (begin + 1));
+
+    EXPECT_EQ(2, *begin);
+    ++begin;
+    EXPECT_EQ(3, *begin);
+    --begin;
+    EXPECT_EQ(2, *begin);
+    begin += 1;
+    EXPECT_EQ(3, *begin);
+    begin += -1;
+    EXPECT_EQ(2, *begin);
+    begin -= -1;
+    EXPECT_EQ(3, *begin);
+}
+
+TEST(RingBufferTest, Clear) {
+    RingBuffer<int> buffer(/*capacity=*/2);
+    EXPECT_THAT(buffer, ElementsAre());
+
+    buffer.pushBack(1);
+    EXPECT_THAT(buffer, ElementsAre(1));
+
+    buffer.clear();
+    EXPECT_THAT(buffer, ElementsAre());
+    EXPECT_THAT(buffer, SizeIs(0));
+    EXPECT_THAT(buffer, IsEmpty());
+
+    buffer.pushFront(1);
+    EXPECT_THAT(buffer, ElementsAre(1));
+}
+
+TEST(RingBufferTest, SizeAndIsEmpty) {
+    RingBuffer<int> buffer(/*capacity=*/2);
+    EXPECT_THAT(buffer, SizeIs(0));
+    EXPECT_THAT(buffer, IsEmpty());
+
+    buffer.pushBack(1);
+    EXPECT_THAT(buffer, SizeIs(1));
+    EXPECT_THAT(buffer, Not(IsEmpty()));
+
+    buffer.pushBack(2);
+    EXPECT_THAT(buffer, SizeIs(2));
+    EXPECT_THAT(buffer, Not(IsEmpty()));
+
+    buffer.pushBack(3);
+    EXPECT_THAT(buffer, SizeIs(2));
+    EXPECT_THAT(buffer, Not(IsEmpty()));
+
+    buffer.popFront();
+    EXPECT_THAT(buffer, SizeIs(1));
+    EXPECT_THAT(buffer, Not(IsEmpty()));
+
+    buffer.popBack();
+    EXPECT_THAT(buffer, SizeIs(0));
+    EXPECT_THAT(buffer, IsEmpty());
+}
+
+} // namespace
+} // namespace android
diff --git a/libs/input/tests/StructLayout_test.cpp b/libs/input/tests/StructLayout_test.cpp
index 1c8658b..024b6d3 100644
--- a/libs/input/tests/StructLayout_test.cpp
+++ b/libs/input/tests/StructLayout_test.cpp
@@ -117,7 +117,7 @@
 
 void TestBodySize() {
     static_assert(sizeof(InputMessage::Body::Key) == 96);
-    static_assert(sizeof(InputMessage::Body::Motion::Pointer) == 136);
+    static_assert(sizeof(InputMessage::Body::Motion::Pointer) == 144);
     static_assert(sizeof(InputMessage::Body::Motion) ==
                   offsetof(InputMessage::Body::Motion, pointers) +
                           sizeof(InputMessage::Body::Motion::Pointer) * MAX_POINTERS);
@@ -137,8 +137,8 @@
     static_assert(sizeof(InputMessage::Body) ==
                   offsetof(InputMessage::Body::Motion, pointers) +
                           sizeof(InputMessage::Body::Motion::Pointer) * MAX_POINTERS);
-    static_assert(sizeof(InputMessage::Body) == 160 + 136 * 16);
-    static_assert(sizeof(InputMessage::Body) == 2336);
+    static_assert(sizeof(InputMessage::Body) == 160 + 144 * 16);
+    static_assert(sizeof(InputMessage::Body) == 2464);
 }
 
 /**
@@ -148,8 +148,8 @@
  * still helpful to compute to get an idea of the sizes that are involved.
  */
 void TestWorstCaseInputMessageSize() {
-    static_assert(sizeof(InputMessage) == /*header*/ 8 + /*body*/ 2336);
-    static_assert(sizeof(InputMessage) == 2344);
+    static_assert(sizeof(InputMessage) == /*header*/ 8 + /*body*/ 2464);
+    static_assert(sizeof(InputMessage) == 2472);
 }
 
 /**
@@ -159,8 +159,8 @@
     constexpr size_t pointerCount = 1;
     constexpr size_t bodySize = offsetof(InputMessage::Body::Motion, pointers) +
             sizeof(InputMessage::Body::Motion::Pointer) * pointerCount;
-    static_assert(bodySize == 160 + 136);
-    static_assert(bodySize == 296); // For the total message size, add the small header
+    static_assert(bodySize == 160 + 144);
+    static_assert(bodySize == 304); // For the total message size, add the small header
 }
 
 // --- VerifiedInputEvent ---
diff --git a/libs/input/tests/TfLiteMotionPredictor_test.cpp b/libs/input/tests/TfLiteMotionPredictor_test.cpp
new file mode 100644
index 0000000..b5ed9e4
--- /dev/null
+++ b/libs/input/tests/TfLiteMotionPredictor_test.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <fstream>
+#include <ios>
+#include <iterator>
+#include <string>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <input/TfLiteMotionPredictor.h>
+
+namespace android {
+namespace {
+
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::FloatNear;
+
+TEST(TfLiteMotionPredictorTest, BuffersReadiness) {
+    TfLiteMotionPredictorBuffers buffers(/*inputLength=*/5);
+    ASSERT_FALSE(buffers.isReady());
+
+    buffers.pushSample(/*timestamp=*/0, {.position = {.x = 100, .y = 100}});
+    ASSERT_FALSE(buffers.isReady());
+
+    buffers.pushSample(/*timestamp=*/1, {.position = {.x = 100, .y = 100}});
+    ASSERT_FALSE(buffers.isReady());
+
+    // Two samples with distinct positions are required.
+    buffers.pushSample(/*timestamp=*/2, {.position = {.x = 100, .y = 110}});
+    ASSERT_TRUE(buffers.isReady());
+
+    buffers.reset();
+    ASSERT_FALSE(buffers.isReady());
+}
+
+TEST(TfLiteMotionPredictorTest, BuffersRecentData) {
+    TfLiteMotionPredictorBuffers buffers(/*inputLength=*/5);
+
+    buffers.pushSample(/*timestamp=*/1, {.position = {.x = 100, .y = 200}});
+    ASSERT_EQ(buffers.lastTimestamp(), 1);
+
+    buffers.pushSample(/*timestamp=*/2, {.position = {.x = 150, .y = 250}});
+    ASSERT_EQ(buffers.lastTimestamp(), 2);
+    ASSERT_TRUE(buffers.isReady());
+    ASSERT_EQ(buffers.axisFrom().position.x, 100);
+    ASSERT_EQ(buffers.axisFrom().position.y, 200);
+    ASSERT_EQ(buffers.axisTo().position.x, 150);
+    ASSERT_EQ(buffers.axisTo().position.y, 250);
+
+    // Position doesn't change, so neither do the axes.
+    buffers.pushSample(/*timestamp=*/3, {.position = {.x = 150, .y = 250}});
+    ASSERT_EQ(buffers.lastTimestamp(), 3);
+    ASSERT_TRUE(buffers.isReady());
+    ASSERT_EQ(buffers.axisFrom().position.x, 100);
+    ASSERT_EQ(buffers.axisFrom().position.y, 200);
+    ASSERT_EQ(buffers.axisTo().position.x, 150);
+    ASSERT_EQ(buffers.axisTo().position.y, 250);
+
+    buffers.pushSample(/*timestamp=*/4, {.position = {.x = 180, .y = 280}});
+    ASSERT_EQ(buffers.lastTimestamp(), 4);
+    ASSERT_TRUE(buffers.isReady());
+    ASSERT_EQ(buffers.axisFrom().position.x, 150);
+    ASSERT_EQ(buffers.axisFrom().position.y, 250);
+    ASSERT_EQ(buffers.axisTo().position.x, 180);
+    ASSERT_EQ(buffers.axisTo().position.y, 280);
+}
+
+TEST(TfLiteMotionPredictorTest, BuffersCopyTo) {
+    std::unique_ptr<TfLiteMotionPredictorModel> model = TfLiteMotionPredictorModel::create();
+    TfLiteMotionPredictorBuffers buffers(model->inputLength());
+
+    buffers.pushSample(/*timestamp=*/1,
+                       {.position = {.x = 10, .y = 10},
+                        .pressure = 0,
+                        .orientation = 0,
+                        .tilt = 0.2});
+    buffers.pushSample(/*timestamp=*/2,
+                       {.position = {.x = 10, .y = 50},
+                        .pressure = 0.4,
+                        .orientation = M_PI / 4,
+                        .tilt = 0.3});
+    buffers.pushSample(/*timestamp=*/3,
+                       {.position = {.x = 30, .y = 50},
+                        .pressure = 0.5,
+                        .orientation = -M_PI / 4,
+                        .tilt = 0.4});
+    buffers.pushSample(/*timestamp=*/3,
+                       {.position = {.x = 30, .y = 60},
+                        .pressure = 0,
+                        .orientation = 0,
+                        .tilt = 0.5});
+    buffers.copyTo(*model);
+
+    const int zeroPadding = model->inputLength() - 3;
+    ASSERT_GE(zeroPadding, 0);
+
+    EXPECT_THAT(model->inputR().subspan(0, zeroPadding), Each(0));
+    EXPECT_THAT(model->inputPhi().subspan(0, zeroPadding), Each(0));
+    EXPECT_THAT(model->inputPressure().subspan(0, zeroPadding), Each(0));
+    EXPECT_THAT(model->inputTilt().subspan(0, zeroPadding), Each(0));
+    EXPECT_THAT(model->inputOrientation().subspan(0, zeroPadding), Each(0));
+
+    EXPECT_THAT(model->inputR().subspan(zeroPadding), ElementsAre(40, 20, 10));
+    EXPECT_THAT(model->inputPhi().subspan(zeroPadding), ElementsAre(0, -M_PI / 2, M_PI / 2));
+    EXPECT_THAT(model->inputPressure().subspan(zeroPadding), ElementsAre(0.4, 0.5, 0));
+    EXPECT_THAT(model->inputTilt().subspan(zeroPadding), ElementsAre(0.3, 0.4, 0.5));
+    EXPECT_THAT(model->inputOrientation().subspan(zeroPadding),
+                ElementsAre(FloatNear(-M_PI / 4, 1e-5), FloatNear(M_PI / 4, 1e-5),
+                            FloatNear(M_PI / 2, 1e-5)));
+}
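+
+// A minimal worked sketch of where the expected values above come from. Assumption (consistent
+// with the expectations, not taken from the model documentation): inputR is the Euclidean
+// distance between consecutive sample positions, and inputPhi is the signed turning angle
+// between consecutive segments (0 for the first segment).
+//   (10,10) -> (10,50): r = sqrt(0*0 + 40*40) = 40, phi = 0 (first segment)
+//   (10,50) -> (30,50): r = sqrt(20*20 + 0*0) = 20, phi = -pi/2 (turn from +y to +x)
+//   (30,50) -> (30,60): r = sqrt(0*0 + 10*10) = 10, phi = +pi/2 (turn from +x to +y)
+// This matches ElementsAre(40, 20, 10) and ElementsAre(0, -M_PI / 2, M_PI / 2) above.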
+
+TEST(TfLiteMotionPredictorTest, ModelInputOutputLength) {
+    std::unique_ptr<TfLiteMotionPredictorModel> model = TfLiteMotionPredictorModel::create();
+    ASSERT_GT(model->inputLength(), 0u);
+
+    const int inputLength = model->inputLength();
+    ASSERT_EQ(inputLength, model->inputR().size());
+    ASSERT_EQ(inputLength, model->inputPhi().size());
+    ASSERT_EQ(inputLength, model->inputPressure().size());
+    ASSERT_EQ(inputLength, model->inputOrientation().size());
+    ASSERT_EQ(inputLength, model->inputTilt().size());
+
+    ASSERT_TRUE(model->invoke());
+
+    const int outputLength = model->outputLength();
+    ASSERT_EQ(outputLength, model->outputR().size());
+    ASSERT_EQ(outputLength, model->outputPhi().size());
+    ASSERT_EQ(outputLength, model->outputPressure().size());
+}
+
+TEST(TfLiteMotionPredictorTest, ModelOutput) {
+    std::unique_ptr<TfLiteMotionPredictorModel> model = TfLiteMotionPredictorModel::create();
+    TfLiteMotionPredictorBuffers buffers(model->inputLength());
+
+    buffers.pushSample(/*timestamp=*/1, {.position = {.x = 100, .y = 200}, .pressure = 0.2});
+    buffers.pushSample(/*timestamp=*/2, {.position = {.x = 150, .y = 250}, .pressure = 0.4});
+    buffers.pushSample(/*timestamp=*/3, {.position = {.x = 180, .y = 280}, .pressure = 0.6});
+    buffers.copyTo(*model);
+
+    ASSERT_TRUE(model->invoke());
+
+    // The actual model output is implementation-defined, but it should at least be non-zero and
+    // non-NaN.
+    const auto is_valid = [](float value) { return !isnan(value) && value != 0; };
+    ASSERT_TRUE(std::all_of(model->outputR().begin(), model->outputR().end(), is_valid));
+    ASSERT_TRUE(std::all_of(model->outputPhi().begin(), model->outputPhi().end(), is_valid));
+    ASSERT_TRUE(
+            std::all_of(model->outputPressure().begin(), model->outputPressure().end(), is_valid));
+}
+
+} // namespace
+} // namespace android
diff --git a/libs/input/tests/TouchResampling_test.cpp b/libs/input/tests/TouchResampling_test.cpp
new file mode 100644
index 0000000..655de80
--- /dev/null
+++ b/libs/input/tests/TouchResampling_test.cpp
@@ -0,0 +1,577 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TestHelpers.h"
+
+#include <chrono>
+#include <vector>
+
+#include <attestation/HmacKeyManager.h>
+#include <gtest/gtest.h>
+#include <input/InputTransport.h>
+
+using namespace std::chrono_literals;
+
+namespace android {
+
+struct Pointer {
+    int32_t id;
+    float x;
+    float y;
+    bool isResampled = false;
+};
+
+struct InputEventEntry {
+    std::chrono::nanoseconds eventTime;
+    std::vector<Pointer> pointers;
+    int32_t action;
+};
+
+class TouchResamplingTest : public testing::Test {
+protected:
+    std::unique_ptr<InputPublisher> mPublisher;
+    std::unique_ptr<InputConsumer> mConsumer;
+    PreallocatedInputEventFactory mEventFactory;
+
+    uint32_t mSeq = 1;
+
+    void SetUp() override {
+        std::unique_ptr<InputChannel> serverChannel, clientChannel;
+        status_t result =
+                InputChannel::openInputChannelPair("channel name", serverChannel, clientChannel);
+        ASSERT_EQ(OK, result);
+
+        mPublisher = std::make_unique<InputPublisher>(std::move(serverChannel));
+        mConsumer = std::make_unique<InputConsumer>(std::move(clientChannel),
+                                                    /*enableTouchResampling=*/true);
+    }
+
+    status_t publishSimpleMotionEventWithCoords(int32_t action, nsecs_t eventTime,
+                                                const std::vector<PointerProperties>& properties,
+                                                const std::vector<PointerCoords>& coords);
+    void publishSimpleMotionEvent(int32_t action, nsecs_t eventTime,
+                                  const std::vector<Pointer>& pointers);
+    void publishInputEventEntries(const std::vector<InputEventEntry>& entries);
+    void consumeInputEventEntries(const std::vector<InputEventEntry>& entries,
+                                  std::chrono::nanoseconds frameTime);
+    void receiveResponseUntilSequence(uint32_t seq);
+};
+
+status_t TouchResamplingTest::publishSimpleMotionEventWithCoords(
+        int32_t action, nsecs_t eventTime, const std::vector<PointerProperties>& properties,
+        const std::vector<PointerCoords>& coords) {
+    const ui::Transform identityTransform;
+    const nsecs_t downTime = 0;
+
+    if (action == AMOTION_EVENT_ACTION_DOWN && eventTime != 0) {
+        ADD_FAILURE() << "Downtime should be equal to 0 (hardcoded for convenience)";
+    }
+    return mPublisher->publishMotionEvent(mSeq++, InputEvent::nextId(), /*deviceId=*/1,
+                                          AINPUT_SOURCE_TOUCHSCREEN, /*displayId=*/0, INVALID_HMAC,
+                                          action, /*actionButton=*/0, /*flags=*/0, /*edgeFlags=*/0,
+                                          AMETA_NONE, /*buttonState=*/0, MotionClassification::NONE,
+                                          identityTransform, /*xPrecision=*/0, /*yPrecision=*/0,
+                                          AMOTION_EVENT_INVALID_CURSOR_POSITION,
+                                          AMOTION_EVENT_INVALID_CURSOR_POSITION, identityTransform,
+                                          downTime, eventTime, properties.size(), properties.data(),
+                                          coords.data());
+}
+
+void TouchResamplingTest::publishSimpleMotionEvent(int32_t action, nsecs_t eventTime,
+                                                   const std::vector<Pointer>& pointers) {
+    std::vector<PointerProperties> properties;
+    std::vector<PointerCoords> coords;
+
+    for (const Pointer& pointer : pointers) {
+        properties.push_back({});
+        properties.back().clear();
+        properties.back().id = pointer.id;
+        properties.back().toolType = ToolType::FINGER;
+
+        coords.push_back({});
+        coords.back().clear();
+        coords.back().setAxisValue(AMOTION_EVENT_AXIS_X, pointer.x);
+        coords.back().setAxisValue(AMOTION_EVENT_AXIS_Y, pointer.y);
+    }
+
+    status_t result = publishSimpleMotionEventWithCoords(action, eventTime, properties, coords);
+    ASSERT_EQ(OK, result);
+}
+
+/**
+ * Each entry is published separately, one entry at a time. As a result, action is used here
+ * on a per-entry basis.
+ */
+void TouchResamplingTest::publishInputEventEntries(const std::vector<InputEventEntry>& entries) {
+    for (const InputEventEntry& entry : entries) {
+        publishSimpleMotionEvent(entry.action, entry.eventTime.count(), entry.pointers);
+    }
+}
+
+/**
+ * Inside the publisher, read responses repeatedly until the desired sequence number is returned.
+ *
+ * Sometimes, when you call 'sendFinishedSignal', you are finishing a batch that is composed of
+ * several input events. As a result, the consumer will generate multiple 'finish' signals on
+ * your behalf.
+ *
+ * In this function, we call 'receiveConsumerResponse' in a loop until the desired sequence number
+ * is returned.
+ */
+void TouchResamplingTest::receiveResponseUntilSequence(uint32_t seq) {
+    size_t consumedEvents = 0;
+    while (consumedEvents < 100) {
+        android::base::Result<InputPublisher::ConsumerResponse> response =
+                mPublisher->receiveConsumerResponse();
+        ASSERT_TRUE(response.ok());
+        ASSERT_TRUE(std::holds_alternative<InputPublisher::Finished>(*response));
+        const InputPublisher::Finished& finish = std::get<InputPublisher::Finished>(*response);
+        ASSERT_TRUE(finish.handled)
+                << "publisher receiveFinishedSignal should have set handled to consumer's reply";
+        if (finish.seq == seq) {
+            return;
+        }
+        consumedEvents++;
+    }
+    FAIL() << "Got " << consumedEvents << " events, but still no event with seq=" << seq;
+}
+
+/**
+ * All entries are compared against a single MotionEvent, but the same data structure
+ * InputEventEntry is used here for simpler code. As a result, the entire array of InputEventEntry
+ * must contain identical values for the action field.
+ */
+void TouchResamplingTest::consumeInputEventEntries(const std::vector<InputEventEntry>& entries,
+                                                   std::chrono::nanoseconds frameTime) {
+    ASSERT_GE(entries.size(), 1U) << "Must have at least 1 InputEventEntry to compare against";
+
+    uint32_t consumeSeq;
+    InputEvent* event;
+
+    status_t status = mConsumer->consume(&mEventFactory, /*consumeBatches=*/true, frameTime.count(),
+                                         &consumeSeq, &event);
+    ASSERT_EQ(OK, status);
+    MotionEvent* motionEvent = static_cast<MotionEvent*>(event);
+
+    ASSERT_EQ(entries.size() - 1, motionEvent->getHistorySize());
+    for (size_t i = 0; i < entries.size(); i++) { // most recent sample is last
+        SCOPED_TRACE(i);
+        const InputEventEntry& entry = entries[i];
+        ASSERT_EQ(entry.action, motionEvent->getAction());
+        ASSERT_EQ(entry.eventTime.count(), motionEvent->getHistoricalEventTime(i));
+        ASSERT_EQ(entry.pointers.size(), motionEvent->getPointerCount());
+
+        for (size_t p = 0; p < motionEvent->getPointerCount(); p++) {
+            SCOPED_TRACE(p);
+            // The pointers can be in any order, both in the MotionEvent and in the InputEventEntry
+            ssize_t motionEventPointerIndex = motionEvent->findPointerIndex(entry.pointers[p].id);
+            ASSERT_GE(motionEventPointerIndex, 0) << "Pointer must be present in MotionEvent";
+            ASSERT_EQ(entry.pointers[p].x,
+                      motionEvent->getHistoricalAxisValue(AMOTION_EVENT_AXIS_X,
+                                                          motionEventPointerIndex, i));
+            ASSERT_EQ(entry.pointers[p].x,
+                      motionEvent->getHistoricalRawAxisValue(AMOTION_EVENT_AXIS_X,
+                                                             motionEventPointerIndex, i));
+            ASSERT_EQ(entry.pointers[p].y,
+                      motionEvent->getHistoricalAxisValue(AMOTION_EVENT_AXIS_Y,
+                                                          motionEventPointerIndex, i));
+            ASSERT_EQ(entry.pointers[p].y,
+                      motionEvent->getHistoricalRawAxisValue(AMOTION_EVENT_AXIS_Y,
+                                                             motionEventPointerIndex, i));
+            ASSERT_EQ(entry.pointers[p].isResampled,
+                      motionEvent->isResampled(motionEventPointerIndex, i));
+        }
+    }
+
+    status = mConsumer->sendFinishedSignal(consumeSeq, true);
+    ASSERT_EQ(OK, status);
+
+    receiveResponseUntilSequence(consumeSeq);
+}
+
+/**
+ * Timeline
+ * ---------+------------------+------------------+--------+-----------------+----------------------
+ *          0 ms               10 ms              20 ms    25 ms            35 ms
+ *          ACTION_DOWN       ACTION_MOVE      ACTION_MOVE  ^                ^
+ *                                                          |                |
+ *                                                         resampled value   |
+ *                                                                          frameTime
+ * Typically, the prediction is made for time frameTime - RESAMPLE_LATENCY, or 30 ms in this case.
+ * However, that would be 10 ms later than the last real sample (which came in at 20 ms).
+ * Therefore, the resampling should happen at 20 ms + RESAMPLE_MAX_PREDICTION = 28 ms.
+ * In this situation, though, the resample time is further limited to the last real event plus
+ * half of the time between the last two real events, which puts it at:
+ * 20 ms + (20 ms - 10 ms) / 2 = 25 ms.
+ */
+TEST_F(TouchResamplingTest, EventIsResampled) {
+    std::chrono::nanoseconds frameTime;
+    std::vector<InputEventEntry> entries, expectedEntries;
+
+    // The initial ACTION_DOWN should be published separately, because the first consume() call
+    // will only return an InputEvent with a single action.
+    entries = {
+            //      id  x   y
+            {0ms, {{0, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 5ms;
+    expectedEntries = {
+            //      id  x   y
+            {0ms, {{0, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Two ACTION_MOVE events 10 ms apart that move in X direction and stay still in Y
+    entries = {
+            //      id  x   y
+            {10ms, {{0, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{0, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 35ms;
+    expectedEntries = {
+            //      id  x   y
+            {10ms, {{0, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{0, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {25ms, {{0, 35, 30, .isResampled = true}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+}
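+
+// A worked sketch of how the 25 ms resampled sample above is derived. RESAMPLE_LATENCY = 5 ms and
+// RESAMPLE_MAX_PREDICTION = 8 ms are assumptions taken from the timeline comment above, not from
+// the resampler implementation.
+constexpr int kResampleTimeSketchMs = 20 + (20 - 10) / 2; // bounded by half of the last real delta
+static_assert(kResampleTimeSketchMs == 25);
+static_assert(kResampleTimeSketchMs < 35 - 5); // tighter than frameTime - RESAMPLE_LATENCY = 30 ms
+static_assert(kResampleTimeSketchMs < 20 + 8); // tighter than the max-prediction bound = 28 ms
+// Extrapolating x from (10 ms, 20) and (20 ms, 30) to 25 ms gives 30 + 1 * 5 = 35, matching the
+// expected resampled sample {25ms, {{0, 35, 30}}}.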
+
+/**
+ * Same as above test, but use pointer id=1 instead of 0 to make sure that system does not
+ * have these hardcoded.
+ */
+TEST_F(TouchResamplingTest, EventIsResampledWithDifferentId) {
+    std::chrono::nanoseconds frameTime;
+    std::vector<InputEventEntry> entries, expectedEntries;
+
+    // The initial ACTION_DOWN should be published separately, because the first consume() call
+    // will only return an InputEvent with a single action.
+    entries = {
+            //      id  x   y
+            {0ms, {{1, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 5ms;
+    expectedEntries = {
+            //      id  x   y
+            {0ms, {{1, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Two ACTION_MOVE events 10 ms apart that move in X direction and stay still in Y
+    entries = {
+            //      id  x   y
+            {10ms, {{1, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{1, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 35ms;
+    expectedEntries = {
+            //      id  x   y
+            {10ms, {{1, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{1, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {25ms, {{1, 35, 30, .isResampled = true}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+}
+
+/**
+ * Event should not be resampled when sample time is equal to event time.
+ */
+TEST_F(TouchResamplingTest, SampleTimeEqualsEventTime) {
+    std::chrono::nanoseconds frameTime;
+    std::vector<InputEventEntry> entries, expectedEntries;
+
+    // The initial ACTION_DOWN should be published separately, because the first consume() call
+    // will only return an InputEvent with a single action.
+    entries = {
+            //      id  x   y
+            {0ms, {{0, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 5ms;
+    expectedEntries = {
+            //      id  x   y
+            {0ms, {{0, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Two ACTION_MOVE events 10 ms apart that move in X direction and stay still in Y
+    entries = {
+            //      id  x   y
+            {10ms, {{0, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{0, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 20ms + 5ms /*RESAMPLE_LATENCY*/;
+    expectedEntries = {
+            //      id  x   y
+            {10ms, {{0, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{0, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            // no resampled event because the time of resample falls exactly on the existing event
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+}
+
+/**
+ * Once we send a resampled value to the app, we should continue to "lie" if the pointer
+ * does not move. So, if the pointer keeps the same coordinates, resampled value should continue
+ * to be used.
+ */
+TEST_F(TouchResamplingTest, ResampledValueIsUsedForIdenticalCoordinates) {
+    std::chrono::nanoseconds frameTime;
+    std::vector<InputEventEntry> entries, expectedEntries;
+
+    // The initial ACTION_DOWN should be published separately, because the first consume() call
+    // will only return an InputEvent with a single action.
+    entries = {
+            //      id  x   y
+            {0ms, {{0, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 5ms;
+    expectedEntries = {
+            //      id  x   y
+            {0ms, {{0, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Two ACTION_MOVE events 10 ms apart that move in X direction and stay still in Y
+    entries = {
+            //      id  x   y
+            {10ms, {{0, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{0, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 35ms;
+    expectedEntries = {
+            //      id  x   y
+            {10ms, {{0, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{0, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {25ms, {{0, 35, 30, .isResampled = true}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Coordinate value 30 has been resampled to 35. When a new event comes in with value 30 again,
+    // the system should still report 35.
+    entries = {
+            //      id  x   y
+            {40ms, {{0, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 45ms + 5ms /*RESAMPLE_LATENCY*/;
+    expectedEntries = {
+            //      id  x   y
+            {40ms,
+             {{0, 35, 30, .isResampled = true}},
+             AMOTION_EVENT_ACTION_MOVE}, // original event, rewritten
+            {45ms,
+             {{0, 35, 30, .isResampled = true}},
+             AMOTION_EVENT_ACTION_MOVE}, // resampled event, rewritten
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+}
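+
+// A worked sketch for the 45 ms sample above, with the same assumed constants as before: the
+// nominal resample time is 50 ms - 5 ms = 45 ms, which is within both caps (40 ms + 8 ms = 48 ms
+// and 40 ms + (40 ms - 20 ms) / 2 = 50 ms), so the resample lands at 45 ms. The x value stays at
+// the previously reported 35 because the new real sample repeats the earlier real coordinate 30.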
+
+TEST_F(TouchResamplingTest, OldEventReceivedAfterResampleOccurs) {
+    std::chrono::nanoseconds frameTime;
+    std::vector<InputEventEntry> entries, expectedEntries;
+
+    // The initial ACTION_DOWN should be published separately, because the first consume() call
+    // will only return an InputEvent with a single action.
+    entries = {
+            //      id  x   y
+            {0ms, {{0, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 5ms;
+    expectedEntries = {
+            //      id  x   y
+            {0ms, {{0, 10, 20}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Two ACTION_MOVE events 10 ms apart that move in X direction and stay still in Y
+    entries = {
+            //      id  x   y
+            {10ms, {{0, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{0, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 35ms;
+    expectedEntries = {
+            //      id  x   y
+            {10ms, {{0, 20, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {20ms, {{0, 30, 30}}, AMOTION_EVENT_ACTION_MOVE},
+            {25ms, {{0, 35, 30, .isResampled = true}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+    // Above, the resampled event is at 25ms rather than at 30 ms = 35ms - RESAMPLE_LATENCY
+    // because we are further bound by how far we can extrapolate by the "last time delta".
+    // That's 50% of (20 ms - 10ms) => 5ms. So we can't predict more than 5 ms into the future
+    // from the event at 20ms, which is why the resampled event is at t = 25 ms.
+
+    // We resampled the event to 25 ms. Now, an older 'real' event comes in.
+    entries = {
+            //      id  x   y
+            {24ms, {{0, 40, 30}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 50ms;
+    expectedEntries = {
+            //      id  x   y
+            {24ms,
+             {{0, 35, 30, .isResampled = true}},
+             AMOTION_EVENT_ACTION_MOVE}, // original event, rewritten
+            {26ms,
+             {{0, 45, 30, .isResampled = true}},
+             AMOTION_EVENT_ACTION_MOVE}, // resampled event, rewritten
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+}
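+
+// A worked sketch for the 26 ms sample above, using the same assumed constants: the nominal
+// resample time is 50 ms - 5 ms = 45 ms, but it is capped to the last real event plus half of the
+// last real delta, i.e. 24 ms + (24 ms - 20 ms) / 2 = 26 ms. Extrapolating x from (20 ms, 30) and
+// (24 ms, 40) to 26 ms gives 40 + 2.5 * 2 = 45, matching {26ms, {{0, 45, 30}}}.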
+
+TEST_F(TouchResamplingTest, TwoPointersAreResampledIndependently) {
+    std::chrono::nanoseconds frameTime;
+    std::vector<InputEventEntry> entries, expectedEntries;
+
+    // full action for when a pointer with id=1 appears (some other pointer must already be present)
+    constexpr int32_t actionPointer1Down =
+            AMOTION_EVENT_ACTION_POINTER_DOWN + (1 << AMOTION_EVENT_ACTION_POINTER_INDEX_SHIFT);
+
+    // full action for when a pointer with id=0 disappears (some other pointer must still remain)
+    constexpr int32_t actionPointer0Up =
+            AMOTION_EVENT_ACTION_POINTER_UP + (0 << AMOTION_EVENT_ACTION_POINTER_INDEX_SHIFT);
+
+    // The initial ACTION_DOWN should be published separately, because the first consume() call
+    // will only return an InputEvent with a single action.
+    entries = {
+            //      id  x   y
+            {0ms, {{0, 100, 100}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 5ms;
+    expectedEntries = {
+            //      id  x   y
+            {0ms, {{0, 100, 100}}, AMOTION_EVENT_ACTION_DOWN},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    entries = {
+            //       id  x   y
+            {10ms, {{0, 100, 100}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 10ms + 5ms /*RESAMPLE_LATENCY*/;
+    expectedEntries = {
+            //       id  x   y
+            {10ms, {{0, 100, 100}}, AMOTION_EVENT_ACTION_MOVE},
+            // no resampled value because frameTime - RESAMPLE_LATENCY == eventTime
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Second pointer id=1 appears
+    entries = {
+            //      id  x    y
+            {15ms, {{0, 100, 100}, {1, 500, 500}}, actionPointer1Down},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 20ms + 5ms /*RESAMPLE_LATENCY*/;
+    expectedEntries = {
+            //      id  x    y
+            {15ms, {{0, 100, 100}, {1, 500, 500}}, actionPointer1Down},
+            // no resampled value because frameTime - RESAMPLE_LATENCY == eventTime
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Both pointers move
+    entries = {
+            //      id  x    y
+            {30ms, {{0, 100, 100}, {1, 500, 500}}, AMOTION_EVENT_ACTION_MOVE},
+            {40ms, {{0, 120, 120}, {1, 600, 600}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 45ms + 5ms /*RESAMPLE_LATENCY*/;
+    expectedEntries = {
+            //      id  x    y
+            {30ms, {{0, 100, 100}, {1, 500, 500}}, AMOTION_EVENT_ACTION_MOVE},
+            {40ms, {{0, 120, 120}, {1, 600, 600}}, AMOTION_EVENT_ACTION_MOVE},
+            {45ms,
+             {{0, 130, 130, .isResampled = true}, {1, 650, 650, .isResampled = true}},
+             AMOTION_EVENT_ACTION_MOVE},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Both pointers move again
+    entries = {
+            //      id  x    y
+            {60ms, {{0, 120, 120}, {1, 600, 600}}, AMOTION_EVENT_ACTION_MOVE},
+            {70ms, {{0, 130, 130}, {1, 700, 700}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 75ms + 5ms /*RESAMPLE_LATENCY*/;
+    /**
+     * At t = 60, the expected value for pointer id 0 is not 120: the value 120 was received
+     * twice and resampled to 130, so once we have reported it as "130", we continue to report
+     * it as such. The same applies to pointer id 1.
+     */
+    expectedEntries = {
+            {60ms,
+             {{0, 130, 130, .isResampled = true}, // not 120! because it matches previous real event
+              {1, 650, 650, .isResampled = true}},
+             AMOTION_EVENT_ACTION_MOVE},
+            {70ms, {{0, 130, 130}, {1, 700, 700}}, AMOTION_EVENT_ACTION_MOVE},
+            {75ms,
+             {{0, 135, 135, .isResampled = true}, {1, 750, 750, .isResampled = true}},
+             AMOTION_EVENT_ACTION_MOVE},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // First pointer id=0 leaves the screen
+    entries = {
+            //      id  x    y
+            {80ms, {{1, 600, 600}}, actionPointer0Up},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 90ms;
+    expectedEntries = {
+            //      id  x    y
+            {80ms, {{1, 600, 600}}, actionPointer0Up},
+            // no resampled event for ACTION_POINTER_UP
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+
+    // Remaining pointer id=1 is still present, but doesn't move
+    entries = {
+            //      id  x    y
+            {90ms, {{1, 600, 600}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    publishInputEventEntries(entries);
+    frameTime = 100ms;
+    expectedEntries = {
+            //      id  x    y
+            {90ms, {{1, 600, 600}}, AMOTION_EVENT_ACTION_MOVE},
+            /**
+             * The latest event with ACTION_MOVE was at t = 70, coord = 700.
+             * Use that value for resampling here: (600 - 700) / (90 - 70) * 5 + 600
+             */
+            {95ms, {{1, 575, 575, .isResampled = true}}, AMOTION_EVENT_ACTION_MOVE},
+    };
+    consumeInputEventEntries(expectedEntries, frameTime);
+}
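+
+// A worked sketch for the 45 ms two-pointer sample above, with the same assumed constants: the
+// resample time is min(50 - 5, 40 + 8, 40 + (40 - 30) / 2) = 45 ms. Each pointer is extrapolated
+// from its own last two real samples: pointer 0 moves 2 units/ms, giving 120 + 2 * 5 = 130;
+// pointer 1 moves 10 units/ms, giving 600 + 10 * 5 = 650.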
+
+} // namespace android
diff --git a/libs/input/tests/TouchVideoFrame_test.cpp b/libs/input/tests/TouchVideoFrame_test.cpp
index 654b236..081a995 100644
--- a/libs/input/tests/TouchVideoFrame_test.cpp
+++ b/libs/input/tests/TouchVideoFrame_test.cpp
@@ -73,38 +73,38 @@
 TEST(TouchVideoFrame, Rotate90_0x0) {
     TouchVideoFrame frame(0, 0, {}, TIMESTAMP);
     TouchVideoFrame frameRotated(0, 0, {}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_90);
+    frame.rotate(ui::ROTATION_90);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate90_1x1) {
     TouchVideoFrame frame(1, 1, {1}, TIMESTAMP);
     TouchVideoFrame frameRotated(1, 1, {1}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_90);
+    frame.rotate(ui::ROTATION_90);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate90_2x2) {
     TouchVideoFrame frame(2, 2, {1, 2, 3, 4}, TIMESTAMP);
     TouchVideoFrame frameRotated(2, 2, {2, 4, 1, 3}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_90);
+    frame.rotate(ui::ROTATION_90);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate90_3x2) {
     TouchVideoFrame frame(3, 2, {1, 2, 3, 4, 5, 6}, TIMESTAMP);
     TouchVideoFrame frameRotated(2, 3, {2, 4, 6, 1, 3, 5}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_90);
+    frame.rotate(ui::ROTATION_90);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate90_3x2_4times) {
     TouchVideoFrame frame(3, 2, {1, 2, 3, 4, 5, 6}, TIMESTAMP);
     TouchVideoFrame frameOriginal(3, 2, {1, 2, 3, 4, 5, 6}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_90);
-    frame.rotate(DISPLAY_ORIENTATION_90);
-    frame.rotate(DISPLAY_ORIENTATION_90);
-    frame.rotate(DISPLAY_ORIENTATION_90);
+    frame.rotate(ui::ROTATION_90);
+    frame.rotate(ui::ROTATION_90);
+    frame.rotate(ui::ROTATION_90);
+    frame.rotate(ui::ROTATION_90);
     ASSERT_EQ(frame, frameOriginal);
 }
 
@@ -113,43 +113,43 @@
 TEST(TouchVideoFrame, Rotate180_0x0) {
     TouchVideoFrame frame(0, 0, {}, TIMESTAMP);
     TouchVideoFrame frameRotated(0, 0, {}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_180);
+    frame.rotate(ui::ROTATION_180);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate180_1x1) {
     TouchVideoFrame frame(1, 1, {1}, TIMESTAMP);
     TouchVideoFrame frameRotated(1, 1, {1}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_180);
+    frame.rotate(ui::ROTATION_180);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate180_2x2) {
     TouchVideoFrame frame(2, 2, {1, 2, 3, 4}, TIMESTAMP);
     TouchVideoFrame frameRotated(2, 2, {4, 3, 2, 1}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_180);
+    frame.rotate(ui::ROTATION_180);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate180_3x2) {
     TouchVideoFrame frame(3, 2, {1, 2, 3, 4, 5, 6}, TIMESTAMP);
     TouchVideoFrame frameRotated(3, 2, {6, 5, 4, 3, 2, 1}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_180);
+    frame.rotate(ui::ROTATION_180);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate180_3x2_2times) {
     TouchVideoFrame frame(3, 2, {1, 2, 3, 4, 5, 6}, TIMESTAMP);
     TouchVideoFrame frameOriginal(3, 2, {1, 2, 3, 4, 5, 6}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_180);
-    frame.rotate(DISPLAY_ORIENTATION_180);
+    frame.rotate(ui::ROTATION_180);
+    frame.rotate(ui::ROTATION_180);
     ASSERT_EQ(frame, frameOriginal);
 }
 
 TEST(TouchVideoFrame, Rotate180_3x3) {
     TouchVideoFrame frame(3, 3, {1, 2, 3, 4, 5, 6, 7, 8, 9}, TIMESTAMP);
     TouchVideoFrame frameRotated(3, 3, {9, 8, 7, 6, 5, 4, 3, 2, 1}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_180);
+    frame.rotate(ui::ROTATION_180);
     ASSERT_EQ(frame, frameRotated);
 }
 
@@ -158,38 +158,38 @@
 TEST(TouchVideoFrame, Rotate270_0x0) {
     TouchVideoFrame frame(0, 0, {}, TIMESTAMP);
     TouchVideoFrame frameRotated(0, 0, {}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_270);
+    frame.rotate(ui::ROTATION_270);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate270_1x1) {
     TouchVideoFrame frame(1, 1, {1}, TIMESTAMP);
     TouchVideoFrame frameRotated(1, 1, {1}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_270);
+    frame.rotate(ui::ROTATION_270);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate270_2x2) {
     TouchVideoFrame frame(2, 2, {1, 2, 3, 4}, TIMESTAMP);
     TouchVideoFrame frameRotated(2, 2, {3, 1, 4, 2}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_270);
+    frame.rotate(ui::ROTATION_270);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate270_3x2) {
     TouchVideoFrame frame(3, 2, {1, 2, 3, 4, 5, 6}, TIMESTAMP);
     TouchVideoFrame frameRotated(2, 3, {5, 3, 1, 6, 4, 2}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_270);
+    frame.rotate(ui::ROTATION_270);
     ASSERT_EQ(frame, frameRotated);
 }
 
 TEST(TouchVideoFrame, Rotate270_3x2_4times) {
     TouchVideoFrame frame(3, 2, {1, 2, 3, 4, 5, 6}, TIMESTAMP);
     TouchVideoFrame frameOriginal(3, 2, {1, 2, 3, 4, 5, 6}, TIMESTAMP);
-    frame.rotate(DISPLAY_ORIENTATION_270);
-    frame.rotate(DISPLAY_ORIENTATION_270);
-    frame.rotate(DISPLAY_ORIENTATION_270);
-    frame.rotate(DISPLAY_ORIENTATION_270);
+    frame.rotate(ui::ROTATION_270);
+    frame.rotate(ui::ROTATION_270);
+    frame.rotate(ui::ROTATION_270);
+    frame.rotate(ui::ROTATION_270);
     ASSERT_EQ(frame, frameOriginal);
 }
 
diff --git a/libs/input/tests/VelocityTracker_test.cpp b/libs/input/tests/VelocityTracker_test.cpp
index a87b187..ae72109 100644
--- a/libs/input/tests/VelocityTracker_test.cpp
+++ b/libs/input/tests/VelocityTracker_test.cpp
@@ -16,9 +16,10 @@
 
 #define LOG_TAG "VelocityTracker_test"
 
+#include <math.h>
 #include <array>
 #include <chrono>
-#include <math.h>
+#include <limits>
 
 #include <android-base/stringprintf.h>
 #include <attestation/HmacKeyManager.h>
@@ -26,7 +27,9 @@
 #include <gui/constants.h>
 #include <input/VelocityTracker.h>
 
-using namespace std::chrono_literals;
+using std::literals::chrono_literals::operator""ms;
+using std::literals::chrono_literals::operator""ns;
+using std::literals::chrono_literals::operator""us;
 using android::base::StringPrintf;
 
 namespace android {
@@ -62,8 +65,15 @@
     EXPECT_NEAR(actual, target, tolerance);
 }
 
-static void checkVelocity(float Vactual, float Vtarget) {
-    EXPECT_NEAR_BY_FRACTION(Vactual, Vtarget, VELOCITY_TOLERANCE);
+static void checkVelocity(std::optional<float> Vactual, std::optional<float> Vtarget) {
+    if (Vactual != std::nullopt) {
+        if (Vtarget == std::nullopt) {
+            FAIL() << "Expected no velocity, but found " << *Vactual;
+        }
+        EXPECT_NEAR_BY_FRACTION(*Vactual, *Vtarget, VELOCITY_TOLERANCE);
+    } else if (Vtarget != std::nullopt) {
+        FAIL() << "Expected velocity, but found no velocity";
+    }
 }
 
 static void checkCoefficient(float actual, float target) {
@@ -74,6 +84,8 @@
     float x;
     float y;
 
+    bool isResampled = false;
+
     /**
      * If both values are NAN, then this is considered to be an empty entry (no pointer data).
      * If only one of the values is NAN, this is still a valid entry,
@@ -84,7 +96,7 @@
     }
 };
 
-struct MotionEventEntry {
+struct PlanarMotionEventEntry {
     std::chrono::nanoseconds eventTime;
     std::vector<Position> positions;
 };
@@ -133,15 +145,43 @@
     return AMOTION_EVENT_ACTION_MOVE;
 }
 
-static std::vector<MotionEvent> createMotionEventStream(
-        const std::vector<MotionEventEntry>& motions) {
+static std::vector<MotionEvent> createAxisScrollMotionEventStream(
+        const std::vector<std::pair<std::chrono::nanoseconds, float>>& motions) {
+    std::vector<MotionEvent> events;
+    for (const auto& [timeStamp, value] : motions) {
+        EXPECT_TRUE(!isnan(value)) << "The scroll value must not be NaN";
+
+        PointerCoords coords[1];
+        coords[0].setAxisValue(AMOTION_EVENT_AXIS_SCROLL, value);
+
+        PointerProperties properties[1];
+        properties[0].id = DEFAULT_POINTER_ID;
+
+        MotionEvent event;
+        ui::Transform identityTransform;
+        event.initialize(InputEvent::nextId(), /*deviceId=*/5, AINPUT_SOURCE_ROTARY_ENCODER,
+                         ADISPLAY_ID_NONE, INVALID_HMAC, AMOTION_EVENT_ACTION_SCROLL,
+                         /*actionButton=*/0, /*flags=*/0, AMOTION_EVENT_EDGE_FLAG_NONE, AMETA_NONE,
+                         /*buttonState=*/0, MotionClassification::NONE, identityTransform,
+                         /*xPrecision=*/0, /*yPrecision=*/0, AMOTION_EVENT_INVALID_CURSOR_POSITION,
+                         AMOTION_EVENT_INVALID_CURSOR_POSITION, identityTransform, /*downTime=*/0,
+                         timeStamp.count(), /*pointerCount=*/1, properties, coords);
+
+        events.emplace_back(event);
+    }
+
+    return events;
+}
+
+static std::vector<MotionEvent> createTouchMotionEventStream(
+        const std::vector<PlanarMotionEventEntry>& motions) {
     if (motions.empty()) {
         ADD_FAILURE() << "Need at least 1 sample to create a MotionEvent. Received empty vector.";
     }
 
     std::vector<MotionEvent> events;
     for (size_t i = 0; i < motions.size(); i++) {
-        const MotionEventEntry& entry = motions[i];
+        const PlanarMotionEventEntry& entry = motions[i];
         BitSet32 pointers = getValidPointers(entry.positions);
         const uint32_t pointerCount = pointers.count();
 
@@ -149,12 +189,11 @@
         if (i == 0) {
             action = AMOTION_EVENT_ACTION_DOWN;
             EXPECT_EQ(1U, pointerCount) << "First event should only have 1 pointer";
-        } else if (i == motions.size() - 1) {
-            EXPECT_EQ(1U, pointerCount) << "Last event should only have 1 pointer";
+        } else if ((i == motions.size() - 1) && pointerCount == 1) {
             action = AMOTION_EVENT_ACTION_UP;
         } else {
-            const MotionEventEntry& previousEntry = motions[i-1];
-            const MotionEventEntry& nextEntry = motions[i+1];
+            const PlanarMotionEventEntry& previousEntry = motions[i-1];
+            const PlanarMotionEventEntry& nextEntry = motions[i+1];
             action = resolveAction(previousEntry.positions, entry.positions, nextEntry.positions);
         }
 
@@ -166,25 +205,26 @@
 
             coords[pointerIndex].clear();
             // We are treating column positions as pointerId
-            EXPECT_TRUE(entry.positions[pointerId].isValid()) <<
-                    "The entry at pointerId must be valid";
-            coords[pointerIndex].setAxisValue(AMOTION_EVENT_AXIS_X, entry.positions[pointerId].x);
-            coords[pointerIndex].setAxisValue(AMOTION_EVENT_AXIS_Y, entry.positions[pointerId].y);
+            const Position& position = entry.positions[pointerId];
+            EXPECT_TRUE(position.isValid()) << "The entry at " << pointerId << " must be valid";
+            coords[pointerIndex].setAxisValue(AMOTION_EVENT_AXIS_X, position.x);
+            coords[pointerIndex].setAxisValue(AMOTION_EVENT_AXIS_Y, position.y);
+            coords[pointerIndex].isResampled = position.isResampled;
 
             properties[pointerIndex].id = pointerId;
-            properties[pointerIndex].toolType = AMOTION_EVENT_TOOL_TYPE_FINGER;
+            properties[pointerIndex].toolType = ToolType::FINGER;
             pointerIndex++;
         }
         EXPECT_EQ(pointerIndex, pointerCount);
 
         MotionEvent event;
         ui::Transform identityTransform;
-        event.initialize(InputEvent::nextId(), 0 /*deviceId*/, AINPUT_SOURCE_TOUCHSCREEN,
-                         DISPLAY_ID, INVALID_HMAC, action, 0 /*actionButton*/, 0 /*flags*/,
-                         AMOTION_EVENT_EDGE_FLAG_NONE, AMETA_NONE, 0 /*buttonState*/,
-                         MotionClassification::NONE, identityTransform, 0 /*xPrecision*/,
-                         0 /*yPrecision*/, AMOTION_EVENT_INVALID_CURSOR_POSITION,
-                         AMOTION_EVENT_INVALID_CURSOR_POSITION, identityTransform, 0 /*downTime*/,
+        event.initialize(InputEvent::nextId(), /*deviceId=*/0, AINPUT_SOURCE_TOUCHSCREEN,
+                         DISPLAY_ID, INVALID_HMAC, action, /*actionButton=*/0, /*flags=*/0,
+                         AMOTION_EVENT_EDGE_FLAG_NONE, AMETA_NONE, /*buttonState=*/0,
+                         MotionClassification::NONE, identityTransform, /*xPrecision=*/0,
+                         /*yPrecision=*/0, AMOTION_EVENT_INVALID_CURSOR_POSITION,
+                         AMOTION_EVENT_INVALID_CURSOR_POSITION, identityTransform, /*downTime=*/0,
                          entry.eventTime.count(), pointerCount, properties, coords);
 
         events.emplace_back(event);
@@ -193,54 +233,254 @@
     return events;
 }
 
-static void computeAndCheckVelocity(const VelocityTracker::Strategy strategy,
-                                    const std::vector<MotionEventEntry>& motions, int32_t axis,
-                                    float targetVelocity) {
+static std::optional<float> computePlanarVelocity(
+        const VelocityTracker::Strategy strategy,
+        const std::vector<PlanarMotionEventEntry>& motions, int32_t axis,
+        uint32_t pointerId = DEFAULT_POINTER_ID) {
     VelocityTracker vt(strategy);
-    float Vx, Vy;
 
-    std::vector<MotionEvent> events = createMotionEventStream(motions);
+    std::vector<MotionEvent> events = createTouchMotionEventStream(motions);
     for (MotionEvent event : events) {
         vt.addMovement(&event);
     }
 
-    vt.getVelocity(DEFAULT_POINTER_ID, &Vx, &Vy);
+    return vt.getVelocity(axis, pointerId);
+}
 
+static std::vector<MotionEvent> createMotionEventStream(
+        int32_t axis, const std::vector<std::pair<std::chrono::nanoseconds, float>>& motion) {
     switch (axis) {
-    case AMOTION_EVENT_AXIS_X:
-        checkVelocity(Vx, targetVelocity);
-        break;
-    case AMOTION_EVENT_AXIS_Y:
-        checkVelocity(Vy, targetVelocity);
-        break;
-    default:
-        FAIL() << "Axis must be either AMOTION_EVENT_AXIS_X or AMOTION_EVENT_AXIS_Y";
+        case AMOTION_EVENT_AXIS_SCROLL:
+            return createAxisScrollMotionEventStream(motion);
+        default:
+            ADD_FAILURE() << "Axis " << axis << " is not supported";
+            return {};
     }
 }
 
-static void computeAndCheckQuadraticEstimate(const std::vector<MotionEventEntry>& motions,
-        const std::array<float, 3>& coefficients) {
+static std::optional<float> computeVelocity(
+        const VelocityTracker::Strategy strategy,
+        const std::vector<std::pair<std::chrono::nanoseconds, float>>& motions, int32_t axis) {
+    VelocityTracker vt(strategy);
+
+    for (const MotionEvent& event : createMotionEventStream(axis, motions)) {
+        vt.addMovement(&event);
+    }
+
+    return vt.getVelocity(axis, DEFAULT_POINTER_ID);
+}
+
+static void computeAndCheckVelocity(const VelocityTracker::Strategy strategy,
+                                    const std::vector<PlanarMotionEventEntry>& motions,
+                                    int32_t axis, std::optional<float> targetVelocity,
+                                    uint32_t pointerId = DEFAULT_POINTER_ID) {
+    checkVelocity(computePlanarVelocity(strategy, motions, axis, pointerId), targetVelocity);
+}
+
+static void computeAndCheckAxisScrollVelocity(
+        const VelocityTracker::Strategy strategy,
+        const std::vector<std::pair<std::chrono::nanoseconds, float>>& motions,
+        std::optional<float> targetVelocity) {
+    checkVelocity(computeVelocity(strategy, motions, AMOTION_EVENT_AXIS_SCROLL), targetVelocity);
+}
+
+static void computeAndCheckQuadraticEstimate(const std::vector<PlanarMotionEventEntry>& motions,
+                                             const std::array<float, 3>& coefficients) {
     VelocityTracker vt(VelocityTracker::Strategy::LSQ2);
-    std::vector<MotionEvent> events = createMotionEventStream(motions);
+    std::vector<MotionEvent> events = createTouchMotionEventStream(motions);
     for (MotionEvent event : events) {
         vt.addMovement(&event);
     }
-    VelocityTracker::Estimator estimator;
-    EXPECT_TRUE(vt.getEstimator(0, &estimator));
+    std::optional<VelocityTracker::Estimator> estimatorX = vt.getEstimator(AMOTION_EVENT_AXIS_X, 0);
+    std::optional<VelocityTracker::Estimator> estimatorY = vt.getEstimator(AMOTION_EVENT_AXIS_Y, 0);
+    EXPECT_TRUE(estimatorX);
+    EXPECT_TRUE(estimatorY);
     for (size_t i = 0; i< coefficients.size(); i++) {
-        checkCoefficient(estimator.xCoeff[i], coefficients[i]);
-        checkCoefficient(estimator.yCoeff[i], coefficients[i]);
+        checkCoefficient((*estimatorX).coeff[i], coefficients[i]);
+        checkCoefficient((*estimatorY).coeff[i], coefficients[i]);
+    }
+}
+
+/*
+ *================== VelocityTracker tests that do not require test motion data ====================
+ */
+TEST(SimpleVelocityTrackerTest, TestSupportedAxis) {
+    // Test up to the maximum possible axis value, plus 3 more values for extra protection against
+    // out-of-range input. The "3" is arbitrary; it just covers a few values beyond the max.
+    for (int32_t axis = 0; axis <= AMOTION_EVENT_MAXIMUM_VALID_AXIS_VALUE + 3; axis++) {
+        switch (axis) {
+            case AMOTION_EVENT_AXIS_X:
+            case AMOTION_EVENT_AXIS_Y:
+            case AMOTION_EVENT_AXIS_SCROLL:
+                EXPECT_TRUE(VelocityTracker::isAxisSupported(axis)) << axis << " is supported";
+                break;
+            default:
+                EXPECT_FALSE(VelocityTracker::isAxisSupported(axis)) << axis << " is NOT supported";
+        }
     }
 }
 
 /*
  * ================== VelocityTracker tests generated manually =====================================
  */
+TEST_F(VelocityTrackerTest, TestDefaultStrategiesForPlanarAxes) {
+    std::vector<PlanarMotionEventEntry> motions = {{10ms, {{2, 4}}},
+                                                   {20ms, {{4, 12}}},
+                                                   {30ms, {{6, 20}}},
+                                                   {40ms, {{10, 30}}}};
+
+    EXPECT_EQ(computePlanarVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_X),
+              computePlanarVelocity(VelocityTracker::Strategy::DEFAULT, motions,
+                                    AMOTION_EVENT_AXIS_X));
+    EXPECT_EQ(computePlanarVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_Y),
+              computePlanarVelocity(VelocityTracker::Strategy::DEFAULT, motions,
+                                    AMOTION_EVENT_AXIS_Y));
+}
+
+TEST_F(VelocityTrackerTest, TestComputedVelocity) {
+    VelocityTracker::ComputedVelocity computedVelocity;
+
+    computedVelocity.addVelocity(AMOTION_EVENT_AXIS_X, /*id=*/0, /*velocity=*/200);
+    computedVelocity.addVelocity(AMOTION_EVENT_AXIS_X, /*id=*/26U, /*velocity=*/400);
+    computedVelocity.addVelocity(AMOTION_EVENT_AXIS_X, /*id=*/27U, /*velocity=*/650);
+    computedVelocity.addVelocity(AMOTION_EVENT_AXIS_X, MAX_POINTER_ID, /*velocity=*/750);
+    computedVelocity.addVelocity(AMOTION_EVENT_AXIS_Y, /*id=*/0, /*velocity=*/1000);
+    computedVelocity.addVelocity(AMOTION_EVENT_AXIS_Y, /*id=*/26U, /*velocity=*/2000);
+    computedVelocity.addVelocity(AMOTION_EVENT_AXIS_Y, /*id=*/27U, /*velocity=*/3000);
+    computedVelocity.addVelocity(AMOTION_EVENT_AXIS_Y, MAX_POINTER_ID, /*velocity=*/4000);
+
+    // Check the axes/indices with velocity.
+    EXPECT_EQ(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, /*id=*/0U)), 200);
+    EXPECT_EQ(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, /*id=*/26U)), 400);
+    EXPECT_EQ(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, /*id=*/27U)), 650);
+    EXPECT_EQ(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, MAX_POINTER_ID)), 750);
+    EXPECT_EQ(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_Y, /*id=*/0U)), 1000);
+    EXPECT_EQ(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_Y, /*id=*/26U)), 2000);
+    EXPECT_EQ(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_Y, /*id=*/27U)), 3000);
+    EXPECT_EQ(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_Y, MAX_POINTER_ID)), 4000);
+    for (uint32_t id = 0; id <= MAX_POINTER_ID; id++) {
+        // Since no data was added for AXIS_SCROLL, expect empty value for the axis for any id.
+        EXPECT_FALSE(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_SCROLL, id))
+                << "Empty scroll data expected at id=" << id;
+        if (id == 0 || id == 26U || id == 27U || id == MAX_POINTER_ID) {
+            // Already checked above; continue.
+            continue;
+        }
+        // No data was added to X/Y for this id, expect empty value.
+        EXPECT_FALSE(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, id))
+                << "Empty X data expected at id=" << id;
+        EXPECT_FALSE(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_Y, id))
+                << "Empty Y data expected at id=" << id;
+    }
+    // Out-of-bounds ids should yield empty values.
+    EXPECT_FALSE(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, -1));
+    EXPECT_FALSE(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, MAX_POINTER_ID + 1));
+}
+
+/**
+ * For a single pointer, the resampled data is ignored.
+ */
+TEST_F(VelocityTrackerTest, SinglePointerResampledData) {
+    std::vector<PlanarMotionEventEntry> motions = {{10ms, {{1, 2}}},
+                                                   {20ms, {{2, 4}}},
+                                                   {30ms, {{3, 6}}},
+                                                   {35ms, {{30, 60, .isResampled = true}}},
+                                                   {40ms, {{4, 8}}}};
+
+    computeAndCheckVelocity(VelocityTracker::Strategy::DEFAULT, motions, AMOTION_EVENT_AXIS_X, 100);
+    computeAndCheckVelocity(VelocityTracker::Strategy::DEFAULT, motions, AMOTION_EVENT_AXIS_Y, 200);
+}
+
+/**
+ * For multiple pointers, the resampled data is ignored on a per-pointer basis. If a certain pointer
+ * does not have a resampled value, all of the points are used.
+ */
+TEST_F(VelocityTrackerTest, MultiPointerResampledData) {
+    std::vector<PlanarMotionEventEntry> motions = {
+            {0ms, {{0, 0}}},
+            {10ms, {{1, 0}, {1, 0}}},
+            {20ms, {{2, 0}, {2, 0}}},
+            {30ms, {{3, 0}, {3, 0}}},
+            {35ms, {{30, 0, .isResampled = true}, {30, 0}}},
+            {40ms, {{4, 0}, {4, 0}}},
+            {45ms, {{5, 0}}}, // ACTION_UP
+    };
+
+    // The sample at t=35ms breaks the trend. It's marked as resampled for the first pointer, so
+    // it should be ignored, and that pointer's velocity should follow the linear trend. For the
+    // second pointer it's not resampled, so the outlier is included and produces a non-linear
+    // (much larger) velocity.
+    computeAndCheckVelocity(VelocityTracker::Strategy::DEFAULT, motions, AMOTION_EVENT_AXIS_X, 100,
+                            /*pointerId=*/0);
+    computeAndCheckVelocity(VelocityTracker::Strategy::DEFAULT, motions, AMOTION_EVENT_AXIS_X, 3455,
+                            /*pointerId=*/1);
+}
+
+TEST_F(VelocityTrackerTest, TestGetComputedVelocity) {
+    std::vector<PlanarMotionEventEntry> motions = {
+            {235089067457000ns, {{528.00, 0}}}, {235089084684000ns, {{527.00, 0}}},
+            {235089093349000ns, {{527.00, 0}}}, {235089095677625ns, {{527.00, 0}}},
+            {235089101859000ns, {{527.00, 0}}}, {235089110378000ns, {{528.00, 0}}},
+            {235089112497111ns, {{528.25, 0}}}, {235089118760000ns, {{531.00, 0}}},
+            {235089126686000ns, {{535.00, 0}}}, {235089129316820ns, {{536.33, 0}}},
+            {235089135199000ns, {{540.00, 0}}}, {235089144297000ns, {{546.00, 0}}},
+            {235089146136443ns, {{547.21, 0}}}, {235089152923000ns, {{553.00, 0}}},
+            {235089160784000ns, {{559.00, 0}}}, {235089162955851ns, {{560.66, 0}}},
+            {235089162955851ns, {{560.66, 0}}}, // ACTION_UP
+    };
+    VelocityTracker vt(VelocityTracker::Strategy::IMPULSE);
+    std::vector<MotionEvent> events = createTouchMotionEventStream(motions);
+    for (const MotionEvent& event : events) {
+        vt.addMovement(&event);
+    }
+
+    float maxFloat = std::numeric_limits<float>::max();
+    VelocityTracker::ComputedVelocity computedVelocity;
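+    // A units value of 1000 requests velocity in position units per second (a value of 1 would
+    // give units per millisecond).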
+    computedVelocity = vt.getComputedVelocity(/*units=*/1000, maxFloat);
+    checkVelocity(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, DEFAULT_POINTER_ID)),
+                  764.345703);
+
+    // Expect X velocity to be scaled with respect to the provided units.
+    computedVelocity = vt.getComputedVelocity(/*units=*/1000000, maxFloat);
+    checkVelocity(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, DEFAULT_POINTER_ID)),
+                  764345.703);
+
+    // Expect X velocity to be clamped by provided max velocity.
+    computedVelocity = vt.getComputedVelocity(/*units=*/1000000, 1000);
+    checkVelocity(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, DEFAULT_POINTER_ID)), 1000);
+
+    // All 0 data for Y; expect 0 velocity.
+    EXPECT_EQ(*(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_Y, DEFAULT_POINTER_ID)), 0);
+
+    // No data for scroll-axis; expect empty velocity.
+    EXPECT_FALSE(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_SCROLL, DEFAULT_POINTER_ID));
+}
+
+TEST_F(VelocityTrackerTest, TestApiInteractionsWithNoMotionEvents) {
+    VelocityTracker vt(VelocityTracker::Strategy::DEFAULT);
+
+    EXPECT_FALSE(vt.getVelocity(AMOTION_EVENT_AXIS_X, DEFAULT_POINTER_ID));
+
+    EXPECT_FALSE(vt.getEstimator(AMOTION_EVENT_AXIS_X, DEFAULT_POINTER_ID));
+
+    VelocityTracker::ComputedVelocity computedVelocity = vt.getComputedVelocity(1000, 1000);
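+    // With no movements added, no pointer should report a velocity on any axis.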
+    for (uint32_t id = 0; id <= MAX_POINTER_ID; id++) {
+        EXPECT_FALSE(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_X, id));
+        EXPECT_FALSE(computedVelocity.getVelocity(AMOTION_EVENT_AXIS_Y, id));
+    }
+
+    EXPECT_EQ(-1, vt.getActivePointerId());
+
+    // Make sure that the clearing functions execute without an issue.
+    vt.clearPointer(7U);
+    vt.clear();
+}
+
 TEST_F(VelocityTrackerTest, ThreePointsPositiveVelocityTest) {
     // Same coordinate is reported 2 times in a row
     // It is difficult to determine the correct answer here, but at least the direction
     // of the reported velocity should be positive.
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
             {0ms, {{273, 0}}},
             {12585us, {{293, 0}}},
             {14730us, {{293, 0}}},
@@ -252,7 +492,7 @@
 
 TEST_F(VelocityTrackerTest, ThreePointsZeroVelocityTest) {
     // Same coordinate is reported 3 times in a row
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
             {0ms, {{293, 0}}},
             {6132us, {{293, 0}}},
             {11283us, {{293, 0}}},
@@ -264,7 +504,7 @@
 
 TEST_F(VelocityTrackerTest, ThreePointsLinearVelocityTest) {
     // Fixed velocity at 5 points per 10 milliseconds
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
             {0ms, {{0, 0}}}, {10ms, {{5, 0}}}, {20ms, {{10, 0}}}, {20ms, {{10, 0}}}, // ACTION_UP
     };
     computeAndCheckVelocity(VelocityTracker::Strategy::IMPULSE, motions, AMOTION_EVENT_AXIS_X, 500);
@@ -288,7 +528,7 @@
 // --------------- Recorded by hand on swordfish ---------------------------------------------------
 TEST_F(VelocityTrackerTest, SwordfishFlingDown) {
     // Recording of a fling on Swordfish that could cause a fling in the wrong direction
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 0ms, {{271, 96}} },
         { 16071042ns, {{269.786346, 106.922775}} },
         { 35648403ns, {{267.983063, 156.660034}} },
@@ -323,7 +563,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingUpSlow1) {
     // Sailfish - fling up - slow - 1
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235089067457000ns, {{528.00, 983.00}} },
         { 235089084684000ns, {{527.00, 981.00}} },
         { 235089093349000ns, {{527.00, 977.00}} },
@@ -355,7 +595,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingUpSlow2) {
     // Sailfish - fling up - slow - 2
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235110560704000ns, {{522.00, 1107.00}} },
         { 235110575764000ns, {{522.00, 1107.00}} },
         { 235110584385000ns, {{522.00, 1107.00}} },
@@ -384,7 +624,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingUpSlow3) {
     // Sailfish - fling up - slow - 3
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 792536237000ns, {{580.00, 1317.00}} },
         { 792541538987ns, {{580.63, 1311.94}} },
         { 792544613000ns, {{581.00, 1309.00}} },
@@ -418,7 +658,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingUpFaster1) {
     // Sailfish - fling up - faster - 1
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235160420675000ns, {{610.00, 1042.00}} },
         { 235160428220000ns, {{609.00, 1026.00}} },
         { 235160436544000ns, {{609.00, 1024.00}} },
@@ -452,7 +692,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingUpFaster2) {
     // Sailfish - fling up - faster - 2
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 847153808000ns, {{576.00, 1264.00}} },
         { 847171174000ns, {{576.00, 1262.00}} },
         { 847179640000ns, {{576.00, 1257.00}} },
@@ -478,7 +718,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingUpFaster3) {
     // Sailfish - fling up - faster - 3
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235200532789000ns, {{507.00, 1084.00}} },
         { 235200549221000ns, {{507.00, 1083.00}} },
         { 235200557841000ns, {{507.00, 1081.00}} },
@@ -504,7 +744,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingUpFast1) {
     // Sailfish - fling up - fast - 1
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 920922149000ns, {{561.00, 1412.00}} },
         { 920930185000ns, {{559.00, 1377.00}} },
         { 920930262463ns, {{558.98, 1376.66}} },
@@ -533,7 +773,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingUpFast2) {
     // Sailfish - fling up - fast - 2
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235247153233000ns, {{518.00, 1168.00}} },
         { 235247170452000ns, {{517.00, 1167.00}} },
         { 235247178908000ns, {{515.00, 1159.00}} },
@@ -556,7 +796,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingUpFast3) {
     // Sailfish - fling up - fast - 3
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235302568736000ns, {{529.00, 1167.00}} },
         { 235302576644000ns, {{523.00, 1140.00}} },
         { 235302579395063ns, {{520.91, 1130.61}} },
@@ -577,7 +817,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingDownSlow1) {
     // Sailfish - fling down - slow - 1
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235655749552755ns, {{582.00, 432.49}} },
         { 235655750638000ns, {{582.00, 433.00}} },
         { 235655758865000ns, {{582.00, 440.00}} },
@@ -611,7 +851,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingDownSlow2) {
     // Sailfish - fling down - slow - 2
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235671152083370ns, {{485.24, 558.28}} },
         { 235671154126000ns, {{485.00, 559.00}} },
         { 235671162497000ns, {{484.00, 566.00}} },
@@ -645,7 +885,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingDownSlow3) {
     // Sailfish - fling down - slow - 3
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 170983201000ns, {{557.00, 533.00}} },
         { 171000668000ns, {{556.00, 534.00}} },
         { 171007359750ns, {{554.73, 535.27}} },
@@ -672,7 +912,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingDownFaster1) {
     // Sailfish - fling down - faster - 1
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235695280333000ns, {{558.00, 451.00}} },
         { 235695283971237ns, {{558.43, 454.45}} },
         { 235695289038000ns, {{559.00, 462.00}} },
@@ -702,7 +942,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingDownFaster2) {
     // Sailfish - fling down - faster - 2
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235709624766000ns, {{535.00, 579.00}} },
         { 235709642256000ns, {{534.00, 580.00}} },
         { 235709643350278ns, {{533.94, 580.06}} },
@@ -733,7 +973,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingDownFaster3) {
     // Sailfish - fling down - faster - 3
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235727628927000ns, {{540.00, 440.00}} },
         { 235727636810000ns, {{537.00, 454.00}} },
         { 235727646176000ns, {{536.00, 454.00}} },
@@ -762,7 +1002,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingDownFast1) {
     // Sailfish - fling down - fast - 1
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235762352849000ns, {{467.00, 286.00}} },
         { 235762360250000ns, {{443.00, 344.00}} },
         { 235762362787412ns, {{434.77, 363.89}} },
@@ -783,7 +1023,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingDownFast2) {
     // Sailfish - fling down - fast - 2
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 235772487188000ns, {{576.00, 204.00}} },
         { 235772495159000ns, {{553.00, 236.00}} },
         { 235772503568000ns, {{551.00, 240.00}} },
@@ -804,7 +1044,7 @@
 
 TEST_F(VelocityTrackerTest, SailfishFlingDownFast3) {
     // Sailfish - fling down - fast - 3
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 507650295000ns, {{628.00, 233.00}} },
         { 507658234000ns, {{605.00, 269.00}} },
         { 507666784000ns, {{601.00, 274.00}} },
@@ -830,12 +1070,12 @@
 /**
  * ================== Multiple pointers ============================================================
  *
- * Three fingers quickly tap the screen. Since this is a tap, the velocities should be zero.
+ * Three fingers quickly tap the screen. Since this is a tap, the velocities should be empty.
  * If the events with POINTER_UP or POINTER_DOWN are not handled correctly (these should not be
  * part of the fitted data), this can cause large velocity values to be reported instead.
  */
 TEST_F(VelocityTrackerTest, LeastSquaresVelocityTrackerStrategyEstimator_ThreeFingerTap) {
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 0us,      {{1063, 1128}, {NAN, NAN}, {NAN, NAN}} },
         { 10800us,  {{1063, 1128}, {682, 1318}, {NAN, NAN}} }, // POINTER_DOWN
         { 10800us,  {{1063, 1128}, {682, 1318}, {397, 1747}} }, // POINTER_DOWN
@@ -844,12 +1084,78 @@
         { 272700us, {{1063, 1128}, {NAN, NAN}, {NAN, NAN}} },
     };
 
-    // Velocity should actually be zero, but we expect 0.016 here instead.
-    // This is close enough to zero, and is likely caused by division by a very small number.
-    computeAndCheckVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_X, -0.016);
-    computeAndCheckVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_Y, -0.016);
-    computeAndCheckVelocity(VelocityTracker::Strategy::IMPULSE, motions, AMOTION_EVENT_AXIS_X, 0);
-    computeAndCheckVelocity(VelocityTracker::Strategy::IMPULSE, motions, AMOTION_EVENT_AXIS_Y, 0);
+    computeAndCheckVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_X,
+                            std::nullopt);
+    computeAndCheckVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_Y,
+                            std::nullopt);
+    computeAndCheckVelocity(VelocityTracker::Strategy::IMPULSE, motions, AMOTION_EVENT_AXIS_X,
+                            std::nullopt);
+    computeAndCheckVelocity(VelocityTracker::Strategy::IMPULSE, motions, AMOTION_EVENT_AXIS_Y,
+                            std::nullopt);
+}
+
+/**
+ * ================= Pointer liftoff ===============================================================
+ */
+
+/**
+ * The last movement of a pointer is always ACTION_POINTER_UP or ACTION_UP. If there's a short delay
+ * between the last ACTION_MOVE and the next ACTION_POINTER_UP or ACTION_UP, velocity should not be
+ * affected by the liftoff.
+ */
+TEST_F(VelocityTrackerTest, ShortDelayBeforeActionUp) {
+    std::vector<PlanarMotionEventEntry> motions = {
+            {0ms, {{10, 0}}}, {10ms, {{20, 0}}}, {20ms, {{30, 0}}}, {30ms, {{30, 0}}}, // ACTION_UP
+    };
+    computeAndCheckVelocity(VelocityTracker::Strategy::IMPULSE, motions, AMOTION_EVENT_AXIS_X,
+                            1000);
+    computeAndCheckVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_X, 1000);
+}
+
+/**
+ * The last movement of a single pointer is ACTION_UP. If there's a long delay between the last
+ * ACTION_MOVE and the final ACTION_UP, velocity should be reported as empty because the pointer
+ * should be assumed to have stopped.
+ */
+TEST_F(VelocityTrackerTest, LongDelayBeforeActionUp) {
+    std::vector<PlanarMotionEventEntry> motions = {
+            {0ms, {{10, 0}}},
+            {10ms, {{20, 0}}},
+            {20ms, {{30, 0}}},
+            {3000ms, {{30, 0}}}, // ACTION_UP
+    };
+    computeAndCheckVelocity(VelocityTracker::Strategy::IMPULSE, motions, AMOTION_EVENT_AXIS_X,
+                            std::nullopt);
+    computeAndCheckVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_X,
+                            std::nullopt);
+}
+
+/**
+ * The last movement of a pointer is always ACTION_POINTER_UP or ACTION_UP. If there's a long delay
+ * before the ACTION_POINTER_UP event, the movement should be assumed to have stopped.
+ * The final velocity should be reported as empty for all pointers.
+ */
+TEST_F(VelocityTrackerTest, LongDelayBeforeActionPointerUp) {
+    std::vector<PlanarMotionEventEntry> motions = {
+            {0ms, {{10, 0}}},
+            {10ms, {{20, 0}, {100, 0}}},
+            {20ms, {{30, 0}, {200, 0}}},
+            {30ms, {{30, 0}, {300, 0}}},
+            {40ms, {{30, 0}, {400, 0}}},
+            {3000ms, {{30, 0}}}, // ACTION_POINTER_UP
+    };
+    computeAndCheckVelocity(VelocityTracker::Strategy::IMPULSE, motions, AMOTION_EVENT_AXIS_X,
+                            std::nullopt, /*pointerId=*/0);
+    computeAndCheckVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_X,
+                            std::nullopt, /*pointerId=*/0);
+    computeAndCheckVelocity(VelocityTracker::Strategy::IMPULSE, motions, AMOTION_EVENT_AXIS_X,
+                            std::nullopt, /*pointerId=*/1);
+    computeAndCheckVelocity(VelocityTracker::Strategy::LSQ2, motions, AMOTION_EVENT_AXIS_X,
+                            std::nullopt, /*pointerId=*/1);
 }
 
 /**
@@ -878,7 +1184,7 @@
  * In the test, we would convert these coefficients to (0*(1E3)^0, 0*(1E3)^1, 1*(1E3)^2).
  */
 TEST_F(VelocityTrackerTest, LeastSquaresVelocityTrackerStrategyEstimator_Constant) {
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 0ms, {{1, 1}} }, // 0 s
         { 1ms, {{1, 1}} }, // 0.001 s
         { 2ms, {{1, 1}} }, // 0.002 s
@@ -896,7 +1202,7 @@
  * Straight line y = x :: the constant and quadratic coefficients are zero.
  */
 TEST_F(VelocityTrackerTest, LeastSquaresVelocityTrackerStrategyEstimator_Linear) {
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 0ms, {{-2, -2}} },
         { 1ms, {{-1, -1}} },
         { 2ms, {{-0, -0}} },
@@ -914,7 +1220,7 @@
  * Parabola
  */
 TEST_F(VelocityTrackerTest, LeastSquaresVelocityTrackerStrategyEstimator_Parabolic) {
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 0ms, {{1, 1}} },
         { 1ms, {{4, 4}} },
         { 2ms, {{8, 8}} },
@@ -932,7 +1238,7 @@
  * Parabola
  */
 TEST_F(VelocityTrackerTest, LeastSquaresVelocityTrackerStrategyEstimator_Parabolic2) {
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 0ms, {{1, 1}} },
         { 1ms, {{4, 4}} },
         { 2ms, {{9, 9}} },
@@ -950,7 +1256,7 @@
  * Parabola :: y = x^2 :: the constant and linear coefficients are zero.
  */
 TEST_F(VelocityTrackerTest, LeastSquaresVelocityTrackerStrategyEstimator_Parabolic3) {
-    std::vector<MotionEventEntry> motions = {
+    std::vector<PlanarMotionEventEntry> motions = {
         { 0ms, {{4, 4}} },
         { 1ms, {{1, 1}} },
         { 2ms, {{0, 0}} },
@@ -964,4 +1270,114 @@
     computeAndCheckQuadraticEstimate(motions, std::array<float, 3>({0, 0E3, 1E6}));
 }
 
+// Recorded by hand on sailfish, but only the diffs are taken to test cumulative axis velocity.
+TEST_F(VelocityTrackerTest, AxisScrollVelocity) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {
+            {235089067457000ns, 0.00}, {235089084684000ns, -1.00}, {235089093349000ns, 0.00},
+            {235089095677625ns, 0.00}, {235089101859000ns, 0.00},  {235089110378000ns, 0.00},
+            {235089112497111ns, 0.25}, {235089118760000ns, 1.75},  {235089126686000ns, 4.00},
+            {235089129316820ns, 1.33}, {235089135199000ns, 3.67},  {235089144297000ns, 6.00},
+            {235089146136443ns, 1.21}, {235089152923000ns, 5.79},  {235089160784000ns, 6.00},
+            {235089162955851ns, 1.66},
+    };
+
+    computeAndCheckAxisScrollVelocity(VelocityTracker::Strategy::IMPULSE, motions, {764.345703});
+}
+
+// --------------- Recorded by hand on a Wear OS device using a rotating side button ---------------
+TEST_F(VelocityTrackerTest, AxisScrollVelocity_ScrollDown) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {
+            {224598065152ns, -0.050100}, {224621871104ns, -0.133600}, {224645464064ns, -0.551100},
+            {224669171712ns, -0.801600}, {224687063040ns, -1.035400}, {224706691072ns, -0.484300},
+            {224738213888ns, -0.334000}, {224754401280ns, -0.083500},
+    };
+
+    computeAndCheckAxisScrollVelocity(VelocityTracker::Strategy::IMPULSE, motions, {-27.86});
+}
+
+TEST_F(VelocityTrackerTest, AxisScrollVelocity_ScrollUp) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {
+            {269606010880ns, 0.050100}, {269626064896ns, 0.217100}, {269641973760ns, 0.267200},
+            {269658079232ns, 0.267200}, {269674217472ns, 0.267200}, {269690683392ns, 0.367400},
+            {269706133504ns, 0.551100}, {269722173440ns, 0.501000},
+    };
+
+    computeAndCheckAxisScrollVelocity(VelocityTracker::Strategy::IMPULSE, motions, {31.92});
+}
+
+TEST_F(VelocityTrackerTest, AxisScrollVelocity_ScrollDown_ThenUp_ThenDown) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {
+            {2580534001664ns, -0.033400}, {2580549992448ns, -0.133600},
+            {2580566769664ns, -0.250500}, {2580581974016ns, -0.183700},
+            {2580597964800ns, -0.267200}, {2580613955584ns, -0.551100},
+            {2580635189248ns, -0.601200}, {2580661927936ns, -0.450900},
+            {2580683161600ns, -0.417500}, {2580705705984ns, -0.150300},
+            {2580722745344ns, -0.016700}, {2580786446336ns, 0.050100},
+            {2580801912832ns, 0.150300},  {2580822360064ns, 0.300600},
+            {2580838088704ns, 0.300600},  {2580854341632ns, 0.400800},
+            {2580869808128ns, 0.517700},  {2580886061056ns, 0.501000},
+            {2580905984000ns, 0.350700},  {2580921974784ns, 0.350700},
+            {2580937965568ns, 0.066800},  {2580974665728ns, 0.016700},
+            {2581034434560ns, -0.066800}, {2581049901056ns, -0.116900},
+            {2581070610432ns, -0.317300}, {2581086076928ns, -0.200400},
+            {2581101805568ns, -0.233800}, {2581118058496ns, -0.417500},
+            {2581134049280ns, -0.417500}, {2581150040064ns, -0.367400},
+            {2581166030848ns, -0.267200}, {2581181759488ns, -0.150300},
+            {2581199847424ns, -0.066800},
+    };
+
+    computeAndCheckAxisScrollVelocity(VelocityTracker::Strategy::IMPULSE, motions, {-9.73});
+}
+
+// ------------------------------- Hand-generated test cases --------------------------------------
+TEST_F(VelocityTrackerTest, TestDefaultStrategyForAxisScroll) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {
+            {10ms, 20},
+            {20ms, 25},
+            {30ms, 50},
+            {40ms, 100},
+    };
+
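+    // For AMOTION_EVENT_AXIS_SCROLL, the DEFAULT strategy is expected to resolve to IMPULSE, so
+    // both computations should produce the same velocity.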
+    EXPECT_EQ(computeVelocity(VelocityTracker::Strategy::IMPULSE, motions,
+                              AMOTION_EVENT_AXIS_SCROLL),
+              computeVelocity(VelocityTracker::Strategy::DEFAULT, motions,
+                              AMOTION_EVENT_AXIS_SCROLL));
+}
+
+TEST_F(VelocityTrackerTest, AxisScrollVelocity_SimilarDifferentialValues) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {{1ns, 2.12},  {3ns, 2.12},
+                                                                       {7ns, 2.12},  {8ns, 2.12},
+                                                                       {15ns, 2.12}, {18ns, 2.12}};
+
+    computeAndCheckAxisScrollVelocity(VelocityTracker::Strategy::IMPULSE, motions, {1690236059.86});
+}
+
+TEST_F(VelocityTrackerTest, AxisScrollVelocity_OnlyTwoValues) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {{1ms, 5}, {2ms, 10}};
+
+    computeAndCheckAxisScrollVelocity(VelocityTracker::Strategy::IMPULSE, motions, {10000});
+}
+
+TEST_F(VelocityTrackerTest, AxisScrollVelocity_ConstantVelocity) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {{1ms, 20}, {2ms, 20},
+                                                                       {3ms, 20}, {4ms, 20},
+                                                                       {5ms, 20}, {6ms, 20}};
+
+    computeAndCheckAxisScrollVelocity(VelocityTracker::Strategy::IMPULSE, motions, {20000});
+}
+
+TEST_F(VelocityTrackerTest, AxisScrollVelocity_NoMotion) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {{1ns, 0}, {2ns, 0},
+                                                                       {3ns, 0}, {4ns, 0},
+                                                                       {5ns, 0}, {6ns, 0}};
+
+    computeAndCheckAxisScrollVelocity(VelocityTracker::Strategy::IMPULSE, motions, {0});
+}
+
+TEST_F(VelocityTrackerTest, AxisScrollVelocity_NoData) {
+    std::vector<std::pair<std::chrono::nanoseconds, float>> motions = {};
+
+    computeAndCheckAxisScrollVelocity(VelocityTracker::Strategy::IMPULSE, motions, std::nullopt);
+}
+
 } // namespace android
diff --git a/libs/input/tests/VerifiedInputEvent_test.cpp b/libs/input/tests/VerifiedInputEvent_test.cpp
index f2b59ea..277d74d 100644
--- a/libs/input/tests/VerifiedInputEvent_test.cpp
+++ b/libs/input/tests/VerifiedInputEvent_test.cpp
@@ -23,10 +23,10 @@
 
 static KeyEvent getKeyEventWithFlags(int32_t flags) {
     KeyEvent event;
-    event.initialize(InputEvent::nextId(), 2 /*deviceId*/, AINPUT_SOURCE_GAMEPAD,
+    event.initialize(InputEvent::nextId(), /*deviceId=*/2, AINPUT_SOURCE_GAMEPAD,
                      ADISPLAY_ID_DEFAULT, INVALID_HMAC, AKEY_EVENT_ACTION_DOWN, flags,
-                     AKEYCODE_BUTTON_X, 121 /*scanCode*/, AMETA_ALT_ON, 1 /*repeatCount*/,
-                     1000 /*downTime*/, 2000 /*eventTime*/);
+                     AKEYCODE_BUTTON_X, /*scanCode=*/121, AMETA_ALT_ON, /*repeatCount=*/1,
+                     /*downTime=*/1000, /*eventTime=*/2000);
     return event;
 }
 
@@ -44,12 +44,12 @@
     ui::Transform transform;
     transform.set({2, 0, 4, 0, 3, 5, 0, 0, 1});
     ui::Transform identity;
-    event.initialize(InputEvent::nextId(), 0 /*deviceId*/, AINPUT_SOURCE_MOUSE, ADISPLAY_ID_DEFAULT,
-                     INVALID_HMAC, AMOTION_EVENT_ACTION_DOWN, 0 /*actionButton*/, flags,
-                     AMOTION_EVENT_EDGE_FLAG_NONE, AMETA_NONE, 0 /*buttonState*/,
-                     MotionClassification::NONE, transform, 0.1 /*xPrecision*/, 0.2 /*yPrecision*/,
-                     280 /*xCursorPosition*/, 540 /*yCursorPosition*/, identity, 100 /*downTime*/,
-                     200 /*eventTime*/, pointerCount, pointerProperties, pointerCoords);
+    event.initialize(InputEvent::nextId(), /*deviceId=*/0, AINPUT_SOURCE_MOUSE, ADISPLAY_ID_DEFAULT,
+                     INVALID_HMAC, AMOTION_EVENT_ACTION_DOWN, /*actionButton=*/0, flags,
+                     AMOTION_EVENT_EDGE_FLAG_NONE, AMETA_NONE, /*buttonState=*/0,
+                     MotionClassification::NONE, transform, /*xPrecision=*/0.1, /*yPrecision=*/0.2,
+                     /*xCursorPosition=*/280, /*yCursorPosition=*/540, identity, /*downTime=*/100,
+                     /*eventTime=*/200, pointerCount, pointerProperties, pointerCoords);
     return event;
 }
 
diff --git a/libs/input/tests/data/bad_axis_label.kl b/libs/input/tests/data/bad_axis_label.kl
new file mode 100644
index 0000000..6897380
--- /dev/null
+++ b/libs/input/tests/data/bad_axis_label.kl
@@ -0,0 +1,17 @@
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This KL should not be loaded because the axis label is not valid
+
+axis 0 DEFINITELY_NOT_AXIS_LABEL
diff --git a/libs/input/tests/data/bad_led_label.kl b/libs/input/tests/data/bad_led_label.kl
new file mode 100644
index 0000000..293c0d2
--- /dev/null
+++ b/libs/input/tests/data/bad_led_label.kl
@@ -0,0 +1,17 @@
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This KL should not be loaded because the LED label is invalid
+
+led 0 ABSOLUTELY_NOT_LED_LABEL
diff --git a/libs/nativedisplay/AChoreographer.cpp b/libs/nativedisplay/AChoreographer.cpp
index 8240b08..8f005a5 100644
--- a/libs/nativedisplay/AChoreographer.cpp
+++ b/libs/nativedisplay/AChoreographer.cpp
@@ -14,12 +14,9 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "Choreographer"
-//#define LOG_NDEBUG 0
-
 #include <android-base/thread_annotations.h>
-#include <gui/DisplayEventDispatcher.h>
-#include <gui/ISurfaceComposer.h>
+#include <android/gui/ISurfaceComposer.h>
+#include <gui/Choreographer.h>
 #include <jni.h>
 #include <private/android/choreographer.h>
 #include <utils/Looper.h>
@@ -31,444 +28,9 @@
 #include <queue>
 #include <thread>
 
-namespace {
-struct {
-    // Global JVM that is provided by zygote
-    JavaVM* jvm = nullptr;
-    struct {
-        jclass clazz;
-        jmethodID getInstance;
-        jmethodID registerNativeChoreographerForRefreshRateCallbacks;
-        jmethodID unregisterNativeChoreographerForRefreshRateCallbacks;
-    } displayManagerGlobal;
-} gJni;
+#undef LOG_TAG
+#define LOG_TAG "AChoreographer"
 
-// Gets the JNIEnv* for this thread, and performs one-off initialization if we
-// have never retrieved a JNIEnv* pointer before.
-JNIEnv* getJniEnv() {
-    if (gJni.jvm == nullptr) {
-        ALOGW("AChoreographer: No JVM provided!");
-        return nullptr;
-    }
-
-    JNIEnv* env = nullptr;
-    if (gJni.jvm->GetEnv((void**)&env, JNI_VERSION_1_4) != JNI_OK) {
-        ALOGD("Attaching thread to JVM for AChoreographer");
-        JavaVMAttachArgs args = {JNI_VERSION_1_4, "AChoreographer_env", NULL};
-        jint attachResult = gJni.jvm->AttachCurrentThreadAsDaemon(&env, (void*)&args);
-        if (attachResult != JNI_OK) {
-            ALOGE("Unable to attach thread. Error: %d", attachResult);
-            return nullptr;
-        }
-    }
-    if (env == nullptr) {
-        ALOGW("AChoreographer: No JNI env available!");
-    }
-    return env;
-}
-
-inline const char* toString(bool value) {
-    return value ? "true" : "false";
-}
-} // namespace
-
-namespace android {
-using gui::VsyncEventData;
-
-struct FrameCallback {
-    AChoreographer_frameCallback callback;
-    AChoreographer_frameCallback64 callback64;
-    AChoreographer_vsyncCallback vsyncCallback;
-    void* data;
-    nsecs_t dueTime;
-
-    inline bool operator<(const FrameCallback& rhs) const {
-        // Note that this is intentionally flipped because we want callbacks due sooner to be at
-        // the head of the queue
-        return dueTime > rhs.dueTime;
-    }
-};
-
-struct RefreshRateCallback {
-    AChoreographer_refreshRateCallback callback;
-    void* data;
-    bool firstCallbackFired = false;
-};
-
-class Choreographer;
-
-/**
- * Implementation of AChoreographerFrameCallbackData.
- */
-struct ChoreographerFrameCallbackDataImpl {
-    int64_t frameTimeNanos{0};
-
-    VsyncEventData vsyncEventData;
-
-    const Choreographer* choreographer;
-};
-
-struct {
-    std::mutex lock;
-    std::vector<Choreographer*> ptrs GUARDED_BY(lock);
-    std::map<AVsyncId, int64_t> startTimes GUARDED_BY(lock);
-    bool registeredToDisplayManager GUARDED_BY(lock) = false;
-
-    std::atomic<nsecs_t> mLastKnownVsync = -1;
-} gChoreographers;
-
-class Choreographer : public DisplayEventDispatcher, public MessageHandler {
-public:
-    explicit Choreographer(const sp<Looper>& looper) EXCLUDES(gChoreographers.lock);
-    void postFrameCallbackDelayed(AChoreographer_frameCallback cb,
-                                  AChoreographer_frameCallback64 cb64,
-                                  AChoreographer_vsyncCallback vsyncCallback, void* data,
-                                  nsecs_t delay);
-    void registerRefreshRateCallback(AChoreographer_refreshRateCallback cb, void* data)
-            EXCLUDES(gChoreographers.lock);
-    void unregisterRefreshRateCallback(AChoreographer_refreshRateCallback cb, void* data);
-    // Drains the queue of pending vsync periods and dispatches refresh rate
-    // updates to callbacks.
-    // The assumption is that this method is only called on a single
-    // processing thread, either by looper or by AChoreographer_handleEvents
-    void handleRefreshRateUpdates();
-    void scheduleLatestConfigRequest();
-
-    enum {
-        MSG_SCHEDULE_CALLBACKS = 0,
-        MSG_SCHEDULE_VSYNC = 1,
-        MSG_HANDLE_REFRESH_RATE_UPDATES = 2,
-    };
-    virtual void handleMessage(const Message& message) override;
-
-    static Choreographer* getForThread();
-    virtual ~Choreographer() override EXCLUDES(gChoreographers.lock);
-    int64_t getFrameInterval() const;
-    bool inCallback() const;
-
-private:
-    Choreographer(const Choreographer&) = delete;
-
-    void dispatchVsync(nsecs_t timestamp, PhysicalDisplayId displayId, uint32_t count,
-                       VsyncEventData vsyncEventData) override;
-    void dispatchHotplug(nsecs_t timestamp, PhysicalDisplayId displayId, bool connected) override;
-    void dispatchModeChanged(nsecs_t timestamp, PhysicalDisplayId displayId, int32_t modeId,
-                             nsecs_t vsyncPeriod) override;
-    void dispatchNullEvent(nsecs_t, PhysicalDisplayId) override;
-    void dispatchFrameRateOverrides(nsecs_t timestamp, PhysicalDisplayId displayId,
-                                    std::vector<FrameRateOverride> overrides) override;
-
-    void scheduleCallbacks();
-
-    ChoreographerFrameCallbackDataImpl createFrameCallbackData(nsecs_t timestamp) const;
-    void registerStartTime() const;
-
-    std::mutex mLock;
-    // Protected by mLock
-    std::priority_queue<FrameCallback> mFrameCallbacks;
-    std::vector<RefreshRateCallback> mRefreshRateCallbacks;
-
-    nsecs_t mLatestVsyncPeriod = -1;
-    VsyncEventData mLastVsyncEventData;
-    bool mInCallback = false;
-
-    const sp<Looper> mLooper;
-    const std::thread::id mThreadId;
-
-    // Approximation of num_threads_using_choreographer * num_frames_of_history with leeway.
-    static constexpr size_t kMaxStartTimes = 250;
-};
-
-static thread_local Choreographer* gChoreographer;
-Choreographer* Choreographer::getForThread() {
-    if (gChoreographer == nullptr) {
-        sp<Looper> looper = Looper::getForThread();
-        if (!looper.get()) {
-            ALOGW("No looper prepared for thread");
-            return nullptr;
-        }
-        gChoreographer = new Choreographer(looper);
-        status_t result = gChoreographer->initialize();
-        if (result != OK) {
-            ALOGW("Failed to initialize");
-            return nullptr;
-        }
-    }
-    return gChoreographer;
-}
-
-Choreographer::Choreographer(const sp<Looper>& looper)
-      : DisplayEventDispatcher(looper, ISurfaceComposer::VsyncSource::eVsyncSourceApp),
-        mLooper(looper),
-        mThreadId(std::this_thread::get_id()) {
-    std::lock_guard<std::mutex> _l(gChoreographers.lock);
-    gChoreographers.ptrs.push_back(this);
-}
-
-Choreographer::~Choreographer() {
-    std::lock_guard<std::mutex> _l(gChoreographers.lock);
-    gChoreographers.ptrs.erase(std::remove_if(gChoreographers.ptrs.begin(),
-                                              gChoreographers.ptrs.end(),
-                                              [=](Choreographer* c) { return c == this; }),
-                               gChoreographers.ptrs.end());
-    // Only poke DisplayManagerGlobal to unregister if we previously registered
-    // callbacks.
-    if (gChoreographers.ptrs.empty() && gChoreographers.registeredToDisplayManager) {
-        gChoreographers.registeredToDisplayManager = false;
-        JNIEnv* env = getJniEnv();
-        if (env == nullptr) {
-            ALOGW("JNI environment is unavailable, skipping choreographer cleanup");
-            return;
-        }
-        jobject dmg = env->CallStaticObjectMethod(gJni.displayManagerGlobal.clazz,
-                                                  gJni.displayManagerGlobal.getInstance);
-        if (dmg == nullptr) {
-            ALOGW("DMS is not initialized yet, skipping choreographer cleanup");
-        } else {
-            env->CallVoidMethod(dmg,
-                                gJni.displayManagerGlobal
-                                        .unregisterNativeChoreographerForRefreshRateCallbacks);
-            env->DeleteLocalRef(dmg);
-        }
-    }
-}
-
-void Choreographer::postFrameCallbackDelayed(AChoreographer_frameCallback cb,
-                                             AChoreographer_frameCallback64 cb64,
-                                             AChoreographer_vsyncCallback vsyncCallback, void* data,
-                                             nsecs_t delay) {
-    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
-    FrameCallback callback{cb, cb64, vsyncCallback, data, now + delay};
-    {
-        std::lock_guard<std::mutex> _l{mLock};
-        mFrameCallbacks.push(callback);
-    }
-    if (callback.dueTime <= now) {
-        if (std::this_thread::get_id() != mThreadId) {
-            if (mLooper != nullptr) {
-                Message m{MSG_SCHEDULE_VSYNC};
-                mLooper->sendMessage(this, m);
-            } else {
-                scheduleVsync();
-            }
-        } else {
-            scheduleVsync();
-        }
-    } else {
-        if (mLooper != nullptr) {
-            Message m{MSG_SCHEDULE_CALLBACKS};
-            mLooper->sendMessageDelayed(delay, this, m);
-        } else {
-            scheduleCallbacks();
-        }
-    }
-}
-
-void Choreographer::registerRefreshRateCallback(AChoreographer_refreshRateCallback cb, void* data) {
-    std::lock_guard<std::mutex> _l{mLock};
-    for (const auto& callback : mRefreshRateCallbacks) {
-        // Don't re-add callbacks.
-        if (cb == callback.callback && data == callback.data) {
-            return;
-        }
-    }
-    mRefreshRateCallbacks.emplace_back(
-            RefreshRateCallback{.callback = cb, .data = data, .firstCallbackFired = false});
-    bool needsRegistration = false;
-    {
-        std::lock_guard<std::mutex> _l2(gChoreographers.lock);
-        needsRegistration = !gChoreographers.registeredToDisplayManager;
-    }
-    if (needsRegistration) {
-        JNIEnv* env = getJniEnv();
-        if (env == nullptr) {
-            ALOGW("JNI environment is unavailable, skipping registration");
-            return;
-        }
-        jobject dmg = env->CallStaticObjectMethod(gJni.displayManagerGlobal.clazz,
-                                                  gJni.displayManagerGlobal.getInstance);
-        if (dmg == nullptr) {
-            ALOGW("DMS is not initialized yet: skipping registration");
-            return;
-        } else {
-            env->CallVoidMethod(dmg,
-                                gJni.displayManagerGlobal
-                                        .registerNativeChoreographerForRefreshRateCallbacks,
-                                reinterpret_cast<int64_t>(this));
-            env->DeleteLocalRef(dmg);
-            {
-                std::lock_guard<std::mutex> _l2(gChoreographers.lock);
-                gChoreographers.registeredToDisplayManager = true;
-            }
-        }
-    } else {
-        scheduleLatestConfigRequest();
-    }
-}
-
-void Choreographer::unregisterRefreshRateCallback(AChoreographer_refreshRateCallback cb,
-                                                  void* data) {
-    std::lock_guard<std::mutex> _l{mLock};
-    mRefreshRateCallbacks.erase(std::remove_if(mRefreshRateCallbacks.begin(),
-                                               mRefreshRateCallbacks.end(),
-                                               [&](const RefreshRateCallback& callback) {
-                                                   return cb == callback.callback &&
-                                                           data == callback.data;
-                                               }),
-                                mRefreshRateCallbacks.end());
-}
-
-void Choreographer::scheduleLatestConfigRequest() {
-    if (mLooper != nullptr) {
-        Message m{MSG_HANDLE_REFRESH_RATE_UPDATES};
-        mLooper->sendMessage(this, m);
-    } else {
-        // If the looper thread is detached from Choreographer, then refresh rate
-        // changes will be handled in AChoreographer_handlePendingEvents, so we
-        // need to wake up the looper thread by writing to the write-end of the
-        // socket the looper is listening on.
-        // Fortunately, these events are small so sending packets across the
-        // socket should be atomic across processes.
-        DisplayEventReceiver::Event event;
-        event.header =
-                DisplayEventReceiver::Event::Header{DisplayEventReceiver::DISPLAY_EVENT_NULL,
-                                                    PhysicalDisplayId::fromPort(0), systemTime()};
-        injectEvent(event);
-    }
-}
-
-void Choreographer::scheduleCallbacks() {
-    const nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
-    nsecs_t dueTime;
-    {
-        std::lock_guard<std::mutex> _l{mLock};
-        // If there are no pending callbacks then don't schedule a vsync
-        if (mFrameCallbacks.empty()) {
-            return;
-        }
-        dueTime = mFrameCallbacks.top().dueTime;
-    }
-
-    if (dueTime <= now) {
-        ALOGV("choreographer %p ~ scheduling vsync", this);
-        scheduleVsync();
-        return;
-    }
-}
-
-void Choreographer::handleRefreshRateUpdates() {
-    std::vector<RefreshRateCallback> callbacks{};
-    const nsecs_t pendingPeriod = gChoreographers.mLastKnownVsync.load();
-    const nsecs_t lastPeriod = mLatestVsyncPeriod;
-    if (pendingPeriod > 0) {
-        mLatestVsyncPeriod = pendingPeriod;
-    }
-    {
-        std::lock_guard<std::mutex> _l{mLock};
-        for (auto& cb : mRefreshRateCallbacks) {
-            callbacks.push_back(cb);
-            cb.firstCallbackFired = true;
-        }
-    }
-
-    for (auto& cb : callbacks) {
-        if (!cb.firstCallbackFired || (pendingPeriod > 0 && pendingPeriod != lastPeriod)) {
-            cb.callback(pendingPeriod, cb.data);
-        }
-    }
-}
-
-// TODO(b/74619554): The PhysicalDisplayId is ignored because SF only emits VSYNC events for the
-// internal display and DisplayEventReceiver::requestNextVsync only allows requesting VSYNC for
-// the internal display implicitly.
-void Choreographer::dispatchVsync(nsecs_t timestamp, PhysicalDisplayId, uint32_t,
-                                  VsyncEventData vsyncEventData) {
-    std::vector<FrameCallback> callbacks{};
-    {
-        std::lock_guard<std::mutex> _l{mLock};
-        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
-        while (!mFrameCallbacks.empty() && mFrameCallbacks.top().dueTime < now) {
-            callbacks.push_back(mFrameCallbacks.top());
-            mFrameCallbacks.pop();
-        }
-    }
-    mLastVsyncEventData = vsyncEventData;
-    for (const auto& cb : callbacks) {
-        if (cb.vsyncCallback != nullptr) {
-            const ChoreographerFrameCallbackDataImpl frameCallbackData =
-                    createFrameCallbackData(timestamp);
-            registerStartTime();
-            mInCallback = true;
-            cb.vsyncCallback(reinterpret_cast<const AChoreographerFrameCallbackData*>(
-                                     &frameCallbackData),
-                             cb.data);
-            mInCallback = false;
-        } else if (cb.callback64 != nullptr) {
-            cb.callback64(timestamp, cb.data);
-        } else if (cb.callback != nullptr) {
-            cb.callback(timestamp, cb.data);
-        }
-    }
-}
-
-void Choreographer::dispatchHotplug(nsecs_t, PhysicalDisplayId displayId, bool connected) {
-    ALOGV("choreographer %p ~ received hotplug event (displayId=%s, connected=%s), ignoring.", this,
-          to_string(displayId).c_str(), toString(connected));
-}
-
-void Choreographer::dispatchModeChanged(nsecs_t, PhysicalDisplayId, int32_t, nsecs_t) {
-    LOG_ALWAYS_FATAL("dispatchModeChanged was called but was never registered");
-}
-
-void Choreographer::dispatchFrameRateOverrides(nsecs_t, PhysicalDisplayId,
-                                               std::vector<FrameRateOverride>) {
-    LOG_ALWAYS_FATAL("dispatchFrameRateOverrides was called but was never registered");
-}
-
-void Choreographer::dispatchNullEvent(nsecs_t, PhysicalDisplayId) {
-    ALOGV("choreographer %p ~ received null event.", this);
-    handleRefreshRateUpdates();
-}
-
-void Choreographer::handleMessage(const Message& message) {
-    switch (message.what) {
-        case MSG_SCHEDULE_CALLBACKS:
-            scheduleCallbacks();
-            break;
-        case MSG_SCHEDULE_VSYNC:
-            scheduleVsync();
-            break;
-        case MSG_HANDLE_REFRESH_RATE_UPDATES:
-            handleRefreshRateUpdates();
-            break;
-    }
-}
-
-int64_t Choreographer::getFrameInterval() const {
-    return mLastVsyncEventData.frameInterval;
-}
-
-bool Choreographer::inCallback() const {
-    return mInCallback;
-}
-
-ChoreographerFrameCallbackDataImpl Choreographer::createFrameCallbackData(nsecs_t timestamp) const {
-    return {.frameTimeNanos = timestamp,
-            .vsyncEventData = mLastVsyncEventData,
-            .choreographer = this};
-}
-
-void Choreographer::registerStartTime() const {
-    std::scoped_lock _l(gChoreographers.lock);
-    for (VsyncEventData::FrameTimeline frameTimeline : mLastVsyncEventData.frameTimelines) {
-        while (gChoreographers.startTimes.size() >= kMaxStartTimes) {
-            gChoreographers.startTimes.erase(gChoreographers.startTimes.begin());
-        }
-        gChoreographers.startTimes[frameTimeline.vsyncId] = systemTime(SYSTEM_TIME_MONOTONIC);
-    }
-}
-
-} // namespace android
 using namespace android;
 
 static inline Choreographer* AChoreographer_to_Choreographer(AChoreographer* choreographer) {
@@ -488,27 +50,12 @@
 
 // Glue for private C api
 namespace android {
-void AChoreographer_signalRefreshRateCallbacks(nsecs_t vsyncPeriod) EXCLUDES(gChoreographers.lock) {
-    std::lock_guard<std::mutex> _l(gChoreographers.lock);
-    gChoreographers.mLastKnownVsync.store(vsyncPeriod);
-    for (auto c : gChoreographers.ptrs) {
-        c->scheduleLatestConfigRequest();
-    }
+void AChoreographer_signalRefreshRateCallbacks(nsecs_t vsyncPeriod) {
+    Choreographer::signalRefreshRateCallbacks(vsyncPeriod);
 }
 
 void AChoreographer_initJVM(JNIEnv* env) {
-    env->GetJavaVM(&gJni.jvm);
-    // Now we need to find the java classes.
-    jclass dmgClass = env->FindClass("android/hardware/display/DisplayManagerGlobal");
-    gJni.displayManagerGlobal.clazz = static_cast<jclass>(env->NewGlobalRef(dmgClass));
-    gJni.displayManagerGlobal.getInstance =
-            env->GetStaticMethodID(dmgClass, "getInstance",
-                                   "()Landroid/hardware/display/DisplayManagerGlobal;");
-    gJni.displayManagerGlobal.registerNativeChoreographerForRefreshRateCallbacks =
-            env->GetMethodID(dmgClass, "registerNativeChoreographerForRefreshRateCallbacks", "()V");
-    gJni.displayManagerGlobal.unregisterNativeChoreographerForRefreshRateCallbacks =
-            env->GetMethodID(dmgClass, "unregisterNativeChoreographerForRefreshRateCallbacks",
-                             "()V");
+    Choreographer::initJVM(env);
 }
 
 AChoreographer* AChoreographer_routeGetInstance() {
@@ -583,13 +130,7 @@
 }
 
 int64_t AChoreographer_getStartTimeNanosForVsyncId(AVsyncId vsyncId) {
-    std::scoped_lock _l(gChoreographers.lock);
-    const auto iter = gChoreographers.startTimes.find(vsyncId);
-    if (iter == gChoreographers.startTimes.end()) {
-        ALOGW("Start time was not found for vsync id: %" PRId64, vsyncId);
-        return 0;
-    }
-    return iter->second;
+    return Choreographer::getStartTimeNanosForVsyncId(vsyncId);
 }
 
 } // namespace android
@@ -656,7 +197,7 @@
             AChoreographerFrameCallbackData_to_ChoreographerFrameCallbackDataImpl(data);
     LOG_ALWAYS_FATAL_IF(!frameCallbackData->choreographer->inCallback(),
                         "Data is only valid in callback");
-    return VsyncEventData::kFrameTimelinesLength;
+    return frameCallbackData->vsyncEventData.frameTimelinesLength;
 }
 size_t AChoreographerFrameCallbackData_getPreferredFrameTimelineIndex(
         const AChoreographerFrameCallbackData* data) {
@@ -672,7 +213,7 @@
             AChoreographerFrameCallbackData_to_ChoreographerFrameCallbackDataImpl(data);
     LOG_ALWAYS_FATAL_IF(!frameCallbackData->choreographer->inCallback(),
                         "Data is only valid in callback");
-    LOG_ALWAYS_FATAL_IF(index >= VsyncEventData::kFrameTimelinesLength, "Index out of bounds");
+    LOG_ALWAYS_FATAL_IF(index >= VsyncEventData::kFrameTimelinesCapacity, "Index out of bounds");
     return frameCallbackData->vsyncEventData.frameTimelines[index].vsyncId;
 }
 int64_t AChoreographerFrameCallbackData_getFrameTimelineExpectedPresentationTimeNanos(
@@ -681,7 +222,7 @@
             AChoreographerFrameCallbackData_to_ChoreographerFrameCallbackDataImpl(data);
     LOG_ALWAYS_FATAL_IF(!frameCallbackData->choreographer->inCallback(),
                         "Data is only valid in callback");
-    LOG_ALWAYS_FATAL_IF(index >= VsyncEventData::kFrameTimelinesLength, "Index out of bounds");
+    LOG_ALWAYS_FATAL_IF(index >= VsyncEventData::kFrameTimelinesCapacity, "Index out of bounds");
     return frameCallbackData->vsyncEventData.frameTimelines[index].expectedPresentationTime;
 }
 int64_t AChoreographerFrameCallbackData_getFrameTimelineDeadlineNanos(
@@ -690,7 +231,7 @@
             AChoreographerFrameCallbackData_to_ChoreographerFrameCallbackDataImpl(data);
     LOG_ALWAYS_FATAL_IF(!frameCallbackData->choreographer->inCallback(),
                         "Data is only valid in callback");
-    LOG_ALWAYS_FATAL_IF(index >= VsyncEventData::kFrameTimelinesLength, "Index out of bounds");
+    LOG_ALWAYS_FATAL_IF(index >= VsyncEventData::kFrameTimelinesCapacity, "Index out of bounds");
     return frameCallbackData->vsyncEventData.frameTimelines[index].deadlineTimestamp;
 }
 
diff --git a/libs/nativedisplay/ADisplay.cpp b/libs/nativedisplay/ADisplay.cpp
index 76b85d6..bf0805b 100644
--- a/libs/nativedisplay/ADisplay.cpp
+++ b/libs/nativedisplay/ADisplay.cpp
@@ -117,15 +117,6 @@
 #define CHECK_NOT_NULL(name) \
     LOG_ALWAYS_FATAL_IF(name == nullptr, "nullptr passed as " #name " argument");
 
-namespace {
-
-sp<IBinder> getToken(ADisplay* display) {
-    DisplayImpl* impl = reinterpret_cast<DisplayImpl*>(display);
-    return SurfaceComposerClient::getPhysicalDisplayToken(impl->id);
-}
-
-} // namespace
-
 namespace android {
 
 int ADisplay_acquirePhysicalDisplays(ADisplay*** outDisplays) {
@@ -136,19 +127,20 @@
     }
 
     std::vector<DisplayConfigImpl> modesPerDisplay[size];
+    ui::DisplayConnectionType displayConnectionTypes[size];
     int numModes = 0;
     for (int i = 0; i < size; ++i) {
-        const sp<IBinder> token = SurfaceComposerClient::getPhysicalDisplayToken(ids[i]);
-
         ui::StaticDisplayInfo staticInfo;
-        if (const status_t status = SurfaceComposerClient::getStaticDisplayInfo(token, &staticInfo);
+        if (const status_t status =
+                    SurfaceComposerClient::getStaticDisplayInfo(ids[i].value, &staticInfo);
             status != OK) {
             return status;
         }
+        displayConnectionTypes[i] = staticInfo.connectionType;
 
         ui::DynamicDisplayInfo dynamicInfo;
         if (const status_t status =
-                    SurfaceComposerClient::getDynamicDisplayInfo(token, &dynamicInfo);
+                    SurfaceComposerClient::getDynamicDisplayInfoFromId(ids[i].value, &dynamicInfo);
             status != OK) {
             return status;
         }
@@ -168,8 +160,6 @@
         }
     }
 
-    const std::optional<PhysicalDisplayId> internalId =
-            SurfaceComposerClient::getInternalDisplayId();
     ui::Dataspace defaultDataspace;
     ui::PixelFormat defaultPixelFormat;
     ui::Dataspace wcgDataspace;
@@ -201,8 +191,9 @@
 
     for (size_t i = 0; i < size; ++i) {
         const PhysicalDisplayId id = ids[i];
-        const ADisplayType type = (internalId == id) ? ADisplayType::DISPLAY_TYPE_INTERNAL
-                                                     : ADisplayType::DISPLAY_TYPE_EXTERNAL;
+        const ADisplayType type = (displayConnectionTypes[i] == ui::DisplayConnectionType::Internal)
+                ? ADisplayType::DISPLAY_TYPE_INTERNAL
+                : ADisplayType::DISPLAY_TYPE_EXTERNAL;
         const std::vector<DisplayConfigImpl>& configs = modesPerDisplay[i];
         memcpy(configData, configs.data(), sizeof(DisplayConfigImpl) * configs.size());
 
@@ -259,14 +250,15 @@
 int ADisplay_getCurrentConfig(ADisplay* display, ADisplayConfig** outConfig) {
     CHECK_NOT_NULL(display);
 
-    sp<IBinder> token = getToken(display);
     ui::DynamicDisplayInfo info;
-    if (const auto status = SurfaceComposerClient::getDynamicDisplayInfo(token, &info);
+    DisplayImpl* impl = reinterpret_cast<DisplayImpl*>(display);
+
+    if (const auto status =
+                SurfaceComposerClient::getDynamicDisplayInfoFromId(impl->id.value, &info);
         status != OK) {
         return status;
     }
 
-    DisplayImpl* impl = reinterpret_cast<DisplayImpl*>(display);
     for (size_t i = 0; i < impl->numConfigs; i++) {
         auto* config = impl->configs + i;
         if (config->id == info.activeDisplayModeId) {
diff --git a/libs/nativedisplay/Android.bp b/libs/nativedisplay/Android.bp
index 4659b96..8d8a2bc 100644
--- a/libs/nativedisplay/Android.bp
+++ b/libs/nativedisplay/Android.bp
@@ -53,6 +53,7 @@
     version_script: "libnativedisplay.map.txt",
 
     srcs: [
+        ":libgui_frame_event_aidl",
         "AChoreographer.cpp",
         "ADisplay.cpp",
         "surfacetexture/surface_texture.cpp",
diff --git a/libs/nativedisplay/libnativedisplay.map.txt b/libs/nativedisplay/libnativedisplay.map.txt
index 969d937..9172d5e 100644
--- a/libs/nativedisplay/libnativedisplay.map.txt
+++ b/libs/nativedisplay/libnativedisplay.map.txt
@@ -1,25 +1,25 @@
 LIBNATIVEDISPLAY {
   global:
-    AChoreographer_getInstance; # apex # introduced=30
-    AChoreographer_postFrameCallback; # apex # introduced=30
-    AChoreographer_postFrameCallbackDelayed; # apex # introduced=30
-    AChoreographer_postFrameCallback64; # apex # introduced=30
-    AChoreographer_postFrameCallbackDelayed64; # apex # introduced=30
-    AChoreographer_registerRefreshRateCallback; # apex # introduced=30
-    AChoreographer_unregisterRefreshRateCallback; # apex # introduced=30
-    AChoreographer_postVsyncCallback; # apex # introduced=33
-    AChoreographerFrameCallbackData_getFrameTimeNanos; # apex # introduced=33
-    AChoreographerFrameCallbackData_getFrameTimelinesLength; # apex # introduced=33
-    AChoreographerFrameCallbackData_getPreferredFrameTimelineIndex; # apex # introduced=33
-    AChoreographerFrameCallbackData_getFrameTimelineVsyncId; # apex # introduced=33
-    AChoreographerFrameCallbackData_getFrameTimelineExpectedPresentationTimeNanos; # apex # introduced=33
-    AChoreographerFrameCallbackData_getFrameTimelineDeadlineNanos; # apex # introduced=33
-    AChoreographer_create; # apex # introduced=30
-    AChoreographer_destroy; # apex # introduced=30
-    AChoreographer_getFd; # apex # introduced=30
-    AChoreographer_handlePendingEvents; # apex # introduced=30
-    ASurfaceTexture_fromSurfaceTexture; # apex # introduced=30
-    ASurfaceTexture_release; # apex # introduced=30
+    AChoreographer_getInstance; # systemapi # introduced=30
+    AChoreographer_postFrameCallback; # systemapi # introduced=30
+    AChoreographer_postFrameCallbackDelayed; # systemapi # introduced=30
+    AChoreographer_postFrameCallback64; # systemapi # introduced=30
+    AChoreographer_postFrameCallbackDelayed64; # systemapi # introduced=30
+    AChoreographer_registerRefreshRateCallback; # systemapi # introduced=30
+    AChoreographer_unregisterRefreshRateCallback; # systemapi # introduced=30
+    AChoreographer_postVsyncCallback; # systemapi # introduced=33
+    AChoreographerFrameCallbackData_getFrameTimeNanos; # systemapi # introduced=33
+    AChoreographerFrameCallbackData_getFrameTimelinesLength; # systemapi # introduced=33
+    AChoreographerFrameCallbackData_getPreferredFrameTimelineIndex; # systemapi # introduced=33
+    AChoreographerFrameCallbackData_getFrameTimelineVsyncId; # systemapi # introduced=33
+    AChoreographerFrameCallbackData_getFrameTimelineExpectedPresentationTimeNanos; # systemapi # introduced=33
+    AChoreographerFrameCallbackData_getFrameTimelineDeadlineNanos; # systemapi # introduced=33
+    AChoreographer_create; # systemapi # introduced=30
+    AChoreographer_destroy; # systemapi # introduced=30
+    AChoreographer_getFd; # systemapi # introduced=30
+    AChoreographer_handlePendingEvents; # systemapi # introduced=30
+    ASurfaceTexture_fromSurfaceTexture; # systemapi # introduced=30
+    ASurfaceTexture_release; # systemapi # introduced=30
   local:
     *;
 };
diff --git a/libs/nativewindow/AHardwareBuffer.cpp b/libs/nativewindow/AHardwareBuffer.cpp
index 2e0add5..8060705 100644
--- a/libs/nativewindow/AHardwareBuffer.cpp
+++ b/libs/nativewindow/AHardwareBuffer.cpp
@@ -360,12 +360,12 @@
         return INVALID_OPERATION;
     }
 
-    GraphicBuffer* gBuffer = new GraphicBuffer();
+    sp<GraphicBuffer> gBuffer(new GraphicBuffer());
     status_t err = gBuffer->unflatten(data, dataLen, fdData, fdCount);
     if (err != NO_ERROR) {
         return err;
     }
-    *outBuffer = AHardwareBuffer_from_GraphicBuffer(gBuffer);
+    *outBuffer = AHardwareBuffer_from_GraphicBuffer(gBuffer.get());
     // Ensure the buffer has a positive ref-count.
     AHardwareBuffer_acquire(*outBuffer);
 
@@ -715,6 +715,14 @@
     return ahardwarebuffer_format;
 }
 
+int32_t AHardwareBuffer_getDataSpace(AHardwareBuffer* buffer) {
+    GraphicBuffer* gb = AHardwareBuffer_to_GraphicBuffer(buffer);
+    auto& mapper = GraphicBufferMapper::get();
+    ui::Dataspace dataspace = ui::Dataspace::UNKNOWN;
+    mapper.getDataspace(gb->handle, &dataspace);
+    return static_cast<int32_t>(dataspace);
+}
+
 uint64_t AHardwareBuffer_convertToGrallocUsageBits(uint64_t usage) {
     using android::hardware::graphics::common::V1_1::BufferUsage;
     static_assert(AHARDWAREBUFFER_USAGE_CPU_READ_NEVER == (uint64_t)BufferUsage::CPU_READ_NEVER,
diff --git a/libs/nativewindow/ANativeWindow.cpp b/libs/nativewindow/ANativeWindow.cpp
index 73a05fc..dd5958d 100644
--- a/libs/nativewindow/ANativeWindow.cpp
+++ b/libs/nativewindow/ANativeWindow.cpp
@@ -79,27 +79,6 @@
     return res < 0 ? res : value;
 }
 
-static bool isDataSpaceValid(ANativeWindow* window, int32_t dataSpace) {
-    bool supported = false;
-    switch (dataSpace) {
-        case HAL_DATASPACE_UNKNOWN:
-        case HAL_DATASPACE_V0_SRGB:
-            return true;
-        // These data space need wide gamut support.
-        case HAL_DATASPACE_V0_SCRGB_LINEAR:
-        case HAL_DATASPACE_V0_SCRGB:
-        case HAL_DATASPACE_DISPLAY_P3:
-            native_window_get_wide_color_support(window, &supported);
-            return supported;
-        // These data space need HDR support.
-        case HAL_DATASPACE_BT2020_PQ:
-            native_window_get_hdr_support(window, &supported);
-            return supported;
-        default:
-            return false;
-    }
-}
-
 /**************************************************************************************************
  * NDK
  **************************************************************************************************/
@@ -216,11 +195,10 @@
         static_cast<int>(HAL_DATASPACE_BT2020_HLG));
     static_assert(static_cast<int>(ADATASPACE_BT2020_ITU_HLG) ==
         static_cast<int>(HAL_DATASPACE_BT2020_ITU_HLG));
-    static_assert(static_cast<int>(DEPTH) == static_cast<int>(HAL_DATASPACE_DEPTH));
-    static_assert(static_cast<int>(DYNAMIC_DEPTH) == static_cast<int>(HAL_DATASPACE_DYNAMIC_DEPTH));
+    static_assert(static_cast<int>(ADATASPACE_DEPTH) == static_cast<int>(HAL_DATASPACE_DEPTH));
+    static_assert(static_cast<int>(ADATASPACE_DYNAMIC_DEPTH) == static_cast<int>(HAL_DATASPACE_DYNAMIC_DEPTH));
 
-    if (!window || !query(window, NATIVE_WINDOW_IS_VALID) ||
-            !isDataSpaceValid(window, dataSpace)) {
+    if (!window || !query(window, NATIVE_WINDOW_IS_VALID)) {
         return -EINVAL;
     }
     return native_window_set_buffers_data_space(window,
@@ -233,6 +211,13 @@
     return query(window, NATIVE_WINDOW_DATASPACE);
 }
 
+int32_t ANativeWindow_getBuffersDefaultDataSpace(ANativeWindow* window) {
+    if (!window || !query(window, NATIVE_WINDOW_IS_VALID)) {
+        return -EINVAL;
+    }
+    return query(window, NATIVE_WINDOW_DEFAULT_DATASPACE);
+}
+
 int32_t ANativeWindow_setFrameRate(ANativeWindow* window, float frameRate, int8_t compatibility) {
     return ANativeWindow_setFrameRateWithChangeStrategy(window, frameRate, compatibility,
         ANATIVEWINDOW_CHANGE_FRAME_RATE_ONLY_IF_SEAMLESS);
diff --git a/libs/nativewindow/include-private/private/android/AHardwareBufferHelpers.h b/libs/nativewindow/include-private/private/android/AHardwareBufferHelpers.h
index ddfd1d1..6d3d295 100644
--- a/libs/nativewindow/include-private/private/android/AHardwareBufferHelpers.h
+++ b/libs/nativewindow/include-private/private/android/AHardwareBufferHelpers.h
@@ -52,6 +52,11 @@
 // convert HAL format to AHardwareBuffer format (note: this is a no-op)
 uint32_t AHardwareBuffer_convertToPixelFormat(uint32_t format);
 
+// Retrieves the dataspace from the AHardwareBuffer metadata, if the device
+// supports gralloc metadata. Returns UNKNOWN if gralloc metadata is not
+// supported.
+int32_t AHardwareBuffer_getDataSpace(AHardwareBuffer* buffer);
+
 // convert AHardwareBuffer usage bits to HAL usage bits (note: this is a no-op)
 uint64_t AHardwareBuffer_convertFromGrallocUsageBits(uint64_t usage);
 
diff --git a/libs/nativewindow/include/android/data_space.h b/libs/nativewindow/include/android/data_space.h
index 771844f..ad4cc4a 100644
--- a/libs/nativewindow/include/android/data_space.h
+++ b/libs/nativewindow/include/android/data_space.h
@@ -548,14 +548,14 @@
      *
      * This value is valid with formats HAL_PIXEL_FORMAT_Y16 and HAL_PIXEL_FORMAT_BLOB.
      */
-    DEPTH = 4096,
+    ADATASPACE_DEPTH = 4096,
 
     /**
      * ISO 16684-1:2011(E) Dynamic Depth:
      *
      * Embedded depth metadata following the dynamic depth specification.
      */
-    DYNAMIC_DEPTH = 4098
+    ADATASPACE_DYNAMIC_DEPTH = 4098
 };
 
 __END_DECLS
diff --git a/libs/nativewindow/include/android/native_window.h b/libs/nativewindow/include/android/native_window.h
index a593cd4..be6623e 100644
--- a/libs/nativewindow/include/android/native_window.h
+++ b/libs/nativewindow/include/android/native_window.h
@@ -227,6 +227,16 @@
  */
 int32_t ANativeWindow_getBuffersDataSpace(ANativeWindow* window) __INTRODUCED_IN(28);
 
+/**
+ * Get the default dataspace of the buffers in window as set by the consumer.
+ *
+ * Available since API level 34.
+ *
+ * \return the dataspace of the buffers in window; ADATASPACE_UNKNOWN is returned
+ * if the dataspace is unknown, or -EINVAL if window is invalid.
+ */
+int32_t ANativeWindow_getBuffersDefaultDataSpace(ANativeWindow* window) __INTRODUCED_IN(34);
+
 /** Compatibility value for ANativeWindow_setFrameRate. */
 enum ANativeWindow_FrameRateCompatibility {
     /**
@@ -303,6 +313,8 @@
  * You can register for changes in the refresh rate using
  * \a AChoreographer_registerRefreshRateCallback.
  *
+ * See ANativeWindow_clearFrameRate().
+ *
  * Available since API level 31.
  *
  * \param window pointer to an ANativeWindow object.
@@ -332,6 +344,41 @@
         int8_t compatibility, int8_t changeFrameRateStrategy)
         __INTRODUCED_IN(31);
 
+/**
+ * Clears the frame rate that is set for this window.
+ *
+ * This is equivalent to calling
+ * ANativeWindow_setFrameRateWithChangeStrategy(window, 0, compatibility, changeFrameRateStrategy).
+ *
+ * Usage of this API won't introduce frame rate throttling
+ * or affect other aspects of the application's frame production
+ * pipeline. However, because the system may change the display refresh rate,
+ * calls to this function may result in changes to Choreographer callback
+ * timings, and changes to the time interval at which the system releases
+ * buffers back to the application.
+ *
+ * Note that this only has an effect for windows presented on the display. If
+ * this ANativeWindow is consumed by something other than the system compositor,
+ * e.g. a media codec, this call has no effect.
+ *
+ * You can register for changes in the refresh rate using
+ * \a AChoreographer_registerRefreshRateCallback.
+ *
+ * See ANativeWindow_setFrameRateWithChangeStrategy().
+ *
+ * Available since API level 34.
+ *
+ * \param window pointer to an ANativeWindow object.
+ *
+ * \return 0 for success, -EINVAL if the window value is invalid.
+ */
+inline int32_t ANativeWindow_clearFrameRate(ANativeWindow* window)
+        __INTRODUCED_IN(__ANDROID_API_U__) {
+    return ANativeWindow_setFrameRateWithChangeStrategy(window, 0,
+            ANATIVEWINDOW_FRAME_RATE_COMPATIBILITY_DEFAULT,
+            ANATIVEWINDOW_CHANGE_FRAME_RATE_ONLY_IF_SEAMLESS);
+}
+
 #ifdef __cplusplus
 }
 #endif
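// -- Editor sketch (not part of the patch): NDK-side use of the two additions
// -- documented above (API level 34). The surrounding code that produces the
// -- ANativeWindow is assumed.
#include <android/data_space.h>
#include <android/native_window.h>
#include <errno.h>

// Adopt the consumer's default dataspace and drop any frame-rate vote
// previously set on this window.
void resetWindowHints(ANativeWindow* window) {
    const int32_t defaultDataSpace = ANativeWindow_getBuffersDefaultDataSpace(window);
    if (defaultDataSpace != -EINVAL && defaultDataSpace != ADATASPACE_UNKNOWN) {
        ANativeWindow_setBuffersDataSpace(window, defaultDataSpace);
    }
    // Equivalent to setFrameRateWithChangeStrategy(window, 0, DEFAULT, ONLY_IF_SEAMLESS).
    ANativeWindow_clearFrameRate(window);
}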
diff --git a/libs/nativewindow/include/system/window.h b/libs/nativewindow/include/system/window.h
index 86e76c4..0fee3c1 100644
--- a/libs/nativewindow/include/system/window.h
+++ b/libs/nativewindow/include/system/window.h
@@ -235,8 +235,8 @@
     NATIVE_WINDOW_ENABLE_FRAME_TIMESTAMPS         = 25,
     NATIVE_WINDOW_GET_COMPOSITOR_TIMING           = 26,
     NATIVE_WINDOW_GET_FRAME_TIMESTAMPS            = 27,
-    NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT          = 28,
-    NATIVE_WINDOW_GET_HDR_SUPPORT                 = 29,
+    /* 28, removed: NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT */
+    /* 29, removed: NATIVE_WINDOW_GET_HDR_SUPPORT */
     NATIVE_WINDOW_SET_USAGE64                     = ANATIVEWINDOW_PERFORM_SET_USAGE64,
     NATIVE_WINDOW_GET_CONSUMER_USAGE64            = 31,
     NATIVE_WINDOW_SET_BUFFERS_SMPTE2086_METADATA  = 32,
@@ -988,15 +988,34 @@
             outDequeueReadyTime, outReleaseTime);
 }
 
-static inline int native_window_get_wide_color_support(
-    struct ANativeWindow* window, bool* outSupport) {
-    return window->perform(window, NATIVE_WINDOW_GET_WIDE_COLOR_SUPPORT,
-            outSupport);
+/* Deprecated. Always returns 0 and sets *outSupport to true. Do not call. */
+static inline int native_window_get_wide_color_support(
+    struct ANativeWindow* window __UNUSED, bool* outSupport) __deprecated;
+
+/*
+   Deprecated (b/242763577): to be removed; this method should not be used.
+   Surface support should not be tied to the display.
+   Returns true since most displays should have this support.
+*/
+static inline int native_window_get_wide_color_support(
+    struct ANativeWindow* window __UNUSED, bool* outSupport) {
+    *outSupport = true;
+    return 0;
 }
 
-static inline int native_window_get_hdr_support(struct ANativeWindow* window,
+/* Deprecated. Always returns 0 and sets *outSupport to true. Do not call. */
+static inline int native_window_get_hdr_support(struct ANativeWindow* window __UNUSED,
+                                                bool* outSupport) __deprecated;
+
+/*
+   Deprecated (b/242763577): to be removed; this method should not be used.
+   Surface support should not be tied to the display.
+   Returns true since most displays should have this support.
+*/
+static inline int native_window_get_hdr_support(struct ANativeWindow* window __UNUSED,
                                                 bool* outSupport) {
-    return window->perform(window, NATIVE_WINDOW_GET_HDR_SUPPORT, outSupport);
+    *outSupport = true;
+    return 0;
 }
 
 static inline int native_window_get_consumer_usage(struct ANativeWindow* window,
@@ -1034,6 +1053,11 @@
      * This surface is ignored while choosing the refresh rate.
      */
     ANATIVEWINDOW_FRAME_RATE_NO_VOTE,
+
+    /**
+     * This surface will vote for the minimum refresh rate.
+     */
+    ANATIVEWINDOW_FRAME_RATE_MIN
 };
 
 static inline int native_window_set_frame_rate(struct ANativeWindow* window, float frameRate,
@@ -1042,13 +1066,12 @@
                            (int)compatibility, (int)changeFrameRateStrategy);
 }
 
-static inline int native_window_set_frame_timeline_info(struct ANativeWindow* window,
-                                                        uint64_t frameNumber,
-                                                        int64_t frameTimelineVsyncId,
-                                                        int32_t inputEventId,
-                                                        int64_t startTimeNanos) {
+static inline int native_window_set_frame_timeline_info(
+        struct ANativeWindow* window, uint64_t frameNumber, int64_t frameTimelineVsyncId,
+        int32_t inputEventId, int64_t startTimeNanos, int32_t useForRefreshRateSelection) {
     return window->perform(window, NATIVE_WINDOW_SET_FRAME_TIMELINE_INFO, frameNumber,
-                           frameTimelineVsyncId, inputEventId, startTimeNanos);
+                           frameTimelineVsyncId, inputEventId, startTimeNanos,
+                           useForRefreshRateSelection);
 }
 
 // ------------------------------------------------------------------------------------------------
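// -- Editor sketch (not part of the patch): a platform caller updated for the
// -- extra native_window_set_frame_timeline_info() parameter added above. The
// -- frame number, vsync id, and flag value are placeholders; the flag appears
// -- to indicate whether this frame should also feed refresh-rate selection.
#include <system/window.h>

void tagFrame(struct ANativeWindow* window, uint64_t frameNumber, int64_t vsyncId,
              int64_t startTimeNanos) {
    native_window_set_frame_timeline_info(window, frameNumber, vsyncId,
                                          /*inputEventId=*/0, startTimeNanos,
                                          /*useForRefreshRateSelection=*/0);
}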
diff --git a/libs/nativewindow/libnativewindow.map.txt b/libs/nativewindow/libnativewindow.map.txt
index d9ac568..c2fd6ef 100644
--- a/libs/nativewindow/libnativewindow.map.txt
+++ b/libs/nativewindow/libnativewindow.map.txt
@@ -23,6 +23,7 @@
     ANativeWindow_cancelBuffer; # llndk
     ANativeWindow_dequeueBuffer; # llndk
     ANativeWindow_getBuffersDataSpace; # introduced=28
+    ANativeWindow_getBuffersDefaultDataSpace; # introduced=34
     ANativeWindow_getFormat;
     ANativeWindow_getHeight;
     ANativeWindow_getLastDequeueDuration; # systemapi # introduced=30
@@ -69,6 +70,7 @@
       android::AHardwareBuffer_convertToPixelFormat*;
       android::AHardwareBuffer_convertFromGrallocUsageBits*;
       android::AHardwareBuffer_convertToGrallocUsageBits*;
+      android::AHardwareBuffer_getDataSpace*;
       android::AHardwareBuffer_to_GraphicBuffer*;
       android::AHardwareBuffer_to_ANativeWindowBuffer*;
       android::AHardwareBuffer_from_GraphicBuffer*;
diff --git a/libs/permission/AppOpsManager.cpp b/libs/permission/AppOpsManager.cpp
index baa9d75..6959274 100644
--- a/libs/permission/AppOpsManager.cpp
+++ b/libs/permission/AppOpsManager.cpp
@@ -146,6 +146,14 @@
     }
 }
 
+void AppOpsManager::startWatchingMode(int32_t op, const String16& packageName, int32_t flags,
+        const sp<IAppOpsCallback>& callback) {
+    sp<IAppOpsService> service = getService();
+    if (service != nullptr) {
+        service->startWatchingModeWithFlags(op, packageName, flags, callback);
+    }
+}
+
 void AppOpsManager::stopWatchingMode(const sp<IAppOpsCallback>& callback) {
     sp<IAppOpsService> service = getService();
     if (service != nullptr) {
diff --git a/libs/permission/IAppOpsService.cpp b/libs/permission/IAppOpsService.cpp
index d59f445..7f235a4 100644
--- a/libs/permission/IAppOpsService.cpp
+++ b/libs/permission/IAppOpsService.cpp
@@ -166,6 +166,17 @@
         }
         return reply.readBool();
     }
+
+    virtual void startWatchingModeWithFlags(int32_t op, const String16& packageName,
+        int32_t flags, const sp<IAppOpsCallback>& callback) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAppOpsService::getInterfaceDescriptor());
+        data.writeInt32(op);
+        data.writeString16(packageName);
+        data.writeInt32(flags);
+        data.writeStrongBinder(IInterface::asBinder(callback));
+        remote()->transact(START_WATCHING_MODE_WITH_FLAGS_TRANSACTION, data, &reply);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AppOpsService, "com.android.internal.app.IAppOpsService")
diff --git a/libs/permission/include/binder/AppOpsManager.h b/libs/permission/include/binder/AppOpsManager.h
index abcd527..243532b 100644
--- a/libs/permission/include/binder/AppOpsManager.h
+++ b/libs/permission/include/binder/AppOpsManager.h
@@ -151,6 +151,10 @@
         _NUM_OP = 117
     };
 
+    enum {
+        WATCH_FOREGROUND_CHANGES = 1 << 0
+    };
+
     AppOpsManager();
 
     int32_t checkOp(int32_t op, int32_t uid, const String16& callingPackage);
@@ -174,6 +178,8 @@
             const std::optional<String16>& attributionTag);
     void startWatchingMode(int32_t op, const String16& packageName,
             const sp<IAppOpsCallback>& callback);
+    void startWatchingMode(int32_t op, const String16& packageName, int32_t flags,
+            const sp<IAppOpsCallback>& callback);
     void stopWatchingMode(const sp<IAppOpsCallback>& callback);
     int32_t permissionToOpCode(const String16& permission);
     void setCameraAudioRestriction(int32_t mode);
diff --git a/libs/permission/include/binder/IAppOpsService.h b/libs/permission/include/binder/IAppOpsService.h
index 22f056b..918fcdb 100644
--- a/libs/permission/include/binder/IAppOpsService.h
+++ b/libs/permission/include/binder/IAppOpsService.h
@@ -52,6 +52,8 @@
             const String16& packageName) = 0;
     virtual void setCameraAudioRestriction(int32_t mode) = 0;
     virtual bool shouldCollectNotes(int32_t opCode) = 0;
+    virtual void startWatchingModeWithFlags(int32_t op, const String16& packageName,
+            int32_t flags, const sp<IAppOpsCallback>& callback) = 0;
 
     enum {
         CHECK_OPERATION_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
@@ -64,6 +66,7 @@
         CHECK_AUDIO_OPERATION_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION+7,
         SHOULD_COLLECT_NOTES_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION+8,
         SET_CAMERA_AUDIO_RESTRICTION_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION+9,
+        START_WATCHING_MODE_WITH_FLAGS_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION+10,
     };
 
     enum {
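// -- Editor sketch (not part of the patch): the new watching-mode overload
// -- plumbed through above. The callback is caller-provided (a BnAppOpsCallback
// -- implementation); OP_CAMERA and the package name are purely illustrative.
#include <binder/AppOpsManager.h>
#include <binder/IAppOpsCallback.h>
#include <utils/String16.h>

// Register a callback that is also notified when the watched package moves
// between foreground and background.
void watchCameraOp(const android::sp<android::IAppOpsCallback>& callback) {
    android::AppOpsManager appOps;
    appOps.startWatchingMode(android::AppOpsManager::OP_CAMERA,
                             android::String16("com.example.app"),
                             android::AppOpsManager::WATCH_FOREGROUND_CHANGES,
                             callback);
}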
diff --git a/libs/renderengine/Android.bp b/libs/renderengine/Android.bp
index f6f57dd..8d19c45 100644
--- a/libs/renderengine/Android.bp
+++ b/libs/renderengine/Android.bp
@@ -21,13 +21,15 @@
 
 cc_defaults {
     name: "librenderengine_defaults",
-    defaults: ["renderengine_defaults"],
+    defaults: [
+        "android.hardware.graphics.composer3-ndk_shared",
+        "renderengine_defaults",
+    ],
     cflags: [
         "-DGL_GLEXT_PROTOTYPES",
         "-DEGL_EGLEXT_PROTOTYPES",
     ],
     shared_libs: [
-        "android.hardware.graphics.composer3-V1-ndk",
         "libbase",
         "libcutils",
         "libEGL",
@@ -40,6 +42,7 @@
         "libsync",
         "libui",
         "libutils",
+        "libvulkan",
     ],
 
     static_libs: [
@@ -95,6 +98,7 @@
         "skia/ColorSpaces.cpp",
         "skia/SkiaRenderEngine.cpp",
         "skia/SkiaGLRenderEngine.cpp",
+        "skia/SkiaVkRenderEngine.cpp",
         "skia/debug/CaptureTimer.cpp",
         "skia/debug/CommonPool.cpp",
         "skia/debug/SkiaCapture.cpp",
@@ -107,9 +111,23 @@
     ],
 }
 
+// Used to consolidate and simplify pulling Skia and its dependencies into targets that depend on
+// librenderengine. This allows shared deps to be deduplicated (e.g. Perfetto), which doesn't seem
+// possible if libskia_renderengine is just pulled into librenderengine via whole_static_libs.
+cc_defaults {
+    name: "librenderengine_deps",
+    defaults: ["skia_renderengine_deps"],
+    static_libs: ["libskia_renderengine"],
+}
+
+// Note: if compilation fails when adding librenderengine as a dependency, try adding
+// librenderengine_deps to the defaults field of your dependent target.
 cc_library_static {
     name: "librenderengine",
-    defaults: ["librenderengine_defaults"],
+    defaults: [
+        "librenderengine_defaults",
+        "librenderengine_deps",
+    ],
     double_loadable: true,
     cflags: [
         "-fvisibility=hidden",
@@ -128,7 +146,6 @@
     include_dirs: [
         "external/skia/src/gpu",
     ],
-    whole_static_libs: ["libskia_renderengine"],
     lto: {
         thin: true,
     },
diff --git a/libs/renderengine/ExternalTexture.cpp b/libs/renderengine/ExternalTexture.cpp
index 84771c0..9eb42cd 100644
--- a/libs/renderengine/ExternalTexture.cpp
+++ b/libs/renderengine/ExternalTexture.cpp
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
+#include <log/log.h>
 #include <renderengine/RenderEngine.h>
 #include <renderengine/impl/ExternalTexture.h>
 #include <ui/GraphicBuffer.h>
-
-#include "log/log_main.h"
+#include <utils/Trace.h>
 
 namespace android::renderengine::impl {
 
 ExternalTexture::ExternalTexture(const sp<GraphicBuffer>& buffer,
                                  renderengine::RenderEngine& renderEngine, uint32_t usage)
-      : mBuffer(buffer), mRenderEngine(renderEngine) {
+      : mBuffer(buffer), mRenderEngine(renderEngine), mWritable(usage & WRITEABLE) {
     LOG_ALWAYS_FATAL_IF(buffer == nullptr,
                         "Attempted to bind a null buffer to an external texture!");
     // GLESRenderEngine has a separate texture cache for output buffers,
@@ -35,11 +35,20 @@
                  renderengine::RenderEngine::RenderEngineType::THREADED)) {
         return;
     }
-    mRenderEngine.mapExternalTextureBuffer(mBuffer, usage & WRITEABLE);
+    mRenderEngine.mapExternalTextureBuffer(mBuffer, mWritable);
 }
 
 ExternalTexture::~ExternalTexture() {
-    mRenderEngine.unmapExternalTextureBuffer(mBuffer);
+    mRenderEngine.unmapExternalTextureBuffer(std::move(mBuffer));
+}
+
+void ExternalTexture::remapBuffer() {
+    ATRACE_CALL();
+    {
+        auto buf = mBuffer;
+        mRenderEngine.unmapExternalTextureBuffer(std::move(buf));
+    }
+    mRenderEngine.mapExternalTextureBuffer(mBuffer, mWritable);
 }
 
 } // namespace android::renderengine::impl
diff --git a/libs/renderengine/RenderEngine.cpp b/libs/renderengine/RenderEngine.cpp
index c7ad058..d08c221 100644
--- a/libs/renderengine/RenderEngine.cpp
+++ b/libs/renderengine/RenderEngine.cpp
@@ -19,9 +19,11 @@
 #include <cutils/properties.h>
 #include <log/log.h>
 #include "gl/GLESRenderEngine.h"
+#include "renderengine/ExternalTexture.h"
 #include "threaded/RenderEngineThreaded.h"
 
 #include "skia/SkiaGLRenderEngine.h"
+#include "skia/SkiaVkRenderEngine.h"
 
 namespace android {
 namespace renderengine {
@@ -36,6 +38,9 @@
         case RenderEngineType::SKIA_GL:
             ALOGD("RenderEngine with SkiaGL Backend");
             return renderengine::skia::SkiaGLRenderEngine::create(args);
+        case RenderEngineType::SKIA_VK:
+            ALOGD("RenderEngine with SkiaVK Backend");
+            return renderengine::skia::SkiaVkRenderEngine::create(args);
         case RenderEngineType::SKIA_GL_THREADED: {
             ALOGD("Threaded RenderEngine with SkiaGL Backend");
             return renderengine::threaded::RenderEngineThreaded::create(
@@ -44,6 +49,13 @@
                     },
                     args.renderEngineType);
         }
+        case RenderEngineType::SKIA_VK_THREADED:
+            ALOGD("Threaded RenderEngine with SkiaVK Backend");
+            return renderengine::threaded::RenderEngineThreaded::create(
+                    [args]() {
+                        return android::renderengine::skia::SkiaVkRenderEngine::create(args);
+                    },
+                    args.renderEngineType);
         case RenderEngineType::GLES:
         default:
             ALOGD("RenderEngine with GLES Backend");
@@ -63,16 +75,29 @@
                         "output buffer not gpu writeable");
 }
 
-std::future<RenderEngineResult> RenderEngine::drawLayers(
-        const DisplaySettings& display, const std::vector<LayerSettings>& layers,
-        const std::shared_ptr<ExternalTexture>& buffer, const bool useFramebufferCache,
-        base::unique_fd&& bufferFence) {
-    const auto resultPromise = std::make_shared<std::promise<RenderEngineResult>>();
-    std::future<RenderEngineResult> resultFuture = resultPromise->get_future();
+ftl::Future<FenceResult> RenderEngine::drawLayers(const DisplaySettings& display,
+                                                  const std::vector<LayerSettings>& layers,
+                                                  const std::shared_ptr<ExternalTexture>& buffer,
+                                                  const bool useFramebufferCache,
+                                                  base::unique_fd&& bufferFence) {
+    const auto resultPromise = std::make_shared<std::promise<FenceResult>>();
+    std::future<FenceResult> resultFuture = resultPromise->get_future();
+    updateProtectedContext(layers, buffer);
     drawLayersInternal(std::move(resultPromise), display, layers, buffer, useFramebufferCache,
                        std::move(bufferFence));
     return resultFuture;
 }
 
+void RenderEngine::updateProtectedContext(const std::vector<LayerSettings>& layers,
+                                          const std::shared_ptr<ExternalTexture>& buffer) {
+    const bool needsProtectedContext =
+            (buffer && (buffer->getUsage() & GRALLOC_USAGE_PROTECTED)) ||
+            std::any_of(layers.begin(), layers.end(), [](const LayerSettings& layer) {
+                const std::shared_ptr<ExternalTexture>& buffer = layer.source.buffer.buffer;
+                return buffer && (buffer->getUsage() & GRALLOC_USAGE_PROTECTED);
+            });
+    useProtectedContext(needsProtectedContext);
+}
+
 } // namespace renderengine
 } // namespace android
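// -- Editor sketch (not part of the patch): caller-side view of the drawLayers()
// -- contract changed above. The returned ftl::Future<FenceResult> resolves to
// -- either a draw fence or a status_t error, replacing the old {status, fd}
// -- pair. The helper name is illustrative.
#include <renderengine/RenderEngine.h>
#include <ui/Fence.h>
#include <ui/FenceResult.h>

namespace android::renderengine {

void renderAndWait(RenderEngine& re, const DisplaySettings& display,
                   const std::vector<LayerSettings>& layers,
                   const std::shared_ptr<ExternalTexture>& output) {
    ftl::Future<FenceResult> future =
            re.drawLayers(display, layers, output, /*useFramebufferCache=*/false,
                          base::unique_fd());
    const FenceResult result = future.get();
    if (result.has_value()) {
        result.value()->waitForever("renderAndWait");  // block on the draw fence
    }  // otherwise result.error() carries the status_t failure
}

} // namespace android::renderengine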
diff --git a/libs/renderengine/benchmark/Android.bp b/libs/renderengine/benchmark/Android.bp
index 249fec5..55c34cd 100644
--- a/libs/renderengine/benchmark/Android.bp
+++ b/libs/renderengine/benchmark/Android.bp
@@ -24,7 +24,8 @@
 cc_benchmark {
     name: "librenderengine_bench",
     defaults: [
-        "skia_deps",
+        "android.hardware.graphics.composer3-ndk_shared",
+        "librenderengine_deps",
         "surfaceflinger_defaults",
     ],
     srcs: [
@@ -43,7 +44,6 @@
     ],
 
     shared_libs: [
-        "android.hardware.graphics.composer3-V1-ndk",
         "libbase",
         "libcutils",
         "libjnigraphics",
diff --git a/libs/renderengine/benchmark/RenderEngineBench.cpp b/libs/renderengine/benchmark/RenderEngineBench.cpp
index ead97cf..bd7b617 100644
--- a/libs/renderengine/benchmark/RenderEngineBench.cpp
+++ b/libs/renderengine/benchmark/RenderEngineBench.cpp
@@ -39,6 +39,10 @@
             return "skiaglthreaded";
         case RenderEngine::RenderEngineType::SKIA_GL:
             return "skiagl";
+        case RenderEngine::RenderEngineType::SKIA_VK:
+            return "skiavk";
+        case RenderEngine::RenderEngineType::SKIA_VK_THREADED:
+            return "skiavkthreaded";
         case RenderEngine::RenderEngineType::GLES:
         case RenderEngine::RenderEngineType::THREADED:
             LOG_ALWAYS_FATAL("GLESRenderEngine is deprecated - why time it?");
@@ -80,16 +84,26 @@
     std::once_flag once;
     std::call_once(once, []() {
         auto surfaceComposerClient = SurfaceComposerClient::getDefault();
-        auto displayToken = surfaceComposerClient->getInternalDisplayToken();
-        ui::DisplayMode displayMode;
-        if (surfaceComposerClient->getActiveDisplayMode(displayToken, &displayMode) < 0) {
-            LOG_ALWAYS_FATAL("Failed to get active display mode!");
+        auto ids = SurfaceComposerClient::getPhysicalDisplayIds();
+        LOG_ALWAYS_FATAL_IF(ids.empty(), "Failed to get any display!");
+        ui::Size resolution = ui::kEmptySize;
+        // find the largest display resolution
+        for (auto id : ids) {
+            auto displayToken = surfaceComposerClient->getPhysicalDisplayToken(id);
+            ui::DisplayMode displayMode;
+            if (surfaceComposerClient->getActiveDisplayMode(displayToken, &displayMode) < 0) {
+                LOG_ALWAYS_FATAL("Failed to get active display mode!");
+            }
+            auto tw = displayMode.resolution.width;
+            auto th = displayMode.resolution.height;
+            LOG_ALWAYS_FATAL_IF(tw <= 0 || th <= 0, "Invalid display size!");
+            if (resolution.width * resolution.height <
+                displayMode.resolution.width * displayMode.resolution.height) {
+                resolution = displayMode.resolution;
+            }
         }
-        auto w = displayMode.resolution.width;
-        auto h = displayMode.resolution.height;
-        LOG_ALWAYS_FATAL_IF(w <= 0 || h <= 0, "Invalid display size!");
-        width = static_cast<uint32_t>(w);
-        height = static_cast<uint32_t>(h);
+        width = static_cast<uint32_t>(resolution.width);
+        height = static_cast<uint32_t>(resolution.height);
     });
     return std::pair<uint32_t, uint32_t>(width, height);
 }
@@ -117,11 +131,12 @@
                                                        uint64_t extraUsageFlags = 0,
                                                        std::string name = "output") {
     return std::make_shared<
-            impl::ExternalTexture>(new GraphicBuffer(width, height, HAL_PIXEL_FORMAT_RGBA_8888, 1,
-                                                     GRALLOC_USAGE_HW_RENDER |
-                                                             GRALLOC_USAGE_HW_TEXTURE |
-                                                             extraUsageFlags,
-                                                     std::move(name)),
+            impl::ExternalTexture>(sp<GraphicBuffer>::make(width, height,
+                                                           HAL_PIXEL_FORMAT_RGBA_8888, 1u,
+                                                           GRALLOC_USAGE_HW_RENDER |
+                                                                   GRALLOC_USAGE_HW_TEXTURE |
+                                                                   extraUsageFlags,
+                                                           std::move(name)),
                                    re,
                                    impl::ExternalTexture::Usage::READABLE |
                                            impl::ExternalTexture::Usage::WRITEABLE);
@@ -158,9 +173,10 @@
     };
     auto layers = std::vector<LayerSettings>{layer};
 
-    auto [status, drawFence] =
-            re.drawLayers(display, layers, texture, kUseFrameBufferCache, base::unique_fd()).get();
-    sp<Fence> waitFence = sp<Fence>::make(std::move(drawFence));
+    sp<Fence> waitFence =
+            re.drawLayers(display, layers, texture, kUseFrameBufferCache, base::unique_fd())
+                    .get()
+                    .value();
     waitFence->waitForever(LOG_TAG);
     return texture;
 }
@@ -189,10 +205,10 @@
 
     // This loop starts and stops the timer.
     for (auto _ : benchState) {
-        auto [status, drawFence] = re.drawLayers(display, layers, outputBuffer,
-                                                 kUseFrameBufferCache, base::unique_fd())
-                                           .get();
-        sp<Fence> waitFence = sp<Fence>::make(std::move(drawFence));
+        sp<Fence> waitFence = re.drawLayers(display, layers, outputBuffer, kUseFrameBufferCache,
+                                            base::unique_fd())
+                                      .get()
+                                      .value();
         waitFence->waitForever(LOG_TAG);
     }
 
diff --git a/libs/renderengine/gl/GLESRenderEngine.cpp b/libs/renderengine/gl/GLESRenderEngine.cpp
index 6dc01b9..0d7df10 100644
--- a/libs/renderengine/gl/GLESRenderEngine.cpp
+++ b/libs/renderengine/gl/GLESRenderEngine.cpp
@@ -454,8 +454,9 @@
     mImageManager->initThread();
     mDrawingBuffer = createFramebuffer();
     sp<GraphicBuffer> buf =
-            new GraphicBuffer(1, 1, PIXEL_FORMAT_RGBA_8888, 1,
-                              GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE, "placeholder");
+            sp<GraphicBuffer>::make(1, 1, PIXEL_FORMAT_RGBA_8888, 1,
+                                    GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE,
+                                    "placeholder");
 
     const status_t err = buf->initCheck();
     if (err != OK) {
@@ -799,7 +800,7 @@
     return NO_ERROR;
 }
 
-void GLESRenderEngine::unmapExternalTextureBuffer(const sp<GraphicBuffer>& buffer) {
+void GLESRenderEngine::unmapExternalTextureBuffer(sp<GraphicBuffer>&& buffer) {
     mImageManager->releaseAsync(buffer->getId(), nullptr);
 }
 
@@ -1080,14 +1081,14 @@
 }
 
 void GLESRenderEngine::drawLayersInternal(
-        const std::shared_ptr<std::promise<RenderEngineResult>>&& resultPromise,
+        const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
         const DisplaySettings& display, const std::vector<LayerSettings>& layers,
         const std::shared_ptr<ExternalTexture>& buffer, const bool useFramebufferCache,
         base::unique_fd&& bufferFence) {
     ATRACE_CALL();
     if (layers.empty()) {
         ALOGV("Drawing empty layer stack");
-        resultPromise->set_value({NO_ERROR, base::unique_fd()});
+        resultPromise->set_value(Fence::NO_FENCE);
         return;
     }
 
@@ -1102,7 +1103,7 @@
 
     if (buffer == nullptr) {
         ALOGE("No output buffer provided. Aborting GPU composition.");
-        resultPromise->set_value({BAD_VALUE, base::unique_fd()});
+        resultPromise->set_value(base::unexpected(BAD_VALUE));
         return;
     }
 
@@ -1131,7 +1132,7 @@
             ALOGE("Failed to bind framebuffer! Aborting GPU composition for buffer (%p).",
                   buffer->getBuffer()->handle);
             checkErrors();
-            resultPromise->set_value({fbo->getStatus(), base::unique_fd()});
+            resultPromise->set_value(base::unexpected(fbo->getStatus()));
             return;
         }
         setViewportAndProjection(display.physicalDisplay, display.clip);
@@ -1143,7 +1144,7 @@
             ALOGE("Failed to prepare blur filter! Aborting GPU composition for buffer (%p).",
                   buffer->getBuffer()->handle);
             checkErrors();
-            resultPromise->set_value({status, base::unique_fd()});
+            resultPromise->set_value(base::unexpected(status));
             return;
         }
     }
@@ -1177,7 +1178,7 @@
                 ALOGE("Failed to render blur effect! Aborting GPU composition for buffer (%p).",
                       buffer->getBuffer()->handle);
                 checkErrors("Can't render first blur pass");
-                resultPromise->set_value({status, base::unique_fd()});
+                resultPromise->set_value(base::unexpected(status));
                 return;
             }
 
@@ -1200,7 +1201,7 @@
                 ALOGE("Failed to bind framebuffer! Aborting GPU composition for buffer (%p).",
                       buffer->getBuffer()->handle);
                 checkErrors("Can't bind native framebuffer");
-                resultPromise->set_value({status, base::unique_fd()});
+                resultPromise->set_value(base::unexpected(status));
                 return;
             }
 
@@ -1209,7 +1210,7 @@
                 ALOGE("Failed to render blur effect! Aborting GPU composition for buffer (%p).",
                       buffer->getBuffer()->handle);
                 checkErrors("Can't render blur filter");
-                resultPromise->set_value({status, base::unique_fd()});
+                resultPromise->set_value(base::unexpected(status));
                 return;
             }
         }
@@ -1261,7 +1262,7 @@
 
             // Do not cache protected EGLImage, protected memory is limited.
             if (gBuf->getUsage() & GRALLOC_USAGE_PROTECTED) {
-                unmapExternalTextureBuffer(gBuf);
+                unmapExternalTextureBuffer(std::move(gBuf));
             }
         }
 
@@ -1309,7 +1310,7 @@
             checkErrors();
             // Chances are, something illegal happened (either the caller passed
             // us bad parameters, or we messed up our shader generation).
-            resultPromise->set_value({INVALID_OPERATION, std::move(drawFence)});
+            resultPromise->set_value(base::unexpected(INVALID_OPERATION));
             return;
         }
         mLastDrawFence = nullptr;
@@ -1321,8 +1322,7 @@
     mPriorResourcesCleaned = false;
 
     checkErrors();
-    resultPromise->set_value({NO_ERROR, std::move(drawFence)});
-    return;
+    resultPromise->set_value(sp<Fence>::make(std::move(drawFence)));
 }
 
 void GLESRenderEngine::setViewportAndProjection(Rect viewport, Rect clip) {
diff --git a/libs/renderengine/gl/GLESRenderEngine.h b/libs/renderengine/gl/GLESRenderEngine.h
index 1d7c2ca..402ff52 100644
--- a/libs/renderengine/gl/GLESRenderEngine.h
+++ b/libs/renderengine/gl/GLESRenderEngine.h
@@ -31,6 +31,7 @@
 #include <renderengine/RenderEngine.h>
 #include <renderengine/private/Description.h>
 #include <sys/types.h>
+#include <ui/FenceResult.h>
 #include "GLShadowTexture.h"
 #include "ImageManager.h"
 
@@ -60,7 +61,7 @@
     std::future<void> primeCache() override;
     void genTextures(size_t count, uint32_t* names) override;
     void deleteTextures(size_t count, uint32_t const* names) override;
-    bool isProtected() const override { return mInProtectedContext; }
+    bool isProtected() const { return mInProtectedContext; }
     bool supportsProtectedContent() const override;
     void useProtectedContext(bool useProtectedContext) override;
     void cleanupPostRender() override;
@@ -100,9 +101,9 @@
     size_t getMaxViewportDims() const override;
     void mapExternalTextureBuffer(const sp<GraphicBuffer>& buffer, bool isRenderable)
             EXCLUDES(mRenderingMutex);
-    void unmapExternalTextureBuffer(const sp<GraphicBuffer>& buffer) EXCLUDES(mRenderingMutex);
+    void unmapExternalTextureBuffer(sp<GraphicBuffer>&& buffer) EXCLUDES(mRenderingMutex);
     bool canSkipPostRenderCleanup() const override;
-    void drawLayersInternal(const std::shared_ptr<std::promise<RenderEngineResult>>&& resultPromise,
+    void drawLayersInternal(const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
                             const DisplaySettings& display,
                             const std::vector<LayerSettings>& layers,
                             const std::shared_ptr<ExternalTexture>& buffer,
diff --git a/libs/renderengine/gl/ProgramCache.cpp b/libs/renderengine/gl/ProgramCache.cpp
index b1bfa3a..422b070 100644
--- a/libs/renderengine/gl/ProgramCache.cpp
+++ b/libs/renderengine/gl/ProgramCache.cpp
@@ -601,7 +601,7 @@
     }
 
     if (needs.hasTextureCoords()) {
-        fs << "varying vec2 outTexCoords;";
+        fs << "varying highp vec2 outTexCoords;";
     }
 
     if (needs.hasRoundedCorners()) {
diff --git a/libs/renderengine/include/renderengine/BorderRenderInfo.h b/libs/renderengine/include/renderengine/BorderRenderInfo.h
new file mode 100644
index 0000000..0ee6661
--- /dev/null
+++ b/libs/renderengine/include/renderengine/BorderRenderInfo.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include <math/mat4.h>
+#include <ui/Region.h>
+
+namespace android {
+namespace renderengine {
+
+struct BorderRenderInfo {
+    float width = 0;
+    half4 color;
+    Region combinedRegion;
+
+    bool operator==(const BorderRenderInfo& rhs) const {
+        return (width == rhs.width && color == rhs.color &&
+                combinedRegion.hasSameRects(rhs.combinedRegion));
+    }
+};
+
+} // namespace renderengine
+} // namespace android
\ No newline at end of file
diff --git a/libs/renderengine/include/renderengine/DisplaySettings.h b/libs/renderengine/include/renderengine/DisplaySettings.h
index 59ef991..8d7c13c 100644
--- a/libs/renderengine/include/renderengine/DisplaySettings.h
+++ b/libs/renderengine/include/renderengine/DisplaySettings.h
@@ -22,17 +22,25 @@
 
 #include <math/mat4.h>
 #include <renderengine/PrintMatrix.h>
+#include <renderengine/BorderRenderInfo.h>
+#include <ui/DisplayId.h>
 #include <ui/GraphicTypes.h>
 #include <ui/Rect.h>
 #include <ui/Region.h>
 #include <ui/Transform.h>
 
+#include <optional>
+
 namespace android {
 namespace renderengine {
 
 // DisplaySettings contains the settings that are applicable when drawing all
 // layers for a given display.
 struct DisplaySettings {
+    // A string containing the name of the display, along with its id, if it has
+    // one.
+    std::string namePlusId;
+
     // Rectangle describing the physical display. We will project from the
     // logical clip onto this rectangle.
     Rect physicalDisplay = Rect::INVALID_RECT;
@@ -79,18 +87,21 @@
     // Configures the rendering intent of the output display. This is used for tonemapping.
     aidl::android::hardware::graphics::composer3::RenderIntent renderIntent =
             aidl::android::hardware::graphics::composer3::RenderIntent::TONE_MAP_COLORIMETRIC;
+
+    std::vector<renderengine::BorderRenderInfo> borderInfoList;
 };
 
 static inline bool operator==(const DisplaySettings& lhs, const DisplaySettings& rhs) {
-    return lhs.physicalDisplay == rhs.physicalDisplay && lhs.clip == rhs.clip &&
-            lhs.maxLuminance == rhs.maxLuminance &&
+    return lhs.namePlusId == rhs.namePlusId && lhs.physicalDisplay == rhs.physicalDisplay &&
+            lhs.clip == rhs.clip && lhs.maxLuminance == rhs.maxLuminance &&
             lhs.currentLuminanceNits == rhs.currentLuminanceNits &&
             lhs.outputDataspace == rhs.outputDataspace &&
             lhs.colorTransform == rhs.colorTransform &&
             lhs.deviceHandlesColorTransform == rhs.deviceHandlesColorTransform &&
             lhs.orientation == rhs.orientation &&
             lhs.targetLuminanceNits == rhs.targetLuminanceNits &&
-            lhs.dimmingStage == rhs.dimmingStage && lhs.renderIntent == rhs.renderIntent;
+            lhs.dimmingStage == rhs.dimmingStage && lhs.renderIntent == rhs.renderIntent &&
+            lhs.borderInfoList == rhs.borderInfoList;
 }
 
 static const char* orientation_to_string(uint32_t orientation) {
@@ -117,6 +128,7 @@
 
 static inline void PrintTo(const DisplaySettings& settings, ::std::ostream* os) {
     *os << "DisplaySettings {";
+    *os << "\n    .display = " << settings.namePlusId;
     *os << "\n    .physicalDisplay = ";
     PrintTo(settings.physicalDisplay, os);
     *os << "\n    .clip = ";
diff --git a/libs/renderengine/include/renderengine/ExternalTexture.h b/libs/renderengine/include/renderengine/ExternalTexture.h
index 621a209..82e5d83 100644
--- a/libs/renderengine/include/renderengine/ExternalTexture.h
+++ b/libs/renderengine/include/renderengine/ExternalTexture.h
@@ -46,6 +46,8 @@
     // Retrieves the buffer that is bound to this texture.
     virtual const sp<GraphicBuffer>& getBuffer() const = 0;
 
+    virtual void remapBuffer() = 0;
+
     Rect getBounds() const {
         return {0, 0, static_cast<int32_t>(getWidth()), static_cast<int32_t>(getHeight())};
     }
diff --git a/libs/renderengine/include/renderengine/RenderEngine.h b/libs/renderengine/include/renderengine/RenderEngine.h
index 3e7f69c..0d910c9 100644
--- a/libs/renderengine/include/renderengine/RenderEngine.h
+++ b/libs/renderengine/include/renderengine/RenderEngine.h
@@ -18,6 +18,7 @@
 #define SF_RENDERENGINE_H_
 
 #include <android-base/unique_fd.h>
+#include <ftl/future.h>
 #include <math/mat4.h>
 #include <renderengine/DisplaySettings.h>
 #include <renderengine/ExternalTexture.h>
@@ -26,6 +27,7 @@
 #include <renderengine/LayerSettings.h>
 #include <stdint.h>
 #include <sys/types.h>
+#include <ui/FenceResult.h>
 #include <ui/GraphicTypes.h>
 #include <ui/Transform.h>
 
@@ -68,7 +70,6 @@
 class Mesh;
 class Texture;
 struct RenderEngineCreationArgs;
-struct RenderEngineResult;
 
 namespace threaded {
 class RenderEngineThreaded;
@@ -98,6 +99,8 @@
         THREADED = 2,
         SKIA_GL = 3,
         SKIA_GL_THREADED = 4,
+        SKIA_VK = 5,
+        SKIA_VK_THREADED = 6,
     };
 
     static std::unique_ptr<RenderEngine> create(const RenderEngineCreationArgs& args);
@@ -125,12 +128,8 @@
     // ----- BEGIN NEW INTERFACE -----
 
     // queries that are required to be thread safe
-    virtual bool isProtected() const = 0;
     virtual bool supportsProtectedContent() const = 0;
 
-    // Attempt to switch RenderEngine into and out of protectedContext mode
-    virtual void useProtectedContext(bool useProtectedContext) = 0;
-
     // Notify RenderEngine of changes to the dimensions of the active display
     // so that it can configure its internal caches accordingly.
     virtual void onActiveDisplaySizeChanged(ui::Size size) = 0;
@@ -158,12 +157,13 @@
     // parameter does nothing.
     // @param bufferFence Fence signalling that the buffer is ready to be drawn
     // to.
-    // @return A future object of RenderEngineResult struct indicating whether
-    // drawing was successful in async mode.
-    virtual std::future<RenderEngineResult> drawLayers(
-            const DisplaySettings& display, const std::vector<LayerSettings>& layers,
-            const std::shared_ptr<ExternalTexture>& buffer, const bool useFramebufferCache,
-            base::unique_fd&& bufferFence);
+    // @return A future object of FenceResult indicating whether drawing was
+    // successful in async mode.
+    virtual ftl::Future<FenceResult> drawLayers(const DisplaySettings& display,
+                                                const std::vector<LayerSettings>& layers,
+                                                const std::shared_ptr<ExternalTexture>& buffer,
+                                                const bool useFramebufferCache,
+                                                base::unique_fd&& bufferFence);
 
     // Clean-up method that should be called on the main thread after the
     // drawFence returned by drawLayers fires. This method will free up
@@ -172,9 +172,16 @@
     virtual void cleanupPostRender() = 0;
 
     virtual void cleanFramebufferCache() = 0;
-    // Returns the priority this context was actually created with. Note: this may not be
-    // the same as specified at context creation time, due to implementation limits on the
-    // number of contexts that can be created at a specific priority level in the system.
+
+    // Returns the priority this context was actually created with. Note: this
+    // may not be the same as specified at context creation time, due to
+    // implementation limits on the number of contexts that can be created at a
+    // specific priority level in the system.
+    //
+    // This should return a valid EGL context priority enum as described by
+    // https://registry.khronos.org/EGL/extensions/IMG/EGL_IMG_context_priority.txt
+    // or
+    // https://registry.khronos.org/EGL/extensions/NV/EGL_NV_context_priority_realtime.txt
     virtual int getContextPriority() = 0;
 
     // Returns true if blur was requested in the RenderEngineCreationArgs and the implementation
@@ -224,7 +231,7 @@
     // asynchronously, but the caller can expect that map/unmap calls are performed in a manner
     // that's conflict serializable, i.e. unmap a buffer should never occur before binding the
     // buffer if the caller called mapExternalTextureBuffer before calling unmap.
-    virtual void unmapExternalTextureBuffer(const sp<GraphicBuffer>& buffer) = 0;
+    virtual void unmapExternalTextureBuffer(sp<GraphicBuffer>&& buffer) = 0;
 
     // A thread safe query to determine if any post rendering cleanup is necessary.  Returning true
     // is a signal that calling the postRenderCleanup method would be a no-op and that callers can
@@ -236,8 +243,15 @@
     friend class RenderEngineTest_cleanupPostRender_cleansUpOnce_Test;
     const RenderEngineType mRenderEngineType;
 
+    // Update protectedContext mode depending on whether or not any layer has a protected buffer.
+    void updateProtectedContext(const std::vector<LayerSettings>&,
+                                const std::shared_ptr<ExternalTexture>&);
+
+    // Attempt to switch RenderEngine into and out of protectedContext mode
+    virtual void useProtectedContext(bool useProtectedContext) = 0;
+
     virtual void drawLayersInternal(
-            const std::shared_ptr<std::promise<RenderEngineResult>>&& resultPromise,
+            const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
             const DisplaySettings& display, const std::vector<LayerSettings>& layers,
             const std::shared_ptr<ExternalTexture>& buffer, const bool useFramebufferCache,
             base::unique_fd&& bufferFence) = 0;
@@ -327,13 +341,6 @@
             RenderEngine::RenderEngineType::SKIA_GL_THREADED;
 };
 
-struct RenderEngineResult {
-    // status indicates if drawing is successful
-    status_t status;
-    // drawFence will fire when the buffer has been drawn to and is ready to be examined.
-    base::unique_fd drawFence;
-};
-
 } // namespace renderengine
 } // namespace android
 
diff --git a/libs/renderengine/include/renderengine/impl/ExternalTexture.h b/libs/renderengine/include/renderengine/impl/ExternalTexture.h
index c0e24f0..d30262d 100644
--- a/libs/renderengine/include/renderengine/impl/ExternalTexture.h
+++ b/libs/renderengine/include/renderengine/impl/ExternalTexture.h
@@ -51,10 +51,12 @@
     bool hasSameBuffer(const renderengine::ExternalTexture& other) const override {
         return getBuffer() == other.getBuffer();
     }
+    void remapBuffer() override;
 
 private:
     sp<GraphicBuffer> mBuffer;
     android::renderengine::RenderEngine& mRenderEngine;
+    const bool mWritable;
 };
 
 } // namespace android::renderengine::impl
diff --git a/libs/renderengine/include/renderengine/mock/FakeExternalTexture.h b/libs/renderengine/include/renderengine/mock/FakeExternalTexture.h
index 974e0fd..474e2e7 100644
--- a/libs/renderengine/include/renderengine/mock/FakeExternalTexture.h
+++ b/libs/renderengine/include/renderengine/mock/FakeExternalTexture.h
@@ -23,7 +23,9 @@
 namespace mock {
 
 class FakeExternalTexture : public renderengine::ExternalTexture {
-    const sp<GraphicBuffer> mNullBuffer = nullptr;
+    const sp<GraphicBuffer> mEmptyBuffer =
+            sp<GraphicBuffer>::make(1u, 1u, PIXEL_FORMAT_RGBA_8888,
+                                    GRALLOC_USAGE_SW_WRITE_OFTEN | GRALLOC_USAGE_SW_READ_OFTEN);
     uint32_t mWidth;
     uint32_t mHeight;
     uint64_t mId;
@@ -34,7 +36,7 @@
     FakeExternalTexture(uint32_t width, uint32_t height, uint64_t id, PixelFormat pixelFormat,
                         uint64_t usage)
           : mWidth(width), mHeight(height), mId(id), mPixelFormat(pixelFormat), mUsage(usage) {}
-    const sp<GraphicBuffer>& getBuffer() const { return mNullBuffer; }
+    const sp<GraphicBuffer>& getBuffer() const { return mEmptyBuffer; }
     bool hasSameBuffer(const renderengine::ExternalTexture& other) const override {
         return getId() == other.getId();
     }
@@ -43,6 +45,7 @@
     uint64_t getId() const override { return mId; }
     PixelFormat getPixelFormat() const override { return mPixelFormat; }
     uint64_t getUsage() const override { return mUsage; }
+    void remapBuffer() override {}
     ~FakeExternalTexture() = default;
 };
 
diff --git a/libs/renderengine/include/renderengine/mock/RenderEngine.h b/libs/renderengine/include/renderengine/mock/RenderEngine.h
index 248bd65..d3035e2 100644
--- a/libs/renderengine/include/renderengine/mock/RenderEngine.h
+++ b/libs/renderengine/include/renderengine/mock/RenderEngine.h
@@ -48,14 +48,13 @@
     MOCK_METHOD0(cleanupPostRender, void());
     MOCK_CONST_METHOD0(canSkipPostRenderCleanup, bool());
     MOCK_METHOD5(drawLayers,
-                 std::future<RenderEngineResult>(const DisplaySettings&,
-                                                 const std::vector<LayerSettings>&,
-                                                 const std::shared_ptr<ExternalTexture>&,
-                                                 const bool, base::unique_fd&&));
+                 ftl::Future<FenceResult>(const DisplaySettings&, const std::vector<LayerSettings>&,
+                                          const std::shared_ptr<ExternalTexture>&, const bool,
+                                          base::unique_fd&&));
     MOCK_METHOD6(drawLayersInternal,
-                 void(const std::shared_ptr<std::promise<RenderEngineResult>>&&,
-                      const DisplaySettings&, const std::vector<LayerSettings>&,
-                      const std::shared_ptr<ExternalTexture>&, const bool, base::unique_fd&&));
+                 void(const std::shared_ptr<std::promise<FenceResult>>&&, const DisplaySettings&,
+                      const std::vector<LayerSettings>&, const std::shared_ptr<ExternalTexture>&,
+                      const bool, base::unique_fd&&));
     MOCK_METHOD0(cleanFramebufferCache, void());
     MOCK_METHOD0(getContextPriority, int());
     MOCK_METHOD0(supportsBackgroundBlur, bool());
@@ -64,7 +63,7 @@
 protected:
     // mock renderengine still needs to implement these, but callers should never need to call them.
     void mapExternalTextureBuffer(const sp<GraphicBuffer>&, bool) {}
-    void unmapExternalTextureBuffer(const sp<GraphicBuffer>&) {}
+    void unmapExternalTextureBuffer(sp<GraphicBuffer>&&) {}
 };
 
 } // namespace mock
diff --git a/libs/renderengine/skia/AutoBackendTexture.cpp b/libs/renderengine/skia/AutoBackendTexture.cpp
index 5c122d4..c412c9c 100644
--- a/libs/renderengine/skia/AutoBackendTexture.cpp
+++ b/libs/renderengine/skia/AutoBackendTexture.cpp
@@ -43,10 +43,12 @@
                                                        createProtectedImage, backendFormat,
                                                        isOutputBuffer);
     mColorType = GrAHardwareBufferUtils::GetSkColorTypeFromBufferFormat(desc.format);
-    ALOGE_IF(!mBackendTexture.isValid(),
-             "Failed to create a valid texture. [%p]:[%d,%d] isProtected:%d isWriteable:%d "
-             "format:%d",
-             this, desc.width, desc.height, createProtectedImage, isOutputBuffer, desc.format);
+    if (!mBackendTexture.isValid() || !desc.width || !desc.height) {
+        LOG_ALWAYS_FATAL("Failed to create a valid texture. [%p]:[%d,%d] isProtected:%d "
+                         "isWriteable:%d format:%d",
+                         this, desc.width, desc.height, createProtectedImage, isOutputBuffer,
+                         desc.format);
+    }
 }
 
 AutoBackendTexture::~AutoBackendTexture() {
@@ -82,6 +84,18 @@
     textureRelease->unref(false);
 }
 
+void logFatalTexture(const char* msg, const GrBackendTexture& tex, ui::Dataspace dataspace,
+                     SkColorType colorType) {
+    GrGLTextureInfo textureInfo;
+    bool retrievedTextureInfo = tex.getGLTextureInfo(&textureInfo);
+    LOG_ALWAYS_FATAL("%s isTextureValid:%d dataspace:%d"
+                     "\n\tGrBackendTexture: (%i x %i) hasMipmaps: %i isProtected: %i texType: %i"
+                     "\n\t\tGrGLTextureInfo: success: %i fTarget: %u fFormat: %u colorType %i",
+                     msg, tex.isValid(), dataspace, tex.width(), tex.height(), tex.hasMipmaps(),
+                     tex.isProtected(), static_cast<int>(tex.textureType()), retrievedTextureInfo,
+                     textureInfo.fTarget, textureInfo.fFormat, colorType);
+}
+
 sk_sp<SkImage> AutoBackendTexture::makeImage(ui::Dataspace dataspace, SkAlphaType alphaType,
                                              GrDirectContext* context) {
     ATRACE_CALL();
@@ -107,9 +121,9 @@
 
     mImage = image;
     mDataspace = dataspace;
-    LOG_ALWAYS_FATAL_IF(mImage == nullptr,
-                        "Unable to generate SkImage. isTextureValid:%d dataspace:%d",
-                        mBackendTexture.isValid(), dataspace);
+    if (!mImage) {
+        logFatalTexture("Unable to generate SkImage.", mBackendTexture, dataspace, colorType);
+    }
     return mImage;
 }
 
@@ -131,9 +145,9 @@
     }
 
     mDataspace = dataspace;
-    LOG_ALWAYS_FATAL_IF(mSurface == nullptr,
-                        "Unable to generate SkSurface. isTextureValid:%d dataspace:%d",
-                        mBackendTexture.isValid(), dataspace);
+    if (!mSurface) {
+        logFatalTexture("Unable to generate SkSurface.", mBackendTexture, dataspace, mColorType);
+    }
     return mSurface;
 }
 
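
[Editor's note] The AutoBackendTexture.cpp changes replace two LOG_ALWAYS_FATAL_IF call sites with an explicit check that funnels into the new logFatalTexture() helper, so the crash message can include GrGLTextureInfo gathered just before aborting. Below is a minimal sketch of that "gather diagnostics, then abort" pattern; TextureState is a hypothetical stand-in for the Skia types and is not taken from the patch.

```cpp
// Hypothetical sketch of the pattern used in AutoBackendTexture.cpp above;
// TextureState stands in for GrBackendTexture details.
#include <log/log.h>

struct TextureState {
    bool valid;
    int width;
    int height;
};

// Collect whatever extra state is useful, then abort with a single message.
static void logFatalState(const char* msg, const TextureState& s) {
    LOG_ALWAYS_FATAL("%s valid:%d size:%dx%d", msg, s.valid, s.width, s.height);
}

void validateTexture(const TextureState& s) {
    if (!s.valid || s.width <= 0 || s.height <= 0) {
        logFatalState("Failed to create a valid texture.", s);
    }
}
```
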
diff --git a/libs/renderengine/skia/Cache.cpp b/libs/renderengine/skia/Cache.cpp
index c39f0a9..f6b9183 100644
--- a/libs/renderengine/skia/Cache.cpp
+++ b/libs/renderengine/skia/Cache.cpp
@@ -364,8 +364,8 @@
         const int64_t usage = GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE;
 
         sp<GraphicBuffer> dstBuffer =
-                new GraphicBuffer(displayRect.width(), displayRect.height(), PIXEL_FORMAT_RGBA_8888,
-                                  1, usage, "primeShaderCache_dst");
+                sp<GraphicBuffer>::make(displayRect.width(), displayRect.height(),
+                                        PIXEL_FORMAT_RGBA_8888, 1, usage, "primeShaderCache_dst");
 
         const auto dstTexture =
                 std::make_shared<impl::ExternalTexture>(dstBuffer, *renderengine,
@@ -375,8 +375,8 @@
         // something, but the details are not important. Make use of the shadow layer drawing step
         // to populate it.
         sp<GraphicBuffer> srcBuffer =
-                new GraphicBuffer(displayRect.width(), displayRect.height(), PIXEL_FORMAT_RGBA_8888,
-                                  1, usage, "drawImageLayer_src");
+                sp<GraphicBuffer>::make(displayRect.width(), displayRect.height(),
+                                        PIXEL_FORMAT_RGBA_8888, 1, usage, "drawImageLayer_src");
 
         const auto srcTexture = std::make_shared<
                 impl::ExternalTexture>(srcBuffer, *renderengine,
@@ -398,8 +398,9 @@
         // GRALLOC_USAGE_HW_TEXTURE should be the same as AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.
         const int64_t usageExternal = GRALLOC_USAGE_HW_TEXTURE;
         sp<GraphicBuffer> externalBuffer =
-                new GraphicBuffer(displayRect.width(), displayRect.height(), PIXEL_FORMAT_RGBA_8888,
-                                  1, usageExternal, "primeShaderCache_external");
+                sp<GraphicBuffer>::make(displayRect.width(), displayRect.height(),
+                                        PIXEL_FORMAT_RGBA_8888, 1, usageExternal,
+                                        "primeShaderCache_external");
         const auto externalTexture =
                 std::make_shared<impl::ExternalTexture>(externalBuffer, *renderengine,
                                                         impl::ExternalTexture::Usage::READABLE);
@@ -409,8 +410,9 @@
         // Another external texture with a different pixel format triggers useIsOpaqueWorkaround.
         // It doesn't have to be f16, but it can't be the usual 8888.
         sp<GraphicBuffer> f16ExternalBuffer =
-                new GraphicBuffer(displayRect.width(), displayRect.height(), PIXEL_FORMAT_RGBA_FP16,
-                                  1, usageExternal, "primeShaderCache_external_f16");
+                sp<GraphicBuffer>::make(displayRect.width(), displayRect.height(),
+                                        PIXEL_FORMAT_RGBA_FP16, 1, usageExternal,
+                                        "primeShaderCache_external_f16");
         // The F16 texture may not be usable on all devices, so check first that it was created.
         status_t error = f16ExternalBuffer->initCheck();
         if (!error) {
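
[Editor's note] The Cache.cpp hunks are mechanical: each `new GraphicBuffer(...)` assigned to an sp<> becomes `sp<GraphicBuffer>::make(...)`, which allocates the RefBase-derived object and takes the first strong reference in one step. A minimal sketch of the two spellings follows; Widget is a hypothetical type, and the sketch assumes libutils' RefBase/sp as used elsewhere in this tree.

```cpp
// Hypothetical example of sp<T>::make versus adopting a raw `new` pointer;
// Widget is invented for illustration.
#include <utils/RefBase.h>
#include <utils/StrongPointer.h>

using android::RefBase;
using android::sp;

struct Widget : public RefBase {
    Widget(int w, int h) : width(w), height(h) {}
    const int width;
    const int height;
};

int main() {
    // Older style: the object briefly exists behind a raw pointer before the
    // sp<> adopts it and increments the strong count.
    sp<Widget> a = new Widget(64, 64);

    // Style used in the patch: allocation and the first strong reference are
    // combined, so no raw owning pointer is ever exposed.
    sp<Widget> b = sp<Widget>::make(128, 128);

    return (a->width + b->height == 192) ? 0 : 1;
}
```
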
diff --git a/libs/renderengine/skia/ColorSpaces.cpp b/libs/renderengine/skia/ColorSpaces.cpp
index 37ff5df..92b01e0 100644
--- a/libs/renderengine/skia/ColorSpaces.cpp
+++ b/libs/renderengine/skia/ColorSpaces.cpp
@@ -21,6 +21,8 @@
 namespace skia {
 
 // please keep in sync with hwui/utils/Color.cpp
+// TODO: Scale by the dimming ratio here instead of in a generic 3x3 transform;
+// otherwise there may be a luminance shift for, e.g., HLG.
 sk_sp<SkColorSpace> toSkColorSpace(ui::Dataspace dataspace) {
     skcms_Matrix3x3 gamut;
     switch (dataspace & HAL_DATASPACE_STANDARD_MASK) {
@@ -61,13 +63,14 @@
         case HAL_DATASPACE_TRANSFER_GAMMA2_8:
             return SkColorSpace::MakeRGB({2.8f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, gamut);
         case HAL_DATASPACE_TRANSFER_ST2084:
-            return SkColorSpace::MakeRGB(SkNamedTransferFn::kPQ, gamut);
+            return SkColorSpace::MakeRGB({-2.f, -1.55522297832f, 1.86045365631f, 32 / 2523.0f,
+                                          2413 / 128.0f, -2392 / 128.0f, 8192 / 1305.0f},
+                                         gamut);
         case HAL_DATASPACE_TRANSFER_SMPTE_170M:
             return SkColorSpace::MakeRGB(SkNamedTransferFn::kRec2020, gamut);
         case HAL_DATASPACE_TRANSFER_HLG:
-            // return HLG transfer but scale by 1/12
             skcms_TransferFunction hlgFn;
-            if (skcms_TransferFunction_makeScaledHLGish(&hlgFn, 1.f / 12.f, 2.f, 2.f,
+            if (skcms_TransferFunction_makeScaledHLGish(&hlgFn, 0.314509843, 2.f, 2.f,
                                                         1.f / 0.17883277f, 0.28466892f,
                                                         0.55991073f)) {
                 return SkColorSpace::MakeRGB(hlgFn, gamut);
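
[Editor's note] The ST2084 branch above swaps SkNamedTransferFn::kPQ for an explicit skcms parameter list whose fractions (32/2523, 2413/128, 2392/128, 8192/1305) match the standard SMPTE ST 2084 constants 1/m2, c2, c3, and 1/m1. As a reference only, the sketch below restates the published PQ EOTF defined by those constants; it makes no claim about skcms's internal parameterization or the scaled leading coefficients in the patch.

```cpp
// Reference-only sketch of the SMPTE ST 2084 (PQ) EOTF; not taken from the
// patch and independent of skcms's parameter encoding.
#include <algorithm>
#include <cmath>
#include <cstdio>

// Maps a nonlinear PQ signal in [0, 1] to normalized luminance in [0, 1],
// where 1.0 corresponds to 10000 nits.
static double pqEotf(double signal) {
    constexpr double m1 = 1305.0 / 8192.0;  // 2610 / 16384
    constexpr double m2 = 2523.0 / 32.0;    // 2523 / 4096 * 128
    constexpr double c1 = 107.0 / 128.0;    // 3424 / 4096 == c3 - c2 + 1
    constexpr double c2 = 2413.0 / 128.0;   // 2413 / 4096 * 32
    constexpr double c3 = 2392.0 / 128.0;   // 2392 / 4096 * 32

    const double p = std::pow(signal, 1.0 / m2);
    const double num = std::max(p - c1, 0.0);
    const double den = c2 - c3 * p;
    return std::pow(num / den, 1.0 / m1);
}

int main() {
    std::printf("PQ(1.00) -> %.4f (x 10000 nits)\n", pqEotf(1.0));   // 1.0000
    std::printf("PQ(0.58) -> %.4f (x 10000 nits)\n", pqEotf(0.58));  // ~0.02, roughly 200 nits
    return 0;
}
```
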
diff --git a/libs/renderengine/skia/SkiaGLRenderEngine.cpp b/libs/renderengine/skia/SkiaGLRenderEngine.cpp
index 0caa9f2..ff598e7 100644
--- a/libs/renderengine/skia/SkiaGLRenderEngine.cpp
+++ b/libs/renderengine/skia/SkiaGLRenderEngine.cpp
@@ -24,24 +24,11 @@
 #include <EGL/egl.h>
 #include <EGL/eglext.h>
 #include <GrContextOptions.h>
-#include <SkCanvas.h>
-#include <SkColorFilter.h>
-#include <SkColorMatrix.h>
-#include <SkColorSpace.h>
-#include <SkGraphics.h>
-#include <SkImage.h>
-#include <SkImageFilters.h>
-#include <SkRegion.h>
-#include <SkShadowUtils.h>
-#include <SkSurface.h>
 #include <android-base/stringprintf.h>
 #include <gl/GrGLInterface.h>
 #include <gui/TraceUtils.h>
 #include <sync/sync.h>
-#include <ui/BlurRegion.h>
-#include <ui/DataspaceUtils.h>
 #include <ui/DebugUtils.h>
-#include <ui/GraphicBuffer.h>
 #include <utils/Trace.h>
 
 #include <cmath>
@@ -50,25 +37,7 @@
 #include <numeric>
 
 #include "../gl/GLExtensions.h"
-#include "Cache.h"
-#include "ColorSpaces.h"
-#include "SkBlendMode.h"
-#include "SkImageInfo.h"
-#include "filters/BlurFilter.h"
-#include "filters/GaussianBlurFilter.h"
-#include "filters/KawaseBlurFilter.h"
-#include "filters/LinearEffect.h"
 #include "log/log_main.h"
-#include "skia/debug/SkiaCapture.h"
-#include "skia/debug/SkiaMemoryReporter.h"
-#include "skia/filters/StretchShaderFactory.h"
-#include "system/graphics-base-v1.0.h"
-
-namespace {
-// Debugging settings
-static const bool kPrintLayerSettings = false;
-static const bool kFlushAfterEveryLayer = kPrintLayerSettings;
-} // namespace
 
 bool checkGlError(const char* op, int lineNumber);
 
@@ -224,9 +193,10 @@
     }
 
     // initialize the renderer while GL is current
-    std::unique_ptr<SkiaGLRenderEngine> engine =
-            std::make_unique<SkiaGLRenderEngine>(args, display, ctxt, placeholder, protectedContext,
-                                                 protectedPlaceholder);
+    std::unique_ptr<SkiaGLRenderEngine> engine(new SkiaGLRenderEngine(args, display, ctxt,
+                                                                      placeholder, protectedContext,
+                                                                      protectedPlaceholder));
+    engine->ensureGrContextsCreated();
 
     ALOGI("OpenGL ES informations:");
     ALOGI("vendor    : %s", extensions.getVendor());
@@ -239,11 +209,6 @@
     return engine;
 }
 
-std::future<void> SkiaGLRenderEngine::primeCache() {
-    Cache::primeShaderCache(this);
-    return {};
-}
-
 EGLConfig SkiaGLRenderEngine::chooseEglConfig(EGLDisplay display, int format, bool logConfig) {
     status_t err;
     EGLConfig config;
@@ -283,72 +248,20 @@
     return config;
 }
 
-sk_sp<SkData> SkiaGLRenderEngine::SkSLCacheMonitor::load(const SkData& key) {
-    // This "cache" does not actually cache anything. It just allows us to
-    // monitor Skia's internal cache. So this method always returns null.
-    return nullptr;
-}
-
-void SkiaGLRenderEngine::SkSLCacheMonitor::store(const SkData& key, const SkData& data,
-                                                 const SkString& description) {
-    mShadersCachedSinceLastCall++;
-    mTotalShadersCompiled++;
-    ATRACE_FORMAT("SF cache: %i shaders", mTotalShadersCompiled);
-}
-
-int SkiaGLRenderEngine::reportShadersCompiled() {
-    return mSkSLCacheMonitor.totalShadersCompiled();
-}
-
 SkiaGLRenderEngine::SkiaGLRenderEngine(const RenderEngineCreationArgs& args, EGLDisplay display,
                                        EGLContext ctxt, EGLSurface placeholder,
                                        EGLContext protectedContext, EGLSurface protectedPlaceholder)
-      : SkiaRenderEngine(args.renderEngineType),
+      : SkiaRenderEngine(args.renderEngineType,
+                         static_cast<PixelFormat>(args.pixelFormat),
+                         args.useColorManagement, args.supportsBackgroundBlur),
         mEGLDisplay(display),
         mEGLContext(ctxt),
         mPlaceholderSurface(placeholder),
         mProtectedEGLContext(protectedContext),
-        mProtectedPlaceholderSurface(protectedPlaceholder),
-        mDefaultPixelFormat(static_cast<PixelFormat>(args.pixelFormat)),
-        mUseColorManagement(args.useColorManagement) {
-    sk_sp<const GrGLInterface> glInterface(GrGLCreateNativeInterface());
-    LOG_ALWAYS_FATAL_IF(!glInterface.get());
-
-    GrContextOptions options;
-    options.fDisableDriverCorrectnessWorkarounds = true;
-    options.fDisableDistanceFieldPaths = true;
-    options.fReducedShaderVariations = true;
-    options.fPersistentCache = &mSkSLCacheMonitor;
-    mGrContext = GrDirectContext::MakeGL(glInterface, options);
-    if (supportsProtectedContent()) {
-        useProtectedContext(true);
-        mProtectedGrContext = GrDirectContext::MakeGL(glInterface, options);
-        useProtectedContext(false);
-    }
-
-    if (args.supportsBackgroundBlur) {
-        ALOGD("Background Blurs Enabled");
-        mBlurFilter = new KawaseBlurFilter();
-    }
-    mCapture = std::make_unique<SkiaCapture>();
-}
+        mProtectedPlaceholderSurface(protectedPlaceholder) { }
 
 SkiaGLRenderEngine::~SkiaGLRenderEngine() {
-    std::lock_guard<std::mutex> lock(mRenderingMutex);
-    if (mBlurFilter) {
-        delete mBlurFilter;
-    }
-
-    mCapture = nullptr;
-
-    mGrContext->flushAndSubmit(true);
-    mGrContext->abandonContext();
-
-    if (mProtectedGrContext) {
-        mProtectedGrContext->flushAndSubmit(true);
-        mProtectedGrContext->abandonContext();
-    }
-
+    finishRenderingAndAbandonContext();
     if (mPlaceholderSurface != EGL_NO_SURFACE) {
         eglDestroySurface(mEGLDisplay, mPlaceholderSurface);
     }
@@ -366,71 +279,69 @@
     eglReleaseThread();
 }
 
-bool SkiaGLRenderEngine::supportsProtectedContent() const {
+SkiaRenderEngine::Contexts SkiaGLRenderEngine::createDirectContexts(
+    const GrContextOptions& options) {
+
+    LOG_ALWAYS_FATAL_IF(isProtected(),
+                        "Cannot setup contexts while already in protected mode");
+
+    sk_sp<const GrGLInterface> glInterface = GrGLMakeNativeInterface();
+
+    LOG_ALWAYS_FATAL_IF(!glInterface.get(), "GrGLMakeNativeInterface() failed");
+
+    SkiaRenderEngine::Contexts contexts;
+    contexts.first = GrDirectContext::MakeGL(glInterface, options);
+    if (supportsProtectedContentImpl()) {
+        useProtectedContextImpl(GrProtected::kYes);
+        contexts.second = GrDirectContext::MakeGL(glInterface, options);
+        useProtectedContextImpl(GrProtected::kNo);
+    }
+
+    return contexts;
+}
+
+bool SkiaGLRenderEngine::supportsProtectedContentImpl() const {
     return mProtectedEGLContext != EGL_NO_CONTEXT;
 }
 
-GrDirectContext* SkiaGLRenderEngine::getActiveGrContext() const {
-    return mInProtectedContext ? mProtectedGrContext.get() : mGrContext.get();
-}
-
-void SkiaGLRenderEngine::useProtectedContext(bool useProtectedContext) {
-    if (useProtectedContext == mInProtectedContext ||
-        (useProtectedContext && !supportsProtectedContent())) {
-        return;
-    }
-
-    // release any scratch resources before switching into a new mode
-    if (getActiveGrContext()) {
-        getActiveGrContext()->purgeUnlockedResources(true);
-    }
-
+bool SkiaGLRenderEngine::useProtectedContextImpl(GrProtected isProtected) {
     const EGLSurface surface =
-            useProtectedContext ? mProtectedPlaceholderSurface : mPlaceholderSurface;
-    const EGLContext context = useProtectedContext ? mProtectedEGLContext : mEGLContext;
+        (isProtected == GrProtected::kYes) ?
+        mProtectedPlaceholderSurface : mPlaceholderSurface;
+    const EGLContext context = (isProtected == GrProtected::kYes) ?
+        mProtectedEGLContext : mEGLContext;
 
-    if (eglMakeCurrent(mEGLDisplay, surface, surface, context) == EGL_TRUE) {
-        mInProtectedContext = useProtectedContext;
-        // given that we are sharing the same thread between two GrContexts we need to
-        // make sure that the thread state is reset when switching between the two.
-        if (getActiveGrContext()) {
-            getActiveGrContext()->resetContext();
-        }
-    }
+    return eglMakeCurrent(mEGLDisplay, surface, surface, context) == EGL_TRUE;
 }
 
-base::unique_fd SkiaGLRenderEngine::flush() {
-    ATRACE_CALL();
-    if (!gl::GLExtensions::getInstance().hasNativeFenceSync()) {
-        return base::unique_fd();
-    }
-
-    EGLSyncKHR sync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
-    if (sync == EGL_NO_SYNC_KHR) {
-        ALOGW("failed to create EGL native fence sync: %#x", eglGetError());
-        return base::unique_fd();
-    }
-
-    // native fence fd will not be populated until flush() is done.
-    glFlush();
-
-    // get the fence fd
-    base::unique_fd fenceFd(eglDupNativeFenceFDANDROID(mEGLDisplay, sync));
-    eglDestroySyncKHR(mEGLDisplay, sync);
-    if (fenceFd == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
-        ALOGW("failed to dup EGL native fence sync: %#x", eglGetError());
-    }
-
-    return fenceFd;
-}
-
-void SkiaGLRenderEngine::waitFence(base::borrowed_fd fenceFd) {
+void SkiaGLRenderEngine::waitFence(GrDirectContext*, base::borrowed_fd fenceFd) {
     if (fenceFd.get() >= 0 && !waitGpuFence(fenceFd)) {
         ATRACE_NAME("SkiaGLRenderEngine::waitFence");
         sync_wait(fenceFd.get(), -1);
     }
 }
 
+base::unique_fd SkiaGLRenderEngine::flushAndSubmit(GrDirectContext* grContext) {
+    base::unique_fd drawFence = flush();
+
+    bool requireSync = drawFence.get() < 0;
+    if (requireSync) {
+        ATRACE_BEGIN("Submit(sync=true)");
+    } else {
+        ATRACE_BEGIN("Submit(sync=false)");
+    }
+    bool success = grContext->submit(requireSync);
+    ATRACE_END();
+    if (!success) {
+        ALOGE("Failed to flush RenderEngine commands");
+        // Chances are, something illegal happened (Skia's internal GPU object
+        // doesn't exist, or the context was abandoned).
+        return drawFence;
+    }
+
+    return drawFence;
+}
+
 bool SkiaGLRenderEngine::waitGpuFence(base::borrowed_fd fenceFd) {
     if (!gl::GLExtensions::getInstance().hasNativeFenceSync() ||
         !gl::GLExtensions::getInstance().hasWaitSync()) {
@@ -466,960 +377,29 @@
     return true;
 }
 
-static float toDegrees(uint32_t transform) {
-    switch (transform) {
-        case ui::Transform::ROT_90:
-            return 90.0;
-        case ui::Transform::ROT_180:
-            return 180.0;
-        case ui::Transform::ROT_270:
-            return 270.0;
-        default:
-            return 0.0;
-    }
-}
-
-static SkColorMatrix toSkColorMatrix(const mat4& matrix) {
-    return SkColorMatrix(matrix[0][0], matrix[1][0], matrix[2][0], matrix[3][0], 0, matrix[0][1],
-                         matrix[1][1], matrix[2][1], matrix[3][1], 0, matrix[0][2], matrix[1][2],
-                         matrix[2][2], matrix[3][2], 0, matrix[0][3], matrix[1][3], matrix[2][3],
-                         matrix[3][3], 0);
-}
-
-static bool needsToneMapping(ui::Dataspace sourceDataspace, ui::Dataspace destinationDataspace) {
-    int64_t sourceTransfer = sourceDataspace & HAL_DATASPACE_TRANSFER_MASK;
-    int64_t destTransfer = destinationDataspace & HAL_DATASPACE_TRANSFER_MASK;
-
-    // Treat unsupported dataspaces as srgb
-    if (destTransfer != HAL_DATASPACE_TRANSFER_LINEAR &&
-        destTransfer != HAL_DATASPACE_TRANSFER_HLG &&
-        destTransfer != HAL_DATASPACE_TRANSFER_ST2084) {
-        destTransfer = HAL_DATASPACE_TRANSFER_SRGB;
-    }
-
-    if (sourceTransfer != HAL_DATASPACE_TRANSFER_LINEAR &&
-        sourceTransfer != HAL_DATASPACE_TRANSFER_HLG &&
-        sourceTransfer != HAL_DATASPACE_TRANSFER_ST2084) {
-        sourceTransfer = HAL_DATASPACE_TRANSFER_SRGB;
-    }
-
-    const bool isSourceLinear = sourceTransfer == HAL_DATASPACE_TRANSFER_LINEAR;
-    const bool isSourceSRGB = sourceTransfer == HAL_DATASPACE_TRANSFER_SRGB;
-    const bool isDestLinear = destTransfer == HAL_DATASPACE_TRANSFER_LINEAR;
-    const bool isDestSRGB = destTransfer == HAL_DATASPACE_TRANSFER_SRGB;
-
-    return !(isSourceLinear && isDestSRGB) && !(isSourceSRGB && isDestLinear) &&
-            sourceTransfer != destTransfer;
-}
-
-void SkiaGLRenderEngine::mapExternalTextureBuffer(const sp<GraphicBuffer>& buffer,
-                                                  bool isRenderable) {
-    // Only run this if RE is running on its own thread. This way the access to GL
-    // operations is guaranteed to be happening on the same thread.
-    if (mRenderEngineType != RenderEngineType::SKIA_GL_THREADED) {
-        return;
-    }
-    // We currently don't attempt to map a buffer if the buffer contains protected content
-    // because GPU resources for protected buffers is much more limited.
-    const bool isProtectedBuffer = buffer->getUsage() & GRALLOC_USAGE_PROTECTED;
-    if (isProtectedBuffer) {
-        return;
-    }
+base::unique_fd SkiaGLRenderEngine::flush() {
     ATRACE_CALL();
-
-    // If we were to support caching protected buffers then we will need to switch the
-    // currently bound context if we are not already using the protected context (and subsequently
-    // switch back after the buffer is cached).  However, for non-protected content we can bind
-    // the texture in either GL context because they are initialized with the same share_context
-    // which allows the texture state to be shared between them.
-    auto grContext = getActiveGrContext();
-    auto& cache = mTextureCache;
-
-    std::lock_guard<std::mutex> lock(mRenderingMutex);
-    mGraphicBufferExternalRefs[buffer->getId()]++;
-
-    if (const auto& iter = cache.find(buffer->getId()); iter == cache.end()) {
-        std::shared_ptr<AutoBackendTexture::LocalRef> imageTextureRef =
-                std::make_shared<AutoBackendTexture::LocalRef>(grContext,
-                                                               buffer->toAHardwareBuffer(),
-                                                               isRenderable, mTextureCleanupMgr);
-        cache.insert({buffer->getId(), imageTextureRef});
-    }
-}
-
-void SkiaGLRenderEngine::unmapExternalTextureBuffer(const sp<GraphicBuffer>& buffer) {
-    ATRACE_CALL();
-    std::lock_guard<std::mutex> lock(mRenderingMutex);
-    if (const auto& iter = mGraphicBufferExternalRefs.find(buffer->getId());
-        iter != mGraphicBufferExternalRefs.end()) {
-        if (iter->second == 0) {
-            ALOGW("Attempted to unmap GraphicBuffer <id: %" PRId64
-                  "> from RenderEngine texture, but the "
-                  "ref count was already zero!",
-                  buffer->getId());
-            mGraphicBufferExternalRefs.erase(buffer->getId());
-            return;
-        }
-
-        iter->second--;
-
-        // Swap contexts if needed prior to deleting this buffer
-        // See Issue 1 of
-        // https://www.khronos.org/registry/EGL/extensions/EXT/EGL_EXT_protected_content.txt: even
-        // when a protected context and an unprotected context are part of the same share group,
-        // protected surfaces may not be accessed by an unprotected context, implying that protected
-        // surfaces may only be freed when a protected context is active.
-        const bool inProtected = mInProtectedContext;
-        useProtectedContext(buffer->getUsage() & GRALLOC_USAGE_PROTECTED);
-
-        if (iter->second == 0) {
-            mTextureCache.erase(buffer->getId());
-            mGraphicBufferExternalRefs.erase(buffer->getId());
-        }
-
-        // Swap back to the previous context so that cached values of isProtected in SurfaceFlinger
-        // are up-to-date.
-        if (inProtected != mInProtectedContext) {
-            useProtectedContext(inProtected);
-        }
-    }
-}
-
-bool SkiaGLRenderEngine::canSkipPostRenderCleanup() const {
-    std::lock_guard<std::mutex> lock(mRenderingMutex);
-    return mTextureCleanupMgr.isEmpty();
-}
-
-void SkiaGLRenderEngine::cleanupPostRender() {
-    ATRACE_CALL();
-    std::lock_guard<std::mutex> lock(mRenderingMutex);
-    mTextureCleanupMgr.cleanup();
-}
-
-// Helper class intended to be used on the stack to ensure that texture cleanup
-// is deferred until after this class goes out of scope.
-class DeferTextureCleanup final {
-public:
-    DeferTextureCleanup(AutoBackendTexture::CleanupManager& mgr) : mMgr(mgr) {
-        mMgr.setDeferredStatus(true);
-    }
-    ~DeferTextureCleanup() { mMgr.setDeferredStatus(false); }
-
-private:
-    DISALLOW_COPY_AND_ASSIGN(DeferTextureCleanup);
-    AutoBackendTexture::CleanupManager& mMgr;
-};
-
-sk_sp<SkShader> SkiaGLRenderEngine::createRuntimeEffectShader(
-        const RuntimeEffectShaderParameters& parameters) {
-    // The given surface will be stretched by HWUI via matrix transformation
-    // which gets similar results for most surfaces
-    // Determine later on if we need to leverage the stertch shader within
-    // surface flinger
-    const auto& stretchEffect = parameters.layer.stretchEffect;
-    auto shader = parameters.shader;
-    if (stretchEffect.hasEffect()) {
-        const auto targetBuffer = parameters.layer.source.buffer.buffer;
-        const auto graphicBuffer = targetBuffer ? targetBuffer->getBuffer() : nullptr;
-        if (graphicBuffer && parameters.shader) {
-            shader = mStretchShaderFactory.createSkShader(shader, stretchEffect);
-        }
+    if (!gl::GLExtensions::getInstance().hasNativeFenceSync()) {
+        return base::unique_fd();
     }
 
-    if (parameters.requiresLinearEffect) {
-        const ui::Dataspace inputDataspace = mUseColorManagement ? parameters.layer.sourceDataspace
-                                                                 : ui::Dataspace::V0_SRGB_LINEAR;
-        const ui::Dataspace outputDataspace = mUseColorManagement
-                ? parameters.display.outputDataspace
-                : ui::Dataspace::V0_SRGB_LINEAR;
-
-        auto effect =
-                shaders::LinearEffect{.inputDataspace = inputDataspace,
-                                      .outputDataspace = outputDataspace,
-                                      .undoPremultipliedAlpha = parameters.undoPremultipliedAlpha};
-
-        auto effectIter = mRuntimeEffects.find(effect);
-        sk_sp<SkRuntimeEffect> runtimeEffect = nullptr;
-        if (effectIter == mRuntimeEffects.end()) {
-            runtimeEffect = buildRuntimeEffect(effect);
-            mRuntimeEffects.insert({effect, runtimeEffect});
-        } else {
-            runtimeEffect = effectIter->second;
-        }
-        mat4 colorTransform = parameters.layer.colorTransform;
-
-        colorTransform *=
-                mat4::scale(vec4(parameters.layerDimmingRatio, parameters.layerDimmingRatio,
-                                 parameters.layerDimmingRatio, 1.f));
-        const auto targetBuffer = parameters.layer.source.buffer.buffer;
-        const auto graphicBuffer = targetBuffer ? targetBuffer->getBuffer() : nullptr;
-        const auto hardwareBuffer = graphicBuffer ? graphicBuffer->toAHardwareBuffer() : nullptr;
-        return createLinearEffectShader(parameters.shader, effect, runtimeEffect, colorTransform,
-                                        parameters.display.maxLuminance,
-                                        parameters.display.currentLuminanceNits,
-                                        parameters.layer.source.buffer.maxLuminanceNits,
-                                        hardwareBuffer, parameters.display.renderIntent);
-    }
-    return parameters.shader;
-}
-
-void SkiaGLRenderEngine::initCanvas(SkCanvas* canvas, const DisplaySettings& display) {
-    if (CC_UNLIKELY(mCapture->isCaptureRunning())) {
-        // Record display settings when capture is running.
-        std::stringstream displaySettings;
-        PrintTo(display, &displaySettings);
-        // Store the DisplaySettings in additional information.
-        canvas->drawAnnotation(SkRect::MakeEmpty(), "DisplaySettings",
-                               SkData::MakeWithCString(displaySettings.str().c_str()));
+    EGLSyncKHR sync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
+    if (sync == EGL_NO_SYNC_KHR) {
+        ALOGW("failed to create EGL native fence sync: %#x", eglGetError());
+        return base::unique_fd();
     }
 
-    // Before doing any drawing, let's make sure that we'll start at the origin of the display.
-    // Some displays don't start at 0,0 for example when we're mirroring the screen. Also, virtual
-    // displays might have different scaling when compared to the physical screen.
+    // native fence fd will not be populated until flush() is done.
+    glFlush();
 
-    canvas->clipRect(getSkRect(display.physicalDisplay));
-    canvas->translate(display.physicalDisplay.left, display.physicalDisplay.top);
-
-    const auto clipWidth = display.clip.width();
-    const auto clipHeight = display.clip.height();
-    auto rotatedClipWidth = clipWidth;
-    auto rotatedClipHeight = clipHeight;
-    // Scale is contingent on the rotation result.
-    if (display.orientation & ui::Transform::ROT_90) {
-        std::swap(rotatedClipWidth, rotatedClipHeight);
-    }
-    const auto scaleX = static_cast<SkScalar>(display.physicalDisplay.width()) /
-            static_cast<SkScalar>(rotatedClipWidth);
-    const auto scaleY = static_cast<SkScalar>(display.physicalDisplay.height()) /
-            static_cast<SkScalar>(rotatedClipHeight);
-    canvas->scale(scaleX, scaleY);
-
-    // Canvas rotation is done by centering the clip window at the origin, rotating, translating
-    // back so that the top left corner of the clip is at (0, 0).
-    canvas->translate(rotatedClipWidth / 2, rotatedClipHeight / 2);
-    canvas->rotate(toDegrees(display.orientation));
-    canvas->translate(-clipWidth / 2, -clipHeight / 2);
-    canvas->translate(-display.clip.left, -display.clip.top);
-}
-
-class AutoSaveRestore {
-public:
-    AutoSaveRestore(SkCanvas* canvas) : mCanvas(canvas) { mSaveCount = canvas->save(); }
-    ~AutoSaveRestore() { restore(); }
-    void replace(SkCanvas* canvas) {
-        mCanvas = canvas;
-        mSaveCount = canvas->save();
-    }
-    void restore() {
-        if (mCanvas) {
-            mCanvas->restoreToCount(mSaveCount);
-            mCanvas = nullptr;
-        }
+    // get the fence fd
+    base::unique_fd fenceFd(eglDupNativeFenceFDANDROID(mEGLDisplay, sync));
+    eglDestroySyncKHR(mEGLDisplay, sync);
+    if (fenceFd == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
+        ALOGW("failed to dup EGL native fence sync: %#x", eglGetError());
     }
 
-private:
-    SkCanvas* mCanvas;
-    int mSaveCount;
-};
-
-static SkRRect getBlurRRect(const BlurRegion& region) {
-    const auto rect = SkRect::MakeLTRB(region.left, region.top, region.right, region.bottom);
-    const SkVector radii[4] = {SkVector::Make(region.cornerRadiusTL, region.cornerRadiusTL),
-                               SkVector::Make(region.cornerRadiusTR, region.cornerRadiusTR),
-                               SkVector::Make(region.cornerRadiusBR, region.cornerRadiusBR),
-                               SkVector::Make(region.cornerRadiusBL, region.cornerRadiusBL)};
-    SkRRect roundedRect;
-    roundedRect.setRectRadii(rect, radii);
-    return roundedRect;
-}
-
-// Arbitrary default margin which should be close enough to zero.
-constexpr float kDefaultMargin = 0.0001f;
-static bool equalsWithinMargin(float expected, float value, float margin = kDefaultMargin) {
-    LOG_ALWAYS_FATAL_IF(margin < 0.f, "Margin is negative!");
-    return std::abs(expected - value) < margin;
-}
-
-namespace {
-template <typename T>
-void logSettings(const T& t) {
-    std::stringstream stream;
-    PrintTo(t, &stream);
-    auto string = stream.str();
-    size_t pos = 0;
-    // Perfetto ignores \n, so split up manually into separate ALOGD statements.
-    const size_t size = string.size();
-    while (pos < size) {
-        const size_t end = std::min(string.find("\n", pos), size);
-        ALOGD("%s", string.substr(pos, end - pos).c_str());
-        pos = end + 1;
-    }
-}
-} // namespace
-
-void SkiaGLRenderEngine::drawLayersInternal(
-        const std::shared_ptr<std::promise<RenderEngineResult>>&& resultPromise,
-        const DisplaySettings& display, const std::vector<LayerSettings>& layers,
-        const std::shared_ptr<ExternalTexture>& buffer, const bool /*useFramebufferCache*/,
-        base::unique_fd&& bufferFence) {
-    ATRACE_NAME("SkiaGL::drawLayers");
-
-    std::lock_guard<std::mutex> lock(mRenderingMutex);
-    if (layers.empty()) {
-        ALOGV("Drawing empty layer stack");
-        resultPromise->set_value({NO_ERROR, base::unique_fd()});
-        return;
-    }
-
-    if (buffer == nullptr) {
-        ALOGE("No output buffer provided. Aborting GPU composition.");
-        resultPromise->set_value({BAD_VALUE, base::unique_fd()});
-        return;
-    }
-
-    validateOutputBufferUsage(buffer->getBuffer());
-
-    auto grContext = getActiveGrContext();
-    auto& cache = mTextureCache;
-
-    // any AutoBackendTexture deletions will now be deferred until cleanupPostRender is called
-    DeferTextureCleanup dtc(mTextureCleanupMgr);
-
-    std::shared_ptr<AutoBackendTexture::LocalRef> surfaceTextureRef;
-    if (const auto& it = cache.find(buffer->getBuffer()->getId()); it != cache.end()) {
-        surfaceTextureRef = it->second;
-    } else {
-        surfaceTextureRef =
-                std::make_shared<AutoBackendTexture::LocalRef>(grContext,
-                                                               buffer->getBuffer()
-                                                                       ->toAHardwareBuffer(),
-                                                               true, mTextureCleanupMgr);
-    }
-
-    // wait on the buffer to be ready to use prior to using it
-    waitFence(bufferFence);
-
-    const ui::Dataspace dstDataspace =
-            mUseColorManagement ? display.outputDataspace : ui::Dataspace::V0_SRGB_LINEAR;
-    sk_sp<SkSurface> dstSurface = surfaceTextureRef->getOrCreateSurface(dstDataspace, grContext);
-
-    SkCanvas* dstCanvas = mCapture->tryCapture(dstSurface.get());
-    if (dstCanvas == nullptr) {
-        ALOGE("Cannot acquire canvas from Skia.");
-        resultPromise->set_value({BAD_VALUE, base::unique_fd()});
-        return;
-    }
-
-    // setup color filter if necessary
-    sk_sp<SkColorFilter> displayColorTransform;
-    if (display.colorTransform != mat4() && !display.deviceHandlesColorTransform) {
-        displayColorTransform = SkColorFilters::Matrix(toSkColorMatrix(display.colorTransform));
-    }
-    const bool ctModifiesAlpha =
-            displayColorTransform && !displayColorTransform->isAlphaUnchanged();
-
-    // Find the max layer white point to determine the max luminance of the scene...
-    const float maxLayerWhitePoint = std::transform_reduce(
-            layers.cbegin(), layers.cend(), 0.f,
-            [](float left, float right) { return std::max(left, right); },
-            [&](const auto& l) { return l.whitePointNits; });
-
-    // ...and compute the dimming ratio if dimming is requested
-    const float displayDimmingRatio = display.targetLuminanceNits > 0.f &&
-                    maxLayerWhitePoint > 0.f && display.targetLuminanceNits > maxLayerWhitePoint
-            ? maxLayerWhitePoint / display.targetLuminanceNits
-            : 1.f;
-
-    // Find if any layers have requested blur, we'll use that info to decide when to render to an
-    // offscreen buffer and when to render to the native buffer.
-    sk_sp<SkSurface> activeSurface(dstSurface);
-    SkCanvas* canvas = dstCanvas;
-    SkiaCapture::OffscreenState offscreenCaptureState;
-    const LayerSettings* blurCompositionLayer = nullptr;
-    if (mBlurFilter) {
-        bool requiresCompositionLayer = false;
-        for (const auto& layer : layers) {
-            // if the layer doesn't have blur or it is not visible then continue
-            if (!layerHasBlur(layer, ctModifiesAlpha)) {
-                continue;
-            }
-            if (layer.backgroundBlurRadius > 0 &&
-                layer.backgroundBlurRadius < mBlurFilter->getMaxCrossFadeRadius()) {
-                requiresCompositionLayer = true;
-            }
-            for (auto region : layer.blurRegions) {
-                if (region.blurRadius < mBlurFilter->getMaxCrossFadeRadius()) {
-                    requiresCompositionLayer = true;
-                }
-            }
-            if (requiresCompositionLayer) {
-                activeSurface = dstSurface->makeSurface(dstSurface->imageInfo());
-                canvas = mCapture->tryOffscreenCapture(activeSurface.get(), &offscreenCaptureState);
-                blurCompositionLayer = &layer;
-                break;
-            }
-        }
-    }
-
-    AutoSaveRestore surfaceAutoSaveRestore(canvas);
-    // Clear the entire canvas with a transparent black to prevent ghost images.
-    canvas->clear(SK_ColorTRANSPARENT);
-    initCanvas(canvas, display);
-
-    if (kPrintLayerSettings) {
-        logSettings(display);
-    }
-    for (const auto& layer : layers) {
-        ATRACE_FORMAT("DrawLayer: %s", layer.name.c_str());
-
-        if (kPrintLayerSettings) {
-            logSettings(layer);
-        }
-
-        sk_sp<SkImage> blurInput;
-        if (blurCompositionLayer == &layer) {
-            LOG_ALWAYS_FATAL_IF(activeSurface == dstSurface);
-            LOG_ALWAYS_FATAL_IF(canvas == dstCanvas);
-
-            // save a snapshot of the activeSurface to use as input to the blur shaders
-            blurInput = activeSurface->makeImageSnapshot();
-
-            // blit the offscreen framebuffer into the destination AHB, but only
-            // if there are blur regions. backgroundBlurRadius blurs the entire
-            // image below, so it can skip this step.
-            if (layer.blurRegions.size()) {
-                SkPaint paint;
-                paint.setBlendMode(SkBlendMode::kSrc);
-                if (CC_UNLIKELY(mCapture->isCaptureRunning())) {
-                    uint64_t id = mCapture->endOffscreenCapture(&offscreenCaptureState);
-                    dstCanvas->drawAnnotation(SkRect::Make(dstCanvas->imageInfo().dimensions()),
-                                              String8::format("SurfaceID|%" PRId64, id).c_str(),
-                                              nullptr);
-                    dstCanvas->drawImage(blurInput, 0, 0, SkSamplingOptions(), &paint);
-                } else {
-                    activeSurface->draw(dstCanvas, 0, 0, SkSamplingOptions(), &paint);
-                }
-            }
-
-            // assign dstCanvas to canvas and ensure that the canvas state is up to date
-            canvas = dstCanvas;
-            surfaceAutoSaveRestore.replace(canvas);
-            initCanvas(canvas, display);
-
-            LOG_ALWAYS_FATAL_IF(activeSurface->getCanvas()->getSaveCount() !=
-                                dstSurface->getCanvas()->getSaveCount());
-            LOG_ALWAYS_FATAL_IF(activeSurface->getCanvas()->getTotalMatrix() !=
-                                dstSurface->getCanvas()->getTotalMatrix());
-
-            // assign dstSurface to activeSurface
-            activeSurface = dstSurface;
-        }
-
-        SkAutoCanvasRestore layerAutoSaveRestore(canvas, true);
-        if (CC_UNLIKELY(mCapture->isCaptureRunning())) {
-            // Record the name of the layer if the capture is running.
-            std::stringstream layerSettings;
-            PrintTo(layer, &layerSettings);
-            // Store the LayerSettings in additional information.
-            canvas->drawAnnotation(SkRect::MakeEmpty(), layer.name.c_str(),
-                                   SkData::MakeWithCString(layerSettings.str().c_str()));
-        }
-        // Layers have a local transform that should be applied to them
-        canvas->concat(getSkM44(layer.geometry.positionTransform).asM33());
-
-        const auto [bounds, roundRectClip] =
-                getBoundsAndClip(layer.geometry.boundaries, layer.geometry.roundedCornersCrop,
-                                 layer.geometry.roundedCornersRadius);
-        if (mBlurFilter && layerHasBlur(layer, ctModifiesAlpha)) {
-            std::unordered_map<uint32_t, sk_sp<SkImage>> cachedBlurs;
-
-            // if multiple layers have blur, then we need to take a snapshot now because
-            // only the lowest layer will have blurImage populated earlier
-            if (!blurInput) {
-                blurInput = activeSurface->makeImageSnapshot();
-            }
-            // rect to be blurred in the coordinate space of blurInput
-            const auto blurRect = canvas->getTotalMatrix().mapRect(bounds.rect());
-
-            // if the clip needs to be applied then apply it now and make sure
-            // it is restored before we attempt to draw any shadows.
-            SkAutoCanvasRestore acr(canvas, true);
-            if (!roundRectClip.isEmpty()) {
-                canvas->clipRRect(roundRectClip, true);
-            }
-
-            // TODO(b/182216890): Filter out empty layers earlier
-            if (blurRect.width() > 0 && blurRect.height() > 0) {
-                if (layer.backgroundBlurRadius > 0) {
-                    ATRACE_NAME("BackgroundBlur");
-                    auto blurredImage = mBlurFilter->generate(grContext, layer.backgroundBlurRadius,
-                                                              blurInput, blurRect);
-
-                    cachedBlurs[layer.backgroundBlurRadius] = blurredImage;
-
-                    mBlurFilter->drawBlurRegion(canvas, bounds, layer.backgroundBlurRadius, 1.0f,
-                                                blurRect, blurredImage, blurInput);
-                }
-
-                canvas->concat(getSkM44(layer.blurRegionTransform).asM33());
-                for (auto region : layer.blurRegions) {
-                    if (cachedBlurs[region.blurRadius] == nullptr) {
-                        ATRACE_NAME("BlurRegion");
-                        cachedBlurs[region.blurRadius] =
-                                mBlurFilter->generate(grContext, region.blurRadius, blurInput,
-                                                      blurRect);
-                    }
-
-                    mBlurFilter->drawBlurRegion(canvas, getBlurRRect(region), region.blurRadius,
-                                                region.alpha, blurRect,
-                                                cachedBlurs[region.blurRadius], blurInput);
-                }
-            }
-        }
-
-        if (layer.shadow.length > 0) {
-            // This would require a new parameter/flag to SkShadowUtils::DrawShadow
-            LOG_ALWAYS_FATAL_IF(layer.disableBlending, "Cannot disableBlending with a shadow");
-
-            SkRRect shadowBounds, shadowClip;
-            if (layer.geometry.boundaries == layer.shadow.boundaries) {
-                shadowBounds = bounds;
-                shadowClip = roundRectClip;
-            } else {
-                std::tie(shadowBounds, shadowClip) =
-                        getBoundsAndClip(layer.shadow.boundaries, layer.geometry.roundedCornersCrop,
-                                         layer.geometry.roundedCornersRadius);
-            }
-
-            // Technically, if bounds is a rect and roundRectClip is not empty,
-            // it means that the bounds and roundedCornersCrop were different
-            // enough that we should intersect them to find the proper shadow.
-            // In practice, this often happens when the two rectangles appear to
-            // not match due to rounding errors. Draw the rounded version, which
-            // looks more like the intent.
-            const auto& rrect =
-                    shadowBounds.isRect() && !shadowClip.isEmpty() ? shadowClip : shadowBounds;
-            drawShadow(canvas, rrect, layer.shadow);
-        }
-
-        const float layerDimmingRatio = layer.whitePointNits <= 0.f
-                ? displayDimmingRatio
-                : (layer.whitePointNits / maxLayerWhitePoint) * displayDimmingRatio;
-
-        const bool dimInLinearSpace = display.dimmingStage !=
-                aidl::android::hardware::graphics::composer3::DimmingStage::GAMMA_OETF;
-
-        const bool requiresLinearEffect = layer.colorTransform != mat4() ||
-                (mUseColorManagement &&
-                 needsToneMapping(layer.sourceDataspace, display.outputDataspace)) ||
-                (dimInLinearSpace && !equalsWithinMargin(1.f, layerDimmingRatio));
-
-        // quick abort from drawing the remaining portion of the layer
-        if (layer.skipContentDraw ||
-            (layer.alpha == 0 && !requiresLinearEffect && !layer.disableBlending &&
-             (!displayColorTransform || displayColorTransform->isAlphaUnchanged()))) {
-            continue;
-        }
-
-        // If we need to map to linear space or color management is disabled, then mark the source
-        // image with the same colorspace as the destination surface so that Skia's color
-        // management is a no-op.
-        const ui::Dataspace layerDataspace = (!mUseColorManagement || requiresLinearEffect)
-                ? dstDataspace
-                : layer.sourceDataspace;
-
-        SkPaint paint;
-        if (layer.source.buffer.buffer) {
-            ATRACE_NAME("DrawImage");
-            validateInputBufferUsage(layer.source.buffer.buffer->getBuffer());
-            const auto& item = layer.source.buffer;
-            std::shared_ptr<AutoBackendTexture::LocalRef> imageTextureRef = nullptr;
-
-            if (const auto& iter = cache.find(item.buffer->getBuffer()->getId());
-                iter != cache.end()) {
-                imageTextureRef = iter->second;
-            } else {
-                // If we didn't find the image in the cache, then create a local ref but don't cache
-                // it. If we're using skia, we're guaranteed to run on a dedicated GPU thread so if
-                // we didn't find anything in the cache then we intentionally did not cache this
-                // buffer's resources.
-                imageTextureRef = std::make_shared<
-                        AutoBackendTexture::LocalRef>(grContext,
-                                                      item.buffer->getBuffer()->toAHardwareBuffer(),
-                                                      false, mTextureCleanupMgr);
-            }
-
-            // if the layer's buffer has a fence, then we must must respect the fence prior to using
-            // the buffer.
-            if (layer.source.buffer.fence != nullptr) {
-                waitFence(layer.source.buffer.fence->get());
-            }
-
-            // isOpaque means we need to ignore the alpha in the image,
-            // replacing it with the alpha specified by the LayerSettings. See
-            // https://developer.android.com/reference/android/view/SurfaceControl.Builder#setOpaque(boolean)
-            // The proper way to do this is to use an SkColorType that ignores
-            // alpha, like kRGB_888x_SkColorType, and that is used if the
-            // incoming image is kRGBA_8888_SkColorType. However, the incoming
-            // image may be kRGBA_F16_SkColorType, for which there is no RGBX
-            // SkColorType, or kRGBA_1010102_SkColorType, for which we have
-            // kRGB_101010x_SkColorType, but it is not yet supported as a source
-            // on the GPU. (Adding both is tracked in skbug.com/12048.) In the
-            // meantime, we'll use a workaround that works unless we need to do
-            // any color conversion. The workaround requires that we pretend the
-            // image is already premultiplied, so that we do not premultiply it
-            // before applying SkBlendMode::kPlus.
-            const bool useIsOpaqueWorkaround = item.isOpaque &&
-                    (imageTextureRef->colorType() == kRGBA_1010102_SkColorType ||
-                     imageTextureRef->colorType() == kRGBA_F16_SkColorType);
-            const auto alphaType = useIsOpaqueWorkaround ? kPremul_SkAlphaType
-                    : item.isOpaque                      ? kOpaque_SkAlphaType
-                    : item.usePremultipliedAlpha         ? kPremul_SkAlphaType
-                                                         : kUnpremul_SkAlphaType;
-            sk_sp<SkImage> image = imageTextureRef->makeImage(layerDataspace, alphaType, grContext);
-
-            auto texMatrix = getSkM44(item.textureTransform).asM33();
-            // textureTansform was intended to be passed directly into a shader, so when
-            // building the total matrix with the textureTransform we need to first
-            // normalize it, then apply the textureTransform, then scale back up.
-            texMatrix.preScale(1.0f / bounds.width(), 1.0f / bounds.height());
-            texMatrix.postScale(image->width(), image->height());
-
-            SkMatrix matrix;
-            if (!texMatrix.invert(&matrix)) {
-                matrix = texMatrix;
-            }
-            // The shader does not respect the translation, so we add it to the texture
-            // transform for the SkImage. This will make sure that the correct layer contents
-            // are drawn in the correct part of the screen.
-            matrix.postTranslate(bounds.rect().fLeft, bounds.rect().fTop);
-
-            sk_sp<SkShader> shader;
-
-            if (layer.source.buffer.useTextureFiltering) {
-                shader = image->makeShader(SkTileMode::kClamp, SkTileMode::kClamp,
-                                           SkSamplingOptions(
-                                                   {SkFilterMode::kLinear, SkMipmapMode::kNone}),
-                                           &matrix);
-            } else {
-                shader = image->makeShader(SkSamplingOptions(), matrix);
-            }
-
-            if (useIsOpaqueWorkaround) {
-                shader = SkShaders::Blend(SkBlendMode::kPlus, shader,
-                                          SkShaders::Color(SkColors::kBlack,
-                                                           toSkColorSpace(layerDataspace)));
-            }
-
-            paint.setShader(createRuntimeEffectShader(
-                    RuntimeEffectShaderParameters{.shader = shader,
-                                                  .layer = layer,
-                                                  .display = display,
-                                                  .undoPremultipliedAlpha = !item.isOpaque &&
-                                                          item.usePremultipliedAlpha,
-                                                  .requiresLinearEffect = requiresLinearEffect,
-                                                  .layerDimmingRatio = dimInLinearSpace
-                                                          ? layerDimmingRatio
-                                                          : 1.f}));
-
-            // Turn on dithering when dimming beyond this (arbitrary) threshold...
-            static constexpr float kDimmingThreshold = 0.2f;
-            // ...or we're rendering an HDR layer down to an 8-bit target
-            // Most HDR standards require at least 10-bits of color depth for source content, so we
-            // can just extract the transfer function rather than dig into precise gralloc layout.
-            // Furthermore, we can assume that the only 8-bit target we support is RGBA8888.
-            const bool requiresDownsample = isHdrDataspace(layer.sourceDataspace) &&
-                    buffer->getPixelFormat() == PIXEL_FORMAT_RGBA_8888;
-            if (layerDimmingRatio <= kDimmingThreshold || requiresDownsample) {
-                paint.setDither(true);
-            }
-            paint.setAlphaf(layer.alpha);
-
-            if (imageTextureRef->colorType() == kAlpha_8_SkColorType) {
-                LOG_ALWAYS_FATAL_IF(layer.disableBlending, "Cannot disableBlending with A8");
-
-                // SysUI creates the alpha layer as a coverage layer, which is
-                // appropriate for the DPU. Use a color matrix to convert it to
-                // a mask.
-                // TODO (b/219525258): Handle input as a mask.
-                //
-                // The color matrix will convert A8 pixels with no alpha to
-                // black, as described by this vector. If the display handles
-                // the color transform, we need to invert it to find the color
-                // that will result in black after the DPU applies the transform.
-                SkV4 black{0.0f, 0.0f, 0.0f, 1.0f}; // r, g, b, a
-                if (display.colorTransform != mat4() && display.deviceHandlesColorTransform) {
-                    SkM44 colorSpaceMatrix = getSkM44(display.colorTransform);
-                    if (colorSpaceMatrix.invert(&colorSpaceMatrix)) {
-                        black = colorSpaceMatrix * black;
-                    } else {
-                        // We'll just have to use 0,0,0 as black, which should
-                        // be close to correct.
-                        ALOGI("Could not invert colorTransform!");
-                    }
-                }
-                SkColorMatrix colorMatrix(0, 0, 0, 0, black[0],
-                                          0, 0, 0, 0, black[1],
-                                          0, 0, 0, 0, black[2],
-                                          0, 0, 0, -1, 1);
-                if (display.colorTransform != mat4() && !display.deviceHandlesColorTransform) {
-                    // On the other hand, if the device doesn't handle it, we
-                    // have to apply it ourselves.
-                    colorMatrix.postConcat(toSkColorMatrix(display.colorTransform));
-                }
-                paint.setColorFilter(SkColorFilters::Matrix(colorMatrix));
-            }
-        } else {
-            ATRACE_NAME("DrawColor");
-            const auto color = layer.source.solidColor;
-            sk_sp<SkShader> shader = SkShaders::Color(SkColor4f{.fR = color.r,
-                                                                .fG = color.g,
-                                                                .fB = color.b,
-                                                                .fA = layer.alpha},
-                                                      toSkColorSpace(layerDataspace));
-            paint.setShader(createRuntimeEffectShader(
-                    RuntimeEffectShaderParameters{.shader = shader,
-                                                  .layer = layer,
-                                                  .display = display,
-                                                  .undoPremultipliedAlpha = false,
-                                                  .requiresLinearEffect = requiresLinearEffect,
-                                                  .layerDimmingRatio = layerDimmingRatio}));
-        }
-
-        if (layer.disableBlending) {
-            paint.setBlendMode(SkBlendMode::kSrc);
-        }
-
-        // An A8 buffer will already have the proper color filter attached to
-        // its paint, including the displayColorTransform as needed.
-        if (!paint.getColorFilter()) {
-            if (!dimInLinearSpace && !equalsWithinMargin(1.0, layerDimmingRatio)) {
-                // If we don't dim in linear space, then when we gamma correct the dimming ratio we
-                // can assume a gamma 2.2 transfer function.
-                static constexpr float kInverseGamma22 = 1.f / 2.2f;
-                const auto gammaCorrectedDimmingRatio =
-                        std::pow(layerDimmingRatio, kInverseGamma22);
-                auto dimmingMatrix =
-                        mat4::scale(vec4(gammaCorrectedDimmingRatio, gammaCorrectedDimmingRatio,
-                                         gammaCorrectedDimmingRatio, 1.f));
-
-                const auto colorFilter =
-                        SkColorFilters::Matrix(toSkColorMatrix(std::move(dimmingMatrix)));
-                paint.setColorFilter(displayColorTransform
-                                             ? displayColorTransform->makeComposed(colorFilter)
-                                             : colorFilter);
-            } else {
-                paint.setColorFilter(displayColorTransform);
-            }
-        }
-
-        if (!roundRectClip.isEmpty()) {
-            canvas->clipRRect(roundRectClip, true);
-        }
-
-        if (!bounds.isRect()) {
-            paint.setAntiAlias(true);
-            canvas->drawRRect(bounds, paint);
-        } else {
-            canvas->drawRect(bounds.rect(), paint);
-        }
-        if (kFlushAfterEveryLayer) {
-            ATRACE_NAME("flush surface");
-            activeSurface->flush();
-        }
-    }
-    surfaceAutoSaveRestore.restore();
-    mCapture->endCapture();
-    {
-        ATRACE_NAME("flush surface");
-        LOG_ALWAYS_FATAL_IF(activeSurface != dstSurface);
-        activeSurface->flush();
-    }
-
-    base::unique_fd drawFence = flush();
-
-    // If flush failed or we don't support native fences, we need to force the
-    // gl command stream to be executed.
-    bool requireSync = drawFence.get() < 0;
-    if (requireSync) {
-        ATRACE_BEGIN("Submit(sync=true)");
-    } else {
-        ATRACE_BEGIN("Submit(sync=false)");
-    }
-    bool success = grContext->submit(requireSync);
-    ATRACE_END();
-    if (!success) {
-        ALOGE("Failed to flush RenderEngine commands");
-        // Chances are, something illegal happened (either the caller passed
-        // us bad parameters, or we messed up our shader generation).
-        resultPromise->set_value({INVALID_OPERATION, std::move(drawFence)});
-        return;
-    }
-
-    // checkErrors();
-    resultPromise->set_value({NO_ERROR, std::move(drawFence)});
-    return;
-}
-
-inline SkRect SkiaGLRenderEngine::getSkRect(const FloatRect& rect) {
-    return SkRect::MakeLTRB(rect.left, rect.top, rect.right, rect.bottom);
-}
-
-inline SkRect SkiaGLRenderEngine::getSkRect(const Rect& rect) {
-    return SkRect::MakeLTRB(rect.left, rect.top, rect.right, rect.bottom);
-}
-
-/**
- *  Verifies that common, simple bounds + clip combinations can be converted into
- *  a single RRect draw call, returning true if possible. If true, the radii parameter
- *  will be filled with the correct radii values that, combined with the bounds param, will
- *  produce the intersected roundRect. If false, the returned state of the radii param is undefined.
- */
-static bool intersectionIsRoundRect(const SkRect& bounds, const SkRect& crop,
-                                    const SkRect& insetCrop, const vec2& cornerRadius,
-                                    SkVector radii[4]) {
-    const bool leftEqual = bounds.fLeft == crop.fLeft;
-    const bool topEqual = bounds.fTop == crop.fTop;
-    const bool rightEqual = bounds.fRight == crop.fRight;
-    const bool bottomEqual = bounds.fBottom == crop.fBottom;
-
-    // In the event that the corners of the bounds only partially align with the crop we
-    // need to ensure that the resulting shape can still be represented as a round rect.
-    // In particular the round rect implementation will scale the value of all corner radii
-    // if the sum of the radius along any edge is greater than the length of that edge.
-    // See https://www.w3.org/TR/css-backgrounds-3/#corner-overlap
-    const bool requiredWidth = bounds.width() > (cornerRadius.x * 2);
-    const bool requiredHeight = bounds.height() > (cornerRadius.y * 2);
-    if (!requiredWidth || !requiredHeight) {
-        return false;
-    }
-
-    // Check each cropped corner to ensure that it exactly matches the crop or its corner is
-    // contained within the cropped shape and does not need rounding.
-    // compute the UpperLeft corner radius
-    if (leftEqual && topEqual) {
-        radii[0].set(cornerRadius.x, cornerRadius.y);
-    } else if ((leftEqual && bounds.fTop >= insetCrop.fTop) ||
-               (topEqual && bounds.fLeft >= insetCrop.fLeft)) {
-        radii[0].set(0, 0);
-    } else {
-        return false;
-    }
-    // compute the UpperRight corner radius
-    if (rightEqual && topEqual) {
-        radii[1].set(cornerRadius.x, cornerRadius.y);
-    } else if ((rightEqual && bounds.fTop >= insetCrop.fTop) ||
-               (topEqual && bounds.fRight <= insetCrop.fRight)) {
-        radii[1].set(0, 0);
-    } else {
-        return false;
-    }
-    // compute the BottomRight corner radius
-    if (rightEqual && bottomEqual) {
-        radii[2].set(cornerRadius.x, cornerRadius.y);
-    } else if ((rightEqual && bounds.fBottom <= insetCrop.fBottom) ||
-               (bottomEqual && bounds.fRight <= insetCrop.fRight)) {
-        radii[2].set(0, 0);
-    } else {
-        return false;
-    }
-    // compute the BottomLeft corner radius
-    if (leftEqual && bottomEqual) {
-        radii[3].set(cornerRadius.x, cornerRadius.y);
-    } else if ((leftEqual && bounds.fBottom <= insetCrop.fBottom) ||
-               (bottomEqual && bounds.fLeft >= insetCrop.fLeft)) {
-        radii[3].set(0, 0);
-    } else {
-        return false;
-    }
-
-    return true;
-}
-
-inline std::pair<SkRRect, SkRRect> SkiaGLRenderEngine::getBoundsAndClip(const FloatRect& boundsRect,
-                                                                        const FloatRect& cropRect,
-                                                                        const vec2& cornerRadius) {
-    const SkRect bounds = getSkRect(boundsRect);
-    const SkRect crop = getSkRect(cropRect);
-
-    SkRRect clip;
-    if (cornerRadius.x > 0 && cornerRadius.y > 0) {
-        // if the crop and the bounds are equivalent or there is no crop then we don't need a clip
-        if (bounds == crop || crop.isEmpty()) {
-            return {SkRRect::MakeRectXY(bounds, cornerRadius.x, cornerRadius.y), clip};
-        }
-
-        // This makes an effort to speed up common, simple bounds + clip combinations by
-        // converting them to a single RRect draw. It is possible there are other cases
-        // that can be converted.
-        if (crop.contains(bounds)) {
-            const auto insetCrop = crop.makeInset(cornerRadius.x, cornerRadius.y);
-            if (insetCrop.contains(bounds)) {
-                return {SkRRect::MakeRect(bounds), clip}; // clip is empty - no rounding required
-            }
-
-            SkVector radii[4];
-            if (intersectionIsRoundRect(bounds, crop, insetCrop, cornerRadius, radii)) {
-                SkRRect intersectionBounds;
-                intersectionBounds.setRectRadii(bounds, radii);
-                return {intersectionBounds, clip};
-            }
-        }
-
-        // we didn't hit any of our fast paths so set the clip to the cropRect
-        clip.setRectXY(crop, cornerRadius.x, cornerRadius.y);
-    }
-
-    // if we hit this point then we either don't have rounded corners or we are going to rely
-    // on the clip to round the corners for us
-    return {SkRRect::MakeRect(bounds), clip};
-}
-
-inline bool SkiaGLRenderEngine::layerHasBlur(const LayerSettings& layer,
-                                             bool colorTransformModifiesAlpha) {
-    if (layer.backgroundBlurRadius > 0 || layer.blurRegions.size()) {
-        // return false if the content is opaque and would therefore occlude the blur
-        const bool opaqueContent = !layer.source.buffer.buffer || layer.source.buffer.isOpaque;
-        const bool opaqueAlpha = layer.alpha == 1.0f && !colorTransformModifiesAlpha;
-        return layer.skipContentDraw || !(opaqueContent && opaqueAlpha);
-    }
-    return false;
-}
-
-inline SkColor SkiaGLRenderEngine::getSkColor(const vec4& color) {
-    return SkColorSetARGB(color.a * 255, color.r * 255, color.g * 255, color.b * 255);
-}
-
-inline SkM44 SkiaGLRenderEngine::getSkM44(const mat4& matrix) {
-    return SkM44(matrix[0][0], matrix[1][0], matrix[2][0], matrix[3][0],
-                 matrix[0][1], matrix[1][1], matrix[2][1], matrix[3][1],
-                 matrix[0][2], matrix[1][2], matrix[2][2], matrix[3][2],
-                 matrix[0][3], matrix[1][3], matrix[2][3], matrix[3][3]);
-}
-
-inline SkPoint3 SkiaGLRenderEngine::getSkPoint3(const vec3& vector) {
-    return SkPoint3::Make(vector.x, vector.y, vector.z);
-}
-
-size_t SkiaGLRenderEngine::getMaxTextureSize() const {
-    return mGrContext->maxTextureSize();
-}
-
-size_t SkiaGLRenderEngine::getMaxViewportDims() const {
-    return mGrContext->maxRenderTargetSize();
-}
-
-void SkiaGLRenderEngine::drawShadow(SkCanvas* canvas, const SkRRect& casterRRect,
-                                    const ShadowSettings& settings) {
-    ATRACE_CALL();
-    const float casterZ = settings.length / 2.0f;
-    const auto flags =
-            settings.casterIsTranslucent ? kTransparentOccluder_ShadowFlag : kNone_ShadowFlag;
-
-    SkShadowUtils::DrawShadow(canvas, SkPath::RRect(casterRRect), SkPoint3::Make(0, 0, casterZ),
-                              getSkPoint3(settings.lightPos), settings.lightRadius,
-                              getSkColor(settings.ambientColor), getSkColor(settings.spotColor),
-                              flags);
+    return fenceFd;
 }
 
 EGLContext SkiaGLRenderEngine::createEglContext(EGLDisplay display, EGLConfig config,
@@ -1539,114 +519,14 @@
     return value;
 }
 
-void SkiaGLRenderEngine::onActiveDisplaySizeChanged(ui::Size size) {
-    // This cache multiplier was selected based on review of cache sizes relative
-    // to the screen resolution. Looking at the worst case memory needed by blur (~1.5x),
-    // shadows (~1x), and general data structures (e.g. vertex buffers) we selected this as a
-    // conservative default based on that analysis.
-    const float SURFACE_SIZE_MULTIPLIER = 3.5f * bytesPerPixel(mDefaultPixelFormat);
-    const int maxResourceBytes = size.width * size.height * SURFACE_SIZE_MULTIPLIER;
-
-    // start by resizing the current context
-    getActiveGrContext()->setResourceCacheLimit(maxResourceBytes);
-
-    // if it is possible to switch contexts then we will resize the other context
-    const bool originalProtectedState = mInProtectedContext;
-    useProtectedContext(!mInProtectedContext);
-    if (mInProtectedContext != originalProtectedState) {
-        getActiveGrContext()->setResourceCacheLimit(maxResourceBytes);
-        // reset back to the initial context that was active when this method was called
-        useProtectedContext(originalProtectedState);
-    }
-}
-
-void SkiaGLRenderEngine::dump(std::string& result) {
+void SkiaGLRenderEngine::appendBackendSpecificInfoToDump(std::string& result) {
     const gl::GLExtensions& extensions = gl::GLExtensions::getInstance();
-
-    StringAppendF(&result, "\n ------------RE-----------------\n");
+    StringAppendF(&result, "\n ------------RE GLES------------\n");
     StringAppendF(&result, "EGL implementation : %s\n", extensions.getEGLVersion());
     StringAppendF(&result, "%s\n", extensions.getEGLExtensions());
     StringAppendF(&result, "GLES: %s, %s, %s\n", extensions.getVendor(), extensions.getRenderer(),
                   extensions.getVersion());
     StringAppendF(&result, "%s\n", extensions.getExtensions());
-    StringAppendF(&result, "RenderEngine supports protected context: %d\n",
-                  supportsProtectedContent());
-    StringAppendF(&result, "RenderEngine is in protected context: %d\n", mInProtectedContext);
-    StringAppendF(&result, "RenderEngine shaders cached since last dump/primeCache: %d\n",
-                  mSkSLCacheMonitor.shadersCachedSinceLastCall());
-
-    std::vector<ResourcePair> cpuResourceMap = {
-            {"skia/sk_resource_cache/bitmap_", "Bitmaps"},
-            {"skia/sk_resource_cache/rrect-blur_", "Masks"},
-            {"skia/sk_resource_cache/rects-blur_", "Masks"},
-            {"skia/sk_resource_cache/tessellated", "Shadows"},
-            {"skia", "Other"},
-    };
-    SkiaMemoryReporter cpuReporter(cpuResourceMap, false);
-    SkGraphics::DumpMemoryStatistics(&cpuReporter);
-    StringAppendF(&result, "Skia CPU Caches: ");
-    cpuReporter.logTotals(result);
-    cpuReporter.logOutput(result);
-
-    {
-        std::lock_guard<std::mutex> lock(mRenderingMutex);
-
-        std::vector<ResourcePair> gpuResourceMap = {
-                {"texture_renderbuffer", "Texture/RenderBuffer"},
-                {"texture", "Texture"},
-                {"gr_text_blob_cache", "Text"},
-                {"skia", "Other"},
-        };
-        SkiaMemoryReporter gpuReporter(gpuResourceMap, true);
-        mGrContext->dumpMemoryStatistics(&gpuReporter);
-        StringAppendF(&result, "Skia's GPU Caches: ");
-        gpuReporter.logTotals(result);
-        gpuReporter.logOutput(result);
-        StringAppendF(&result, "Skia's Wrapped Objects:\n");
-        gpuReporter.logOutput(result, true);
-
-        StringAppendF(&result, "RenderEngine tracked buffers: %zu\n",
-                      mGraphicBufferExternalRefs.size());
-        StringAppendF(&result, "Dumping buffer ids...\n");
-        for (const auto& [id, refCounts] : mGraphicBufferExternalRefs) {
-            StringAppendF(&result, "- 0x%" PRIx64 " - %d refs \n", id, refCounts);
-        }
-        StringAppendF(&result, "RenderEngine AHB/BackendTexture cache size: %zu\n",
-                      mTextureCache.size());
-        StringAppendF(&result, "Dumping buffer ids...\n");
-        // TODO(178539829): It would be nice to know which layer these are coming from and what
-        // the texture sizes are.
-        for (const auto& [id, unused] : mTextureCache) {
-            StringAppendF(&result, "- 0x%" PRIx64 "\n", id);
-        }
-        StringAppendF(&result, "\n");
-
-        SkiaMemoryReporter gpuProtectedReporter(gpuResourceMap, true);
-        if (mProtectedGrContext) {
-            mProtectedGrContext->dumpMemoryStatistics(&gpuProtectedReporter);
-        }
-        StringAppendF(&result, "Skia's GPU Protected Caches: ");
-        gpuProtectedReporter.logTotals(result);
-        gpuProtectedReporter.logOutput(result);
-        StringAppendF(&result, "Skia's Protected Wrapped Objects:\n");
-        gpuProtectedReporter.logOutput(result, true);
-
-        StringAppendF(&result, "\n");
-        StringAppendF(&result, "RenderEngine runtime effects: %zu\n", mRuntimeEffects.size());
-        for (const auto& [linearEffect, unused] : mRuntimeEffects) {
-            StringAppendF(&result, "- inputDataspace: %s\n",
-                          dataspaceDetails(
-                                  static_cast<android_dataspace>(linearEffect.inputDataspace))
-                                  .c_str());
-            StringAppendF(&result, "- outputDataspace: %s\n",
-                          dataspaceDetails(
-                                  static_cast<android_dataspace>(linearEffect.outputDataspace))
-                                  .c_str());
-            StringAppendF(&result, "undoPremultipliedAlpha: %s\n",
-                          linearEffect.undoPremultipliedAlpha ? "true" : "false");
-        }
-    }
-    StringAppendF(&result, "\n");
 }
 
 } // namespace skia
diff --git a/libs/renderengine/skia/SkiaGLRenderEngine.h b/libs/renderengine/skia/SkiaGLRenderEngine.h
index 68c3363..af33110 100644
--- a/libs/renderengine/skia/SkiaGLRenderEngine.h
+++ b/libs/renderengine/skia/SkiaGLRenderEngine.h
@@ -41,6 +41,10 @@
 #include "filters/LinearEffect.h"
 #include "filters/StretchShaderFactory.h"
 
+class SkData;
+
+struct SkPoint3;
+
 namespace android {
 namespace renderengine {
 namespace skia {
@@ -48,36 +52,26 @@
 class SkiaGLRenderEngine : public skia::SkiaRenderEngine {
 public:
     static std::unique_ptr<SkiaGLRenderEngine> create(const RenderEngineCreationArgs& args);
+    ~SkiaGLRenderEngine() override;
+
+    int getContextPriority() override;
+
+protected:
+    // Implementations of abstract SkiaRenderEngine functions specific to
+    // rendering backend
+    virtual SkiaRenderEngine::Contexts createDirectContexts(const GrContextOptions& options);
+    bool supportsProtectedContentImpl() const override;
+    bool useProtectedContextImpl(GrProtected isProtected) override;
+    void waitFence(GrDirectContext* grContext, base::borrowed_fd fenceFd) override;
+    base::unique_fd flushAndSubmit(GrDirectContext* context) override;
+    void appendBackendSpecificInfoToDump(std::string& result) override;
+
+private:
     SkiaGLRenderEngine(const RenderEngineCreationArgs& args, EGLDisplay display, EGLContext ctxt,
                        EGLSurface placeholder, EGLContext protectedContext,
                        EGLSurface protectedPlaceholder);
-    ~SkiaGLRenderEngine() override EXCLUDES(mRenderingMutex);
-
-    std::future<void> primeCache() override;
-    void cleanupPostRender() override;
-    void cleanFramebufferCache() override{};
-    int getContextPriority() override;
-    bool isProtected() const override { return mInProtectedContext; }
-    bool supportsProtectedContent() const override;
-    void useProtectedContext(bool useProtectedContext) override;
-    bool supportsBackgroundBlur() override { return mBlurFilter != nullptr; }
-    void onActiveDisplaySizeChanged(ui::Size size) override;
-    int reportShadersCompiled() override;
-
-protected:
-    void dump(std::string& result) override;
-    size_t getMaxTextureSize() const override;
-    size_t getMaxViewportDims() const override;
-    void mapExternalTextureBuffer(const sp<GraphicBuffer>& buffer, bool isRenderable) override;
-    void unmapExternalTextureBuffer(const sp<GraphicBuffer>& buffer) override;
-    bool canSkipPostRenderCleanup() const override;
-    void drawLayersInternal(const std::shared_ptr<std::promise<RenderEngineResult>>&& resultPromise,
-                            const DisplaySettings& display,
-                            const std::vector<LayerSettings>& layers,
-                            const std::shared_ptr<ExternalTexture>& buffer,
-                            const bool useFramebufferCache, base::unique_fd&& bufferFence) override;
-
-private:
+    bool waitGpuFence(base::borrowed_fd fenceFd);
+    base::unique_fd flush();
     static EGLConfig chooseEglConfig(EGLDisplay display, int format, bool logConfig);
     static EGLContext createEglContext(EGLDisplay display, EGLConfig config,
                                        EGLContext shareContext,
@@ -85,107 +79,14 @@
                                        Protection protection);
     static std::optional<RenderEngine::ContextPriority> createContextPriority(
             const RenderEngineCreationArgs& args);
-    static EGLSurface createPlaceholderEglPbufferSurface(EGLDisplay display, EGLConfig config,
-                                                         int hwcFormat, Protection protection);
-    inline SkRect getSkRect(const FloatRect& layer);
-    inline SkRect getSkRect(const Rect& layer);
-    inline std::pair<SkRRect, SkRRect> getBoundsAndClip(const FloatRect& bounds,
-                                                        const FloatRect& crop,
-                                                        const vec2& cornerRadius);
-    inline bool layerHasBlur(const LayerSettings& layer, bool colorTransformModifiesAlpha);
-    inline SkColor getSkColor(const vec4& color);
-    inline SkM44 getSkM44(const mat4& matrix);
-    inline SkPoint3 getSkPoint3(const vec3& vector);
-    inline GrDirectContext* getActiveGrContext() const;
-
-    base::unique_fd flush();
-    // waitFence attempts to wait on the GPU, and if unable to, waits on the CPU instead.
-    void waitFence(base::borrowed_fd fenceFd);
-    bool waitGpuFence(base::borrowed_fd fenceFd);
-
-    void initCanvas(SkCanvas* canvas, const DisplaySettings& display);
-    void drawShadow(SkCanvas* canvas, const SkRRect& casterRRect,
-                    const ShadowSettings& shadowSettings);
-
-    // If requiresLinearEffect is true or the layer has a stretchEffect a new shader is returned.
-    // Otherwise it returns the input shader.
-    struct RuntimeEffectShaderParameters {
-        sk_sp<SkShader> shader;
-        const LayerSettings& layer;
-        const DisplaySettings& display;
-        bool undoPremultipliedAlpha;
-        bool requiresLinearEffect;
-        float layerDimmingRatio;
-    };
-    sk_sp<SkShader> createRuntimeEffectShader(const RuntimeEffectShaderParameters&);
+    static EGLSurface createPlaceholderEglPbufferSurface(
+            EGLDisplay display, EGLConfig config, int hwcFormat, Protection protection);
 
     EGLDisplay mEGLDisplay;
     EGLContext mEGLContext;
     EGLSurface mPlaceholderSurface;
     EGLContext mProtectedEGLContext;
     EGLSurface mProtectedPlaceholderSurface;
-    BlurFilter* mBlurFilter = nullptr;
-
-    const PixelFormat mDefaultPixelFormat;
-    const bool mUseColorManagement;
-
-    // Identifier used for various mappings of layers to various
-    // textures or shaders
-    using GraphicBufferId = uint64_t;
-
-    // Number of external holders of ExternalTexture references, per GraphicBuffer ID.
-    std::unordered_map<GraphicBufferId, int32_t> mGraphicBufferExternalRefs
-            GUARDED_BY(mRenderingMutex);
-    // Cache of GL textures that we'll store per GraphicBuffer ID, shared between GPU contexts.
-    std::unordered_map<GraphicBufferId, std::shared_ptr<AutoBackendTexture::LocalRef>> mTextureCache
-            GUARDED_BY(mRenderingMutex);
-    std::unordered_map<shaders::LinearEffect, sk_sp<SkRuntimeEffect>, shaders::LinearEffectHasher>
-            mRuntimeEffects;
-    AutoBackendTexture::CleanupManager mTextureCleanupMgr GUARDED_BY(mRenderingMutex);
-
-    StretchShaderFactory mStretchShaderFactory;
-    // Mutex guarding rendering operations, so that:
-    // 1. GL operations aren't interleaved, and
-    // 2. Internal state related to rendering that is potentially modified by
-    // multiple threads is guaranteed thread-safe.
-    mutable std::mutex mRenderingMutex;
-
-    sp<Fence> mLastDrawFence;
-
-    // Graphics context used for creating surfaces and submitting commands
-    sk_sp<GrDirectContext> mGrContext;
-    // Same as above, but for protected content (eg. DRM)
-    sk_sp<GrDirectContext> mProtectedGrContext;
-
-    bool mInProtectedContext = false;
-    // Object to capture commands sent to Skia.
-    std::unique_ptr<SkiaCapture> mCapture;
-
-    // Implements PersistentCache as a way to monitor what SkSL shaders Skia has
-    // cached.
-    class SkSLCacheMonitor : public GrContextOptions::PersistentCache {
-    public:
-        SkSLCacheMonitor() = default;
-        ~SkSLCacheMonitor() override = default;
-
-        sk_sp<SkData> load(const SkData& key) override;
-
-        void store(const SkData& key, const SkData& data, const SkString& description) override;
-
-        int shadersCachedSinceLastCall() {
-            const int shadersCachedSinceLastCall = mShadersCachedSinceLastCall;
-            mShadersCachedSinceLastCall = 0;
-            return shadersCachedSinceLastCall;
-        }
-
-        int totalShadersCompiled() const { return mTotalShadersCompiled; }
-
-    private:
-        int mShadersCachedSinceLastCall = 0;
-        int mTotalShadersCompiled = 0;
-    };
-
-    SkSLCacheMonitor mSkSLCacheMonitor;
 };
 
 } // namespace skia
diff --git a/libs/renderengine/skia/SkiaRenderEngine.cpp b/libs/renderengine/skia/SkiaRenderEngine.cpp
index 1fb24f5..fda6ea1 100644
--- a/libs/renderengine/skia/SkiaRenderEngine.cpp
+++ b/libs/renderengine/skia/SkiaRenderEngine.cpp
@@ -20,16 +20,1261 @@
 
 #include "SkiaRenderEngine.h"
 
+#include <GrBackendSemaphore.h>
+#include <GrContextOptions.h>
+#include <SkBlendMode.h>
+#include <SkCanvas.h>
+#include <SkColor.h>
+#include <SkColorFilter.h>
+#include <SkColorMatrix.h>
+#include <SkColorSpace.h>
+#include <SkData.h>
+#include <SkGraphics.h>
+#include <SkImage.h>
+#include <SkImageFilters.h>
+#include <SkImageInfo.h>
+#include <SkM44.h>
+#include <SkMatrix.h>
+#include <SkPaint.h>
+#include <SkPath.h>
+#include <SkPoint.h>
+#include <SkPoint3.h>
+#include <SkRRect.h>
+#include <SkRect.h>
+#include <SkRefCnt.h>
+#include <SkRegion.h>
+#include <SkRuntimeEffect.h>
+#include <SkSamplingOptions.h>
+#include <SkScalar.h>
+#include <SkShader.h>
+#include <SkShadowUtils.h>
+#include <SkString.h>
+#include <SkSurface.h>
+#include <SkTileMode.h>
+#include <android-base/stringprintf.h>
+#include <gui/FenceMonitor.h>
+#include <gui/TraceUtils.h>
+#include <pthread.h>
 #include <src/core/SkTraceEventCommon.h>
+#include <sync/sync.h>
+#include <ui/BlurRegion.h>
+#include <ui/DataspaceUtils.h>
+#include <ui/DebugUtils.h>
+#include <ui/GraphicBuffer.h>
+#include <utils/Trace.h>
+
+#include <cmath>
+#include <cstdint>
+#include <deque>
+#include <memory>
+#include <numeric>
+
+#include "Cache.h"
+#include "ColorSpaces.h"
+#include "filters/BlurFilter.h"
+#include "filters/GaussianBlurFilter.h"
+#include "filters/KawaseBlurFilter.h"
+#include "filters/LinearEffect.h"
+#include "log/log_main.h"
+#include "skia/debug/SkiaCapture.h"
+#include "skia/debug/SkiaMemoryReporter.h"
+#include "skia/filters/StretchShaderFactory.h"
+#include "system/graphics-base-v1.0.h"
+
+namespace {
+
+// Debugging settings
+static const bool kPrintLayerSettings = false;
+static const bool kFlushAfterEveryLayer = kPrintLayerSettings;
+static constexpr bool kEnableLayerBrightening = true;
+
+} // namespace
+
+// Utility functions related to SkRect
+
+namespace {
+
+static inline SkRect getSkRect(const android::FloatRect& rect) {
+    return SkRect::MakeLTRB(rect.left, rect.top, rect.right, rect.bottom);
+}
+
+static inline SkRect getSkRect(const android::Rect& rect) {
+    return SkRect::MakeLTRB(rect.left, rect.top, rect.right, rect.bottom);
+}
+
+/**
+ *  Verifies that common, simple bounds + clip combinations can be converted into
+ *  a single RRect draw call, returning true if possible. If true, the radii parameter
+ *  will be filled with the correct radii values that, combined with the bounds param, will
+ *  produce the intersected roundRect. If false, the returned state of the radii param is undefined.
+ */
+static bool intersectionIsRoundRect(const SkRect& bounds, const SkRect& crop,
+                                    const SkRect& insetCrop, const android::vec2& cornerRadius,
+                                    SkVector radii[4]) {
+    const bool leftEqual = bounds.fLeft == crop.fLeft;
+    const bool topEqual = bounds.fTop == crop.fTop;
+    const bool rightEqual = bounds.fRight == crop.fRight;
+    const bool bottomEqual = bounds.fBottom == crop.fBottom;
+
+    // In the event that the corners of the bounds only partially align with the crop we
+    // need to ensure that the resulting shape can still be represented as a round rect.
+    // In particular the round rect implementation will scale the value of all corner radii
+    // if the sum of the radius along any edge is greater than the length of that edge.
+    // See https://www.w3.org/TR/css-backgrounds-3/#corner-overlap
+    const bool requiredWidth = bounds.width() > (cornerRadius.x * 2);
+    const bool requiredHeight = bounds.height() > (cornerRadius.y * 2);
+    if (!requiredWidth || !requiredHeight) {
+        return false;
+    }
+
+    // Check each cropped corner to ensure that it exactly matches the crop or its corner is
+    // contained within the cropped shape and does not need rounding.
+    // compute the UpperLeft corner radius
+    if (leftEqual && topEqual) {
+        radii[0].set(cornerRadius.x, cornerRadius.y);
+    } else if ((leftEqual && bounds.fTop >= insetCrop.fTop) ||
+               (topEqual && bounds.fLeft >= insetCrop.fLeft)) {
+        radii[0].set(0, 0);
+    } else {
+        return false;
+    }
+    // compute the UpperRight corner radius
+    if (rightEqual && topEqual) {
+        radii[1].set(cornerRadius.x, cornerRadius.y);
+    } else if ((rightEqual && bounds.fTop >= insetCrop.fTop) ||
+               (topEqual && bounds.fRight <= insetCrop.fRight)) {
+        radii[1].set(0, 0);
+    } else {
+        return false;
+    }
+    // compute the BottomRight corner radius
+    if (rightEqual && bottomEqual) {
+        radii[2].set(cornerRadius.x, cornerRadius.y);
+    } else if ((rightEqual && bounds.fBottom <= insetCrop.fBottom) ||
+               (bottomEqual && bounds.fRight <= insetCrop.fRight)) {
+        radii[2].set(0, 0);
+    } else {
+        return false;
+    }
+    // compute the BottomLeft corner radius
+    if (leftEqual && bottomEqual) {
+        radii[3].set(cornerRadius.x, cornerRadius.y);
+    } else if ((leftEqual && bounds.fBottom <= insetCrop.fBottom) ||
+               (bottomEqual && bounds.fLeft >= insetCrop.fLeft)) {
+        radii[3].set(0, 0);
+    } else {
+        return false;
+    }
+
+    return true;
+}
+
+static inline std::pair<SkRRect, SkRRect> getBoundsAndClip(const android::FloatRect& boundsRect,
+                                                           const android::FloatRect& cropRect,
+                                                           const android::vec2& cornerRadius) {
+    const SkRect bounds = getSkRect(boundsRect);
+    const SkRect crop = getSkRect(cropRect);
+
+    SkRRect clip;
+    if (cornerRadius.x > 0 && cornerRadius.y > 0) {
+        // if the crop and the bounds are equivalent or there is no crop then we don't need a clip
+        if (bounds == crop || crop.isEmpty()) {
+            return {SkRRect::MakeRectXY(bounds, cornerRadius.x, cornerRadius.y), clip};
+        }
+
+        // This makes an effort to speed up common, simple bounds + clip combinations by
+        // converting them to a single RRect draw. It is possible there are other cases
+        // that can be converted.
+        if (crop.contains(bounds)) {
+            const auto insetCrop = crop.makeInset(cornerRadius.x, cornerRadius.y);
+            if (insetCrop.contains(bounds)) {
+                return {SkRRect::MakeRect(bounds), clip}; // clip is empty - no rounding required
+            }
+
+            SkVector radii[4];
+            if (intersectionIsRoundRect(bounds, crop, insetCrop, cornerRadius, radii)) {
+                SkRRect intersectionBounds;
+                intersectionBounds.setRectRadii(bounds, radii);
+                return {intersectionBounds, clip};
+            }
+        }
+
+        // we didn't hit any of our fast paths so set the clip to the cropRect
+        clip.setRectXY(crop, cornerRadius.x, cornerRadius.y);
+    }
+
+    // if we hit this point then we either don't have rounded corners or we are going to rely
+    // on the clip to round the corners for us
+    return {SkRRect::MakeRect(bounds), clip};
+}
+
+static inline bool layerHasBlur(const android::renderengine::LayerSettings& layer,
+                                bool colorTransformModifiesAlpha) {
+    if (layer.backgroundBlurRadius > 0 || layer.blurRegions.size()) {
+        // return false if the content is opaque and would therefore occlude the blur
+        const bool opaqueContent = !layer.source.buffer.buffer || layer.source.buffer.isOpaque;
+        const bool opaqueAlpha = layer.alpha == 1.0f && !colorTransformModifiesAlpha;
+        return layer.skipContentDraw || !(opaqueContent && opaqueAlpha);
+    }
+    return false;
+}
+
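+// Converts normalized [0, 1] float color channels into an 8-bit-per-channel SkColor.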
+static inline SkColor getSkColor(const android::vec4& color) {
+    return SkColorSetARGB(color.a * 255, color.r * 255, color.g * 255, color.b * 255);
+}
+
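+// Note: android::mat4 is column-major, while SkM44's constructor takes its values in
+// row-major order, hence the transposed indexing below.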
+static inline SkM44 getSkM44(const android::mat4& matrix) {
+    return SkM44(matrix[0][0], matrix[1][0], matrix[2][0], matrix[3][0],
+                 matrix[0][1], matrix[1][1], matrix[2][1], matrix[3][1],
+                 matrix[0][2], matrix[1][2], matrix[2][2], matrix[3][2],
+                 matrix[0][3], matrix[1][3], matrix[2][3], matrix[3][3]);
+}
+
+static inline SkPoint3 getSkPoint3(const android::vec3& vector) {
+    return SkPoint3::Make(vector.x, vector.y, vector.z);
+}
+} // namespace
 
 namespace android {
 namespace renderengine {
 namespace skia {
-SkiaRenderEngine::SkiaRenderEngine(RenderEngineType type) : RenderEngine(type) {}
+
+using base::StringAppendF;
+
+std::future<void> SkiaRenderEngine::primeCache() {
+    Cache::primeShaderCache(this);
+    return {};
+}
+
+sk_sp<SkData> SkiaRenderEngine::SkSLCacheMonitor::load(const SkData& key) {
+    // This "cache" does not actually cache anything. It just allows us to
+    // monitor Skia's internal cache. So this method always returns null.
+    return nullptr;
+}
+
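+// Called by Skia each time it stores a newly compiled shader; we only use it to count
+// compilations rather than to persist anything.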
+void SkiaRenderEngine::SkSLCacheMonitor::store(const SkData& key, const SkData& data,
+                                               const SkString& description) {
+    mShadersCachedSinceLastCall++;
+    mTotalShadersCompiled++;
+    ATRACE_FORMAT("SF cache: %i shaders", mTotalShadersCompiled);
+}
+
+int SkiaRenderEngine::reportShadersCompiled() {
+    return mSkSLCacheMonitor.totalShadersCompiled();
+}
 
 void SkiaRenderEngine::setEnableTracing(bool tracingEnabled) {
     SkAndroidFrameworkTraceUtil::setEnableTracing(tracingEnabled);
 }
+
+SkiaRenderEngine::SkiaRenderEngine(RenderEngineType type, PixelFormat pixelFormat,
+                                   bool useColorManagement, bool supportsBackgroundBlur)
+      : RenderEngine(type),
+        mDefaultPixelFormat(pixelFormat),
+        mUseColorManagement(useColorManagement) {
+    if (supportsBackgroundBlur) {
+        ALOGD("Background Blurs Enabled");
+        mBlurFilter = new KawaseBlurFilter();
+    }
+    mCapture = std::make_unique<SkiaCapture>();
+}
+
+SkiaRenderEngine::~SkiaRenderEngine() { }
+
+// To be called from backend dtors.
+void SkiaRenderEngine::finishRenderingAndAbandonContext() {
+    std::lock_guard<std::mutex> lock(mRenderingMutex);
+
+    if (mBlurFilter) {
+        delete mBlurFilter;
+    }
+
+    if (mGrContext) {
+        mGrContext->flushAndSubmit(true);
+        mGrContext->abandonContext();
+    }
+
+    if (mProtectedGrContext) {
+        mProtectedGrContext->flushAndSubmit(true);
+        mProtectedGrContext->abandonContext();
+    }
+}
+
+void SkiaRenderEngine::useProtectedContext(bool useProtectedContext) {
+    if (useProtectedContext == mInProtectedContext ||
+        (useProtectedContext && !supportsProtectedContent())) {
+        return;
+    }
+
+    // release any scratch resources before switching into a new mode
+    if (getActiveGrContext()) {
+        getActiveGrContext()->purgeUnlockedResources(true);
+    }
+
+    // Backend-specific way to switch to protected context
+    if (useProtectedContextImpl(
+            useProtectedContext ? GrProtected::kYes : GrProtected::kNo)) {
+        mInProtectedContext = useProtectedContext;
+        // given that we are sharing the same thread between two GrContexts we need to
+        // make sure that the thread state is reset when switching between the two.
+        if (getActiveGrContext()) {
+            getActiveGrContext()->resetContext();
+        }
+    }
+}
+
+GrDirectContext* SkiaRenderEngine::getActiveGrContext() {
+    return mInProtectedContext ? mProtectedGrContext.get() : mGrContext.get();
+}
+
+static float toDegrees(uint32_t transform) {
+    switch (transform) {
+        case ui::Transform::ROT_90:
+            return 90.0;
+        case ui::Transform::ROT_180:
+            return 180.0;
+        case ui::Transform::ROT_270:
+            return 270.0;
+        default:
+            return 0.0;
+    }
+}
+
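+// Expands the 4x4 color transform into Skia's 4x5 SkColorMatrix, leaving the translation
+// (fifth) column at zero.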
+static SkColorMatrix toSkColorMatrix(const android::mat4& matrix) {
+    return SkColorMatrix(matrix[0][0], matrix[1][0], matrix[2][0], matrix[3][0], 0, matrix[0][1],
+                         matrix[1][1], matrix[2][1], matrix[3][1], 0, matrix[0][2], matrix[1][2],
+                         matrix[2][2], matrix[3][2], 0, matrix[0][3], matrix[1][3], matrix[2][3],
+                         matrix[3][3], 0);
+}
+
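+// Returns true when the source and destination transfer functions differ, except for the
+// linear <-> sRGB cases, which are treated as not requiring tone mapping.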
+static bool needsToneMapping(ui::Dataspace sourceDataspace, ui::Dataspace destinationDataspace) {
+    int64_t sourceTransfer = sourceDataspace & HAL_DATASPACE_TRANSFER_MASK;
+    int64_t destTransfer = destinationDataspace & HAL_DATASPACE_TRANSFER_MASK;
+
+    // Treat unsupported dataspaces as sRGB
+    if (destTransfer != HAL_DATASPACE_TRANSFER_LINEAR &&
+        destTransfer != HAL_DATASPACE_TRANSFER_HLG &&
+        destTransfer != HAL_DATASPACE_TRANSFER_ST2084) {
+        destTransfer = HAL_DATASPACE_TRANSFER_SRGB;
+    }
+
+    if (sourceTransfer != HAL_DATASPACE_TRANSFER_LINEAR &&
+        sourceTransfer != HAL_DATASPACE_TRANSFER_HLG &&
+        sourceTransfer != HAL_DATASPACE_TRANSFER_ST2084) {
+        sourceTransfer = HAL_DATASPACE_TRANSFER_SRGB;
+    }
+
+    const bool isSourceLinear = sourceTransfer == HAL_DATASPACE_TRANSFER_LINEAR;
+    const bool isSourceSRGB = sourceTransfer == HAL_DATASPACE_TRANSFER_SRGB;
+    const bool isDestLinear = destTransfer == HAL_DATASPACE_TRANSFER_LINEAR;
+    const bool isDestSRGB = destTransfer == HAL_DATASPACE_TRANSFER_SRGB;
+
+    return !(isSourceLinear && isDestSRGB) && !(isSourceSRGB && isDestLinear) &&
+            sourceTransfer != destTransfer;
+}
+
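+// Lazily creates the GrDirectContexts on first use via the backend-specific
+// createDirectContexts().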
+void SkiaRenderEngine::ensureGrContextsCreated() {
+    if (mGrContext) {
+        return;
+    }
+
+    GrContextOptions options;
+    options.fDisableDriverCorrectnessWorkarounds = true;
+    options.fDisableDistanceFieldPaths = true;
+    options.fReducedShaderVariations = true;
+    options.fPersistentCache = &mSkSLCacheMonitor;
+    std::tie(mGrContext, mProtectedGrContext) = createDirectContexts(options);
+}
+
+void SkiaRenderEngine::mapExternalTextureBuffer(const sp<GraphicBuffer>& buffer,
+                                                  bool isRenderable) {
+    // Only run this if RE is running on its own thread. This
+    // way the access to GL operations is guaranteed to be happening on the
+    // same thread.
+    if (mRenderEngineType != RenderEngineType::SKIA_GL_THREADED &&
+        mRenderEngineType != RenderEngineType::SKIA_VK_THREADED) {
+        return;
+    }
+    // We don't attempt to map a buffer if the buffer contains protected content. In GL this is
+    // important because GPU resources for protected buffers are much more limited. (In Vk we
+    // simply match the existing behavior for protected buffers.)  In Vk, we never cache any
+    // buffers while in a protected context, since Vk cannot share across contexts, and protected
+    // is less common.
+    const bool isProtectedBuffer = buffer->getUsage() & GRALLOC_USAGE_PROTECTED;
+    if (isProtectedBuffer ||
+        (mRenderEngineType == RenderEngineType::SKIA_VK_THREADED && isProtected())) {
+        return;
+    }
+    ATRACE_CALL();
+
+    // If we were to support caching protected buffers then we would need to switch the
+    // currently bound context if we are not already using the protected context (and subsequently
+    // switch back after the buffer is cached).  However, for non-protected content we can bind
+    // the texture in either GL context because they are initialized with the same share_context
+    // which allows the texture state to be shared between them.
+    auto grContext = getActiveGrContext();
+    auto& cache = mTextureCache;
+
+    std::lock_guard<std::mutex> lock(mRenderingMutex);
+    mGraphicBufferExternalRefs[buffer->getId()]++;
+
+    if (const auto& iter = cache.find(buffer->getId()); iter == cache.end()) {
+        std::shared_ptr<AutoBackendTexture::LocalRef> imageTextureRef =
+                std::make_shared<AutoBackendTexture::LocalRef>(grContext,
+                                                               buffer->toAHardwareBuffer(),
+                                                               isRenderable, mTextureCleanupMgr);
+        cache.insert({buffer->getId(), imageTextureRef});
+    }
+}
+
+void SkiaRenderEngine::unmapExternalTextureBuffer(sp<GraphicBuffer>&& buffer) {
+    ATRACE_CALL();
+    std::lock_guard<std::mutex> lock(mRenderingMutex);
+    if (const auto& iter = mGraphicBufferExternalRefs.find(buffer->getId());
+        iter != mGraphicBufferExternalRefs.end()) {
+        if (iter->second == 0) {
+            ALOGW("Attempted to unmap GraphicBuffer <id: %" PRId64
+                  "> from RenderEngine texture, but the "
+                  "ref count was already zero!",
+                  buffer->getId());
+            mGraphicBufferExternalRefs.erase(buffer->getId());
+            return;
+        }
+
+        iter->second--;
+
+        // Swap contexts if needed prior to deleting this buffer
+        // See Issue 1 of
+        // https://www.khronos.org/registry/EGL/extensions/EXT/EGL_EXT_protected_content.txt: even
+        // when a protected context and an unprotected context are part of the same share group,
+        // protected surfaces may not be accessed by an unprotected context, implying that protected
+        // surfaces may only be freed when a protected context is active.
+        const bool inProtected = mInProtectedContext;
+        useProtectedContext(buffer->getUsage() & GRALLOC_USAGE_PROTECTED);
+
+        if (iter->second == 0) {
+            mTextureCache.erase(buffer->getId());
+            mGraphicBufferExternalRefs.erase(buffer->getId());
+        }
+
+        // Swap back to the previous context so that cached values of isProtected in SurfaceFlinger
+        // are up-to-date.
+        if (inProtected != mInProtectedContext) {
+            useProtectedContext(inProtected);
+        }
+    }
+}
+
+std::shared_ptr<AutoBackendTexture::LocalRef> SkiaRenderEngine::getOrCreateBackendTexture(
+        const sp<GraphicBuffer>& buffer, bool isOutputBuffer) {
+    // Do not lookup the buffer in the cache for protected contexts with the SkiaVk back-end
+    if (mRenderEngineType == RenderEngineType::SKIA_GL_THREADED ||
+        (mRenderEngineType == RenderEngineType::SKIA_VK_THREADED && !isProtected())) {
+        if (const auto& it = mTextureCache.find(buffer->getId()); it != mTextureCache.end()) {
+            return it->second;
+        }
+    }
+    return std::make_shared<AutoBackendTexture::LocalRef>(getActiveGrContext(),
+                                                          buffer->toAHardwareBuffer(),
+                                                          isOutputBuffer, mTextureCleanupMgr);
+}
+
+bool SkiaRenderEngine::canSkipPostRenderCleanup() const {
+    std::lock_guard<std::mutex> lock(mRenderingMutex);
+    return mTextureCleanupMgr.isEmpty();
+}
+
+void SkiaRenderEngine::cleanupPostRender() {
+    ATRACE_CALL();
+    std::lock_guard<std::mutex> lock(mRenderingMutex);
+    mTextureCleanupMgr.cleanup();
+}
+
+sk_sp<SkShader> SkiaRenderEngine::createRuntimeEffectShader(
+        const RuntimeEffectShaderParameters& parameters) {
+    // The given surface will be stretched by HWUI via matrix transformation,
+    // which gets similar results for most surfaces.
+    // Determine later on if we need to leverage the stretch shader within
+    // SurfaceFlinger.
+    const auto& stretchEffect = parameters.layer.stretchEffect;
+    auto shader = parameters.shader;
+    if (stretchEffect.hasEffect()) {
+        const auto targetBuffer = parameters.layer.source.buffer.buffer;
+        const auto graphicBuffer = targetBuffer ? targetBuffer->getBuffer() : nullptr;
+        if (graphicBuffer && parameters.shader) {
+            shader = mStretchShaderFactory.createSkShader(shader, stretchEffect);
+        }
+    }
+
+    if (parameters.requiresLinearEffect) {
+        auto effect =
+                shaders::LinearEffect{.inputDataspace = parameters.layer.sourceDataspace,
+                                      .outputDataspace = parameters.outputDataSpace,
+                                      .undoPremultipliedAlpha = parameters.undoPremultipliedAlpha};
+
+        auto effectIter = mRuntimeEffects.find(effect);
+        sk_sp<SkRuntimeEffect> runtimeEffect = nullptr;
+        if (effectIter == mRuntimeEffects.end()) {
+            runtimeEffect = buildRuntimeEffect(effect);
+            mRuntimeEffects.insert({effect, runtimeEffect});
+        } else {
+            runtimeEffect = effectIter->second;
+        }
+
+        mat4 colorTransform = parameters.layer.colorTransform;
+
+        colorTransform *=
+                mat4::scale(vec4(parameters.layerDimmingRatio, parameters.layerDimmingRatio,
+                                 parameters.layerDimmingRatio, 1.f));
+
+        const auto targetBuffer = parameters.layer.source.buffer.buffer;
+        const auto graphicBuffer = targetBuffer ? targetBuffer->getBuffer() : nullptr;
+        const auto hardwareBuffer = graphicBuffer ? graphicBuffer->toAHardwareBuffer() : nullptr;
+        return createLinearEffectShader(parameters.shader, effect, runtimeEffect,
+                                        std::move(colorTransform), parameters.display.maxLuminance,
+                                        parameters.display.currentLuminanceNits,
+                                        parameters.layer.source.buffer.maxLuminanceNits,
+                                        hardwareBuffer, parameters.display.renderIntent);
+    }
+    return parameters.shader;
+}
+
+void SkiaRenderEngine::initCanvas(SkCanvas* canvas, const DisplaySettings& display) {
+    if (CC_UNLIKELY(mCapture->isCaptureRunning())) {
+        // Record display settings when capture is running.
+        std::stringstream displaySettings;
+        PrintTo(display, &displaySettings);
+        // Store the DisplaySettings in additional information.
+        canvas->drawAnnotation(SkRect::MakeEmpty(), "DisplaySettings",
+                               SkData::MakeWithCString(displaySettings.str().c_str()));
+    }
+
+    // Before doing any drawing, let's make sure that we'll start at the origin of the display.
+    // Some displays don't start at 0,0 for example when we're mirroring the screen. Also, virtual
+    // displays might have different scaling when compared to the physical screen.
+
+    canvas->clipRect(getSkRect(display.physicalDisplay));
+    canvas->translate(display.physicalDisplay.left, display.physicalDisplay.top);
+
+    const auto clipWidth = display.clip.width();
+    const auto clipHeight = display.clip.height();
+    auto rotatedClipWidth = clipWidth;
+    auto rotatedClipHeight = clipHeight;
+    // Scale is contingent on the rotation result.
+    if (display.orientation & ui::Transform::ROT_90) {
+        std::swap(rotatedClipWidth, rotatedClipHeight);
+    }
+    const auto scaleX = static_cast<SkScalar>(display.physicalDisplay.width()) /
+            static_cast<SkScalar>(rotatedClipWidth);
+    const auto scaleY = static_cast<SkScalar>(display.physicalDisplay.height()) /
+            static_cast<SkScalar>(rotatedClipHeight);
+    canvas->scale(scaleX, scaleY);
+
+    // Canvas rotation is done by centering the clip window at the origin, rotating, translating
+    // back so that the top left corner of the clip is at (0, 0).
+    canvas->translate(rotatedClipWidth / 2, rotatedClipHeight / 2);
+    canvas->rotate(toDegrees(display.orientation));
+    canvas->translate(-clipWidth / 2, -clipHeight / 2);
+    canvas->translate(-display.clip.left, -display.clip.top);
+}
+
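+// RAII helper that saves the canvas state on construction and restores it to that save point
+// on destruction (or when restore() is called); replace() switches to tracking a new canvas.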
+class AutoSaveRestore {
+public:
+    AutoSaveRestore(SkCanvas* canvas) : mCanvas(canvas) { mSaveCount = canvas->save(); }
+    ~AutoSaveRestore() { restore(); }
+    void replace(SkCanvas* canvas) {
+        mCanvas = canvas;
+        mSaveCount = canvas->save();
+    }
+    void restore() {
+        if (mCanvas) {
+            mCanvas->restoreToCount(mSaveCount);
+            mCanvas = nullptr;
+        }
+    }
+
+private:
+    SkCanvas* mCanvas;
+    int mSaveCount;
+};
+
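+// Builds a rounded rect from a BlurRegion's bounds and per-corner radii.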
+static SkRRect getBlurRRect(const BlurRegion& region) {
+    const auto rect = SkRect::MakeLTRB(region.left, region.top, region.right, region.bottom);
+    const SkVector radii[4] = {SkVector::Make(region.cornerRadiusTL, region.cornerRadiusTL),
+                               SkVector::Make(region.cornerRadiusTR, region.cornerRadiusTR),
+                               SkVector::Make(region.cornerRadiusBR, region.cornerRadiusBR),
+                               SkVector::Make(region.cornerRadiusBL, region.cornerRadiusBL)};
+    SkRRect roundedRect;
+    roundedRect.setRectRadii(rect, radii);
+    return roundedRect;
+}
+
+// Arbitrary default margin which should be close enough to zero.
+constexpr float kDefaultMargin = 0.0001f;
+static bool equalsWithinMargin(float expected, float value, float margin = kDefaultMargin) {
+    LOG_ALWAYS_FATAL_IF(margin < 0.f, "Margin is negative!");
+    return std::abs(expected - value) < margin;
+}
+
+namespace {
+template <typename T>
+void logSettings(const T& t) {
+    std::stringstream stream;
+    PrintTo(t, &stream);
+    auto string = stream.str();
+    size_t pos = 0;
+    // Perfetto ignores \n, so split up manually into separate ALOGD statements.
+    const size_t size = string.size();
+    while (pos < size) {
+        const size_t end = std::min(string.find("\n", pos), size);
+        ALOGD("%s", string.substr(pos, end - pos).c_str());
+        pos = end + 1;
+    }
+}
+} // namespace
+
+// Helper class intended to be used on the stack to ensure that texture cleanup
+// is deferred until after this class goes out of scope.
+class DeferTextureCleanup final {
+public:
+    DeferTextureCleanup(AutoBackendTexture::CleanupManager& mgr) : mMgr(mgr) {
+        mMgr.setDeferredStatus(true);
+    }
+    ~DeferTextureCleanup() { mMgr.setDeferredStatus(false); }
+
+private:
+    DISALLOW_COPY_AND_ASSIGN(DeferTextureCleanup);
+    AutoBackendTexture::CleanupManager& mMgr;
+};
+
+void SkiaRenderEngine::drawLayersInternal(
+        const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
+        const DisplaySettings& display, const std::vector<LayerSettings>& layers,
+        const std::shared_ptr<ExternalTexture>& buffer, const bool /*useFramebufferCache*/,
+        base::unique_fd&& bufferFence) {
+    ATRACE_FORMAT("%s for %s", __func__, display.namePlusId.c_str());
+
+    std::lock_guard<std::mutex> lock(mRenderingMutex);
+
+    if (buffer == nullptr) {
+        ALOGE("No output buffer provided. Aborting GPU composition.");
+        resultPromise->set_value(base::unexpected(BAD_VALUE));
+        return;
+    }
+
+    validateOutputBufferUsage(buffer->getBuffer());
+
+    auto grContext = getActiveGrContext();
+
+    // any AutoBackendTexture deletions will now be deferred until cleanupPostRender is called
+    DeferTextureCleanup dtc(mTextureCleanupMgr);
+
+    auto surfaceTextureRef = getOrCreateBackendTexture(buffer->getBuffer(), true);
+
+    // wait on the buffer to be ready to use prior to using it
+    waitFence(grContext, bufferFence);
+
+    sk_sp<SkSurface> dstSurface =
+            surfaceTextureRef->getOrCreateSurface(display.outputDataspace, grContext);
+
+    SkCanvas* dstCanvas = mCapture->tryCapture(dstSurface.get());
+    if (dstCanvas == nullptr) {
+        ALOGE("Cannot acquire canvas from Skia.");
+        resultPromise->set_value(base::unexpected(BAD_VALUE));
+        return;
+    }
+
+    // setup color filter if necessary
+    sk_sp<SkColorFilter> displayColorTransform;
+    if (display.colorTransform != mat4() && !display.deviceHandlesColorTransform) {
+        displayColorTransform = SkColorFilters::Matrix(toSkColorMatrix(display.colorTransform));
+    }
+    const bool ctModifiesAlpha =
+            displayColorTransform && !displayColorTransform->isAlphaUnchanged();
+
+    // Find the max layer white point to determine the max luminance of the scene...
+    const float maxLayerWhitePoint = std::transform_reduce(
+            layers.cbegin(), layers.cend(), 0.f,
+            [](float left, float right) { return std::max(left, right); },
+            [&](const auto& l) { return l.whitePointNits; });
+
+    // ...and compute the dimming ratio if dimming is requested
+    const float displayDimmingRatio = display.targetLuminanceNits > 0.f &&
+                    maxLayerWhitePoint > 0.f &&
+                    (kEnableLayerBrightening || display.targetLuminanceNits > maxLayerWhitePoint)
+            ? maxLayerWhitePoint / display.targetLuminanceNits
+            : 1.f;
+
+    // Find if any layers have requested blur; we'll use that info to decide when to render to an
+    // offscreen buffer and when to render to the native buffer.
+    sk_sp<SkSurface> activeSurface(dstSurface);
+    SkCanvas* canvas = dstCanvas;
+    SkiaCapture::OffscreenState offscreenCaptureState;
+    const LayerSettings* blurCompositionLayer = nullptr;
+    if (mBlurFilter) {
+        bool requiresCompositionLayer = false;
+        for (const auto& layer : layers) {
+            // if the layer doesn't have blur or it is not visible then continue
+            if (!layerHasBlur(layer, ctModifiesAlpha)) {
+                continue;
+            }
+            if (layer.backgroundBlurRadius > 0 &&
+                layer.backgroundBlurRadius < mBlurFilter->getMaxCrossFadeRadius()) {
+                requiresCompositionLayer = true;
+            }
+            for (auto region : layer.blurRegions) {
+                if (region.blurRadius < mBlurFilter->getMaxCrossFadeRadius()) {
+                    requiresCompositionLayer = true;
+                }
+            }
+            if (requiresCompositionLayer) {
+                activeSurface = dstSurface->makeSurface(dstSurface->imageInfo());
+                canvas = mCapture->tryOffscreenCapture(activeSurface.get(), &offscreenCaptureState);
+                blurCompositionLayer = &layer;
+                break;
+            }
+        }
+    }
+
+    AutoSaveRestore surfaceAutoSaveRestore(canvas);
+    // Clear the entire canvas with a transparent black to prevent ghost images.
+    canvas->clear(SK_ColorTRANSPARENT);
+    initCanvas(canvas, display);
+
+    if (kPrintLayerSettings) {
+        logSettings(display);
+    }
+    for (const auto& layer : layers) {
+        ATRACE_FORMAT("DrawLayer: %s", layer.name.c_str());
+
+        if (kPrintLayerSettings) {
+            logSettings(layer);
+        }
+
+        sk_sp<SkImage> blurInput;
+        if (blurCompositionLayer == &layer) {
+            LOG_ALWAYS_FATAL_IF(activeSurface == dstSurface);
+            LOG_ALWAYS_FATAL_IF(canvas == dstCanvas);
+
+            // save a snapshot of the activeSurface to use as input to the blur shaders
+            blurInput = activeSurface->makeImageSnapshot();
+
+            // blit the offscreen framebuffer into the destination AHB, but only
+            // if there are blur regions. backgroundBlurRadius blurs the entire
+            // image below, so it can skip this step.
+            if (layer.blurRegions.size()) {
+                SkPaint paint;
+                paint.setBlendMode(SkBlendMode::kSrc);
+                if (CC_UNLIKELY(mCapture->isCaptureRunning())) {
+                    uint64_t id = mCapture->endOffscreenCapture(&offscreenCaptureState);
+                    dstCanvas->drawAnnotation(SkRect::Make(dstCanvas->imageInfo().dimensions()),
+                                              String8::format("SurfaceID|%" PRId64, id).c_str(),
+                                              nullptr);
+                    dstCanvas->drawImage(blurInput, 0, 0, SkSamplingOptions(), &paint);
+                } else {
+                    activeSurface->draw(dstCanvas, 0, 0, SkSamplingOptions(), &paint);
+                }
+            }
+
+            // assign dstCanvas to canvas and ensure that the canvas state is up to date
+            canvas = dstCanvas;
+            surfaceAutoSaveRestore.replace(canvas);
+            initCanvas(canvas, display);
+
+            LOG_ALWAYS_FATAL_IF(activeSurface->getCanvas()->getSaveCount() !=
+                                dstSurface->getCanvas()->getSaveCount());
+            LOG_ALWAYS_FATAL_IF(activeSurface->getCanvas()->getTotalMatrix() !=
+                                dstSurface->getCanvas()->getTotalMatrix());
+
+            // assign dstSurface to activeSurface
+            activeSurface = dstSurface;
+        }
+
+        SkAutoCanvasRestore layerAutoSaveRestore(canvas, true);
+        if (CC_UNLIKELY(mCapture->isCaptureRunning())) {
+            // Record the name of the layer if the capture is running.
+            std::stringstream layerSettings;
+            PrintTo(layer, &layerSettings);
+            // Store the LayerSettings in additional information.
+            canvas->drawAnnotation(SkRect::MakeEmpty(), layer.name.c_str(),
+                                   SkData::MakeWithCString(layerSettings.str().c_str()));
+        }
+        // Layers have a local transform that should be applied to them
+        canvas->concat(getSkM44(layer.geometry.positionTransform).asM33());
+
+        const auto [bounds, roundRectClip] =
+                getBoundsAndClip(layer.geometry.boundaries, layer.geometry.roundedCornersCrop,
+                                 layer.geometry.roundedCornersRadius);
+        if (mBlurFilter && layerHasBlur(layer, ctModifiesAlpha)) {
+            std::unordered_map<uint32_t, sk_sp<SkImage>> cachedBlurs;
+
+            // if multiple layers have blur, then we need to take a snapshot now because
+            // only the lowest layer will have blurInput populated earlier
+            if (!blurInput) {
+                blurInput = activeSurface->makeImageSnapshot();
+            }
+            // rect to be blurred in the coordinate space of blurInput
+            const auto blurRect = canvas->getTotalMatrix().mapRect(bounds.rect());
+
+            // if the clip needs to be applied then apply it now and make sure
+            // it is restored before we attempt to draw any shadows.
+            SkAutoCanvasRestore acr(canvas, true);
+            if (!roundRectClip.isEmpty()) {
+                canvas->clipRRect(roundRectClip, true);
+            }
+
+            // TODO(b/182216890): Filter out empty layers earlier
+            if (blurRect.width() > 0 && blurRect.height() > 0) {
+                if (layer.backgroundBlurRadius > 0) {
+                    ATRACE_NAME("BackgroundBlur");
+                    auto blurredImage = mBlurFilter->generate(grContext, layer.backgroundBlurRadius,
+                                                              blurInput, blurRect);
+
+                    cachedBlurs[layer.backgroundBlurRadius] = blurredImage;
+
+                    mBlurFilter->drawBlurRegion(canvas, bounds, layer.backgroundBlurRadius, 1.0f,
+                                                blurRect, blurredImage, blurInput);
+                }
+
+                canvas->concat(getSkM44(layer.blurRegionTransform).asM33());
+                for (auto region : layer.blurRegions) {
+                    if (cachedBlurs[region.blurRadius] == nullptr) {
+                        ATRACE_NAME("BlurRegion");
+                        cachedBlurs[region.blurRadius] =
+                                mBlurFilter->generate(grContext, region.blurRadius, blurInput,
+                                                      blurRect);
+                    }
+
+                    mBlurFilter->drawBlurRegion(canvas, getBlurRRect(region), region.blurRadius,
+                                                region.alpha, blurRect,
+                                                cachedBlurs[region.blurRadius], blurInput);
+                }
+            }
+        }
+
+        if (layer.shadow.length > 0) {
+            // This would require a new parameter/flag to SkShadowUtils::DrawShadow
+            LOG_ALWAYS_FATAL_IF(layer.disableBlending, "Cannot disableBlending with a shadow");
+
+            SkRRect shadowBounds, shadowClip;
+            if (layer.geometry.boundaries == layer.shadow.boundaries) {
+                shadowBounds = bounds;
+                shadowClip = roundRectClip;
+            } else {
+                std::tie(shadowBounds, shadowClip) =
+                        getBoundsAndClip(layer.shadow.boundaries, layer.geometry.roundedCornersCrop,
+                                         layer.geometry.roundedCornersRadius);
+            }
+
+            // Technically, if bounds is a rect and roundRectClip is not empty,
+            // it means that the bounds and roundedCornersCrop were different
+            // enough that we should intersect them to find the proper shadow.
+            // In practice, this often happens when the two rectangles appear to
+            // not match due to rounding errors. Draw the rounded version, which
+            // looks more like the intent.
+            const auto& rrect =
+                    shadowBounds.isRect() && !shadowClip.isEmpty() ? shadowClip : shadowBounds;
+            drawShadow(canvas, rrect, layer.shadow);
+        }
+
+        const float layerDimmingRatio = layer.whitePointNits <= 0.f
+                ? displayDimmingRatio
+                : (layer.whitePointNits / maxLayerWhitePoint) * displayDimmingRatio;
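+        // For example (hypothetical values, for illustration only): whitePointNits = 200,
+        // maxLayerWhitePoint = 400 and displayDimmingRatio = 0.8 give 0.5 * 0.8 = 0.4.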
+
+        const bool dimInLinearSpace = display.dimmingStage !=
+                aidl::android::hardware::graphics::composer3::DimmingStage::GAMMA_OETF;
+
+        const bool isExtendedHdr = (layer.sourceDataspace & ui::Dataspace::RANGE_MASK) ==
+                        static_cast<int32_t>(ui::Dataspace::RANGE_EXTENDED) &&
+                (display.outputDataspace & ui::Dataspace::TRANSFER_MASK) ==
+                        static_cast<int32_t>(ui::Dataspace::TRANSFER_SRGB);
+
+        const ui::Dataspace runtimeEffectDataspace = !dimInLinearSpace && isExtendedHdr
+                ? static_cast<ui::Dataspace>(
+                          (display.outputDataspace & ui::Dataspace::STANDARD_MASK) |
+                          ui::Dataspace::TRANSFER_GAMMA2_2 |
+                          (display.outputDataspace & ui::Dataspace::RANGE_MASK))
+                : display.outputDataspace;
+
+        // If the input dataspace is range extended, the output dataspace transfer is sRGB,
+        // and the dimmingStage is GAMMA_OETF, then dim in linear space instead and set the
+        // output dataspace's transfer to GAMMA2_2. This allows the DPU side to use
+        // oetf_gamma_2p2 for extended HDR layers and avoid a tone shift. The tone shift
+        // happens because HDR layers manage white point luminance in linear space, which
+        // color pipelines that request GAMMA_OETF break without a gamma 2.2 fixup.
+        const bool requiresLinearEffect = layer.colorTransform != mat4() ||
+                (mUseColorManagement &&
+                 needsToneMapping(layer.sourceDataspace, display.outputDataspace)) ||
+                (dimInLinearSpace && !equalsWithinMargin(1.f, layerDimmingRatio)) ||
+                (!dimInLinearSpace && isExtendedHdr);
+
+        // quick abort from drawing the remaining portion of the layer
+        if (layer.skipContentDraw ||
+            (layer.alpha == 0 && !requiresLinearEffect && !layer.disableBlending &&
+             (!displayColorTransform || displayColorTransform->isAlphaUnchanged()))) {
+            continue;
+        }
+
+        // If color management is disabled, then mark the source image with the same colorspace as
+        // the destination surface so that Skia's color management is a no-op.
+        const ui::Dataspace layerDataspace =
+                !mUseColorManagement ? display.outputDataspace : layer.sourceDataspace;
+
+        SkPaint paint;
+        if (layer.source.buffer.buffer) {
+            ATRACE_NAME("DrawImage");
+            validateInputBufferUsage(layer.source.buffer.buffer->getBuffer());
+            const auto& item = layer.source.buffer;
+            auto imageTextureRef = getOrCreateBackendTexture(item.buffer->getBuffer(), false);
+
+            // if the layer's buffer has a fence, then we must respect the fence prior to using
+            // the buffer.
+            if (layer.source.buffer.fence != nullptr) {
+                waitFence(grContext, layer.source.buffer.fence->get());
+            }
+
+            // isOpaque means we need to ignore the alpha in the image,
+            // replacing it with the alpha specified by the LayerSettings. See
+            // https://developer.android.com/reference/android/view/SurfaceControl.Builder#setOpaque(boolean)
+            // The proper way to do this is to use an SkColorType that ignores
+            // alpha, like kRGB_888x_SkColorType, and that is used if the
+            // incoming image is kRGBA_8888_SkColorType. However, the incoming
+            // image may be kRGBA_F16_SkColorType, for which there is no RGBX
+            // SkColorType, or kRGBA_1010102_SkColorType, for which we have
+            // kRGB_101010x_SkColorType, but it is not yet supported as a source
+            // on the GPU. (Adding both is tracked in skbug.com/12048.) In the
+            // meantime, we'll use a workaround that works unless we need to do
+            // any color conversion. The workaround requires that we pretend the
+            // image is already premultiplied, so that we do not premultiply it
+            // before applying SkBlendMode::kPlus.
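+            // In short: the workaround forces a premultiplied alpha type, opaque buffers
+            // otherwise ignore alpha, premultiplied content stays premultiplied, and
+            // everything else is treated as unpremultiplied.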
+            const bool useIsOpaqueWorkaround = item.isOpaque &&
+                    (imageTextureRef->colorType() == kRGBA_1010102_SkColorType ||
+                     imageTextureRef->colorType() == kRGBA_F16_SkColorType);
+            const auto alphaType = useIsOpaqueWorkaround ? kPremul_SkAlphaType
+                    : item.isOpaque                      ? kOpaque_SkAlphaType
+                    : item.usePremultipliedAlpha         ? kPremul_SkAlphaType
+                                                         : kUnpremul_SkAlphaType;
+            sk_sp<SkImage> image = imageTextureRef->makeImage(layerDataspace, alphaType, grContext);
+
+            auto texMatrix = getSkM44(item.textureTransform).asM33();
+            // textureTransform was intended to be passed directly into a shader, so when
+            // building the total matrix with the textureTransform we need to first
+            // normalize it, then apply the textureTransform, then scale back up.
+            texMatrix.preScale(1.0f / bounds.width(), 1.0f / bounds.height());
+            texMatrix.postScale(image->width(), image->height());
+
+            SkMatrix matrix;
+            if (!texMatrix.invert(&matrix)) {
+                matrix = texMatrix;
+            }
+            // The shader does not respect the translation, so we add it to the texture
+            // transform for the SkImage. This will make sure that the correct layer contents
+            // are drawn in the correct part of the screen.
+            matrix.postTranslate(bounds.rect().fLeft, bounds.rect().fTop);
+
+            sk_sp<SkShader> shader;
+
+            if (layer.source.buffer.useTextureFiltering) {
+                shader = image->makeShader(SkTileMode::kClamp, SkTileMode::kClamp,
+                                           SkSamplingOptions(
+                                                   {SkFilterMode::kLinear, SkMipmapMode::kNone}),
+                                           &matrix);
+            } else {
+                shader = image->makeShader(SkSamplingOptions(), matrix);
+            }
+
+            if (useIsOpaqueWorkaround) {
+                shader = SkShaders::Blend(SkBlendMode::kPlus, shader,
+                                          SkShaders::Color(SkColors::kBlack,
+                                                           toSkColorSpace(layerDataspace)));
+            }
+
+            paint.setShader(createRuntimeEffectShader(
+                    RuntimeEffectShaderParameters{.shader = shader,
+                                                  .layer = layer,
+                                                  .display = display,
+                                                  .undoPremultipliedAlpha = !item.isOpaque &&
+                                                          item.usePremultipliedAlpha,
+                                                  .requiresLinearEffect = requiresLinearEffect,
+                                                  .layerDimmingRatio = dimInLinearSpace
+                                                          ? layerDimmingRatio
+                                                          : 1.f,
+                                                  .outputDataSpace = runtimeEffectDataspace}));
+
+            // Turn on dithering when dimming beyond this (arbitrary) threshold...
+            static constexpr float kDimmingThreshold = 0.2f;
+            // ...or when we're rendering an HDR layer down to an 8-bit target.
+            // Most HDR standards require at least 10 bits of color depth for source content, so we
+            // can just extract the transfer function rather than dig into precise gralloc layout.
+            // Furthermore, we can assume that the only 8-bit target we support is RGBA8888.
+            const bool requiresDownsample = isHdrDataspace(layer.sourceDataspace) &&
+                    buffer->getPixelFormat() == PIXEL_FORMAT_RGBA_8888;
+            if (layerDimmingRatio <= kDimmingThreshold || requiresDownsample) {
+                paint.setDither(true);
+            }
+            paint.setAlphaf(layer.alpha);
+
+            if (imageTextureRef->colorType() == kAlpha_8_SkColorType) {
+                LOG_ALWAYS_FATAL_IF(layer.disableBlending, "Cannot disableBlending with A8");
+
+                // SysUI creates the alpha layer as a coverage layer, which is
+                // appropriate for the DPU. Use a color matrix to convert it to
+                // a mask.
+                // TODO (b/219525258): Handle input as a mask.
+                //
+                // The color matrix will convert A8 pixels with no alpha to
+                // black, as described by this vector. If the display handles
+                // the color transform, we need to invert it to find the color
+                // that will result in black after the DPU applies the transform.
+                SkV4 black{0.0f, 0.0f, 0.0f, 1.0f}; // r, g, b, a
+                if (display.colorTransform != mat4() && display.deviceHandlesColorTransform) {
+                    SkM44 colorSpaceMatrix = getSkM44(display.colorTransform);
+                    if (colorSpaceMatrix.invert(&colorSpaceMatrix)) {
+                        black = colorSpaceMatrix * black;
+                    } else {
+                        // We'll just have to use 0,0,0 as black, which should
+                        // be close to correct.
+                        ALOGI("Could not invert colorTransform!");
+                    }
+                }
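+                // The matrix below maps an input (r, g, b, a) to (black.r, black.g, black.b,
+                // 1 - a): zero coverage becomes opaque black and full coverage becomes
+                // transparent, which turns the coverage layer into a mask.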
+                SkColorMatrix colorMatrix(0, 0, 0, 0, black[0],
+                                          0, 0, 0, 0, black[1],
+                                          0, 0, 0, 0, black[2],
+                                          0, 0, 0, -1, 1);
+                if (display.colorTransform != mat4() && !display.deviceHandlesColorTransform) {
+                    // On the other hand, if the device doesn't handle it, we
+                    // have to apply it ourselves.
+                    colorMatrix.postConcat(toSkColorMatrix(display.colorTransform));
+                }
+                paint.setColorFilter(SkColorFilters::Matrix(colorMatrix));
+            }
+        } else {
+            ATRACE_NAME("DrawColor");
+            const auto color = layer.source.solidColor;
+            sk_sp<SkShader> shader = SkShaders::Color(SkColor4f{.fR = color.r,
+                                                                .fG = color.g,
+                                                                .fB = color.b,
+                                                                .fA = layer.alpha},
+                                                      toSkColorSpace(layerDataspace));
+            paint.setShader(createRuntimeEffectShader(
+                    RuntimeEffectShaderParameters{.shader = shader,
+                                                  .layer = layer,
+                                                  .display = display,
+                                                  .undoPremultipliedAlpha = false,
+                                                  .requiresLinearEffect = requiresLinearEffect,
+                                                  .layerDimmingRatio = layerDimmingRatio,
+                                                  .outputDataSpace = runtimeEffectDataspace}));
+        }
+
+        if (layer.disableBlending) {
+            paint.setBlendMode(SkBlendMode::kSrc);
+        }
+
+        // An A8 buffer will already have the proper color filter attached to
+        // its paint, including the displayColorTransform as needed.
+        if (!paint.getColorFilter()) {
+            if (!dimInLinearSpace && !equalsWithinMargin(1.0, layerDimmingRatio)) {
+                // If we don't dim in linear space, then when we gamma correct the dimming ratio we
+                // can assume a gamma 2.2 transfer function.
+                static constexpr float kInverseGamma22 = 1.f / 2.2f;
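+                // For example, a layerDimmingRatio of 0.5 becomes 0.5^(1/2.2) ~= 0.73 after
+                // gamma correction (illustrative value only).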
+                const auto gammaCorrectedDimmingRatio =
+                        std::pow(layerDimmingRatio, kInverseGamma22);
+                auto dimmingMatrix =
+                        mat4::scale(vec4(gammaCorrectedDimmingRatio, gammaCorrectedDimmingRatio,
+                                         gammaCorrectedDimmingRatio, 1.f));
+
+                const auto colorFilter =
+                        SkColorFilters::Matrix(toSkColorMatrix(std::move(dimmingMatrix)));
+                paint.setColorFilter(displayColorTransform
+                                             ? displayColorTransform->makeComposed(colorFilter)
+                                             : colorFilter);
+            } else {
+                paint.setColorFilter(displayColorTransform);
+            }
+        }
+
+        if (!roundRectClip.isEmpty()) {
+            canvas->clipRRect(roundRectClip, true);
+        }
+
+        if (!bounds.isRect()) {
+            paint.setAntiAlias(true);
+            canvas->drawRRect(bounds, paint);
+        } else {
+            canvas->drawRect(bounds.rect(), paint);
+        }
+        if (kFlushAfterEveryLayer) {
+            ATRACE_NAME("flush surface");
+            activeSurface->flush();
+        }
+    }
+    for (const auto& borderRenderInfo : display.borderInfoList) {
+        SkPaint p;
+        p.setColor(SkColor4f{borderRenderInfo.color.r, borderRenderInfo.color.g,
+                             borderRenderInfo.color.b, borderRenderInfo.color.a});
+        p.setAntiAlias(true);
+        p.setStyle(SkPaint::kStroke_Style);
+        p.setStrokeWidth(borderRenderInfo.width);
+        SkRegion sk_region;
+        SkPath path;
+
+        // Construct a final SkRegion using Regions
+        for (const auto& r : borderRenderInfo.combinedRegion) {
+            sk_region.op({r.left, r.top, r.right, r.bottom}, SkRegion::kUnion_Op);
+        }
+
+        sk_region.getBoundaryPath(&path);
+        canvas->drawPath(path, p);
+        path.close();
+    }
+
+    surfaceAutoSaveRestore.restore();
+    mCapture->endCapture();
+    {
+        ATRACE_NAME("flush surface");
+        LOG_ALWAYS_FATAL_IF(activeSurface != dstSurface);
+        activeSurface->flush();
+    }
+
+    auto drawFence = sp<Fence>::make(flushAndSubmit(grContext));
+
+    if (ATRACE_ENABLED()) {
+        static gui::FenceMonitor sMonitor("RE Completion");
+        sMonitor.queueFence(drawFence);
+    }
+    resultPromise->set_value(std::move(drawFence));
+}
+
+size_t SkiaRenderEngine::getMaxTextureSize() const {
+    return mGrContext->maxTextureSize();
+}
+
+size_t SkiaRenderEngine::getMaxViewportDims() const {
+    return mGrContext->maxRenderTargetSize();
+}
+
+void SkiaRenderEngine::drawShadow(SkCanvas* canvas,
+                                  const SkRRect& casterRRect,
+                                  const ShadowSettings& settings) {
+    ATRACE_CALL();
+    const float casterZ = settings.length / 2.0f;
+    const auto flags =
+            settings.casterIsTranslucent ? kTransparentOccluder_ShadowFlag : kNone_ShadowFlag;
+
+    SkShadowUtils::DrawShadow(canvas, SkPath::RRect(casterRRect), SkPoint3::Make(0, 0, casterZ),
+                              getSkPoint3(settings.lightPos), settings.lightRadius,
+                              getSkColor(settings.ambientColor), getSkColor(settings.spotColor),
+                              flags);
+}
+
+void SkiaRenderEngine::onActiveDisplaySizeChanged(ui::Size size) {
+    // This cache multiplier was selected by reviewing cache sizes relative to the screen
+    // resolution. Looking at the worst-case memory needed by blur (~1.5x), shadows (~1x),
+    // and general data structures (e.g. vertex buffers), we chose this as a conservative
+    // default.
+    const float SURFACE_SIZE_MULTIPLIER = 3.5f * bytesPerPixel(mDefaultPixelFormat);
+    const int maxResourceBytes = size.width * size.height * SURFACE_SIZE_MULTIPLIER;
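+    // As a rough illustration (hypothetical display, not taken from this change): a
+    // 1080x2400 RGBA_8888 display yields 1080 * 2400 * 4 * 3.5 ~= 36 MB of cache budget.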
+
+    // start by resizing the current context
+    getActiveGrContext()->setResourceCacheLimit(maxResourceBytes);
+
+    // if it is possible to switch contexts then we will resize the other context
+    const bool originalProtectedState = mInProtectedContext;
+    useProtectedContext(!mInProtectedContext);
+    if (mInProtectedContext != originalProtectedState) {
+        getActiveGrContext()->setResourceCacheLimit(maxResourceBytes);
+        // reset back to the initial context that was active when this method was called
+        useProtectedContext(originalProtectedState);
+    }
+}
+
+void SkiaRenderEngine::dump(std::string& result) {
+    // Dump for the specific backend (GLES or Vk)
+    appendBackendSpecificInfoToDump(result);
+
+    // Info about protected content
+    StringAppendF(&result, "RenderEngine supports protected context: %d\n",
+                  supportsProtectedContent());
+    StringAppendF(&result, "RenderEngine is in protected context: %d\n", mInProtectedContext);
+    StringAppendF(&result, "RenderEngine shaders cached since last dump/primeCache: %d\n",
+                  mSkSLCacheMonitor.shadersCachedSinceLastCall());
+
+    std::vector<ResourcePair> cpuResourceMap = {
+            {"skia/sk_resource_cache/bitmap_", "Bitmaps"},
+            {"skia/sk_resource_cache/rrect-blur_", "Masks"},
+            {"skia/sk_resource_cache/rects-blur_", "Masks"},
+            {"skia/sk_resource_cache/tessellated", "Shadows"},
+            {"skia", "Other"},
+    };
+    SkiaMemoryReporter cpuReporter(cpuResourceMap, false);
+    SkGraphics::DumpMemoryStatistics(&cpuReporter);
+    StringAppendF(&result, "Skia CPU Caches: ");
+    cpuReporter.logTotals(result);
+    cpuReporter.logOutput(result);
+
+    {
+        std::lock_guard<std::mutex> lock(mRenderingMutex);
+
+        std::vector<ResourcePair> gpuResourceMap = {
+                {"texture_renderbuffer", "Texture/RenderBuffer"},
+                {"texture", "Texture"},
+                {"gr_text_blob_cache", "Text"},
+                {"skia", "Other"},
+        };
+        SkiaMemoryReporter gpuReporter(gpuResourceMap, true);
+        mGrContext->dumpMemoryStatistics(&gpuReporter);
+        StringAppendF(&result, "Skia's GPU Caches: ");
+        gpuReporter.logTotals(result);
+        gpuReporter.logOutput(result);
+        StringAppendF(&result, "Skia's Wrapped Objects:\n");
+        gpuReporter.logOutput(result, true);
+
+        StringAppendF(&result, "RenderEngine tracked buffers: %zu\n",
+                      mGraphicBufferExternalRefs.size());
+        StringAppendF(&result, "Dumping buffer ids...\n");
+        for (const auto& [id, refCounts] : mGraphicBufferExternalRefs) {
+            StringAppendF(&result, "- 0x%" PRIx64 " - %d refs \n", id, refCounts);
+        }
+        StringAppendF(&result, "RenderEngine AHB/BackendTexture cache size: %zu\n",
+                      mTextureCache.size());
+        StringAppendF(&result, "Dumping buffer ids...\n");
+        // TODO(178539829): It would be nice to know which layer these are coming from and what
+        // the texture sizes are.
+        for (const auto& [id, unused] : mTextureCache) {
+            StringAppendF(&result, "- 0x%" PRIx64 "\n", id);
+        }
+        StringAppendF(&result, "\n");
+
+        SkiaMemoryReporter gpuProtectedReporter(gpuResourceMap, true);
+        if (mProtectedGrContext) {
+            mProtectedGrContext->dumpMemoryStatistics(&gpuProtectedReporter);
+        }
+        StringAppendF(&result, "Skia's GPU Protected Caches: ");
+        gpuProtectedReporter.logTotals(result);
+        gpuProtectedReporter.logOutput(result);
+        StringAppendF(&result, "Skia's Protected Wrapped Objects:\n");
+        gpuProtectedReporter.logOutput(result, true);
+
+        StringAppendF(&result, "\n");
+        StringAppendF(&result, "RenderEngine runtime effects: %zu\n", mRuntimeEffects.size());
+        for (const auto& [linearEffect, unused] : mRuntimeEffects) {
+            StringAppendF(&result, "- inputDataspace: %s\n",
+                          dataspaceDetails(
+                                  static_cast<android_dataspace>(linearEffect.inputDataspace))
+                                  .c_str());
+            StringAppendF(&result, "- outputDataspace: %s\n",
+                          dataspaceDetails(
+                                  static_cast<android_dataspace>(linearEffect.outputDataspace))
+                                  .c_str());
+            StringAppendF(&result, "undoPremultipliedAlpha: %s\n",
+                          linearEffect.undoPremultipliedAlpha ? "true" : "false");
+        }
+    }
+    StringAppendF(&result, "\n");
+}
+
 } // namespace skia
 } // namespace renderengine
 } // namespace android
diff --git a/libs/renderengine/skia/SkiaRenderEngine.h b/libs/renderengine/skia/SkiaRenderEngine.h
index 160a186..6457bfa 100644
--- a/libs/renderengine/skia/SkiaRenderEngine.h
+++ b/libs/renderengine/skia/SkiaRenderEngine.h
@@ -20,6 +20,31 @@
 #include <renderengine/RenderEngine.h>
 #include <sys/types.h>
 
+#include <GrBackendSemaphore.h>
+#include <GrDirectContext.h>
+#include <SkSurface.h>
+#include <android-base/thread_annotations.h>
+#include <renderengine/ExternalTexture.h>
+#include <renderengine/RenderEngine.h>
+#include <sys/types.h>
+
+#include <mutex>
+#include <unordered_map>
+
+#include "AutoBackendTexture.h"
+#include "GrContextOptions.h"
+#include "SkImageInfo.h"
+#include "SkiaRenderEngine.h"
+#include "android-base/macros.h"
+#include "debug/SkiaCapture.h"
+#include "filters/BlurFilter.h"
+#include "filters/LinearEffect.h"
+#include "filters/StretchShaderFactory.h"
+
+class SkData;
+
+struct SkPoint3;
+
 namespace android {
 
 namespace renderengine {
@@ -31,35 +56,147 @@
 
 class BlurFilter;
 
-// TODO: Put common skia stuff here that can be shared between the GL & Vulkan backends
-// Currently mostly just handles all the no-op / missing APIs
 class SkiaRenderEngine : public RenderEngine {
 public:
     static std::unique_ptr<SkiaRenderEngine> create(const RenderEngineCreationArgs& args);
-    SkiaRenderEngine(RenderEngineType type);
-    ~SkiaRenderEngine() override {}
+    SkiaRenderEngine(RenderEngineType type,
+                     PixelFormat pixelFormat,
+                     bool useColorManagement,
+                     bool supportsBackgroundBlur);
+    ~SkiaRenderEngine() override;
 
-    virtual std::future<void> primeCache() override { return {}; };
-    virtual void genTextures(size_t /*count*/, uint32_t* /*names*/) override{};
-    virtual void deleteTextures(size_t /*count*/, uint32_t const* /*names*/) override{};
-    virtual bool isProtected() const override { return false; } // mInProtectedContext; }
-    virtual bool supportsProtectedContent() const override { return false; };
-    virtual int getContextPriority() override { return 0; }
-    virtual int reportShadersCompiled() { return 0; }
-    virtual void setEnableTracing(bool tracingEnabled) override;
+    std::future<void> primeCache() override final;
+    void cleanupPostRender() override final;
+    void cleanFramebufferCache() override final {}
+    bool supportsBackgroundBlur() override final {
+        return mBlurFilter != nullptr;
+    }
+    void onActiveDisplaySizeChanged(ui::Size size) override final;
+    int reportShadersCompiled();
 
+    virtual void genTextures(size_t /*count*/, uint32_t* /*names*/) override final{};
+    virtual void deleteTextures(size_t /*count*/, uint32_t const* /*names*/) override final{};
+    virtual void setEnableTracing(bool tracingEnabled) override final;
+
+    void useProtectedContext(bool useProtectedContext) override;
+    bool supportsProtectedContent() const override {
+        return supportsProtectedContentImpl();
+    }
+    void ensureGrContextsCreated();
 protected:
-    virtual void mapExternalTextureBuffer(const sp<GraphicBuffer>& /*buffer*/,
-                                          bool /*isRenderable*/) override = 0;
-    virtual void unmapExternalTextureBuffer(const sp<GraphicBuffer>& /*buffer*/) override = 0;
+    // This is so backends can stop the generic rendering state first before
+    // cleaning up backend-specific state
+    void finishRenderingAndAbandonContext();
 
-    virtual void drawLayersInternal(
-            const std::shared_ptr<std::promise<RenderEngineResult>>&& resultPromise,
-            const DisplaySettings& display, const std::vector<LayerSettings>& layers,
-            const std::shared_ptr<ExternalTexture>& buffer, const bool useFramebufferCache,
-            base::unique_fd&& bufferFence) override {
-        resultPromise->set_value({NO_ERROR, base::unique_fd()});
+    // Functions that a given backend (GLES, Vulkan) must implement
+    using Contexts = std::pair<sk_sp<GrDirectContext>, sk_sp<GrDirectContext>>;
+    virtual Contexts createDirectContexts(const GrContextOptions& options) = 0;
+    virtual bool supportsProtectedContentImpl() const = 0;
+    virtual bool useProtectedContextImpl(GrProtected isProtected) = 0;
+    virtual void waitFence(GrDirectContext* grContext, base::borrowed_fd fenceFd) = 0;
+    virtual base::unique_fd flushAndSubmit(GrDirectContext* context) = 0;
+    virtual void appendBackendSpecificInfoToDump(std::string& result) = 0;
+
+    size_t getMaxTextureSize() const override final;
+    size_t getMaxViewportDims() const override final;
+    GrDirectContext* getActiveGrContext();
+
+    bool isProtected() const { return mInProtectedContext; }
+
+    // Implements PersistentCache as a way to monitor what SkSL shaders Skia has
+    // cached.
+    class SkSLCacheMonitor : public GrContextOptions::PersistentCache {
+    public:
+        SkSLCacheMonitor() = default;
+        ~SkSLCacheMonitor() override = default;
+
+        sk_sp<SkData> load(const SkData& key) override;
+
+        void store(const SkData& key, const SkData& data, const SkString& description) override;
+
+        int shadersCachedSinceLastCall() {
+            const int shadersCachedSinceLastCall = mShadersCachedSinceLastCall;
+            mShadersCachedSinceLastCall = 0;
+            return shadersCachedSinceLastCall;
+        }
+
+        int totalShadersCompiled() const { return mTotalShadersCompiled; }
+
+    private:
+        int mShadersCachedSinceLastCall = 0;
+        int mTotalShadersCompiled = 0;
     };
+
+private:
+    void mapExternalTextureBuffer(const sp<GraphicBuffer>& buffer,
+                                  bool isRenderable) override final;
+    void unmapExternalTextureBuffer(sp<GraphicBuffer>&& buffer) override final;
+    bool canSkipPostRenderCleanup() const override final;
+
+    std::shared_ptr<AutoBackendTexture::LocalRef> getOrCreateBackendTexture(
+            const sp<GraphicBuffer>& buffer, bool isOutputBuffer) REQUIRES(mRenderingMutex);
+    void initCanvas(SkCanvas* canvas, const DisplaySettings& display);
+    void drawShadow(SkCanvas* canvas, const SkRRect& casterRRect,
+                    const ShadowSettings& shadowSettings);
+    void drawLayersInternal(const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
+                            const DisplaySettings& display,
+                            const std::vector<LayerSettings>& layers,
+                            const std::shared_ptr<ExternalTexture>& buffer,
+                            const bool useFramebufferCache,
+                            base::unique_fd&& bufferFence) override final;
+
+    void dump(std::string& result) override final;
+
+    // If requiresLinearEffect is true or the layer has a stretchEffect, a new shader is
+    // returned; otherwise the input shader is returned.
+    struct RuntimeEffectShaderParameters {
+        sk_sp<SkShader> shader;
+        const LayerSettings& layer;
+        const DisplaySettings& display;
+        bool undoPremultipliedAlpha;
+        bool requiresLinearEffect;
+        float layerDimmingRatio;
+        const ui::Dataspace outputDataSpace;
+    };
+    sk_sp<SkShader> createRuntimeEffectShader(const RuntimeEffectShaderParameters&);
+
+    const PixelFormat mDefaultPixelFormat;
+    const bool mUseColorManagement;
+
+    // Identifier used for the various mappings of layers to textures or shaders
+    using GraphicBufferId = uint64_t;
+
+    // Number of external holders of ExternalTexture references, per GraphicBuffer ID.
+    std::unordered_map<GraphicBufferId, int32_t> mGraphicBufferExternalRefs
+            GUARDED_BY(mRenderingMutex);
+    // For GL, this cache is shared between protected and unprotected contexts. For Vulkan, it is
+    // only used for the unprotected context, because Vulkan does not allow sharing between
+    // contexts, and protected is less common.
+    std::unordered_map<GraphicBufferId, std::shared_ptr<AutoBackendTexture::LocalRef>> mTextureCache
+            GUARDED_BY(mRenderingMutex);
+    std::unordered_map<shaders::LinearEffect, sk_sp<SkRuntimeEffect>, shaders::LinearEffectHasher>
+            mRuntimeEffects;
+    AutoBackendTexture::CleanupManager mTextureCleanupMgr GUARDED_BY(mRenderingMutex);
+
+    StretchShaderFactory mStretchShaderFactory;
+
+    sp<Fence> mLastDrawFence;
+    BlurFilter* mBlurFilter = nullptr;
+
+    // Object to capture commands sent to Skia.
+    std::unique_ptr<SkiaCapture> mCapture;
+
+    // Mutex guarding rendering operations, so that internal state related to
+    // rendering that is potentially modified by multiple threads is guaranteed thread-safe.
+    mutable std::mutex mRenderingMutex;
+    SkSLCacheMonitor mSkSLCacheMonitor;
+
+    // Graphics context used for creating surfaces and submitting commands
+    sk_sp<GrDirectContext> mGrContext;
+    // Same as above, but for protected content (e.g. DRM)
+    sk_sp<GrDirectContext> mProtectedGrContext;
+    bool mInProtectedContext = false;
 };
 
 } // namespace skia
diff --git a/libs/renderengine/skia/SkiaVkRenderEngine.cpp b/libs/renderengine/skia/SkiaVkRenderEngine.cpp
new file mode 100644
index 0000000..b99e385
--- /dev/null
+++ b/libs/renderengine/skia/SkiaVkRenderEngine.cpp
@@ -0,0 +1,711 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#undef LOG_TAG
+#define LOG_TAG "RenderEngine"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "SkiaVkRenderEngine.h"
+
+#include <GrBackendSemaphore.h>
+#include <GrContextOptions.h>
+#include <vk/GrVkExtensions.h>
+#include <vk/GrVkTypes.h>
+
+#include <android-base/stringprintf.h>
+#include <gui/TraceUtils.h>
+#include <sync/sync.h>
+#include <utils/Trace.h>
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include <vulkan/vulkan.h>
+#include "log/log_main.h"
+
+namespace android {
+namespace renderengine {
+
+struct VulkanFuncs {
+    PFN_vkCreateSemaphore vkCreateSemaphore = nullptr;
+    PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR = nullptr;
+    PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR = nullptr;
+    PFN_vkDestroySemaphore vkDestroySemaphore = nullptr;
+
+    PFN_vkDeviceWaitIdle vkDeviceWaitIdle = nullptr;
+    PFN_vkDestroyDevice vkDestroyDevice = nullptr;
+    PFN_vkDestroyInstance vkDestroyInstance = nullptr;
+};
+
+// Ref-Count a semaphore
+struct DestroySemaphoreInfo {
+    VkSemaphore mSemaphore;
+    // We need to make sure we don't delete the VkSemaphore until it is done being used by both Skia
+    // (including by the GPU) and inside SkiaVkRenderEngine. So we always start with two refs, one
+    // owned by Skia and one owned by the SkiaVkRenderEngine. The refs are decremented each time
+    // delete_semaphore* is called with this object. Skia will call delete_semaphore* once it is
+    // done with the semaphore and the GPU has finished work on the semaphore. SkiaVkRenderEngine
+    // calls delete_semaphore* after sending the semaphore to Skia and exporting it if need be.
+    int mRefs = 2;
+
+    DestroySemaphoreInfo(VkSemaphore semaphore) : mSemaphore(semaphore) {}
+};
+
+struct VulkanInterface {
+    bool initialized = false;
+    VkInstance instance;
+    VkPhysicalDevice physicalDevice;
+    VkDevice device;
+    VkQueue queue;
+    int queueIndex;
+    uint32_t apiVersion;
+    GrVkExtensions grExtensions;
+    VkPhysicalDeviceFeatures2* physicalDeviceFeatures2 = nullptr;
+    VkPhysicalDeviceSamplerYcbcrConversionFeatures* samplerYcbcrConversionFeatures = nullptr;
+    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
+    GrVkGetProc grGetProc;
+    bool isProtected;
+    bool isRealtimePriority;
+
+    VulkanFuncs funcs;
+
+    std::vector<std::string> instanceExtensionNames;
+    std::vector<std::string> deviceExtensionNames;
+
+    GrVkBackendContext getBackendContext() {
+        GrVkBackendContext backendContext;
+        backendContext.fInstance = instance;
+        backendContext.fPhysicalDevice = physicalDevice;
+        backendContext.fDevice = device;
+        backendContext.fQueue = queue;
+        backendContext.fGraphicsQueueIndex = queueIndex;
+        backendContext.fMaxAPIVersion = apiVersion;
+        backendContext.fVkExtensions = &grExtensions;
+        backendContext.fDeviceFeatures2 = physicalDeviceFeatures2;
+        backendContext.fGetProc = grGetProc;
+        backendContext.fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;
+        return backendContext;
+    };
+
+    VkSemaphore createExportableSemaphore() {
+        VkExportSemaphoreCreateInfo exportInfo;
+        exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
+        exportInfo.pNext = nullptr;
+        exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+
+        VkSemaphoreCreateInfo semaphoreInfo;
+        semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+        semaphoreInfo.pNext = &exportInfo;
+        semaphoreInfo.flags = 0;
+
+        VkSemaphore semaphore;
+        VkResult err = funcs.vkCreateSemaphore(device, &semaphoreInfo, nullptr, &semaphore);
+        if (VK_SUCCESS != err) {
+            ALOGE("%s: failed to create semaphore. err %d\n", __func__, err);
+            return VK_NULL_HANDLE;
+        }
+
+        return semaphore;
+    }
+
+    // syncFd cannot be <= 0
+    VkSemaphore importSemaphoreFromSyncFd(int syncFd) {
+        VkSemaphoreCreateInfo semaphoreInfo;
+        semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+        semaphoreInfo.pNext = nullptr;
+        semaphoreInfo.flags = 0;
+
+        VkSemaphore semaphore;
+        VkResult err = funcs.vkCreateSemaphore(device, &semaphoreInfo, nullptr, &semaphore);
+        if (VK_SUCCESS != err) {
+            ALOGE("%s: failed to create import semaphore", __func__);
+            return VK_NULL_HANDLE;
+        }
+
+        VkImportSemaphoreFdInfoKHR importInfo;
+        importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
+        importInfo.pNext = nullptr;
+        importInfo.semaphore = semaphore;
+        importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
+        importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+        importInfo.fd = syncFd;
+
+        err = funcs.vkImportSemaphoreFdKHR(device, &importInfo);
+        if (VK_SUCCESS != err) {
+            funcs.vkDestroySemaphore(device, semaphore, nullptr);
+            ALOGE("%s: failed to import semaphore", __func__);
+            return VK_NULL_HANDLE;
+        }
+
+        return semaphore;
+    }
+
+    int exportSemaphoreSyncFd(VkSemaphore semaphore) {
+        int res;
+
+        VkSemaphoreGetFdInfoKHR getFdInfo;
+        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
+        getFdInfo.pNext = nullptr;
+        getFdInfo.semaphore = semaphore;
+        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+
+        VkResult err = funcs.vkGetSemaphoreFdKHR(device, &getFdInfo, &res);
+        if (VK_SUCCESS != err) {
+            ALOGE("%s: failed to export semaphore, err: %d", __func__, err);
+            return -1;
+        }
+        return res;
+    }
+
+    void destroySemaphore(VkSemaphore semaphore) {
+        funcs.vkDestroySemaphore(device, semaphore, nullptr);
+    }
+};
+
+static GrVkGetProc sGetProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
+    if (device != VK_NULL_HANDLE) {
+        return vkGetDeviceProcAddr(device, proc_name);
+    }
+    return vkGetInstanceProcAddr(instance, proc_name);
+};
+
+#define BAIL(fmt, ...)                                          \
+    {                                                           \
+        ALOGE("%s: " fmt ", bailing", __func__, ##__VA_ARGS__); \
+        return interface;                                       \
+    }
+
+#define CHECK_NONNULL(expr)       \
+    if ((expr) == nullptr) {      \
+        BAIL("[%s] null", #expr); \
+    }
+
+#define VK_CHECK(expr)                              \
+    if ((expr) != VK_SUCCESS) {                     \
+        BAIL("[%s] failed. err = %d", #expr, expr); \
+        return interface;                           \
+    }
+
+#define VK_GET_PROC(F)                                                           \
+    PFN_vk##F vk##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F); \
+    CHECK_NONNULL(vk##F)
+#define VK_GET_INST_PROC(instance, F)                                      \
+    PFN_vk##F vk##F = (PFN_vk##F)vkGetInstanceProcAddr(instance, "vk" #F); \
+    CHECK_NONNULL(vk##F)
+#define VK_GET_DEV_PROC(device, F)                                     \
+    PFN_vk##F vk##F = (PFN_vk##F)vkGetDeviceProcAddr(device, "vk" #F); \
+    CHECK_NONNULL(vk##F)
+
+VulkanInterface initVulkanInterface(bool protectedContent = false) {
+    VulkanInterface interface;
+
+    VK_GET_PROC(EnumerateInstanceVersion);
+    uint32_t instanceVersion;
+    VK_CHECK(vkEnumerateInstanceVersion(&instanceVersion));
+
+    if (instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
+        return interface;
+    }
+
+    const VkApplicationInfo appInfo = {
+            VK_STRUCTURE_TYPE_APPLICATION_INFO, nullptr, "surfaceflinger", 0, "android platform", 0,
+            VK_MAKE_VERSION(1, 1, 0),
+    };
+
+    VK_GET_PROC(EnumerateInstanceExtensionProperties);
+
+    uint32_t extensionCount = 0;
+    VK_CHECK(vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr));
+    std::vector<VkExtensionProperties> instanceExtensions(extensionCount);
+    VK_CHECK(vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
+                                                    instanceExtensions.data()));
+    std::vector<const char*> enabledInstanceExtensionNames;
+    enabledInstanceExtensionNames.reserve(instanceExtensions.size());
+    interface.instanceExtensionNames.reserve(instanceExtensions.size());
+    for (const auto& instExt : instanceExtensions) {
+        enabledInstanceExtensionNames.push_back(instExt.extensionName);
+        interface.instanceExtensionNames.push_back(instExt.extensionName);
+    }
+
+    const VkInstanceCreateInfo instanceCreateInfo = {
+            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+            nullptr,
+            0,
+            &appInfo,
+            0,
+            nullptr,
+            (uint32_t)enabledInstanceExtensionNames.size(),
+            enabledInstanceExtensionNames.data(),
+    };
+
+    VK_GET_PROC(CreateInstance);
+    VkInstance instance;
+    VK_CHECK(vkCreateInstance(&instanceCreateInfo, nullptr, &instance));
+
+    VK_GET_INST_PROC(instance, DestroyInstance);
+    interface.funcs.vkDestroyInstance = vkDestroyInstance;
+    VK_GET_INST_PROC(instance, EnumeratePhysicalDevices);
+    VK_GET_INST_PROC(instance, EnumerateDeviceExtensionProperties);
+    VK_GET_INST_PROC(instance, GetPhysicalDeviceProperties2);
+    VK_GET_INST_PROC(instance, GetPhysicalDeviceExternalSemaphoreProperties);
+    VK_GET_INST_PROC(instance, GetPhysicalDeviceQueueFamilyProperties);
+    VK_GET_INST_PROC(instance, GetPhysicalDeviceFeatures2);
+    VK_GET_INST_PROC(instance, CreateDevice);
+
+    uint32_t physdevCount;
+    VK_CHECK(vkEnumeratePhysicalDevices(instance, &physdevCount, nullptr));
+    if (physdevCount == 0) {
+        BAIL("Could not find any physical devices");
+    }
+
+    physdevCount = 1;
+    VkPhysicalDevice physicalDevice;
+    VkResult enumeratePhysDevsErr =
+            vkEnumeratePhysicalDevices(instance, &physdevCount, &physicalDevice);
+    if (enumeratePhysDevsErr != VK_SUCCESS && VK_INCOMPLETE != enumeratePhysDevsErr) {
+        BAIL("vkEnumeratePhysicalDevices failed with non-VK_INCOMPLETE error: %d",
+             enumeratePhysDevsErr);
+    }
+
+    VkPhysicalDeviceProperties2 physDevProps = {
+            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
+            0,
+            {},
+    };
+    VkPhysicalDeviceProtectedMemoryProperties protMemProps = {
+            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES,
+            0,
+            {},
+    };
+
+    if (protectedContent) {
+        physDevProps.pNext = &protMemProps;
+    }
+
+    vkGetPhysicalDeviceProperties2(physicalDevice, &physDevProps);
+    if (physDevProps.properties.apiVersion < VK_MAKE_VERSION(1, 1, 0)) {
+        BAIL("Could not find a Vulkan 1.1+ physical device");
+    }
+
+    // Check for syncfd support. Bail if we cannot both import and export them.
+    VkPhysicalDeviceExternalSemaphoreInfo semInfo = {
+            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
+            nullptr,
+            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+    };
+    VkExternalSemaphoreProperties semProps = {
+            VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, nullptr, 0, 0, 0,
+    };
+    vkGetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semInfo, &semProps);
+
+    bool sufficientSemaphoreSyncFdSupport = (semProps.exportFromImportedHandleTypes &
+                                             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) &&
+            (semProps.compatibleHandleTypes & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) &&
+            (semProps.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT) &&
+            (semProps.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT);
+
+    if (!sufficientSemaphoreSyncFdSupport) {
+        BAIL("Vulkan device does not support sufficient external semaphore sync fd features. "
+             "exportFromImportedHandleTypes 0x%x (needed 0x%x) "
+             "compatibleHandleTypes 0x%x (needed 0x%x) "
+             "externalSemaphoreFeatures 0x%x (needed 0x%x) ",
+             semProps.exportFromImportedHandleTypes, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+             semProps.compatibleHandleTypes, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+             semProps.externalSemaphoreFeatures,
+             VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
+                     VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT);
+    } else {
+        ALOGD("Vulkan device supports sufficient external semaphore sync fd features. "
+              "exportFromImportedHandleTypes 0x%x (needed 0x%x) "
+              "compatibleHandleTypes 0x%x (needed 0x%x) "
+              "externalSemaphoreFeatures 0x%x (needed 0x%x) ",
+              semProps.exportFromImportedHandleTypes, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+              semProps.compatibleHandleTypes, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+              semProps.externalSemaphoreFeatures,
+              VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
+                      VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT);
+    }
+
+    uint32_t queueCount;
+    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueCount, nullptr);
+    if (queueCount == 0) {
+        BAIL("Could not find queues for physical device");
+    }
+
+    std::vector<VkQueueFamilyProperties> queueProps(queueCount);
+    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, &queueCount, queueProps.data());
+
+    int graphicsQueueIndex = -1;
+    for (uint32_t i = 0; i < queueCount; ++i) {
+        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+            graphicsQueueIndex = i;
+            break;
+        }
+    }
+
+    if (graphicsQueueIndex == -1) {
+        BAIL("Could not find a graphics queue family");
+    }
+
+    uint32_t deviceExtensionCount;
+    VK_CHECK(vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &deviceExtensionCount,
+                                                  nullptr));
+    std::vector<VkExtensionProperties> deviceExtensions(deviceExtensionCount);
+    VK_CHECK(vkEnumerateDeviceExtensionProperties(physicalDevice, nullptr, &deviceExtensionCount,
+                                                  deviceExtensions.data()));
+
+    std::vector<const char*> enabledDeviceExtensionNames;
+    enabledDeviceExtensionNames.reserve(deviceExtensions.size());
+    interface.deviceExtensionNames.reserve(deviceExtensions.size());
+    for (const auto& devExt : deviceExtensions) {
+        enabledDeviceExtensionNames.push_back(devExt.extensionName);
+        interface.deviceExtensionNames.push_back(devExt.extensionName);
+    }
+
+    interface.grExtensions.init(sGetProc, instance, physicalDevice,
+                                enabledInstanceExtensionNames.size(),
+                                enabledInstanceExtensionNames.data(),
+                                enabledDeviceExtensionNames.size(),
+                                enabledDeviceExtensionNames.data());
+
+    if (!interface.grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1)) {
+        BAIL("Vulkan driver doesn't support external semaphore fd");
+    }
+
+    interface.physicalDeviceFeatures2 = new VkPhysicalDeviceFeatures2;
+    interface.physicalDeviceFeatures2->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+    interface.physicalDeviceFeatures2->pNext = nullptr;
+
+    interface.samplerYcbcrConversionFeatures = new VkPhysicalDeviceSamplerYcbcrConversionFeatures;
+    interface.samplerYcbcrConversionFeatures->sType =
+            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
+    interface.samplerYcbcrConversionFeatures->pNext = nullptr;
+
+    interface.physicalDeviceFeatures2->pNext = interface.samplerYcbcrConversionFeatures;
+    void** tailPnext = &interface.samplerYcbcrConversionFeatures->pNext;
+
+    if (protectedContent) {
+        interface.protectedMemoryFeatures = new VkPhysicalDeviceProtectedMemoryFeatures;
+        interface.protectedMemoryFeatures->sType =
+                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
+        interface.protectedMemoryFeatures->pNext = nullptr;
+        *tailPnext = interface.protectedMemoryFeatures;
+        tailPnext = &interface.protectedMemoryFeatures->pNext;
+    }
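+    // At this point the pNext chain is physicalDeviceFeatures2 -> samplerYcbcrConversionFeatures
+    // [-> protectedMemoryFeatures], with tailPnext pointing at its current end.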
+
+    vkGetPhysicalDeviceFeatures2(physicalDevice, interface.physicalDeviceFeatures2);
+    // Looks like this would slow things down and we can't depend on it on all platforms
+    interface.physicalDeviceFeatures2->features.robustBufferAccess = VK_FALSE;
+
+    float queuePriorities[1] = {0.0f};
+    void* queueNextPtr = nullptr;
+
+    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo = {
+            VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT,
+            nullptr,
+            // If queue priority is supported, RE should always have realtime priority.
+            VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT,
+    };
+
+    if (interface.grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
+        queueNextPtr = &queuePriorityCreateInfo;
+        interface.isRealtimePriority = true;
+    }
+
+    VkDeviceQueueCreateFlags deviceQueueCreateFlags =
+            (VkDeviceQueueCreateFlags)(protectedContent ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0);
+
+    const VkDeviceQueueCreateInfo queueInfo = {
+            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+            queueNextPtr,
+            deviceQueueCreateFlags,
+            (uint32_t)graphicsQueueIndex,
+            1,
+            queuePriorities,
+    };
+
+    const VkDeviceCreateInfo deviceInfo = {
+            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+            interface.physicalDeviceFeatures2,
+            0,
+            1,
+            &queueInfo,
+            0,
+            nullptr,
+            (uint32_t)enabledDeviceExtensionNames.size(),
+            enabledDeviceExtensionNames.data(),
+            nullptr,
+    };
+
+    ALOGD("Trying to create Vk device with protectedContent=%d", protectedContent);
+    VkDevice device;
+    VK_CHECK(vkCreateDevice(physicalDevice, &deviceInfo, nullptr, &device));
+    ALOGD("Trying to create Vk device with protectedContent=%d (success)", protectedContent);
+
+    VkQueue graphicsQueue;
+    VK_GET_DEV_PROC(device, GetDeviceQueue2);
+    const VkDeviceQueueInfo2 deviceQueueInfo2 = {VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, nullptr,
+                                                 deviceQueueCreateFlags,
+                                                 (uint32_t)graphicsQueueIndex, 0};
+    vkGetDeviceQueue2(device, &deviceQueueInfo2, &graphicsQueue);
+
+    VK_GET_DEV_PROC(device, DeviceWaitIdle);
+    VK_GET_DEV_PROC(device, DestroyDevice);
+    interface.funcs.vkDeviceWaitIdle = vkDeviceWaitIdle;
+    interface.funcs.vkDestroyDevice = vkDestroyDevice;
+
+    VK_GET_DEV_PROC(device, CreateSemaphore);
+    VK_GET_DEV_PROC(device, ImportSemaphoreFdKHR);
+    VK_GET_DEV_PROC(device, GetSemaphoreFdKHR);
+    VK_GET_DEV_PROC(device, DestroySemaphore);
+    interface.funcs.vkCreateSemaphore = vkCreateSemaphore;
+    interface.funcs.vkImportSemaphoreFdKHR = vkImportSemaphoreFdKHR;
+    interface.funcs.vkGetSemaphoreFdKHR = vkGetSemaphoreFdKHR;
+    interface.funcs.vkDestroySemaphore = vkDestroySemaphore;
+
+    // At this point, everything's succeeded and we can continue
+    interface.initialized = true;
+    interface.instance = instance;
+    interface.physicalDevice = physicalDevice;
+    interface.device = device;
+    interface.queue = graphicsQueue;
+    interface.queueIndex = graphicsQueueIndex;
+    interface.apiVersion = physDevProps.properties.apiVersion;
+    // grExtensions already constructed
+    // feature pointers already constructed
+    interface.grGetProc = sGetProc;
+    interface.isProtected = protectedContent;
+    // funcs already initialized
+
+    ALOGD("%s: Success init Vulkan interface", __func__);
+    return interface;
+}
+
+void teardownVulkanInterface(VulkanInterface* interface) {
+    interface->initialized = false;
+
+    if (interface->device != VK_NULL_HANDLE) {
+        interface->funcs.vkDeviceWaitIdle(interface->device);
+        interface->funcs.vkDestroyDevice(interface->device, nullptr);
+        interface->device = VK_NULL_HANDLE;
+    }
+    if (interface->instance != VK_NULL_HANDLE) {
+        interface->funcs.vkDestroyInstance(interface->instance, nullptr);
+        interface->instance = VK_NULL_HANDLE;
+    }
+
+    if (interface->protectedMemoryFeatures) {
+        delete interface->protectedMemoryFeatures;
+    }
+
+    if (interface->samplerYcbcrConversionFeatures) {
+        delete interface->samplerYcbcrConversionFeatures;
+    }
+
+    if (interface->physicalDeviceFeatures2) {
+        delete interface->physicalDeviceFeatures2;
+    }
+
+    interface->samplerYcbcrConversionFeatures = nullptr;
+    interface->physicalDeviceFeatures2 = nullptr;
+    interface->protectedMemoryFeatures = nullptr;
+}
+
+static VulkanInterface sVulkanInterface;
+static VulkanInterface sProtectedContentVulkanInterface;
+
+static void sSetupVulkanInterface() {
+    if (!sVulkanInterface.initialized) {
+        sVulkanInterface = initVulkanInterface(false /* no protected content */);
+        // We will have to abort if non-protected VkDevice creation fails (then nothing works).
+        LOG_ALWAYS_FATAL_IF(!sVulkanInterface.initialized,
+                            "Could not initialize Vulkan RenderEngine!");
+    }
+    if (!sProtectedContentVulkanInterface.initialized) {
+        sProtectedContentVulkanInterface = initVulkanInterface(true /* protected content */);
+        if (!sProtectedContentVulkanInterface.initialized) {
+            ALOGE("Could not initialize protected content Vulkan RenderEngine.");
+        }
+    }
+}
+
+namespace skia {
+
+using base::StringAppendF;
+
+bool SkiaVkRenderEngine::canSupportSkiaVkRenderEngine() {
+    VulkanInterface temp = initVulkanInterface(false /* no protected content */);
+    ALOGD("SkiaVkRenderEngine::canSupportSkiaVkRenderEngine(): initialized == %s.",
+          temp.initialized ? "true" : "false");
+    return temp.initialized;
+}
+
+std::unique_ptr<SkiaVkRenderEngine> SkiaVkRenderEngine::create(
+        const RenderEngineCreationArgs& args) {
+    std::unique_ptr<SkiaVkRenderEngine> engine(new SkiaVkRenderEngine(args));
+    engine->ensureGrContextsCreated();
+
+    if (sVulkanInterface.initialized) {
+        ALOGD("SkiaVkRenderEngine::%s: successfully initialized SkiaVkRenderEngine", __func__);
+        return engine;
+    } else {
+        ALOGD("SkiaVkRenderEngine::%s: could not create SkiaVkRenderEngine. "
+              "Likely insufficient Vulkan support",
+              __func__);
+        return {};
+    }
+}
+
+SkiaVkRenderEngine::SkiaVkRenderEngine(const RenderEngineCreationArgs& args)
+      : SkiaRenderEngine(args.renderEngineType, static_cast<PixelFormat>(args.pixelFormat),
+                         args.useColorManagement, args.supportsBackgroundBlur) {}
+
+SkiaVkRenderEngine::~SkiaVkRenderEngine() {
+    finishRenderingAndAbandonContext();
+}
+
+SkiaRenderEngine::Contexts SkiaVkRenderEngine::createDirectContexts(
+        const GrContextOptions& options) {
+    sSetupVulkanInterface();
+
+    SkiaRenderEngine::Contexts contexts;
+    contexts.first = GrDirectContext::MakeVulkan(sVulkanInterface.getBackendContext(), options);
+    if (supportsProtectedContentImpl()) {
+        contexts.second =
+                GrDirectContext::MakeVulkan(sProtectedContentVulkanInterface.getBackendContext(),
+                                            options);
+    }
+
+    return contexts;
+}
+
+bool SkiaVkRenderEngine::supportsProtectedContentImpl() const {
+    return sProtectedContentVulkanInterface.initialized;
+}
+
+bool SkiaVkRenderEngine::useProtectedContextImpl(GrProtected) {
+    return true;
+}
+
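+// Finished-proc callbacks handed to Skia via GrFlushInfo: each call drops one reference to the
+// shared DestroySemaphoreInfo and destroys the VkSemaphore once the last reference is released.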
+static void delete_semaphore(void* semaphore) {
+    DestroySemaphoreInfo* info = reinterpret_cast<DestroySemaphoreInfo*>(semaphore);
+    --info->mRefs;
+    if (!info->mRefs) {
+        sVulkanInterface.destroySemaphore(info->mSemaphore);
+        delete info;
+    }
+}
+
+static void delete_semaphore_protected(void* semaphore) {
+    DestroySemaphoreInfo* info = reinterpret_cast<DestroySemaphoreInfo*>(semaphore);
+    --info->mRefs;
+    if (!info->mRefs) {
+        sProtectedContentVulkanInterface.destroySemaphore(info->mSemaphore);
+        delete info;
+    }
+}
+
+static VulkanInterface& getVulkanInterface(bool protectedContext) {
+    if (protectedContext) {
+        return sProtectedContentVulkanInterface;
+    }
+    return sVulkanInterface;
+}
+
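+// Imports the incoming sync fence as a VkSemaphore that the GrDirectContext waits on before
+// rendering; if the fd cannot be duplicated, falls back to a blocking CPU-side sync_wait.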
+void SkiaVkRenderEngine::waitFence(GrDirectContext* grContext, base::borrowed_fd fenceFd) {
+    if (fenceFd.get() < 0) return;
+
+    int dupedFd = dup(fenceFd.get());
+    if (dupedFd < 0) {
+        ALOGE("failed to create duplicate fence fd: %d", dupedFd);
+        sync_wait(fenceFd.get(), -1);
+        return;
+    }
+
+    base::unique_fd fenceDup(dupedFd);
+    VkSemaphore waitSemaphore =
+            getVulkanInterface(isProtected()).importSemaphoreFromSyncFd(fenceDup.release());
+    GrBackendSemaphore beSemaphore;
+    beSemaphore.initVulkan(waitSemaphore);
+    grContext->wait(1, &beSemaphore, true /* delete after wait */);
+}
+
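+// Flushes the recorded GPU work with an exportable signal semaphore and, if the semaphores were
+// submitted, exports that semaphore as a sync fd for the caller to wait on.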
+base::unique_fd SkiaVkRenderEngine::flushAndSubmit(GrDirectContext* grContext) {
+    VulkanInterface& vi = getVulkanInterface(isProtected());
+    VkSemaphore semaphore = vi.createExportableSemaphore();
+
+    GrBackendSemaphore backendSemaphore;
+    backendSemaphore.initVulkan(semaphore);
+
+    GrFlushInfo flushInfo;
+    DestroySemaphoreInfo* destroySemaphoreInfo = nullptr;
+    if (semaphore != VK_NULL_HANDLE) {
+        destroySemaphoreInfo = new DestroySemaphoreInfo(semaphore);
+        flushInfo.fNumSemaphores = 1;
+        flushInfo.fSignalSemaphores = &backendSemaphore;
+        flushInfo.fFinishedProc = isProtected() ? delete_semaphore_protected : delete_semaphore;
+        flushInfo.fFinishedContext = destroySemaphoreInfo;
+    }
+    GrSemaphoresSubmitted submitted = grContext->flush(flushInfo);
+    grContext->submit(false /* no cpu sync */);
+    int drawFenceFd = -1;
+    if (semaphore != VK_NULL_HANDLE) {
+        if (GrSemaphoresSubmitted::kYes == submitted) {
+            drawFenceFd = vi.exportSemaphoreSyncFd(semaphore);
+        }
+        // Now that drawFenceFd has been created, we can delete our reference to this semaphore
+        flushInfo.fFinishedProc(destroySemaphoreInfo);
+    }
+    base::unique_fd res(drawFenceFd);
+    return res;
+}
+
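+// Returns the EGL realtime-priority constant when the selected Vulkan interface reports a
+// realtime-priority queue, and 0 otherwise.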
+int SkiaVkRenderEngine::getContextPriority() {
+    // EGL_CONTEXT_PRIORITY_REALTIME_NV
+    constexpr int kRealtimePriority = 0x3357;
+    if (getVulkanInterface(isProtected()).isRealtimePriority) {
+        return kRealtimePriority;
+    } else {
+        return 0;
+    }
+}
+
+void SkiaVkRenderEngine::appendBackendSpecificInfoToDump(std::string& result) {
+    StringAppendF(&result, "\n ------------RE Vulkan----------\n");
+    StringAppendF(&result, "\n Vulkan device initialized: %d\n", sVulkanInterface.initialized);
+    StringAppendF(&result, "\n Vulkan protected device initialized: %d\n",
+                  sProtectedContentVulkanInterface.initialized);
+
+    if (!sVulkanInterface.initialized) {
+        return;
+    }
+
+    StringAppendF(&result, "\n Instance extensions:\n");
+    for (const auto& name : sVulkanInterface.instanceExtensionNames) {
+        StringAppendF(&result, "\n %s\n", name.c_str());
+    }
+
+    StringAppendF(&result, "\n Device extensions:\n");
+    for (const auto& name : sVulkanInterface.deviceExtensionNames) {
+        StringAppendF(&result, "\n %s\n", name.c_str());
+    }
+}
+
+} // namespace skia
+} // namespace renderengine
+} // namespace android
diff --git a/libs/renderengine/skia/SkiaVkRenderEngine.h b/libs/renderengine/skia/SkiaVkRenderEngine.h
new file mode 100644
index 0000000..2e0cf45
--- /dev/null
+++ b/libs/renderengine/skia/SkiaVkRenderEngine.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_SKIAVKRENDERENGINE_H_
+#define SF_SKIAVKRENDERENGINE_H_
+
+#include <vk/GrVkBackendContext.h>
+
+#include "SkiaRenderEngine.h"
+
+namespace android {
+namespace renderengine {
+namespace skia {
+
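+// Skia-based RenderEngine implementation backed by Vulkan GrDirectContexts (one for normal
+// content and, when supported, one for protected content).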
+class SkiaVkRenderEngine : public SkiaRenderEngine {
+public:
+    // Returns false if the Vulkan implementation cannot support SkiaVkRenderEngine.
+    static bool canSupportSkiaVkRenderEngine();
+    static std::unique_ptr<SkiaVkRenderEngine> create(const RenderEngineCreationArgs& args);
+    ~SkiaVkRenderEngine() override;
+
+    int getContextPriority() override;
+
+protected:
+    // Implementations of abstract SkiaRenderEngine functions specific to the
+    // Vulkan rendering backend.
+    virtual SkiaRenderEngine::Contexts createDirectContexts(const GrContextOptions& options);
+    bool supportsProtectedContentImpl() const override;
+    bool useProtectedContextImpl(GrProtected isProtected) override;
+    void waitFence(GrDirectContext* grContext, base::borrowed_fd fenceFd) override;
+    base::unique_fd flushAndSubmit(GrDirectContext* context) override;
+    void appendBackendSpecificInfoToDump(std::string& result) override;
+
+private:
+    SkiaVkRenderEngine(const RenderEngineCreationArgs& args);
+    base::unique_fd flush();
+
+    GrVkBackendContext mBackendContext;
+};
+
+} // namespace skia
+} // namespace renderengine
+} // namespace android
+
+#endif
diff --git a/libs/renderengine/skia/debug/SkiaCapture.cpp b/libs/renderengine/skia/debug/SkiaCapture.cpp
index 856fff4..b21b01c 100644
--- a/libs/renderengine/skia/debug/SkiaCapture.cpp
+++ b/libs/renderengine/skia/debug/SkiaCapture.cpp
@@ -27,6 +27,9 @@
 #include <utils/Trace.h>
 
 #include "CommonPool.h"
+#include "SkCanvas.h"
+#include "SkRect.h"
+#include "SkTypeface.h"
 #include "src/utils/SkMultiPictureDocument.h"
 
 namespace android {
diff --git a/libs/renderengine/skia/debug/SkiaCapture.h b/libs/renderengine/skia/debug/SkiaCapture.h
index f194629..d65a579 100644
--- a/libs/renderengine/skia/debug/SkiaCapture.h
+++ b/libs/renderengine/skia/debug/SkiaCapture.h
@@ -19,13 +19,15 @@
 #include <SkDocument.h>
 #include <SkNWayCanvas.h>
 #include <SkPictureRecorder.h>
+#include <SkRefCnt.h>
+#include <SkStream.h>
 #include <SkSurface.h>
+#include "tools/SkSharingProc.h"
 
 #include <chrono>
 #include <mutex>
 
 #include "CaptureTimer.h"
-#include "tools/SkSharingProc.h"
 
 namespace android {
 namespace renderengine {
diff --git a/libs/renderengine/skia/filters/BlurFilter.cpp b/libs/renderengine/skia/filters/BlurFilter.cpp
index 63cc02b..2557ac9 100644
--- a/libs/renderengine/skia/filters/BlurFilter.cpp
+++ b/libs/renderengine/skia/filters/BlurFilter.cpp
@@ -17,7 +17,6 @@
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 #include "BlurFilter.h"
 #include <SkCanvas.h>
-#include <SkData.h>
 #include <SkPaint.h>
 #include <SkRRect.h>
 #include <SkRuntimeEffect.h>
diff --git a/libs/renderengine/skia/filters/GaussianBlurFilter.cpp b/libs/renderengine/skia/filters/GaussianBlurFilter.cpp
index 55867a9..511d7c9 100644
--- a/libs/renderengine/skia/filters/GaussianBlurFilter.cpp
+++ b/libs/renderengine/skia/filters/GaussianBlurFilter.cpp
@@ -18,7 +18,6 @@
 
 #include "GaussianBlurFilter.h"
 #include <SkCanvas.h>
-#include <SkData.h>
 #include <SkPaint.h>
 #include <SkRRect.h>
 #include <SkRuntimeEffect.h>
@@ -26,6 +25,7 @@
 #include <SkSize.h>
 #include <SkString.h>
 #include <SkSurface.h>
+#include "include/gpu/GpuTypes.h" // from Skia
 #include <log/log.h>
 #include <utils/Trace.h>
 
@@ -45,7 +45,8 @@
     // Create blur surface with the bit depth and colorspace of the original surface
     SkImageInfo scaledInfo = input->imageInfo().makeWH(std::ceil(blurRect.width() * kInputScale),
                                                        std::ceil(blurRect.height() * kInputScale));
-    sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(context, SkBudgeted::kNo, scaledInfo);
+    sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(context,
+                                                           skgpu::Budgeted::kNo, scaledInfo);
 
     SkPaint paint;
     paint.setBlendMode(SkBlendMode::kSrc);
diff --git a/libs/renderengine/skia/filters/KawaseBlurFilter.cpp b/libs/renderengine/skia/filters/KawaseBlurFilter.cpp
index bfde06f..e370c39 100644
--- a/libs/renderengine/skia/filters/KawaseBlurFilter.cpp
+++ b/libs/renderengine/skia/filters/KawaseBlurFilter.cpp
@@ -18,7 +18,6 @@
 
 #include "KawaseBlurFilter.h"
 #include <SkCanvas.h>
-#include <SkData.h>
 #include <SkPaint.h>
 #include <SkRRect.h>
 #include <SkRuntimeEffect.h>
diff --git a/libs/renderengine/tests/Android.bp b/libs/renderengine/tests/Android.bp
index bbab792..50e166d 100644
--- a/libs/renderengine/tests/Android.bp
+++ b/libs/renderengine/tests/Android.bp
@@ -24,7 +24,8 @@
 cc_test {
     name: "librenderengine_test",
     defaults: [
-        "skia_deps",
+        "android.hardware.graphics.composer3-ndk_shared",
+        "librenderengine_deps",
         "surfaceflinger_defaults",
     ],
     test_suites: ["device-tests"],
@@ -49,7 +50,6 @@
     ],
 
     shared_libs: [
-        "android.hardware.graphics.composer3-V1-ndk",
         "libbase",
         "libcutils",
         "libEGL",
diff --git a/libs/renderengine/tests/RenderEngineTest.cpp b/libs/renderengine/tests/RenderEngineTest.cpp
index 8889f76..f3f2da8 100644
--- a/libs/renderengine/tests/RenderEngineTest.cpp
+++ b/libs/renderengine/tests/RenderEngineTest.cpp
@@ -37,8 +37,8 @@
 #include <condition_variable>
 #include <fstream>
 
-#include "../gl/GLESRenderEngine.h"
 #include "../skia/SkiaGLRenderEngine.h"
+#include "../skia/SkiaVkRenderEngine.h"
 #include "../threaded/RenderEngineThreaded.h"
 
 constexpr int DEFAULT_DISPLAY_WIDTH = 128;
@@ -108,25 +108,24 @@
     virtual std::string name() = 0;
     virtual renderengine::RenderEngine::RenderEngineType type() = 0;
     virtual std::unique_ptr<renderengine::RenderEngine> createRenderEngine() = 0;
-    virtual std::unique_ptr<renderengine::gl::GLESRenderEngine> createGLESRenderEngine() {
-        return nullptr;
-    }
+    virtual bool typeSupported() = 0;
     virtual bool useColorManagement() const = 0;
 };
 
-class GLESRenderEngineFactory : public RenderEngineFactory {
+class SkiaVkRenderEngineFactory : public RenderEngineFactory {
 public:
-    std::string name() override { return "GLESRenderEngineFactory"; }
+    std::string name() override { return "SkiaVkRenderEngineFactory"; }
 
     renderengine::RenderEngine::RenderEngineType type() {
-        return renderengine::RenderEngine::RenderEngineType::GLES;
+        return renderengine::RenderEngine::RenderEngineType::SKIA_VK;
     }
 
     std::unique_ptr<renderengine::RenderEngine> createRenderEngine() override {
-        return createGLESRenderEngine();
+        std::unique_ptr<renderengine::RenderEngine> re = createSkiaVkRenderEngine();
+        return re;
     }
 
-    std::unique_ptr<renderengine::gl::GLESRenderEngine> createGLESRenderEngine() {
+    std::unique_ptr<renderengine::skia::SkiaVkRenderEngine> createSkiaVkRenderEngine() {
         renderengine::RenderEngineCreationArgs reCreationArgs =
                 renderengine::RenderEngineCreationArgs::Builder()
                         .setPixelFormat(static_cast<int>(ui::PixelFormat::RGBA_8888))
@@ -139,42 +138,20 @@
                         .setRenderEngineType(type())
                         .setUseColorManagerment(useColorManagement())
                         .build();
-        return renderengine::gl::GLESRenderEngine::create(reCreationArgs);
+        return renderengine::skia::SkiaVkRenderEngine::create(reCreationArgs);
     }
 
+    bool typeSupported() override {
+        return skia::SkiaVkRenderEngine::canSupportSkiaVkRenderEngine();
+    }
     bool useColorManagement() const override { return false; }
+    void skip() { GTEST_SKIP(); }
 };
 
-class GLESCMRenderEngineFactory : public RenderEngineFactory {
+class SkiaVkCMRenderEngineFactory : public SkiaVkRenderEngineFactory {
 public:
-    std::string name() override { return "GLESCMRenderEngineFactory"; }
-
-    renderengine::RenderEngine::RenderEngineType type() {
-        return renderengine::RenderEngine::RenderEngineType::GLES;
-    }
-
-    std::unique_ptr<renderengine::RenderEngine> createRenderEngine() override {
-        return createGLESRenderEngine();
-    }
-
-    std::unique_ptr<renderengine::gl::GLESRenderEngine> createGLESRenderEngine() override {
-        renderengine::RenderEngineCreationArgs reCreationArgs =
-                renderengine::RenderEngineCreationArgs::Builder()
-                        .setPixelFormat(static_cast<int>(ui::PixelFormat::RGBA_8888))
-                        .setImageCacheSize(1)
-                        .setEnableProtectedContext(false)
-                        .setPrecacheToneMapperShaderOnly(false)
-                        .setSupportsBackgroundBlur(true)
-                        .setContextPriority(renderengine::RenderEngine::ContextPriority::MEDIUM)
-                        .setRenderEngineType(type())
-                        .setUseColorManagerment(useColorManagement())
-                        .build();
-        return renderengine::gl::GLESRenderEngine::create(reCreationArgs);
-    }
-
     bool useColorManagement() const override { return true; }
 };
-
 class SkiaGLESRenderEngineFactory : public RenderEngineFactory {
 public:
     std::string name() override { return "SkiaGLRenderEngineFactory"; }
@@ -198,6 +175,7 @@
         return renderengine::skia::SkiaGLRenderEngine::create(reCreationArgs);
     }
 
+    bool typeSupported() override { return true; }
     bool useColorManagement() const override { return false; }
 };
 
@@ -224,6 +202,7 @@
         return renderengine::skia::SkiaGLRenderEngine::create(reCreationArgs);
     }
 
+    bool typeSupported() override { return true; }
     bool useColorManagement() const override { return true; }
 };
 
@@ -232,14 +211,14 @@
     std::shared_ptr<renderengine::ExternalTexture> allocateDefaultBuffer() {
         return std::make_shared<
                 renderengine::impl::
-                        ExternalTexture>(new GraphicBuffer(DEFAULT_DISPLAY_WIDTH,
-                                                           DEFAULT_DISPLAY_HEIGHT,
-                                                           HAL_PIXEL_FORMAT_RGBA_8888, 1,
-                                                           GRALLOC_USAGE_SW_READ_OFTEN |
-                                                                   GRALLOC_USAGE_SW_WRITE_OFTEN |
-                                                                   GRALLOC_USAGE_HW_RENDER |
-                                                                   GRALLOC_USAGE_HW_TEXTURE,
-                                                           "output"),
+                        ExternalTexture>(sp<GraphicBuffer>::
+                                                 make(DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT,
+                                                      HAL_PIXEL_FORMAT_RGBA_8888, 1,
+                                                      GRALLOC_USAGE_SW_READ_OFTEN |
+                                                              GRALLOC_USAGE_SW_WRITE_OFTEN |
+                                                              GRALLOC_USAGE_HW_RENDER |
+                                                              GRALLOC_USAGE_HW_TEXTURE,
+                                                      "output"),
                                          *mRE,
                                          renderengine::impl::ExternalTexture::Usage::READABLE |
                                                  renderengine::impl::ExternalTexture::Usage::
@@ -251,12 +230,12 @@
                                                                         uint32_t height) {
         return std::make_shared<
                 renderengine::impl::
-                        ExternalTexture>(new GraphicBuffer(width, height,
-                                                           HAL_PIXEL_FORMAT_RGBA_8888, 1,
-                                                           GRALLOC_USAGE_SW_READ_OFTEN |
-                                                                   GRALLOC_USAGE_SW_WRITE_OFTEN |
-                                                                   GRALLOC_USAGE_HW_TEXTURE,
-                                                           "input"),
+                        ExternalTexture>(sp<GraphicBuffer>::
+                                                 make(width, height, HAL_PIXEL_FORMAT_RGBA_8888, 1,
+                                                      GRALLOC_USAGE_SW_READ_OFTEN |
+                                                              GRALLOC_USAGE_SW_WRITE_OFTEN |
+                                                              GRALLOC_USAGE_HW_TEXTURE,
+                                                      "input"),
                                          *mRE,
                                          renderengine::impl::ExternalTexture::Usage::READABLE |
                                                  renderengine::impl::ExternalTexture::Usage::
@@ -285,10 +264,12 @@
     }
 
     std::shared_ptr<renderengine::ExternalTexture> allocateR8Buffer(int width, int height) {
-        auto buffer = new GraphicBuffer(width, height, android::PIXEL_FORMAT_R_8, 1,
-                                        GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
-                                                GRALLOC_USAGE_HW_TEXTURE,
-                                        "r8");
+        const auto kUsageFlags =
+                static_cast<uint64_t>(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
+                                      GRALLOC_USAGE_HW_TEXTURE);
+        auto buffer =
+                sp<GraphicBuffer>::make(static_cast<uint32_t>(width), static_cast<uint32_t>(height),
+                                        android::PIXEL_FORMAT_R_8, 1u, kUsageFlags, "r8");
         if (buffer->initCheck() != 0) {
             // Devices are not required to support R8.
             return nullptr;
@@ -311,9 +292,6 @@
         }
         for (uint32_t texName : mTexNames) {
             mRE->deleteTextures(1, &texName);
-            if (mGLESRE != nullptr) {
-                EXPECT_FALSE(mGLESRE->isTextureNameKnownForTesting(texName));
-            }
         }
         const ::testing::TestInfo* const test_info =
                 ::testing::UnitTest::GetInstance()->current_test_info();
@@ -526,20 +504,15 @@
 
     void invokeDraw(const renderengine::DisplaySettings& settings,
                     const std::vector<renderengine::LayerSettings>& layers) {
-        std::future<renderengine::RenderEngineResult> result =
+        ftl::Future<FenceResult> future =
                 mRE->drawLayers(settings, layers, mBuffer, true, base::unique_fd());
+        ASSERT_TRUE(future.valid());
 
-        ASSERT_TRUE(result.valid());
-        auto [status, fence] = result.get();
+        auto result = future.get();
+        ASSERT_TRUE(result.ok());
 
-        ASSERT_EQ(NO_ERROR, status);
-        if (fence.ok()) {
-            sync_wait(fence.get(), -1);
-        }
-
-        if (layers.size() > 0 && mGLESRE != nullptr) {
-            ASSERT_TRUE(mGLESRE->isFramebufferImageCachedForTesting(mBuffer->getBuffer()->getId()));
-        }
+        auto fence = result.value();
+        fence->waitForever(LOG_TAG);
     }
 
     void drawEmptyLayers() {
@@ -662,26 +635,13 @@
 
     std::unique_ptr<renderengine::RenderEngine> mRE;
     std::shared_ptr<renderengine::ExternalTexture> mBuffer;
-    // GLESRenderEngine for testing GLES-specific behavior.
-    // Owened by mRE, but this is downcasted.
-    renderengine::gl::GLESRenderEngine* mGLESRE = nullptr;
 
     std::vector<uint32_t> mTexNames;
 };
 
 void RenderEngineTest::initializeRenderEngine() {
     const auto& renderEngineFactory = GetParam();
-    if (renderEngineFactory->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
-        // Only GLESRenderEngine exposes test-only methods. Provide a pointer to the
-        // GLESRenderEngine if we're using it so that we don't need to dynamic_cast
-        // every time.
-        std::unique_ptr<renderengine::gl::GLESRenderEngine> renderEngine =
-                renderEngineFactory->createGLESRenderEngine();
-        mGLESRE = renderEngine.get();
-        mRE = std::move(renderEngine);
-    } else {
-        mRE = renderEngineFactory->createRenderEngine();
-    }
+    mRE = renderEngineFactory->createRenderEngine();
     mBuffer = allocateDefaultBuffer();
 }
 
@@ -1002,9 +962,9 @@
     std::vector<renderengine::LayerSettings> layers;
 
     renderengine::LayerSettings layer;
-    layer.sourceDataspace = sourceDataspace;
     layer.geometry.boundaries = Rect(1, 1).toFloatRect();
     SourceVariant::fillColor(layer, 0.5f, 0.25f, 0.125f, this);
+    layer.sourceDataspace = sourceDataspace;
     layer.alpha = 1.0f;
 
     // construct a fake color matrix
@@ -1030,13 +990,13 @@
 template <typename SourceVariant>
 void RenderEngineTest::fillBufferColorTransformAndSourceDataspace() {
     unordered_map<ui::Dataspace, ubyte4> dataspaceToColorMap;
-    dataspaceToColorMap[ui::Dataspace::V0_BT709] = {172, 0, 0, 255};
-    dataspaceToColorMap[ui::Dataspace::BT2020] = {172, 0, 0, 255};
-    dataspaceToColorMap[ui::Dataspace::ADOBE_RGB] = {172, 0, 0, 255};
+    dataspaceToColorMap[ui::Dataspace::V0_BT709] = {77, 0, 0, 255};
+    dataspaceToColorMap[ui::Dataspace::BT2020] = {101, 0, 0, 255};
+    dataspaceToColorMap[ui::Dataspace::ADOBE_RGB] = {75, 0, 0, 255};
     ui::Dataspace customizedDataspace = static_cast<ui::Dataspace>(
             ui::Dataspace::STANDARD_BT709 | ui::Dataspace::TRANSFER_GAMMA2_2 |
             ui::Dataspace::RANGE_FULL);
-    dataspaceToColorMap[customizedDataspace] = {172, 0, 0, 255};
+    dataspaceToColorMap[customizedDataspace] = {61, 0, 0, 255};
     for (const auto& [sourceDataspace, color] : dataspaceToColorMap) {
         fillBufferWithColorTransformAndSourceDataspace<SourceVariant>(sourceDataspace);
         expectBufferColor(fullscreenRect(), color.r, color.g, color.b, color.a, 1);
@@ -1076,13 +1036,13 @@
 template <typename SourceVariant>
 void RenderEngineTest::fillBufferColorTransformAndOutputDataspace() {
     unordered_map<ui::Dataspace, ubyte4> dataspaceToColorMap;
-    dataspaceToColorMap[ui::Dataspace::V0_BT709] = {202, 0, 0, 255};
-    dataspaceToColorMap[ui::Dataspace::BT2020] = {192, 0, 0, 255};
-    dataspaceToColorMap[ui::Dataspace::ADOBE_RGB] = {202, 0, 0, 255};
+    dataspaceToColorMap[ui::Dataspace::V0_BT709] = {198, 0, 0, 255};
+    dataspaceToColorMap[ui::Dataspace::BT2020] = {187, 0, 0, 255};
+    dataspaceToColorMap[ui::Dataspace::ADOBE_RGB] = {192, 0, 0, 255};
     ui::Dataspace customizedDataspace = static_cast<ui::Dataspace>(
             ui::Dataspace::STANDARD_BT709 | ui::Dataspace::TRANSFER_GAMMA2_6 |
             ui::Dataspace::RANGE_FULL);
-    dataspaceToColorMap[customizedDataspace] = {202, 0, 0, 255};
+    dataspaceToColorMap[customizedDataspace] = {205, 0, 0, 255};
     for (const auto& [outputDataspace, color] : dataspaceToColorMap) {
         fillBufferWithColorTransformAndOutputDataspace<SourceVariant>(outputDataspace);
         expectBufferColor(fullscreenRect(), color.r, color.g, color.b, color.a, 1);
@@ -1496,13 +1456,13 @@
 
     auto buf = std::make_shared<
             renderengine::impl::
-                    ExternalTexture>(new GraphicBuffer(kGreyLevels, 1, HAL_PIXEL_FORMAT_RGBA_8888,
-                                                       1,
-                                                       GRALLOC_USAGE_SW_READ_OFTEN |
-                                                               GRALLOC_USAGE_SW_WRITE_OFTEN |
-                                                               GRALLOC_USAGE_HW_RENDER |
-                                                               GRALLOC_USAGE_HW_TEXTURE,
-                                                       "input"),
+                    ExternalTexture>(sp<GraphicBuffer>::make(kGreyLevels, 1,
+                                                             HAL_PIXEL_FORMAT_RGBA_8888, 1,
+                                                             GRALLOC_USAGE_SW_READ_OFTEN |
+                                                                     GRALLOC_USAGE_SW_WRITE_OFTEN |
+                                                                     GRALLOC_USAGE_HW_RENDER |
+                                                                     GRALLOC_USAGE_HW_TEXTURE,
+                                                             "input"),
                                      *mRE,
                                      renderengine::impl::ExternalTexture::Usage::READABLE |
                                              renderengine::impl::ExternalTexture::Usage::WRITEABLE);
@@ -1529,13 +1489,13 @@
 
     mBuffer = std::make_shared<
             renderengine::impl::
-                    ExternalTexture>(new GraphicBuffer(kGreyLevels, 1, HAL_PIXEL_FORMAT_RGBA_8888,
-                                                       1,
-                                                       GRALLOC_USAGE_SW_READ_OFTEN |
-                                                               GRALLOC_USAGE_SW_WRITE_OFTEN |
-                                                               GRALLOC_USAGE_HW_RENDER |
-                                                               GRALLOC_USAGE_HW_TEXTURE,
-                                                       "output"),
+                    ExternalTexture>(sp<GraphicBuffer>::make(kGreyLevels, 1,
+                                                             HAL_PIXEL_FORMAT_RGBA_8888, 1,
+                                                             GRALLOC_USAGE_SW_READ_OFTEN |
+                                                                     GRALLOC_USAGE_SW_WRITE_OFTEN |
+                                                                     GRALLOC_USAGE_HW_RENDER |
+                                                                     GRALLOC_USAGE_HW_TEXTURE,
+                                                             "output"),
                                      *mRE,
                                      renderengine::impl::ExternalTexture::Usage::READABLE |
                                              renderengine::impl::ExternalTexture::Usage::WRITEABLE);
@@ -1598,17 +1558,50 @@
 }
 
 INSTANTIATE_TEST_SUITE_P(PerRenderEngineType, RenderEngineTest,
-                         testing::Values(std::make_shared<GLESRenderEngineFactory>(),
-                                         std::make_shared<GLESCMRenderEngineFactory>(),
-                                         std::make_shared<SkiaGLESRenderEngineFactory>(),
-                                         std::make_shared<SkiaGLESCMRenderEngineFactory>()));
+                         testing::Values(std::make_shared<SkiaGLESRenderEngineFactory>(),
+                                         std::make_shared<SkiaGLESCMRenderEngineFactory>(),
+                                         std::make_shared<SkiaVkRenderEngineFactory>(),
+                                         std::make_shared<SkiaVkCMRenderEngineFactory>()));
 
 TEST_P(RenderEngineTest, drawLayers_noLayersToDraw) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     drawEmptyLayers();
 }
 
+TEST_P(RenderEngineTest, drawLayers_fillRedBufferAndEmptyBuffer) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
+    initializeRenderEngine();
+    renderengine::DisplaySettings settings;
+    settings.physicalDisplay = fullscreenRect();
+    settings.clip = fullscreenRect();
+    settings.outputDataspace = ui::Dataspace::V0_SRGB_LINEAR;
+
+    // add a red layer
+    renderengine::LayerSettings layerOne{
+            .geometry.boundaries = fullscreenRect().toFloatRect(),
+            .source.solidColor = half3(1.0f, 0.0f, 0.0f),
+            .alpha = 1.f,
+    };
+
+    std::vector<renderengine::LayerSettings> layersFirst{layerOne};
+    invokeDraw(settings, layersFirst);
+    expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+
+    // re-draw with no layers; the output should be cleared to transparent black
+    std::vector<renderengine::LayerSettings> layersSecond;
+    invokeDraw(settings, layersSecond);
+    expectBufferColor(fullscreenRect(), 0, 0, 0, 0);
+}
+
 TEST_P(RenderEngineTest, drawLayers_withoutBuffers_withColorTransform) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     renderengine::DisplaySettings settings;
@@ -1640,6 +1633,9 @@
 }
 
 TEST_P(RenderEngineTest, drawLayers_nullOutputBuffer) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     renderengine::DisplaySettings settings;
@@ -1649,102 +1645,99 @@
     layer.geometry.boundaries = fullscreenRect().toFloatRect();
     BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
     layers.push_back(layer);
-    std::future<renderengine::RenderEngineResult> result =
+    ftl::Future<FenceResult> future =
             mRE->drawLayers(settings, layers, nullptr, true, base::unique_fd());
 
-    ASSERT_TRUE(result.valid());
-    auto [status, fence] = result.get();
-    ASSERT_EQ(BAD_VALUE, status);
-    ASSERT_FALSE(fence.ok());
-}
-
-TEST_P(RenderEngineTest, drawLayers_doesNotCacheFramebuffer) {
-    const auto& renderEngineFactory = GetParam();
-
-    if (renderEngineFactory->type() != renderengine::RenderEngine::RenderEngineType::GLES) {
-        // GLES-specific test
-        return;
-    }
-
-    initializeRenderEngine();
-
-    renderengine::DisplaySettings settings;
-    settings.outputDataspace = ui::Dataspace::V0_SRGB_LINEAR;
-    settings.physicalDisplay = fullscreenRect();
-    settings.clip = fullscreenRect();
-
-    std::vector<renderengine::LayerSettings> layers;
-    renderengine::LayerSettings layer;
-    layer.geometry.boundaries = fullscreenRect().toFloatRect();
-    BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
-    layer.alpha = 1.0;
-    layers.push_back(layer);
-
-    std::future<renderengine::RenderEngineResult> result =
-            mRE->drawLayers(settings, layers, mBuffer, false, base::unique_fd());
-    ASSERT_TRUE(result.valid());
-    auto [status, fence] = result.get();
-
-    ASSERT_EQ(NO_ERROR, status);
-    if (fence.ok()) {
-        sync_wait(fence.get(), -1);
-    }
-
-    ASSERT_FALSE(mGLESRE->isFramebufferImageCachedForTesting(mBuffer->getBuffer()->getId()));
-    expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+    ASSERT_TRUE(future.valid());
+    auto result = future.get();
+    ASSERT_FALSE(result.ok());
+    ASSERT_EQ(BAD_VALUE, result.error());
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillRedBuffer_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillRedBuffer<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillGreenBuffer_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillGreenBuffer<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBlueBuffer_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBlueBuffer<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillRedTransparentBuffer_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillRedTransparentBuffer<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferPhysicalOffset_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferPhysicalOffset<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate0_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate0<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate90_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate90<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate180_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate180<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate270_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate270<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferLayerTransform_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferLayerTransform<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransform_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferColorTransform<ColorSourceVariant>();
 }
@@ -1752,12 +1745,8 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransform_sourceDataspace) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->useColorManagement()) {
-        return;
-    }
-    // skip for GLESRenderEngine
-    if (renderEngineFactory->type() != renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+        GTEST_SKIP();
     }
 
     initializeRenderEngine();
@@ -1767,12 +1756,8 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransform_outputDataspace) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->useColorManagement()) {
-        return;
-    }
-    // skip for GLESRenderEngine
-    if (renderEngineFactory->type() != renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+        GTEST_SKIP();
     }
 
     initializeRenderEngine();
@@ -1780,81 +1765,129 @@
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferRoundedCorners_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferWithRoundedCorners<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformZeroLayerAlpha_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferColorTransformZeroLayerAlpha<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferAndBlurBackground_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferAndBlurBackground<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillSmallLayerAndBlurBackground_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillSmallLayerAndBlurBackground<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_overlayCorners_colorSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     overlayCorners<ColorSourceVariant>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillRedBuffer_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillRedBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillGreenBuffer_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillGreenBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBlueBuffer_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBlueBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillRedTransparentBuffer_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillRedTransparentBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferPhysicalOffset_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferPhysicalOffset<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate0_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate0<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate90_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate90<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate180_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate180<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate270_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate270<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferLayerTransform_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferLayerTransform<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransform_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferColorTransform<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
@@ -1862,12 +1895,8 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformAndSourceDataspace_opaqueBufferSource) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->useColorManagement()) {
-        return;
-    }
-    // skip for GLESRenderEngine
-    if (renderEngineFactory->type() != renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+        GTEST_SKIP();
     }
 
     initializeRenderEngine();
@@ -1877,12 +1906,8 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformAndOutputDataspace_opaqueBufferSource) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->useColorManagement()) {
-        return;
-    }
-    // skip for GLESRenderEngine
-    if (renderEngineFactory->type() != renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+        GTEST_SKIP();
     }
 
     initializeRenderEngine();
@@ -1890,81 +1915,129 @@
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferRoundedCorners_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferWithRoundedCorners<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformZeroLayerAlpha_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferColorTransformZeroLayerAlpha<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferAndBlurBackground_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferAndBlurBackground<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillSmallLayerAndBlurBackground_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillSmallLayerAndBlurBackground<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_overlayCorners_opaqueBufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     overlayCorners<BufferSourceVariant<ForceOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillRedBuffer_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillRedBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillGreenBuffer_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillGreenBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBlueBuffer_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBlueBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillRedTransparentBuffer_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillRedTransparentBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferPhysicalOffset_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferPhysicalOffset<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate0_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate0<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate90_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate90<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate180_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate180<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferCheckersRotate270_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferCheckersRotate270<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferLayerTransform_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferLayerTransform<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransform_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferColorTransform<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
@@ -1972,12 +2045,8 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformAndSourceDataspace_bufferSource) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->useColorManagement()) {
-        return;
-    }
-    // skip for GLESRenderEngine
-    if (renderEngineFactory->type() != renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+        GTEST_SKIP();
     }
 
     initializeRenderEngine();
@@ -1987,12 +2056,8 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformAndOutputDataspace_bufferSource) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->useColorManagement()) {
-        return;
-    }
-    // skip for GLESRenderEngine
-    if (renderEngineFactory->type() != renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+        GTEST_SKIP();
     }
 
     initializeRenderEngine();
@@ -2000,46 +2065,73 @@
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferRoundedCorners_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferWithRoundedCorners<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformZeroLayerAlpha_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferColorTransformZeroLayerAlpha<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferAndBlurBackground_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferAndBlurBackground<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillSmallLayerAndBlurBackground_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillSmallLayerAndBlurBackground<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_overlayCorners_bufferSource) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     overlayCorners<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBufferTextureTransform) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferTextureTransform();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBuffer_premultipliesAlpha) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferWithPremultiplyAlpha();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillBuffer_withoutPremultiplyingAlpha) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
     fillBufferWithoutPremultiplyAlpha();
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillShadow_castsWithoutCasterLayer) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const ubyte4 backgroundColor(static_cast<uint8_t>(255), static_cast<uint8_t>(255),
@@ -2056,6 +2148,9 @@
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillShadow_casterLayerMinSize) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const ubyte4 casterColor(static_cast<uint8_t>(255), static_cast<uint8_t>(0),
@@ -2077,6 +2172,9 @@
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillShadow_casterColorLayer) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const ubyte4 casterColor(static_cast<uint8_t>(255), static_cast<uint8_t>(0),
@@ -2099,6 +2197,9 @@
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillShadow_casterOpaqueBufferLayer) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const ubyte4 casterColor(static_cast<uint8_t>(255), static_cast<uint8_t>(0),
@@ -2122,6 +2223,9 @@
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillShadow_casterWithRoundedCorner) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const ubyte4 casterColor(static_cast<uint8_t>(255), static_cast<uint8_t>(0),
@@ -2146,6 +2250,9 @@
 }
 
 TEST_P(RenderEngineTest, drawLayers_fillShadow_translucentCasterWithAlpha) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const ubyte4 casterColor(255, 0, 0, 255);
@@ -2173,6 +2280,9 @@
 }
 
 TEST_P(RenderEngineTest, cleanupPostRender_cleansUpOnce) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     renderengine::DisplaySettings settings;
@@ -2187,28 +2297,36 @@
     layer.alpha = 1.0;
     layers.push_back(layer);
 
-    std::future<renderengine::RenderEngineResult> resultOne =
+    ftl::Future<FenceResult> futureOne =
             mRE->drawLayers(settings, layers, mBuffer, true, base::unique_fd());
-    ASSERT_TRUE(resultOne.valid());
-    auto [statusOne, fenceOne] = resultOne.get();
-    ASSERT_EQ(NO_ERROR, statusOne);
+    ASSERT_TRUE(futureOne.valid());
+    auto resultOne = futureOne.get();
+    ASSERT_TRUE(resultOne.ok());
+    auto fenceOne = resultOne.value();
 
-    std::future<renderengine::RenderEngineResult> resultTwo =
-            mRE->drawLayers(settings, layers, mBuffer, true, std::move(fenceOne));
-    ASSERT_TRUE(resultTwo.valid());
-    auto [statusTwo, fenceTwo] = resultTwo.get();
-    ASSERT_EQ(NO_ERROR, statusTwo);
-    if (fenceTwo.ok()) {
-        sync_wait(fenceTwo.get(), -1);
-    }
+    ftl::Future<FenceResult> futureTwo =
+            mRE->drawLayers(settings, layers, mBuffer, true, base::unique_fd(fenceOne->dup()));
+    ASSERT_TRUE(futureTwo.valid());
+    auto resultTwo = futureTwo.get();
+    ASSERT_TRUE(resultTwo.ok());
+    auto fenceTwo = resultTwo.value();
+    fenceTwo->waitForever(LOG_TAG);
 
     // Only cleanup the first time.
-    EXPECT_FALSE(mRE->canSkipPostRenderCleanup());
-    mRE->cleanupPostRender();
-    EXPECT_TRUE(mRE->canSkipPostRenderCleanup());
+    if (mRE->canSkipPostRenderCleanup()) {
+        // Skia's Vk backend may keep the texture alive beyond drawLayersInternal, so
+        // it never gets added to the cleanup list. In those cases, we can skip.
+        EXPECT_TRUE(GetParam()->type() == renderengine::RenderEngine::RenderEngineType::SKIA_VK);
+    } else {
+        mRE->cleanupPostRender();
+        EXPECT_TRUE(mRE->canSkipPostRenderCleanup());
+    }
 }
 
 TEST_P(RenderEngineTest, testRoundedCornersCrop) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     renderengine::DisplaySettings settings;
@@ -2259,6 +2377,9 @@
 }
 
 TEST_P(RenderEngineTest, testRoundedCornersParentCrop) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     renderengine::DisplaySettings settings;
@@ -2304,6 +2425,9 @@
 }
 
 TEST_P(RenderEngineTest, testRoundedCornersParentCropSmallBounds) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     renderengine::DisplaySettings settings;
@@ -2381,6 +2505,9 @@
 }
 
 TEST_P(RenderEngineTest, testClear) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const auto rect = fullscreenRect();
@@ -2410,6 +2537,9 @@
 }
 
 TEST_P(RenderEngineTest, testDisableBlendingBuffer) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const auto rect = Rect(0, 0, 1, 1);
@@ -2457,11 +2587,59 @@
     expectBufferColor(rect, 0, 128, 0, 128);
 }
 
-TEST_P(RenderEngineTest, testDimming) {
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
+TEST_P(RenderEngineTest, testBorder) {
+    if (GetParam()->type() != renderengine::RenderEngine::RenderEngineType::SKIA_GL) {
         GTEST_SKIP();
     }
 
+    if (!GetParam()->useColorManagement()) {
+        GTEST_SKIP();
+    }
+
+    initializeRenderEngine();
+
+    const ui::Dataspace dataspace = ui::Dataspace::V0_SRGB;
+
+    const auto displayRect = Rect(1080, 2280);
+    renderengine::DisplaySettings display{
+            .physicalDisplay = displayRect,
+            .clip = displayRect,
+            .outputDataspace = dataspace,
+    };
+    display.borderInfoList.clear();
+    renderengine::BorderRenderInfo info;
+    info.combinedRegion = Region(Rect(99, 99, 199, 199));
+    info.width = 20.0f;
+    info.color = half4{1.0f, 128.0f / 255.0f, 0.0f, 1.0f};
+    display.borderInfoList.emplace_back(info);
+
+    const auto greenBuffer = allocateAndFillSourceBuffer(1, 1, ubyte4(0, 255, 0, 255));
+    const renderengine::LayerSettings greenLayer{
+            .geometry.boundaries = FloatRect(0.f, 0.f, 1.f, 1.f),
+            .source =
+                    renderengine::PixelSource{
+                            .buffer =
+                                    renderengine::Buffer{
+                                            .buffer = greenBuffer,
+                                            .usePremultipliedAlpha = true,
+                                    },
+                    },
+            .alpha = 1.0f,
+            .sourceDataspace = dataspace,
+            .whitePointNits = 200.f,
+    };
+
+    std::vector<renderengine::LayerSettings> layers;
+    layers.emplace_back(greenLayer);
+    invokeDraw(display, layers);
+
+    expectBufferColor(Rect(99, 99, 101, 101), 255, 128, 0, 255, 1);
+}
+
+TEST_P(RenderEngineTest, testDimming) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const ui::Dataspace dataspace = ui::Dataspace::V0_SRGB_LINEAR;
@@ -2534,7 +2712,7 @@
 }
 
 TEST_P(RenderEngineTest, testDimming_inGammaSpace) {
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
+    if (!GetParam()->typeSupported()) {
         GTEST_SKIP();
     }
     initializeRenderEngine();
@@ -2612,7 +2790,7 @@
 }
 
 TEST_P(RenderEngineTest, testDimming_inGammaSpace_withDisplayColorTransform) {
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
+    if (!GetParam()->typeSupported()) {
         GTEST_SKIP();
     }
     initializeRenderEngine();
@@ -2675,7 +2853,7 @@
 }
 
 TEST_P(RenderEngineTest, testDimming_inGammaSpace_withDisplayColorTransform_deviceHandles) {
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
+    if (!GetParam()->typeSupported()) {
         GTEST_SKIP();
     }
     initializeRenderEngine();
@@ -2739,10 +2917,10 @@
 }
 
 TEST_P(RenderEngineTest, testDimming_withoutTargetLuminance) {
-    initializeRenderEngine();
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
     }
+    initializeRenderEngine();
 
     const auto displayRect = Rect(2, 1);
     const renderengine::DisplaySettings display{
@@ -2793,6 +2971,9 @@
 }
 
 TEST_P(RenderEngineTest, test_isOpaque) {
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
+    }
     initializeRenderEngine();
 
     const auto rect = Rect(0, 0, 1, 1);
@@ -2844,11 +3025,7 @@
 }
 
 TEST_P(RenderEngineTest, test_tonemapPQMatches) {
-    if (!GetParam()->useColorManagement()) {
-        GTEST_SKIP();
-    }
-
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
+    if (!GetParam()->typeSupported() || !GetParam()->useColorManagement()) {
         GTEST_SKIP();
     }
 
@@ -2865,11 +3042,7 @@
 }
 
 TEST_P(RenderEngineTest, test_tonemapHLGMatches) {
-    if (!GetParam()->useColorManagement()) {
-        GTEST_SKIP();
-    }
-
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
+    if (!GetParam()->typeSupported() || !GetParam()->useColorManagement()) {
         GTEST_SKIP();
     }
 
@@ -2886,10 +3059,9 @@
 }
 
 TEST_P(RenderEngineTest, r8_behaves_as_mask) {
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
     }
-
     initializeRenderEngine();
 
     const auto r8Buffer = allocateR8Buffer(2, 1);
@@ -2947,10 +3119,9 @@
 }
 
 TEST_P(RenderEngineTest, r8_respects_color_transform) {
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
     }
-
     initializeRenderEngine();
 
     const auto r8Buffer = allocateR8Buffer(2, 1);
@@ -3013,10 +3184,9 @@
 }
 
 TEST_P(RenderEngineTest, r8_respects_color_transform_when_device_handles) {
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
-        return;
+    if (!GetParam()->typeSupported()) {
+        GTEST_SKIP();
     }
-
     initializeRenderEngine();
 
     const auto r8Buffer = allocateR8Buffer(2, 1);
@@ -3082,10 +3252,9 @@
 }
 
 TEST_P(RenderEngineTest, primeShaderCache) {
-    if (GetParam()->type() == renderengine::RenderEngine::RenderEngineType::GLES) {
+    if (!GetParam()->typeSupported()) {
         GTEST_SKIP();
     }
-
     initializeRenderEngine();
 
     auto fut = mRE->primeCache();
diff --git a/libs/renderengine/tests/RenderEngineThreadedTest.cpp b/libs/renderengine/tests/RenderEngineThreadedTest.cpp
index 9685189..fe3a16d 100644
--- a/libs/renderengine/tests/RenderEngineThreadedTest.cpp
+++ b/libs/renderengine/tests/RenderEngineThreadedTest.cpp
@@ -17,8 +17,10 @@
 #include <cutils/properties.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
+#include <hardware/gralloc.h>
 #include <renderengine/impl/ExternalTexture.h>
 #include <renderengine/mock/RenderEngine.h>
+#include <ui/PixelFormat.h>
 #include "../threaded/RenderEngineThreaded.h"
 
 namespace android {
@@ -95,18 +97,6 @@
     ASSERT_EQ(dims, result);
 }
 
-TEST_F(RenderEngineThreadedTest, isProtected_returnsFalse) {
-    EXPECT_CALL(*mRenderEngine, isProtected()).WillOnce(Return(false));
-    status_t result = mThreadedRE->isProtected();
-    ASSERT_EQ(false, result);
-}
-
-TEST_F(RenderEngineThreadedTest, isProtected_returnsTrue) {
-    EXPECT_CALL(*mRenderEngine, isProtected()).WillOnce(Return(true));
-    size_t result = mThreadedRE->isProtected();
-    ASSERT_EQ(true, result);
-}
-
 TEST_F(RenderEngineThreadedTest, supportsProtectedContent_returnsFalse) {
     EXPECT_CALL(*mRenderEngine, supportsProtectedContent()).WillOnce(Return(false));
     status_t result = mThreadedRE->supportsProtectedContent();
@@ -119,28 +109,6 @@
     ASSERT_EQ(true, result);
 }
 
-TEST_F(RenderEngineThreadedTest, useProtectedContext) {
-    EXPECT_CALL(*mRenderEngine, useProtectedContext(true));
-    auto& ipExpect = EXPECT_CALL(*mRenderEngine, isProtected()).WillOnce(Return(false));
-    EXPECT_CALL(*mRenderEngine, supportsProtectedContent()).WillOnce(Return(true));
-    EXPECT_CALL(*mRenderEngine, isProtected()).After(ipExpect).WillOnce(Return(true));
-
-    mThreadedRE->useProtectedContext(true);
-    ASSERT_EQ(true, mThreadedRE->isProtected());
-
-    // call ANY synchronous function to ensure that useProtectedContext has completed.
-    mThreadedRE->getContextPriority();
-    ASSERT_EQ(true, mThreadedRE->isProtected());
-}
-
-TEST_F(RenderEngineThreadedTest, useProtectedContext_quickReject) {
-    EXPECT_CALL(*mRenderEngine, useProtectedContext(false)).Times(0);
-    EXPECT_CALL(*mRenderEngine, isProtected()).WillOnce(Return(false));
-    mThreadedRE->useProtectedContext(false);
-    // call ANY synchronous function to ensure that useProtectedContext has completed.
-    mThreadedRE->getContextPriority();
-}
-
 TEST_F(RenderEngineThreadedTest, PostRenderCleanup_skipped) {
     EXPECT_CALL(*mRenderEngine, canSkipPostRenderCleanup()).WillOnce(Return(true));
     EXPECT_CALL(*mRenderEngine, cleanupPostRender()).Times(0);
@@ -176,27 +144,86 @@
     std::vector<renderengine::LayerSettings> layers;
     std::shared_ptr<renderengine::ExternalTexture> buffer = std::make_shared<
             renderengine::impl::
-                    ExternalTexture>(new GraphicBuffer(), *mRenderEngine,
+                    ExternalTexture>(sp<GraphicBuffer>::make(), *mRenderEngine,
                                      renderengine::impl::ExternalTexture::Usage::READABLE |
                                              renderengine::impl::ExternalTexture::Usage::WRITEABLE);
 
     base::unique_fd bufferFence;
 
+    EXPECT_CALL(*mRenderEngine, useProtectedContext(false));
     EXPECT_CALL(*mRenderEngine, drawLayersInternal)
-            .WillOnce([&](const std::shared_ptr<std::promise<renderengine::RenderEngineResult>>&&
-                                  resultPromise,
+            .WillOnce([&](const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
                           const renderengine::DisplaySettings&,
                           const std::vector<renderengine::LayerSettings>&,
                           const std::shared_ptr<renderengine::ExternalTexture>&, const bool,
-                          base::unique_fd&&) -> void {
-                resultPromise->set_value({NO_ERROR, base::unique_fd()});
-            });
+                          base::unique_fd&&) { resultPromise->set_value(Fence::NO_FENCE); });
 
-    std::future<renderengine::RenderEngineResult> result =
+    ftl::Future<FenceResult> future =
             mThreadedRE->drawLayers(settings, layers, buffer, false, std::move(bufferFence));
-    ASSERT_TRUE(result.valid());
-    auto [status, _] = result.get();
-    ASSERT_EQ(NO_ERROR, status);
+    ASSERT_TRUE(future.valid());
+    auto result = future.get();
+    ASSERT_TRUE(result.ok());
+}
+
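+// Drawing a layer whose source buffer is allocated with GRALLOC_USAGE_PROTECTED should switch
+// the wrapped engine into a protected context before the draw is forwarded.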
+TEST_F(RenderEngineThreadedTest, drawLayers_protectedLayer) {
+    renderengine::DisplaySettings settings;
+    auto layerBuffer = sp<GraphicBuffer>::make();
+    layerBuffer->usage |= GRALLOC_USAGE_PROTECTED;
+    renderengine::LayerSettings layer;
+    layer.source.buffer.buffer = std::make_shared<
+            renderengine::impl::ExternalTexture>(std::move(layerBuffer), *mRenderEngine,
+                                                 renderengine::impl::ExternalTexture::Usage::
+                                                         READABLE);
+    std::vector<renderengine::LayerSettings> layers = {std::move(layer)};
+    std::shared_ptr<renderengine::ExternalTexture> buffer = std::make_shared<
+            renderengine::impl::
+                    ExternalTexture>(sp<GraphicBuffer>::make(), *mRenderEngine,
+                                     renderengine::impl::ExternalTexture::Usage::READABLE |
+                                             renderengine::impl::ExternalTexture::Usage::WRITEABLE);
+
+    base::unique_fd bufferFence;
+
+    EXPECT_CALL(*mRenderEngine, useProtectedContext(true));
+    EXPECT_CALL(*mRenderEngine, drawLayersInternal)
+            .WillOnce([&](const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
+                          const renderengine::DisplaySettings&,
+                          const std::vector<renderengine::LayerSettings>&,
+                          const std::shared_ptr<renderengine::ExternalTexture>&, const bool,
+                          base::unique_fd&&) { resultPromise->set_value(Fence::NO_FENCE); });
+
+    ftl::Future<FenceResult> future =
+            mThreadedRE->drawLayers(settings, layers, buffer, false, std::move(bufferFence));
+    ASSERT_TRUE(future.valid());
+    auto result = future.get();
+    ASSERT_TRUE(result.ok());
+}
+
+TEST_F(RenderEngineThreadedTest, drawLayers_protectedOutputBuffer) {
+    renderengine::DisplaySettings settings;
+    std::vector<renderengine::LayerSettings> layers;
+    auto graphicBuffer = sp<GraphicBuffer>::make();
+    graphicBuffer->usage |= GRALLOC_USAGE_PROTECTED;
+    std::shared_ptr<renderengine::ExternalTexture> buffer = std::make_shared<
+            renderengine::impl::
+                    ExternalTexture>(std::move(graphicBuffer), *mRenderEngine,
+                                     renderengine::impl::ExternalTexture::Usage::READABLE |
+                                             renderengine::impl::ExternalTexture::Usage::WRITEABLE);
+
+    base::unique_fd bufferFence;
+
+    EXPECT_CALL(*mRenderEngine, useProtectedContext(true));
+    EXPECT_CALL(*mRenderEngine, drawLayersInternal)
+            .WillOnce([&](const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
+                          const renderengine::DisplaySettings&,
+                          const std::vector<renderengine::LayerSettings>&,
+                          const std::shared_ptr<renderengine::ExternalTexture>&, const bool,
+                          base::unique_fd&&) { resultPromise->set_value(Fence::NO_FENCE); });
+
+    ftl::Future<FenceResult> future =
+            mThreadedRE->drawLayers(settings, layers, buffer, false, std::move(bufferFence));
+    ASSERT_TRUE(future.valid());
+    auto result = future.get();
+    ASSERT_TRUE(result.ok());
 }
 
 } // namespace android
diff --git a/libs/renderengine/threaded/RenderEngineThreaded.cpp b/libs/renderengine/threaded/RenderEngineThreaded.cpp
index 203bb54..6a1561a 100644
--- a/libs/renderengine/threaded/RenderEngineThreaded.cpp
+++ b/libs/renderengine/threaded/RenderEngineThreaded.cpp
@@ -90,7 +90,6 @@
     }
 
     mRenderEngine = factory();
-    mIsProtected = mRenderEngine->isProtected();
 
     pthread_setname_np(pthread_self(), mThreadName);
 
@@ -231,16 +230,17 @@
     mCondition.notify_one();
 }
 
-void RenderEngineThreaded::unmapExternalTextureBuffer(const sp<GraphicBuffer>& buffer) {
+void RenderEngineThreaded::unmapExternalTextureBuffer(sp<GraphicBuffer>&& buffer) {
     ATRACE_CALL();
     // This function is designed so it can run asynchronously, so we do not need to wait
     // for the futures.
     {
         std::lock_guard lock(mThreadMutex);
-        mFunctionCalls.push([=](renderengine::RenderEngine& instance) {
-            ATRACE_NAME("REThreaded::unmapExternalTextureBuffer");
-            instance.unmapExternalTextureBuffer(buffer);
-        });
+        mFunctionCalls.push(
+                [=, buffer = std::move(buffer)](renderengine::RenderEngine& instance) mutable {
+                    ATRACE_NAME("REThreaded::unmapExternalTextureBuffer");
+                    instance.unmapExternalTextureBuffer(std::move(buffer));
+                });
     }
     mCondition.notify_one();
 }
@@ -255,41 +255,11 @@
     return mRenderEngine->getMaxViewportDims();
 }
 
-bool RenderEngineThreaded::isProtected() const {
-    waitUntilInitialized();
-    std::lock_guard lock(mThreadMutex);
-    return mIsProtected;
-}
-
 bool RenderEngineThreaded::supportsProtectedContent() const {
     waitUntilInitialized();
     return mRenderEngine->supportsProtectedContent();
 }
 
-void RenderEngineThreaded::useProtectedContext(bool useProtectedContext) {
-    if (isProtected() == useProtectedContext ||
-        (useProtectedContext && !supportsProtectedContent())) {
-        return;
-    }
-
-    {
-        std::lock_guard lock(mThreadMutex);
-        mFunctionCalls.push([useProtectedContext, this](renderengine::RenderEngine& instance) {
-            ATRACE_NAME("REThreaded::useProtectedContext");
-            instance.useProtectedContext(useProtectedContext);
-            if (instance.isProtected() != useProtectedContext) {
-                ALOGE("Failed to switch RenderEngine context.");
-                // reset the cached mIsProtected value to a good state, but this does not
-                // prevent other callers of this method and isProtected from reading the
-                // invalid cached value.
-                mIsProtected = instance.isProtected();
-            }
-        });
-        mIsProtected = useProtectedContext;
-    }
-    mCondition.notify_one();
-}
-
 void RenderEngineThreaded::cleanupPostRender() {
     if (canSkipPostRenderCleanup()) {
         return;
@@ -313,27 +283,28 @@
 }
 
 void RenderEngineThreaded::drawLayersInternal(
-        const std::shared_ptr<std::promise<RenderEngineResult>>&& resultPromise,
+        const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
         const DisplaySettings& display, const std::vector<LayerSettings>& layers,
         const std::shared_ptr<ExternalTexture>& buffer, const bool useFramebufferCache,
         base::unique_fd&& bufferFence) {
-    resultPromise->set_value({NO_ERROR, base::unique_fd()});
+    resultPromise->set_value(Fence::NO_FENCE);
     return;
 }
 
-std::future<RenderEngineResult> RenderEngineThreaded::drawLayers(
+ftl::Future<FenceResult> RenderEngineThreaded::drawLayers(
         const DisplaySettings& display, const std::vector<LayerSettings>& layers,
         const std::shared_ptr<ExternalTexture>& buffer, const bool useFramebufferCache,
         base::unique_fd&& bufferFence) {
     ATRACE_CALL();
-    const auto resultPromise = std::make_shared<std::promise<RenderEngineResult>>();
-    std::future<RenderEngineResult> resultFuture = resultPromise->get_future();
+    const auto resultPromise = std::make_shared<std::promise<FenceResult>>();
+    std::future<FenceResult> resultFuture = resultPromise->get_future();
     int fd = bufferFence.release();
     {
         std::lock_guard lock(mThreadMutex);
         mFunctionCalls.push([resultPromise, display, layers, buffer, useFramebufferCache,
                              fd](renderengine::RenderEngine& instance) {
             ATRACE_NAME("REThreaded::drawLayers");
+            instance.updateProtectedContext(layers, buffer);
             instance.drawLayersInternal(std::move(resultPromise), display, layers, buffer,
                                         useFramebufferCache, base::unique_fd(fd));
         });
diff --git a/libs/renderengine/threaded/RenderEngineThreaded.h b/libs/renderengine/threaded/RenderEngineThreaded.h
index 1340902..6eb108e 100644
--- a/libs/renderengine/threaded/RenderEngineThreaded.h
+++ b/libs/renderengine/threaded/RenderEngineThreaded.h
@@ -51,16 +51,14 @@
     size_t getMaxTextureSize() const override;
     size_t getMaxViewportDims() const override;
 
-    bool isProtected() const override;
     bool supportsProtectedContent() const override;
-    void useProtectedContext(bool useProtectedContext) override;
     void cleanupPostRender() override;
 
-    std::future<RenderEngineResult> drawLayers(const DisplaySettings& display,
-                                               const std::vector<LayerSettings>& layers,
-                                               const std::shared_ptr<ExternalTexture>& buffer,
-                                               const bool useFramebufferCache,
-                                               base::unique_fd&& bufferFence) override;
+    ftl::Future<FenceResult> drawLayers(const DisplaySettings& display,
+                                        const std::vector<LayerSettings>& layers,
+                                        const std::shared_ptr<ExternalTexture>& buffer,
+                                        const bool useFramebufferCache,
+                                        base::unique_fd&& bufferFence) override;
 
     void cleanFramebufferCache() override;
     int getContextPriority() override;
@@ -71,9 +69,9 @@
 
 protected:
     void mapExternalTextureBuffer(const sp<GraphicBuffer>& buffer, bool isRenderable) override;
-    void unmapExternalTextureBuffer(const sp<GraphicBuffer>& buffer) override;
+    void unmapExternalTextureBuffer(sp<GraphicBuffer>&& buffer) override;
     bool canSkipPostRenderCleanup() const override;
-    void drawLayersInternal(const std::shared_ptr<std::promise<RenderEngineResult>>&& resultPromise,
+    void drawLayersInternal(const std::shared_ptr<std::promise<FenceResult>>&& resultPromise,
                             const DisplaySettings& display,
                             const std::vector<LayerSettings>& layers,
                             const std::shared_ptr<ExternalTexture>& buffer,
@@ -84,6 +82,9 @@
     void waitUntilInitialized() const;
     static status_t setSchedFifo(bool enabled);
 
+    // No-op. This method is only called on leaf implementations of RenderEngine.
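+    // The threaded wrapper instead selects protected mode per frame: drawLayers() calls
+    // RenderEngine::updateProtectedContext() on the render thread before drawing.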
+    void useProtectedContext(bool) override {}
+
     /* ------------------------------------------------------------------------
      * Threading
      */
@@ -107,7 +108,6 @@
      * Render Engine
      */
     std::unique_ptr<renderengine::RenderEngine> mRenderEngine;
-    std::atomic<bool> mIsProtected = false;
 };
 } // namespace threaded
 } // namespace renderengine
diff --git a/libs/sensor/ISensorServer.cpp b/libs/sensor/ISensorServer.cpp
index 93c95b9..019d6cb 100644
--- a/libs/sensor/ISensorServer.cpp
+++ b/libs/sensor/ISensorServer.cpp
@@ -22,12 +22,12 @@
 #include <cutils/native_handle.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
-#include <utils/Vector.h>
 #include <utils/Timers.h>
+#include <utils/Vector.h>
 
-#include <binder/Parcel.h>
 #include <binder/IInterface.h>
 #include <binder/IResultReceiver.h>
+#include <binder/Parcel.h>
 
 #include <sensor/Sensor.h>
 #include <sensor/ISensorEventConnection.h>
@@ -42,6 +42,7 @@
     GET_DYNAMIC_SENSOR_LIST,
     CREATE_SENSOR_DIRECT_CONNECTION,
     SET_OPERATION_PARAMETER,
+    GET_RUNTIME_SENSOR_LIST,
 };
 
 class BpSensorServer : public BpInterface<ISensorServer>
@@ -98,6 +99,25 @@
         return v;
     }
 
+    virtual Vector<Sensor> getRuntimeSensorList(const String16& opPackageName, int deviceId)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ISensorServer::getInterfaceDescriptor());
+        data.writeString16(opPackageName);
+        data.writeInt32(deviceId);
+        remote()->transact(GET_RUNTIME_SENSOR_LIST, data, &reply);
+        Sensor s;
+        Vector<Sensor> v;
+        uint32_t n = reply.readUint32();
+        v.setCapacity(n);
+        while (n) {
+            n--;
+            reply.read(s);
+            v.add(s);
+        }
+        return v;
+    }
+
     virtual sp<ISensorEventConnection> createSensorEventConnection(const String8& packageName,
              int mode, const String16& opPackageName, const String16& attributionTag)
     {
@@ -119,10 +139,12 @@
     }
 
     virtual sp<ISensorEventConnection> createSensorDirectConnection(const String16& opPackageName,
-            uint32_t size, int32_t type, int32_t format, const native_handle_t *resource) {
+            int deviceId, uint32_t size, int32_t type, int32_t format,
+            const native_handle_t *resource) {
         Parcel data, reply;
         data.writeInterfaceToken(ISensorServer::getInterfaceDescriptor());
         data.writeString16(opPackageName);
+        data.writeInt32(deviceId);
         data.writeUint32(size);
         data.writeInt32(type);
         data.writeInt32(format);
@@ -202,9 +224,22 @@
             }
             return NO_ERROR;
         }
+        case GET_RUNTIME_SENSOR_LIST: {
+            CHECK_INTERFACE(ISensorServer, data, reply);
+            const String16& opPackageName = data.readString16();
+            const int deviceId = data.readInt32();
+            Vector<Sensor> v(getRuntimeSensorList(opPackageName, deviceId));
+            size_t n = v.size();
+            reply->writeUint32(static_cast<uint32_t>(n));
+            for (size_t i = 0; i < n; i++) {
+                reply->write(v[i]);
+            }
+            return NO_ERROR;
+        }
         case CREATE_SENSOR_DIRECT_CONNECTION: {
             CHECK_INTERFACE(ISensorServer, data, reply);
             const String16& opPackageName = data.readString16();
+            const int deviceId = data.readInt32();
             uint32_t size = data.readUint32();
             int32_t type = data.readInt32();
             int32_t format = data.readInt32();
@@ -213,9 +248,10 @@
             if (resource == nullptr) {
                 return BAD_VALUE;
             }
-            sp<ISensorEventConnection> ch =
-                    createSensorDirectConnection(opPackageName, size, type, format, resource);
-            native_handle_close(resource);
+            native_handle_set_fdsan_tag(resource);
+            sp<ISensorEventConnection> ch = createSensorDirectConnection(
+                    opPackageName, deviceId, size, type, format, resource);
+            native_handle_close_with_tag(resource);
             native_handle_delete(resource);
             reply->writeStrongBinder(IInterface::asBinder(ch));
             return NO_ERROR;
diff --git a/libs/sensor/Sensor.cpp b/libs/sensor/Sensor.cpp
index f76d5cf..9127b37 100644
--- a/libs/sensor/Sensor.cpp
+++ b/libs/sensor/Sensor.cpp
@@ -264,10 +264,6 @@
         mStringType = SENSOR_STRING_TYPE_HEART_BEAT;
         mFlags |= SENSOR_FLAG_SPECIAL_REPORTING_MODE;
         break;
-
-    // TODO:  Placeholder for LLOB sensor type
-
-
     case SENSOR_TYPE_ACCELEROMETER_UNCALIBRATED:
         mStringType = SENSOR_STRING_TYPE_ACCELEROMETER_UNCALIBRATED;
         mFlags |= SENSOR_FLAG_CONTINUOUS_MODE;
diff --git a/libs/sensor/SensorManager.cpp b/libs/sensor/SensorManager.cpp
index 9f814f1..980f8d1 100644
--- a/libs/sensor/SensorManager.cpp
+++ b/libs/sensor/SensorManager.cpp
@@ -213,6 +213,19 @@
     return static_cast<ssize_t>(count);
 }
 
+ssize_t SensorManager::getRuntimeSensorList(int deviceId, Vector<Sensor>& runtimeSensors) {
+    Mutex::Autolock _l(mLock);
+    status_t err = assertStateLocked();
+    if (err < 0) {
+        return static_cast<ssize_t>(err);
+    }
+
+    runtimeSensors = mSensorServer->getRuntimeSensorList(mOpPackageName, deviceId);
+    size_t count = runtimeSensors.size();
+
+    return static_cast<ssize_t>(count);
+}
+
 ssize_t SensorManager::getDynamicSensorList(Sensor const* const** list) {
     Mutex::Autolock _l(mLock);
     status_t err = assertStateLocked();
@@ -299,6 +312,12 @@
 
 int SensorManager::createDirectChannel(
         size_t size, int channelType, const native_handle_t *resourceHandle) {
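+    // Keep the original single-argument behaviour by delegating to the deviceId-aware
+    // overload with the default device.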
+    static constexpr int DEFAULT_DEVICE_ID = 0;
+    return createDirectChannel(DEFAULT_DEVICE_ID, size, channelType, resourceHandle);
+}
+
+int SensorManager::createDirectChannel(
+        int deviceId, size_t size, int channelType, const native_handle_t *resourceHandle) {
     Mutex::Autolock _l(mLock);
     if (assertStateLocked() != NO_ERROR) {
         return NO_INIT;
@@ -311,7 +330,7 @@
     }
 
     sp<ISensorEventConnection> conn =
-              mSensorServer->createSensorDirectConnection(mOpPackageName,
+              mSensorServer->createSensorDirectConnection(mOpPackageName, deviceId,
                   static_cast<uint32_t>(size),
                   static_cast<int32_t>(channelType),
                   SENSOR_DIRECT_FMT_SENSORS_EVENT, resourceHandle);
diff --git a/libs/sensor/include/sensor/ISensorServer.h b/libs/sensor/include/sensor/ISensorServer.h
index ce5c672..5815728 100644
--- a/libs/sensor/include/sensor/ISensorServer.h
+++ b/libs/sensor/include/sensor/ISensorServer.h
@@ -43,13 +43,15 @@
 
     virtual Vector<Sensor> getSensorList(const String16& opPackageName) = 0;
     virtual Vector<Sensor> getDynamicSensorList(const String16& opPackageName) = 0;
+    virtual Vector<Sensor> getRuntimeSensorList(const String16& opPackageName, int deviceId) = 0;
 
     virtual sp<ISensorEventConnection> createSensorEventConnection(const String8& packageName,
              int mode, const String16& opPackageName, const String16& attributionTag) = 0;
     virtual int32_t isDataInjectionEnabled() = 0;
 
     virtual sp<ISensorEventConnection> createSensorDirectConnection(const String16& opPackageName,
-            uint32_t size, int32_t type, int32_t format, const native_handle_t *resource) = 0;
+            int deviceId, uint32_t size, int32_t type, int32_t format,
+            const native_handle_t *resource) = 0;
 
     virtual int setOperationParameter(
             int32_t handle, int32_t type, const Vector<float> &floats, const Vector<int32_t> &ints) = 0;
diff --git a/libs/sensor/include/sensor/SensorManager.h b/libs/sensor/include/sensor/SensorManager.h
index 7c9d604..bb44cb8 100644
--- a/libs/sensor/include/sensor/SensorManager.h
+++ b/libs/sensor/include/sensor/SensorManager.h
@@ -60,11 +60,14 @@
     ssize_t getSensorList(Sensor const* const** list);
     ssize_t getDynamicSensorList(Vector<Sensor>& list);
     ssize_t getDynamicSensorList(Sensor const* const** list);
+    ssize_t getRuntimeSensorList(int deviceId, Vector<Sensor>& list);
     Sensor const* getDefaultSensor(int type);
     sp<SensorEventQueue> createEventQueue(
         String8 packageName = String8(""), int mode = 0, String16 attributionTag = String16(""));
     bool isDataInjectionEnabled();
     int createDirectChannel(size_t size, int channelType, const native_handle_t *channelData);
+    int createDirectChannel(
+        int deviceId, size_t size, int channelType, const native_handle_t *channelData);
     void destroyDirectChannel(int channelNativeHandle);
     int configureDirectChannel(int channelNativeHandle, int sensorHandle, int rateLevel);
     int setOperationParameter(int handle, int type, const Vector<float> &floats, const Vector<int32_t> &ints);
diff --git a/libs/shaders/Android.bp b/libs/shaders/Android.bp
index 8477479..960f845 100644
--- a/libs/shaders/Android.bp
+++ b/libs/shaders/Android.bp
@@ -23,13 +23,14 @@
 
 cc_library_static {
     name: "libshaders",
-
+    defaults: [
+        "android.hardware.graphics.common-ndk_shared",
+        "android.hardware.graphics.composer3-ndk_shared",
+    ],
     export_include_dirs: ["include"],
     local_include_dirs: ["include"],
 
     shared_libs: [
-        "android.hardware.graphics.common-V4-ndk",
-        "android.hardware.graphics.composer3-V1-ndk",
         "android.hardware.graphics.common@1.2",
         "libnativewindow",
     ],
diff --git a/libs/shaders/include/shaders/shaders.h b/libs/shaders/include/shaders/shaders.h
index 2a4a370..5a4aaab 100644
--- a/libs/shaders/include/shaders/shaders.h
+++ b/libs/shaders/include/shaders/shaders.h
@@ -51,29 +51,29 @@
     // Input dataspace of the source colors.
     const ui::Dataspace inputDataspace = ui::Dataspace::SRGB;
 
-    // Working dataspace for the output surface, for conversion from linear space.
+    // Working dataspace for the output surface.
     const ui::Dataspace outputDataspace = ui::Dataspace::SRGB;
 
     // Sets whether alpha premultiplication must be undone.
     // This is required if the source colors use premultiplied alpha and are not opaque.
     const bool undoPremultipliedAlpha = false;
 
-    // "Fake" dataspace of the source colors. This is used for applying an EOTF to compute linear
-    // RGB. This is used when Skia is expected to color manage the input image based on the
-    // dataspace of the provided source image and destination surface. SkRuntimeEffects use the
-    // destination color space as the working color space. RenderEngine deliberately sets the color
-    // space for input images and destination surfaces to be the same whenever LinearEffects are
-    // expected to be used so that color-management is controlled by RenderEngine, but other users
-    // of a LinearEffect may not be able to control the color space of the images and surfaces. So
-    // fakeInputDataspace is used to essentially masquerade the input dataspace to be the output
-    // dataspace for correct conversion to linear colors.
-    ui::Dataspace fakeInputDataspace = ui::Dataspace::UNKNOWN;
+    // "Fake" dataspace of the destination colors. This is used for applying an OETF to compute
+    // non-linear RGB. This is used when Skia is expected to color manage the input image based on
+    // the dataspace of the provided source image and destination surface. Some use-cases in
+    // RenderEngine expect to apply a different OETF than what is expected by Skia. As in,
+    // RenderEngine will color manage to a custom destination and "cast" the result to Skia's
+    // working space.
+    ui::Dataspace fakeOutputDataspace = ui::Dataspace::UNKNOWN;
+
+    enum SkSLType { Shader, ColorFilter };
+    SkSLType type = Shader;
 };
 
 static inline bool operator==(const LinearEffect& lhs, const LinearEffect& rhs) {
     return lhs.inputDataspace == rhs.inputDataspace && lhs.outputDataspace == rhs.outputDataspace &&
             lhs.undoPremultipliedAlpha == rhs.undoPremultipliedAlpha &&
-            lhs.fakeInputDataspace == rhs.fakeInputDataspace;
+            lhs.fakeOutputDataspace == rhs.fakeOutputDataspace;
 }
 
 struct LinearEffectHasher {
@@ -86,7 +86,7 @@
         size_t result = std::hash<ui::Dataspace>{}(le.inputDataspace);
         result = HashCombine(result, std::hash<ui::Dataspace>{}(le.outputDataspace));
         result = HashCombine(result, std::hash<bool>{}(le.undoPremultipliedAlpha));
-        return HashCombine(result, std::hash<ui::Dataspace>{}(le.fakeInputDataspace));
+        return HashCombine(result, std::hash<ui::Dataspace>{}(le.fakeOutputDataspace));
     }
 };
 
diff --git a/libs/shaders/shaders.cpp b/libs/shaders/shaders.cpp
index f80e93f..c85517a 100644
--- a/libs/shaders/shaders.cpp
+++ b/libs/shaders/shaders.cpp
@@ -33,212 +33,111 @@
     return static_cast<aidl::android::hardware::graphics::common::Dataspace>(dataspace);
 }
 
-void generateEOTF(ui::Dataspace dataspace, std::string& shader) {
-    switch (dataspace & HAL_DATASPACE_TRANSFER_MASK) {
-        case HAL_DATASPACE_TRANSFER_ST2084:
-            shader.append(R"(
-
-                float3 EOTF(float3 color) {
-                    float m1 = (2610.0 / 4096.0) / 4.0;
-                    float m2 = (2523.0 / 4096.0) * 128.0;
-                    float c1 = (3424.0 / 4096.0);
-                    float c2 = (2413.0 / 4096.0) * 32.0;
-                    float c3 = (2392.0 / 4096.0) * 32.0;
-
-                    float3 tmp = pow(clamp(color, 0.0, 1.0), 1.0 / float3(m2));
-                    tmp = max(tmp - c1, 0.0) / (c2 - c3 * tmp);
-                    return pow(tmp, 1.0 / float3(m1));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_HLG:
-            shader.append(R"(
-                float EOTF_channel(float channel) {
-                    const float a = 0.17883277;
-                    const float b = 0.28466892;
-                    const float c = 0.55991073;
-                    return channel <= 0.5 ? channel * channel / 3.0 :
-                            (exp((channel - c) / a) + b) / 12.0;
-                }
-
-                float3 EOTF(float3 color) {
-                    return float3(EOTF_channel(color.r), EOTF_channel(color.g),
-                            EOTF_channel(color.b));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_LINEAR:
-            shader.append(R"(
-                float3 EOTF(float3 color) {
-                    return color;
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_SMPTE_170M:
-            shader.append(R"(
-
-                float EOTF_sRGB(float srgb) {
-                    return srgb <= 0.08125 ? srgb / 4.50 : pow((srgb + 0.099) / 1.099, 1 / 0.45);
-                }
-
-                float3 EOTF_sRGB(float3 srgb) {
-                    return float3(EOTF_sRGB(srgb.r), EOTF_sRGB(srgb.g), EOTF_sRGB(srgb.b));
-                }
-
-                float3 EOTF(float3 srgb) {
-                    return sign(srgb.rgb) * EOTF_sRGB(abs(srgb.rgb));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_GAMMA2_2:
-            shader.append(R"(
-
-                float EOTF_sRGB(float srgb) {
-                    return pow(srgb, 2.2);
-                }
-
-                float3 EOTF_sRGB(float3 srgb) {
-                    return float3(EOTF_sRGB(srgb.r), EOTF_sRGB(srgb.g), EOTF_sRGB(srgb.b));
-                }
-
-                float3 EOTF(float3 srgb) {
-                    return sign(srgb.rgb) * EOTF_sRGB(abs(srgb.rgb));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_GAMMA2_6:
-            shader.append(R"(
-
-                float EOTF_sRGB(float srgb) {
-                    return pow(srgb, 2.6);
-                }
-
-                float3 EOTF_sRGB(float3 srgb) {
-                    return float3(EOTF_sRGB(srgb.r), EOTF_sRGB(srgb.g), EOTF_sRGB(srgb.b));
-                }
-
-                float3 EOTF(float3 srgb) {
-                    return sign(srgb.rgb) * EOTF_sRGB(abs(srgb.rgb));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_GAMMA2_8:
-            shader.append(R"(
-
-                float EOTF_sRGB(float srgb) {
-                    return pow(srgb, 2.8);
-                }
-
-                float3 EOTF_sRGB(float3 srgb) {
-                    return float3(EOTF_sRGB(srgb.r), EOTF_sRGB(srgb.g), EOTF_sRGB(srgb.b));
-                }
-
-                float3 EOTF(float3 srgb) {
-                    return sign(srgb.rgb) * EOTF_sRGB(abs(srgb.rgb));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_SRGB:
-        default:
-            shader.append(R"(
-
-                float EOTF_sRGB(float srgb) {
-                    return srgb <= 0.04045 ? srgb / 12.92 : pow((srgb + 0.055) / 1.055, 2.4);
-                }
-
-                float3 EOTF_sRGB(float3 srgb) {
-                    return float3(EOTF_sRGB(srgb.r), EOTF_sRGB(srgb.g), EOTF_sRGB(srgb.b));
-                }
-
-                float3 EOTF(float3 srgb) {
-                    return sign(srgb.rgb) * EOTF_sRGB(abs(srgb.rgb));
-                }
-            )");
-            break;
-    }
-}
-
 void generateXYZTransforms(std::string& shader) {
     shader.append(R"(
-        uniform float4x4 in_rgbToXyz;
-        uniform float4x4 in_xyzToRgb;
+        uniform float3x3 in_rgbToXyz;
+        uniform float3x3 in_xyzToSrcRgb;
+        uniform float4x4 in_colorTransform;
         float3 ToXYZ(float3 rgb) {
-            return (in_rgbToXyz * float4(rgb, 1.0)).rgb;
+            return in_rgbToXyz * rgb;
         }
 
-        float3 ToRGB(float3 xyz) {
-            return clamp((in_xyzToRgb * float4(xyz, 1.0)).rgb, 0.0, 1.0);
+        float3 ToSrcRGB(float3 xyz) {
+            return in_xyzToSrcRgb * xyz;
+        }
+
+        float3 ApplyColorTransform(float3 rgb) {
+            return (in_colorTransform * float4(rgb, 1.0)).rgb;
         }
     )");
 }
 
-// Conversion from relative light to absolute light (maps from [0, 1] to [0, maxNits])
-void generateLuminanceScalesForOOTF(ui::Dataspace inputDataspace, ui::Dataspace outputDataspace,
-                                    std::string& shader) {
+// Conversion from relative light to absolute light
+// Note that 1.0 == 203 nits.
+void generateLuminanceScalesForOOTF(ui::Dataspace inputDataspace, std::string& shader) {
     switch (inputDataspace & HAL_DATASPACE_TRANSFER_MASK) {
-        case HAL_DATASPACE_TRANSFER_ST2084:
-            shader.append(R"(
-                    float3 ScaleLuminance(float3 xyz) {
-                        return xyz * 10000.0;
-                    }
-                )");
-            break;
         case HAL_DATASPACE_TRANSFER_HLG:
+            // BT. 2408 says that a signal level of 0.75 == 203 nits for HLG, but that's after
+            // applying OOTF. But we haven't applied OOTF yet, so we need to scale by a different
+            // constant instead.
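+            // Concretely, invOETF(0.75) is ~0.26496, so scaling scene light by the nominal
+            // 1000-nit HLG peak maps 0.75 to ~264.96 nits, and applying the 1.2-gamma OOTF
+            // to that signal (1000 * 0.26496^1.2) lands back at ~203 nits.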
             shader.append(R"(
-                    float3 ScaleLuminance(float3 xyz) {
-                        return xyz * 1000.0;
-                    }
-                )");
+                float3 ScaleLuminance(float3 xyz) {
+                    return xyz * 264.96;
+                }
+            )");
             break;
         default:
-            switch (outputDataspace & HAL_DATASPACE_TRANSFER_MASK) {
-                case HAL_DATASPACE_TRANSFER_ST2084:
-                case HAL_DATASPACE_TRANSFER_HLG:
-                    // SDR -> HDR tonemap
-                    shader.append(R"(
-                            float3 ScaleLuminance(float3 xyz) {
-                                return xyz * in_libtonemap_inputMaxLuminance;
-                            }
-                        )");
-                    break;
-                default:
-                    // Input and output are both SDR, so no tone-mapping is expected so
-                    // no-op the luminance normalization.
-                    shader.append(R"(
-                                float3 ScaleLuminance(float3 xyz) {
-                                    return xyz * in_libtonemap_displayMaxLuminance;
-                                }
-                            )");
-                    break;
-            }
+            shader.append(R"(
+                float3 ScaleLuminance(float3 xyz) {
+                    return xyz * 203.0;
+                }
+            )");
+            break;
     }
 }
 
 // Normalizes from absolute light back to relative light (maps from [0, maxNits] back to [0, 1])
-static void generateLuminanceNormalizationForOOTF(ui::Dataspace outputDataspace,
+static void generateLuminanceNormalizationForOOTF(ui::Dataspace inputDataspace,
+                                                  ui::Dataspace outputDataspace,
                                                   std::string& shader) {
     switch (outputDataspace & HAL_DATASPACE_TRANSFER_MASK) {
         case HAL_DATASPACE_TRANSFER_ST2084:
             shader.append(R"(
-                    float3 NormalizeLuminance(float3 xyz) {
-                        return xyz / 10000.0;
-                    }
-                )");
+                float3 NormalizeLuminance(float3 xyz) {
+                    return xyz / 203.0;
+                }
+            )");
             break;
         case HAL_DATASPACE_TRANSFER_HLG:
-            shader.append(R"(
-                    float3 NormalizeLuminance(float3 xyz) {
-                        return xyz / 1000.0;
-                    }
-                )");
+            switch (inputDataspace & HAL_DATASPACE_TRANSFER_MASK) {
+                case HAL_DATASPACE_TRANSFER_HLG:
+                    shader.append(R"(
+                            float3 NormalizeLuminance(float3 xyz) {
+                                return xyz / 264.96;
+                            }
+                        )");
+                    break;
+                default:
+                    // Transcoding to HLG requires applying the inverse OOTF
+                    // with the expectation that the OOTF is then applied during
+                    // tonemapping downstream.
+                    // BT. 2100-2 operates on normalized luminances, so renormalize to the input to
+                    // correctly adjust gamma.
+                    // Note that following BT. 2408 for HLG OETF actually maps 0.75 == ~264.96 nits,
+                    // rather than 203 nits, because 203 nits == OOTF(invOETF(0.75)), so even though
+                    // we originally scaled by 203 nits we need to re-normalize to 264.96 nits when
+                    // converting to the correct brightness range.
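+                    // The gain below is (Y/1000)^(1/1.2 - 1), i.e. the per-pixel factor that
+                    // undoes the 1.2-gamma OOTF of a 1000-nit reference HLG display.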
+                    shader.append(R"(
+                            float3 NormalizeLuminance(float3 xyz) {
+                                float ootfGain = pow(xyz.y / 1000.0, -0.2 / 1.2);
+                                return xyz * ootfGain / 264.96;
+                            }
+                        )");
+                    break;
+            }
             break;
         default:
-            shader.append(R"(
-                    float3 NormalizeLuminance(float3 xyz) {
-                        return xyz / in_libtonemap_displayMaxLuminance;
-                    }
-                )");
-            break;
+            switch (inputDataspace & HAL_DATASPACE_TRANSFER_MASK) {
+                case HAL_DATASPACE_TRANSFER_HLG:
+                case HAL_DATASPACE_TRANSFER_ST2084:
+                    // libtonemap outputs a range [0, in_libtonemap_displayMaxLuminance], so
+                    // normalize back to [0, 1] when the output is SDR.
+                    shader.append(R"(
+                        float3 NormalizeLuminance(float3 xyz) {
+                            return xyz / in_libtonemap_displayMaxLuminance;
+                        }
+                    )");
+                    break;
+                default:
+                    // Otherwise normalize back down to the range [0, 1]
+                    // TODO: get this working for extended range outputs
+                    shader.append(R"(
+                        float3 NormalizeLuminance(float3 xyz) {
+                            return xyz / 203.0;
+                        }
+                    )");
+                    break;
+            }
     }
 }
 
@@ -249,159 +148,67 @@
                                                           toAidlDataspace(outputDataspace))
                           .c_str());
 
-    generateLuminanceScalesForOOTF(inputDataspace, outputDataspace, shader);
-    generateLuminanceNormalizationForOOTF(outputDataspace, shader);
+    generateLuminanceScalesForOOTF(inputDataspace, shader);
+    generateLuminanceNormalizationForOOTF(inputDataspace, outputDataspace, shader);
 
+    // Some tonemappers operate on CIE luminance, other tonemappers operate on linear rgb
+    // luminance in the source gamut.
     shader.append(R"(
-            float3 OOTF(float3 linearRGB, float3 xyz) {
+            float3 OOTF(float3 linearRGB) {
                 float3 scaledLinearRGB = ScaleLuminance(linearRGB);
-                float3 scaledXYZ = ScaleLuminance(xyz);
+                float3 scaledXYZ = ToXYZ(scaledLinearRGB);
 
-                float gain = libtonemap_LookupTonemapGain(scaledLinearRGB, scaledXYZ);
+                float gain = libtonemap_LookupTonemapGain(ToSrcRGB(scaledXYZ), scaledXYZ);
 
                 return NormalizeLuminance(scaledXYZ * gain);
             }
         )");
 }
 
-void generateOETF(ui::Dataspace dataspace, std::string& shader) {
-    switch (dataspace & HAL_DATASPACE_TRANSFER_MASK) {
-        case HAL_DATASPACE_TRANSFER_ST2084:
+void generateOETF(std::string& shader) {
+    // Only support gamma 2.2 for now
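+    // Emitted only when buildLinearEffectSkSL() sees a GAMMA2_2 fakeOutputDataspace;
+    // otherwise the effective OOTF converts back with Skia's fromLinearSrgb().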
+    shader.append(R"(
+        float3 OETF(float3 linear) {
+            return sign(linear) * pow(abs(linear), (1.0 / 2.2));
+        }
+    )");
+}
+
+void generateEffectiveOOTF(bool undoPremultipliedAlpha, LinearEffect::SkSLType type,
+                           bool needsCustomOETF, std::string& shader) {
+    switch (type) {
+        case LinearEffect::SkSLType::ColorFilter:
             shader.append(R"(
-
-                float3 OETF(float3 xyz) {
-                    float m1 = (2610.0 / 4096.0) / 4.0;
-                    float m2 = (2523.0 / 4096.0) * 128.0;
-                    float c1 = (3424.0 / 4096.0);
-                    float c2 = (2413.0 / 4096.0) * 32.0;
-                    float c3 = (2392.0 / 4096.0) * 32.0;
-
-                    float3 tmp = pow(xyz, float3(m1));
-                    tmp = (c1 + c2 * tmp) / (1.0 + c3 * tmp);
-                    return pow(tmp, float3(m2));
-                }
+                half4 main(half4 inputColor) {
+                    float4 c = float4(inputColor);
             )");
             break;
-        case HAL_DATASPACE_TRANSFER_HLG:
+        case LinearEffect::SkSLType::Shader:
             shader.append(R"(
-                float OETF_channel(float channel) {
-                    const float a = 0.17883277;
-                    const float b = 0.28466892;
-                    const float c = 0.55991073;
-                    return channel <= 1.0 / 12.0 ? sqrt(3.0 * channel) :
-                            a * log(12.0 * channel - b) + c;
-                }
-
-                float3 OETF(float3 linear) {
-                    return float3(OETF_channel(linear.r), OETF_channel(linear.g),
-                            OETF_channel(linear.b));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_LINEAR:
-            shader.append(R"(
-                float3 OETF(float3 linear) {
-                    return linear;
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_SMPTE_170M:
-            shader.append(R"(
-                float OETF_sRGB(float linear) {
-                    return linear <= 0.018 ?
-                            linear * 4.50 : (pow(linear, 0.45) * 1.099) - 0.099;
-                }
-
-                float3 OETF_sRGB(float3 linear) {
-                    return float3(OETF_sRGB(linear.r), OETF_sRGB(linear.g), OETF_sRGB(linear.b));
-                }
-
-                float3 OETF(float3 linear) {
-                    return sign(linear.rgb) * OETF_sRGB(abs(linear.rgb));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_GAMMA2_2:
-            shader.append(R"(
-                float OETF_sRGB(float linear) {
-                    return pow(linear, (1.0 / 2.2));
-                }
-
-                float3 OETF_sRGB(float3 linear) {
-                    return float3(OETF_sRGB(linear.r), OETF_sRGB(linear.g), OETF_sRGB(linear.b));
-                }
-
-                float3 OETF(float3 linear) {
-                    return sign(linear.rgb) * OETF_sRGB(abs(linear.rgb));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_GAMMA2_6:
-            shader.append(R"(
-                float OETF_sRGB(float linear) {
-                    return pow(linear, (1.0 / 2.6));
-                }
-
-                float3 OETF_sRGB(float3 linear) {
-                    return float3(OETF_sRGB(linear.r), OETF_sRGB(linear.g), OETF_sRGB(linear.b));
-                }
-
-                float3 OETF(float3 linear) {
-                    return sign(linear.rgb) * OETF_sRGB(abs(linear.rgb));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_GAMMA2_8:
-            shader.append(R"(
-                float OETF_sRGB(float linear) {
-                    return pow(linear, (1.0 / 2.8));
-                }
-
-                float3 OETF_sRGB(float3 linear) {
-                    return float3(OETF_sRGB(linear.r), OETF_sRGB(linear.g), OETF_sRGB(linear.b));
-                }
-
-                float3 OETF(float3 linear) {
-                    return sign(linear.rgb) * OETF_sRGB(abs(linear.rgb));
-                }
-            )");
-            break;
-        case HAL_DATASPACE_TRANSFER_SRGB:
-        default:
-            shader.append(R"(
-                float OETF_sRGB(float linear) {
-                    return linear <= 0.0031308 ?
-                            linear * 12.92 : (pow(linear, 1.0 / 2.4) * 1.055) - 0.055;
-                }
-
-                float3 OETF_sRGB(float3 linear) {
-                    return float3(OETF_sRGB(linear.r), OETF_sRGB(linear.g), OETF_sRGB(linear.b));
-                }
-
-                float3 OETF(float3 linear) {
-                    return sign(linear.rgb) * OETF_sRGB(abs(linear.rgb));
-                }
+                uniform shader child;
+                half4 main(float2 xy) {
+                    float4 c = float4(child.eval(xy));
             )");
             break;
     }
-}
-
-void generateEffectiveOOTF(bool undoPremultipliedAlpha, std::string& shader) {
-    shader.append(R"(
-        uniform shader child;
-        half4 main(float2 xy) {
-            float4 c = float4(child.eval(xy));
-    )");
     if (undoPremultipliedAlpha) {
         shader.append(R"(
             c.rgb = c.rgb / (c.a + 0.0019);
         )");
     }
+    // We are using linear sRGB as a working space, with 1.0 == 203 nits
     shader.append(R"(
-        float3 linearRGB = EOTF(c.rgb);
-        float3 xyz = ToXYZ(linearRGB);
-        c.rgb = OETF(ToRGB(OOTF(linearRGB, xyz)));
+        c.rgb = ApplyColorTransform(OOTF(toLinearSrgb(c.rgb)));
     )");
+    if (needsCustomOETF) {
+        shader.append(R"(
+            c.rgb = OETF(c.rgb);
+        )");
+    } else {
+        shader.append(R"(
+            c.rgb = fromLinearSrgb(c.rgb);
+        )");
+    }
     if (undoPremultipliedAlpha) {
         shader.append(R"(
             c.rgb = c.rgb * (c.a + 0.0019);
@@ -413,33 +220,6 @@
     )");
 }
 
-// please keep in sync with toSkColorSpace function in renderengine/skia/ColorSpaces.cpp
-ColorSpace toColorSpace(ui::Dataspace dataspace) {
-    switch (dataspace & HAL_DATASPACE_STANDARD_MASK) {
-        case HAL_DATASPACE_STANDARD_BT709:
-            return ColorSpace::sRGB();
-        case HAL_DATASPACE_STANDARD_DCI_P3:
-            return ColorSpace::DisplayP3();
-        case HAL_DATASPACE_STANDARD_BT2020:
-        case HAL_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE:
-            return ColorSpace::BT2020();
-        case HAL_DATASPACE_STANDARD_ADOBE_RGB:
-            return ColorSpace::AdobeRGB();
-        // TODO(b/208290320): BT601 format and variants return different primaries
-        case HAL_DATASPACE_STANDARD_BT601_625:
-        case HAL_DATASPACE_STANDARD_BT601_625_UNADJUSTED:
-        case HAL_DATASPACE_STANDARD_BT601_525:
-        case HAL_DATASPACE_STANDARD_BT601_525_UNADJUSTED:
-        // TODO(b/208290329): BT407M format returns different primaries
-        case HAL_DATASPACE_STANDARD_BT470M:
-        // TODO(b/208290904): FILM format returns different primaries
-        case HAL_DATASPACE_STANDARD_FILM:
-        case HAL_DATASPACE_STANDARD_UNSPECIFIED:
-        default:
-            return ColorSpace::sRGB();
-    }
-}
-
 template <typename T, std::enable_if_t<std::is_trivially_copyable<T>::value, bool> = true>
 std::vector<uint8_t> buildUniformValue(T value) {
     std::vector<uint8_t> result;
@@ -452,17 +232,45 @@
 
 std::string buildLinearEffectSkSL(const LinearEffect& linearEffect) {
     std::string shaderString;
-    generateEOTF(linearEffect.fakeInputDataspace == ui::Dataspace::UNKNOWN
-                         ? linearEffect.inputDataspace
-                         : linearEffect.fakeInputDataspace,
-                 shaderString);
     generateXYZTransforms(shaderString);
     generateOOTF(linearEffect.inputDataspace, linearEffect.outputDataspace, shaderString);
-    generateOETF(linearEffect.outputDataspace, shaderString);
-    generateEffectiveOOTF(linearEffect.undoPremultipliedAlpha, shaderString);
+
+    const bool needsCustomOETF = (linearEffect.fakeOutputDataspace & HAL_DATASPACE_TRANSFER_MASK) ==
+            HAL_DATASPACE_TRANSFER_GAMMA2_2;
+    if (needsCustomOETF) {
+        generateOETF(shaderString);
+    }
+    generateEffectiveOOTF(linearEffect.undoPremultipliedAlpha, linearEffect.type, needsCustomOETF,
+                          shaderString);
     return shaderString;
 }
 
+ColorSpace toColorSpace(ui::Dataspace dataspace) {
+    switch (dataspace & HAL_DATASPACE_STANDARD_MASK) {
+        case HAL_DATASPACE_STANDARD_BT709:
+            return ColorSpace::sRGB();
+        case HAL_DATASPACE_STANDARD_DCI_P3:
+            return ColorSpace::DisplayP3();
+        case HAL_DATASPACE_STANDARD_BT2020:
+        case HAL_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE:
+            return ColorSpace::BT2020();
+        case HAL_DATASPACE_STANDARD_ADOBE_RGB:
+            return ColorSpace::AdobeRGB();
+            // TODO(b/208290320): BT601 format and variants return different primaries
+        case HAL_DATASPACE_STANDARD_BT601_625:
+        case HAL_DATASPACE_STANDARD_BT601_625_UNADJUSTED:
+        case HAL_DATASPACE_STANDARD_BT601_525:
+        case HAL_DATASPACE_STANDARD_BT601_525_UNADJUSTED:
+            // TODO(b/208290329): BT407M format returns different primaries
+        case HAL_DATASPACE_STANDARD_BT470M:
+            // TODO(b/208290904): FILM format returns different primaries
+        case HAL_DATASPACE_STANDARD_FILM:
+        case HAL_DATASPACE_STANDARD_UNSPECIFIED:
+        default:
+            return ColorSpace::sRGB();
+    }
+}
+
 // Generates a list of uniforms to set on the LinearEffect shader above.
 std::vector<tonemap::ShaderUniform> buildLinearEffectUniforms(
         const LinearEffect& linearEffect, const mat4& colorTransform, float maxDisplayLuminance,
@@ -470,29 +278,29 @@
         aidl::android::hardware::graphics::composer3::RenderIntent renderIntent) {
     std::vector<tonemap::ShaderUniform> uniforms;
 
-    const ui::Dataspace inputDataspace = linearEffect.fakeInputDataspace == ui::Dataspace::UNKNOWN
-            ? linearEffect.inputDataspace
-            : linearEffect.fakeInputDataspace;
+    auto inputColorSpace = toColorSpace(linearEffect.inputDataspace);
+    auto outputColorSpace = toColorSpace(linearEffect.outputDataspace);
 
-    if (inputDataspace == linearEffect.outputDataspace) {
-        uniforms.push_back({.name = "in_rgbToXyz", .value = buildUniformValue<mat4>(mat4())});
-        uniforms.push_back(
-                {.name = "in_xyzToRgb", .value = buildUniformValue<mat4>(colorTransform)});
-    } else {
-        ColorSpace inputColorSpace = toColorSpace(inputDataspace);
-        ColorSpace outputColorSpace = toColorSpace(linearEffect.outputDataspace);
-        uniforms.push_back({.name = "in_rgbToXyz",
-                            .value = buildUniformValue<mat4>(mat4(inputColorSpace.getRGBtoXYZ()))});
-        uniforms.push_back({.name = "in_xyzToRgb",
-                            .value = buildUniformValue<mat4>(
-                                    colorTransform * mat4(outputColorSpace.getXYZtoRGB()))});
-    }
+    uniforms.push_back(
+            {.name = "in_rgbToXyz",
+             .value = buildUniformValue<mat3>(ColorSpace::linearExtendedSRGB().getRGBtoXYZ())});
+    uniforms.push_back({.name = "in_xyzToSrcRgb",
+                        .value = buildUniformValue<mat3>(inputColorSpace.getXYZtoRGB())});
+    // Transforms xyz colors to linear source colors, then applies the color transform, then
+    // transforms to linear extended RGB for skia to color manage.
+    uniforms.push_back({.name = "in_colorTransform",
+                        .value = buildUniformValue<mat4>(
+                                mat4(ColorSpace::linearExtendedSRGB().getXYZtoRGB()) *
+                                // TODO: the color transform ideally should be applied
+                                // in the source colorspace, but doing that breaks
+                                // renderengine tests
+                                mat4(outputColorSpace.getRGBtoXYZ()) * colorTransform *
+                                mat4(outputColorSpace.getXYZtoRGB()))});
 
     tonemap::Metadata metadata{.displayMaxLuminance = maxDisplayLuminance,
                                // If the input luminance is unknown, use display luminance (aka,
-                               // no-op any luminance changes)
-                               // This will be the case for eg screenshots in addition to
-                               // uncalibrated displays
+                               // no-op any luminance changes).
+                               // This is expected to only be meaningful for PQ content
                                .contentMaxLuminance =
                                        maxLuminance > 0 ? maxLuminance : maxDisplayLuminance,
                                .currentDisplayLuminance = currentDisplayLuminanceNits > 0
diff --git a/libs/shaders/tests/Android.bp b/libs/shaders/tests/Android.bp
index 718d37b..1e4f45a 100644
--- a/libs/shaders/tests/Android.bp
+++ b/libs/shaders/tests/Android.bp
@@ -23,6 +23,10 @@
 
 cc_test {
     name: "libshaders_test",
+    defaults: [
+        "android.hardware.graphics.common-ndk_shared",
+        "android.hardware.graphics.composer3-ndk_shared",
+    ],
     test_suites: ["device-tests"],
     srcs: [
         "shaders_test.cpp",
@@ -31,8 +35,6 @@
         "libtonemap_headers",
     ],
     shared_libs: [
-        "android.hardware.graphics.common-V4-ndk",
-        "android.hardware.graphics.composer3-V1-ndk",
         "android.hardware.graphics.common@1.2",
         "libnativewindow",
     ],
diff --git a/libs/shaders/tests/shaders_test.cpp b/libs/shaders/tests/shaders_test.cpp
index d45fb24..ba8bed2 100644
--- a/libs/shaders/tests/shaders_test.cpp
+++ b/libs/shaders/tests/shaders_test.cpp
@@ -35,6 +35,10 @@
     return arg.name == name && arg.value == value;
 }
 
+MATCHER_P(UniformNameEq, name, "") {
+    return arg.name == name;
+}
+
 template <typename T, std::enable_if_t<std::is_trivially_copyable<T>::value, bool> = true>
 std::vector<uint8_t> buildUniformValue(T value) {
     std::vector<uint8_t> result;
@@ -49,50 +53,44 @@
     shaders::LinearEffect effect =
             shaders::LinearEffect{.inputDataspace = ui::Dataspace::V0_SRGB_LINEAR,
                                   .outputDataspace = ui::Dataspace::V0_SRGB_LINEAR,
-                                  .fakeInputDataspace = ui::Dataspace::UNKNOWN};
+                                  .fakeOutputDataspace = ui::Dataspace::UNKNOWN};
 
     mat4 colorTransform = mat4::scale(vec4(.9, .9, .9, 1.));
     auto uniforms =
             shaders::buildLinearEffectUniforms(effect, colorTransform, 1.f, 1.f, 1.f, nullptr,
                                                aidl::android::hardware::graphics::composer3::
                                                        RenderIntent::COLORIMETRIC);
-    EXPECT_THAT(uniforms, Contains(UniformEq("in_rgbToXyz", buildUniformValue<mat4>(mat4()))));
     EXPECT_THAT(uniforms,
-                Contains(UniformEq("in_xyzToRgb", buildUniformValue<mat4>(colorTransform))));
+                Contains(UniformEq("in_rgbToXyz",
+                                   buildUniformValue<mat3>(
+                                           ColorSpace::linearExtendedSRGB().getRGBtoXYZ()))));
+    EXPECT_THAT(uniforms,
+                Contains(UniformEq("in_xyzToSrcRgb",
+                                   buildUniformValue<mat3>(
+                                           ColorSpace::linearSRGB().getXYZtoRGB()))));
+    // The color transform matrix itself is covered by renderengine's tests; here we only check
+    // that the uniform is present.
+    EXPECT_THAT(uniforms, Contains(UniformNameEq("in_colorTransform")));
 }
 
 TEST_F(ShadersTest, buildLinearEffectUniforms_selectsGamutTransformMatrices) {
     shaders::LinearEffect effect =
             shaders::LinearEffect{.inputDataspace = ui::Dataspace::V0_SRGB,
                                   .outputDataspace = ui::Dataspace::DISPLAY_P3,
-                                  .fakeInputDataspace = ui::Dataspace::UNKNOWN};
+                                  .fakeOutputDataspace = ui::Dataspace::UNKNOWN};
 
     ColorSpace inputColorSpace = ColorSpace::sRGB();
-    ColorSpace outputColorSpace = ColorSpace::DisplayP3();
     auto uniforms =
             shaders::buildLinearEffectUniforms(effect, mat4(), 1.f, 1.f, 1.f, nullptr,
                                                aidl::android::hardware::graphics::composer3::
                                                        RenderIntent::COLORIMETRIC);
     EXPECT_THAT(uniforms,
                 Contains(UniformEq("in_rgbToXyz",
-                                   buildUniformValue<mat4>(mat4(inputColorSpace.getRGBtoXYZ())))));
+                                   buildUniformValue<mat3>(
+                                           ColorSpace::linearExtendedSRGB().getRGBtoXYZ()))));
     EXPECT_THAT(uniforms,
-                Contains(UniformEq("in_xyzToRgb",
-                                   buildUniformValue<mat4>(mat4(outputColorSpace.getXYZtoRGB())))));
-}
-
-TEST_F(ShadersTest, buildLinearEffectUniforms_respectsFakeInputDataspace) {
-    shaders::LinearEffect effect =
-            shaders::LinearEffect{.inputDataspace = ui::Dataspace::V0_SRGB,
-                                  .outputDataspace = ui::Dataspace::DISPLAY_P3,
-                                  .fakeInputDataspace = ui::Dataspace::DISPLAY_P3};
-
-    auto uniforms =
-            shaders::buildLinearEffectUniforms(effect, mat4(), 1.f, 1.f, 1.f, nullptr,
-                                               aidl::android::hardware::graphics::composer3::
-                                                       RenderIntent::COLORIMETRIC);
-    EXPECT_THAT(uniforms, Contains(UniformEq("in_rgbToXyz", buildUniformValue<mat4>(mat4()))));
-    EXPECT_THAT(uniforms, Contains(UniformEq("in_xyzToRgb", buildUniformValue<mat4>(mat4()))));
+                Contains(UniformEq("in_xyzToSrcRgb",
+                                   buildUniformValue<mat3>(inputColorSpace.getXYZtoRGB()))));
+    EXPECT_THAT(uniforms, Contains(UniformNameEq("in_colorTransform")));
 }
 
 } // namespace android
diff --git a/libs/tonemap/Android.bp b/libs/tonemap/Android.bp
index eca051d..8c8815d 100644
--- a/libs/tonemap/Android.bp
+++ b/libs/tonemap/Android.bp
@@ -23,13 +23,15 @@
 
 cc_library_static {
     name: "libtonemap",
+    defaults: [
+        "android.hardware.graphics.common-ndk_shared",
+        "android.hardware.graphics.composer3-ndk_shared",
+    ],
     vendor_available: true,
 
     local_include_dirs: ["include"],
 
     shared_libs: [
-        "android.hardware.graphics.common-V4-ndk",
-        "android.hardware.graphics.composer3-V1-ndk",
         "liblog",
         "libnativewindow",
     ],
diff --git a/libs/tonemap/tests/Android.bp b/libs/tonemap/tests/Android.bp
index 0002d3a..2abf515 100644
--- a/libs/tonemap/tests/Android.bp
+++ b/libs/tonemap/tests/Android.bp
@@ -23,6 +23,10 @@
 
 cc_test {
     name: "libtonemap_test",
+    defaults: [
+        "android.hardware.graphics.common-ndk_shared",
+        "android.hardware.graphics.composer3-ndk_shared",
+    ],
     test_suites: ["device-tests"],
     srcs: [
         "tonemap_test.cpp",
@@ -31,8 +35,6 @@
         "libtonemap_headers",
     ],
     shared_libs: [
-        "android.hardware.graphics.common-V4-ndk",
-        "android.hardware.graphics.composer3-V1-ndk",
         "libnativewindow",
     ],
     static_libs: [
diff --git a/libs/ui/Android.bp b/libs/ui/Android.bp
index 98d9b94..ec0ab4e 100644
--- a/libs/ui/Android.bp
+++ b/libs/ui/Android.bp
@@ -48,7 +48,6 @@
         integer_overflow: true,
         misc_undefined: ["bounds"],
     },
-
 }
 
 cc_library_static {
@@ -127,7 +126,6 @@
         "DebugUtils.cpp",
         "DeviceProductInfo.cpp",
         "DisplayIdentification.cpp",
-        "DisplayMode.cpp",
         "DynamicDisplayInfo.cpp",
         "Fence.cpp",
         "FenceTime.cpp",
@@ -136,14 +134,13 @@
         "Gralloc2.cpp",
         "Gralloc3.cpp",
         "Gralloc4.cpp",
+        "Gralloc5.cpp",
         "GraphicBuffer.cpp",
         "GraphicBufferAllocator.cpp",
         "GraphicBufferMapper.cpp",
-        "HdrCapabilities.cpp",
         "PixelFormat.cpp",
         "PublicFormat.cpp",
         "StaticAsserts.cpp",
-        "StaticDisplayInfo.cpp",
     ],
 
     include_dirs: [
@@ -179,6 +176,7 @@
         "libsync",
         "libutils",
         "liblog",
+        "libvndksupport",
     ],
 
     export_shared_lib_headers: [
@@ -217,6 +215,8 @@
         "libnativewindow_headers",
         "libhardware_headers",
         "libui_headers",
+        "libimapper_stablec",
+        "libimapper_providerutils",
     ],
 
     export_static_lib_headers: [
@@ -240,10 +240,6 @@
     ],
 
     afdo: true,
-
-    header_abi_checker: {
-        diff_flags: ["-allow-adding-removing-weak-symbols"],
-    },
 }
 
 cc_library_headers {
diff --git a/libs/ui/DeviceProductInfo.cpp b/libs/ui/DeviceProductInfo.cpp
index 04d9d3c..6ae27de 100644
--- a/libs/ui/DeviceProductInfo.cpp
+++ b/libs/ui/DeviceProductInfo.cpp
@@ -14,74 +14,43 @@
  * limitations under the License.
  */
 
+#include <ftl/match.h>
 #include <ui/DeviceProductInfo.h>
 
 #include <android-base/stringprintf.h>
-#include <ui/FlattenableHelpers.h>
-#include <utils/Log.h>
-
-#define RETURN_IF_ERROR(op) \
-    if (const status_t status = (op); status != OK) return status;
 
 namespace android {
 
-using base::StringAppendF;
+std::string to_string(const DeviceProductInfo& info) {
+    using base::StringAppendF;
 
-size_t DeviceProductInfo::getFlattenedSize() const {
-    return FlattenableHelpers::getFlattenedSize(name) +
-            FlattenableHelpers::getFlattenedSize(manufacturerPnpId) +
-            FlattenableHelpers::getFlattenedSize(productId) +
-            FlattenableHelpers::getFlattenedSize(manufactureOrModelDate) +
-            FlattenableHelpers::getFlattenedSize(relativeAddress);
-}
+    std::string result;
+    StringAppendF(&result, "{name=\"%s\", ", info.name.c_str());
+    StringAppendF(&result, "manufacturerPnpId=%s, ", info.manufacturerPnpId.data());
+    StringAppendF(&result, "productId=%s, ", info.productId.c_str());
 
-status_t DeviceProductInfo::flatten(void* buffer, size_t size) const {
-    if (size < getFlattenedSize()) {
-        return NO_MEMORY;
-    }
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, name));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, manufacturerPnpId));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, productId));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, manufactureOrModelDate));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, relativeAddress));
-    return OK;
-}
-
-status_t DeviceProductInfo::unflatten(void const* buffer, size_t size) {
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &name));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &manufacturerPnpId));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &productId));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &manufactureOrModelDate));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &relativeAddress));
-    return OK;
-}
-
-void DeviceProductInfo::dump(std::string& result) const {
-    StringAppendF(&result, "{name=\"%s\", ", name.c_str());
-    StringAppendF(&result, "manufacturerPnpId=%s, ", manufacturerPnpId.data());
-    StringAppendF(&result, "productId=%s, ", productId.c_str());
-
-    if (const auto* model = std::get_if<ModelYear>(&manufactureOrModelDate)) {
-        StringAppendF(&result, "modelYear=%u, ", model->year);
-    } else if (const auto* manufactureWeekAndYear =
-                       std::get_if<ManufactureWeekAndYear>(&manufactureOrModelDate)) {
-        StringAppendF(&result, "manufactureWeek=%u, ", manufactureWeekAndYear->week);
-        StringAppendF(&result, "manufactureYear=%d, ", manufactureWeekAndYear->year);
-    } else if (const auto* manufactureYear =
-                       std::get_if<ManufactureYear>(&manufactureOrModelDate)) {
-        StringAppendF(&result, "manufactureYear=%d, ", manufactureYear->year);
-    } else {
-        ALOGE("Unknown alternative for variant DeviceProductInfo::ManufactureOrModelDate");
-    }
+    ftl::match(
+            info.manufactureOrModelDate,
+            [&](DeviceProductInfo::ModelYear model) {
+                StringAppendF(&result, "modelYear=%u, ", model.year);
+            },
+            [&](DeviceProductInfo::ManufactureWeekAndYear manufacture) {
+                StringAppendF(&result, "manufactureWeek=%u, ", manufacture.week);
+                StringAppendF(&result, "manufactureYear=%d, ", manufacture.year);
+            },
+            [&](DeviceProductInfo::ManufactureYear manufacture) {
+                StringAppendF(&result, "manufactureYear=%d, ", manufacture.year);
+            });
 
     result.append("relativeAddress=[");
-    for (size_t i = 0; i < relativeAddress.size(); i++) {
+    for (size_t i = 0; i < info.relativeAddress.size(); i++) {
         if (i != 0) {
             result.append(", ");
         }
-        StringAppendF(&result, "%u", relativeAddress[i]);
+        StringAppendF(&result, "%u", info.relativeAddress[i]);
     }
     result.append("]}");
+    return result;
 }
 
 } // namespace android
diff --git a/libs/ui/DisplayMode.cpp b/libs/ui/DisplayMode.cpp
deleted file mode 100644
index cf05dbf..0000000
--- a/libs/ui/DisplayMode.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <ui/DisplayMode.h>
-
-#include <cstdint>
-
-#include <ui/FlattenableHelpers.h>
-
-#define RETURN_IF_ERROR(op) \
-    if (const status_t status = (op); status != OK) return status;
-
-namespace android::ui {
-
-size_t DisplayMode::getFlattenedSize() const {
-    return FlattenableHelpers::getFlattenedSize(id) +
-            FlattenableHelpers::getFlattenedSize(resolution) +
-            FlattenableHelpers::getFlattenedSize(xDpi) +
-            FlattenableHelpers::getFlattenedSize(yDpi) +
-            FlattenableHelpers::getFlattenedSize(refreshRate) +
-            FlattenableHelpers::getFlattenedSize(appVsyncOffset) +
-            FlattenableHelpers::getFlattenedSize(sfVsyncOffset) +
-            FlattenableHelpers::getFlattenedSize(presentationDeadline) +
-            FlattenableHelpers::getFlattenedSize(group);
-}
-
-status_t DisplayMode::flatten(void* buffer, size_t size) const {
-    if (size < getFlattenedSize()) {
-        return NO_MEMORY;
-    }
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, id));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, resolution));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, xDpi));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, yDpi));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, refreshRate));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, appVsyncOffset));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, sfVsyncOffset));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, presentationDeadline));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, group));
-    return OK;
-}
-
-status_t DisplayMode::unflatten(const void* buffer, size_t size) {
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &id));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &resolution));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &xDpi));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &yDpi));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &refreshRate));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &appVsyncOffset));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &sfVsyncOffset));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &presentationDeadline));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &group));
-    return OK;
-}
-
-} // namespace android::ui
diff --git a/libs/ui/DynamicDisplayInfo.cpp b/libs/ui/DynamicDisplayInfo.cpp
index 78ba996..f5feea9 100644
--- a/libs/ui/DynamicDisplayInfo.cpp
+++ b/libs/ui/DynamicDisplayInfo.cpp
@@ -18,11 +18,6 @@
 
 #include <cstdint>
 
-#include <ui/FlattenableHelpers.h>
-
-#define RETURN_IF_ERROR(op) \
-    if (const status_t status = (op); status != OK) return status;
-
 namespace android::ui {
 
 std::optional<ui::DisplayMode> DynamicDisplayInfo::getActiveDisplayMode() const {
@@ -34,42 +29,4 @@
     return {};
 }
 
-size_t DynamicDisplayInfo::getFlattenedSize() const {
-    return FlattenableHelpers::getFlattenedSize(supportedDisplayModes) +
-            FlattenableHelpers::getFlattenedSize(activeDisplayModeId) +
-            FlattenableHelpers::getFlattenedSize(supportedColorModes) +
-            FlattenableHelpers::getFlattenedSize(activeColorMode) +
-            FlattenableHelpers::getFlattenedSize(hdrCapabilities) +
-            FlattenableHelpers::getFlattenedSize(autoLowLatencyModeSupported) +
-            FlattenableHelpers::getFlattenedSize(gameContentTypeSupported) +
-            FlattenableHelpers::getFlattenedSize(preferredBootDisplayMode);
-}
-
-status_t DynamicDisplayInfo::flatten(void* buffer, size_t size) const {
-    if (size < getFlattenedSize()) {
-        return NO_MEMORY;
-    }
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, supportedDisplayModes));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, activeDisplayModeId));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, supportedColorModes));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, activeColorMode));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, hdrCapabilities));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, autoLowLatencyModeSupported));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, gameContentTypeSupported));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, preferredBootDisplayMode));
-    return OK;
-}
-
-status_t DynamicDisplayInfo::unflatten(const void* buffer, size_t size) {
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &supportedDisplayModes));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &activeDisplayModeId));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &supportedColorModes));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &activeColorMode));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &hdrCapabilities));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &autoLowLatencyModeSupported));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &gameContentTypeSupported));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &preferredBootDisplayMode));
-    return OK;
-}
-
 } // namespace android::ui
diff --git a/libs/ui/Gralloc2.cpp b/libs/ui/Gralloc2.cpp
index f23f10a..e9b5dec 100644
--- a/libs/ui/Gralloc2.cpp
+++ b/libs/ui/Gralloc2.cpp
@@ -161,7 +161,7 @@
     return static_cast<status_t>((ret.isOk()) ? error : kTransactionError);
 }
 
-status_t Gralloc2Mapper::importBuffer(const hardware::hidl_handle& rawHandle,
+status_t Gralloc2Mapper::importBuffer(const native_handle_t* rawHandle,
                                       buffer_handle_t* outBufferHandle) const {
     Error error;
     auto ret = mMapper->importBuffer(rawHandle,
diff --git a/libs/ui/Gralloc3.cpp b/libs/ui/Gralloc3.cpp
index 15c60bc..474d381 100644
--- a/libs/ui/Gralloc3.cpp
+++ b/libs/ui/Gralloc3.cpp
@@ -138,7 +138,7 @@
     return static_cast<status_t>((ret.isOk()) ? error : kTransactionError);
 }
 
-status_t Gralloc3Mapper::importBuffer(const hardware::hidl_handle& rawHandle,
+status_t Gralloc3Mapper::importBuffer(const native_handle_t* rawHandle,
                                       buffer_handle_t* outBufferHandle) const {
     Error error;
     auto ret = mMapper->importBuffer(rawHandle, [&](const auto& tmpError, const auto& tmpBuffer) {
diff --git a/libs/ui/Gralloc4.cpp b/libs/ui/Gralloc4.cpp
index 53372c9..b6274ab 100644
--- a/libs/ui/Gralloc4.cpp
+++ b/libs/ui/Gralloc4.cpp
@@ -198,7 +198,7 @@
     return static_cast<status_t>((ret.isOk()) ? error : kTransactionError);
 }
 
-status_t Gralloc4Mapper::importBuffer(const hardware::hidl_handle& rawHandle,
+status_t Gralloc4Mapper::importBuffer(const native_handle_t* rawHandle,
                                       buffer_handle_t* outBufferHandle) const {
     Error error;
     auto ret = mMapper->importBuffer(rawHandle, [&](const auto& tmpError, const auto& tmpBuffer) {
@@ -766,162 +766,6 @@
                gralloc4::encodeSmpte2094_10);
 }
 
-template <class T>
-status_t Gralloc4Mapper::getDefault(uint32_t width, uint32_t height, PixelFormat format,
-                                    uint32_t layerCount, uint64_t usage,
-                                    const MetadataType& metadataType,
-                                    DecodeFunction<T> decodeFunction, T* outMetadata) const {
-    if (!outMetadata) {
-        return BAD_VALUE;
-    }
-
-    IMapper::BufferDescriptorInfo descriptorInfo;
-    if (auto error = sBufferDescriptorInfo("getDefault", width, height, format, layerCount, usage,
-                                           &descriptorInfo) != OK) {
-        return error;
-    }
-
-    hidl_vec<uint8_t> vec;
-    Error error;
-    auto ret = mMapper->getFromBufferDescriptorInfo(descriptorInfo, metadataType,
-                                                    [&](const auto& tmpError,
-                                                        const hidl_vec<uint8_t>& tmpVec) {
-                                                        error = tmpError;
-                                                        vec = tmpVec;
-                                                    });
-
-    if (!ret.isOk()) {
-        error = kTransactionError;
-    }
-
-    if (error != Error::NONE) {
-        ALOGE("getDefault(%s, %" PRIu64 ", ...) failed with %d", metadataType.name.c_str(),
-              metadataType.value, error);
-        return static_cast<status_t>(error);
-    }
-
-    return decodeFunction(vec, outMetadata);
-}
-
-status_t Gralloc4Mapper::getDefaultPixelFormatFourCC(uint32_t width, uint32_t height,
-                                                     PixelFormat format, uint32_t layerCount,
-                                                     uint64_t usage,
-                                                     uint32_t* outPixelFormatFourCC) const {
-    return getDefault(width, height, format, layerCount, usage,
-                      gralloc4::MetadataType_PixelFormatFourCC, gralloc4::decodePixelFormatFourCC,
-                      outPixelFormatFourCC);
-}
-
-status_t Gralloc4Mapper::getDefaultPixelFormatModifier(uint32_t width, uint32_t height,
-                                                       PixelFormat format, uint32_t layerCount,
-                                                       uint64_t usage,
-                                                       uint64_t* outPixelFormatModifier) const {
-    return getDefault(width, height, format, layerCount, usage,
-                      gralloc4::MetadataType_PixelFormatModifier,
-                      gralloc4::decodePixelFormatModifier, outPixelFormatModifier);
-}
-
-status_t Gralloc4Mapper::getDefaultAllocationSize(uint32_t width, uint32_t height,
-                                                  PixelFormat format, uint32_t layerCount,
-                                                  uint64_t usage,
-                                                  uint64_t* outAllocationSize) const {
-    return getDefault(width, height, format, layerCount, usage,
-                      gralloc4::MetadataType_AllocationSize, gralloc4::decodeAllocationSize,
-                      outAllocationSize);
-}
-
-status_t Gralloc4Mapper::getDefaultProtectedContent(uint32_t width, uint32_t height,
-                                                    PixelFormat format, uint32_t layerCount,
-                                                    uint64_t usage,
-                                                    uint64_t* outProtectedContent) const {
-    return getDefault(width, height, format, layerCount, usage,
-                      gralloc4::MetadataType_ProtectedContent, gralloc4::decodeProtectedContent,
-                      outProtectedContent);
-}
-
-status_t Gralloc4Mapper::getDefaultCompression(uint32_t width, uint32_t height, PixelFormat format,
-                                               uint32_t layerCount, uint64_t usage,
-                                               ExtendableType* outCompression) const {
-    return getDefault(width, height, format, layerCount, usage, gralloc4::MetadataType_Compression,
-                      gralloc4::decodeCompression, outCompression);
-}
-
-status_t Gralloc4Mapper::getDefaultCompression(uint32_t width, uint32_t height, PixelFormat format,
-                                               uint32_t layerCount, uint64_t usage,
-                                               ui::Compression* outCompression) const {
-    if (!outCompression) {
-        return BAD_VALUE;
-    }
-    ExtendableType compression;
-    status_t error = getDefaultCompression(width, height, format, layerCount, usage, &compression);
-    if (error) {
-        return error;
-    }
-    if (!gralloc4::isStandardCompression(compression)) {
-        return BAD_TYPE;
-    }
-    *outCompression = gralloc4::getStandardCompressionValue(compression);
-    return NO_ERROR;
-}
-
-status_t Gralloc4Mapper::getDefaultInterlaced(uint32_t width, uint32_t height, PixelFormat format,
-                                              uint32_t layerCount, uint64_t usage,
-                                              ExtendableType* outInterlaced) const {
-    return getDefault(width, height, format, layerCount, usage, gralloc4::MetadataType_Interlaced,
-                      gralloc4::decodeInterlaced, outInterlaced);
-}
-
-status_t Gralloc4Mapper::getDefaultInterlaced(uint32_t width, uint32_t height, PixelFormat format,
-                                              uint32_t layerCount, uint64_t usage,
-                                              ui::Interlaced* outInterlaced) const {
-    if (!outInterlaced) {
-        return BAD_VALUE;
-    }
-    ExtendableType interlaced;
-    status_t error = getDefaultInterlaced(width, height, format, layerCount, usage, &interlaced);
-    if (error) {
-        return error;
-    }
-    if (!gralloc4::isStandardInterlaced(interlaced)) {
-        return BAD_TYPE;
-    }
-    *outInterlaced = gralloc4::getStandardInterlacedValue(interlaced);
-    return NO_ERROR;
-}
-
-status_t Gralloc4Mapper::getDefaultChromaSiting(uint32_t width, uint32_t height, PixelFormat format,
-                                                uint32_t layerCount, uint64_t usage,
-                                                ExtendableType* outChromaSiting) const {
-    return getDefault(width, height, format, layerCount, usage, gralloc4::MetadataType_ChromaSiting,
-                      gralloc4::decodeChromaSiting, outChromaSiting);
-}
-
-status_t Gralloc4Mapper::getDefaultChromaSiting(uint32_t width, uint32_t height, PixelFormat format,
-                                                uint32_t layerCount, uint64_t usage,
-                                                ui::ChromaSiting* outChromaSiting) const {
-    if (!outChromaSiting) {
-        return BAD_VALUE;
-    }
-    ExtendableType chromaSiting;
-    status_t error =
-            getDefaultChromaSiting(width, height, format, layerCount, usage, &chromaSiting);
-    if (error) {
-        return error;
-    }
-    if (!gralloc4::isStandardChromaSiting(chromaSiting)) {
-        return BAD_TYPE;
-    }
-    *outChromaSiting = gralloc4::getStandardChromaSitingValue(chromaSiting);
-    return NO_ERROR;
-}
-
-status_t Gralloc4Mapper::getDefaultPlaneLayouts(
-        uint32_t width, uint32_t height, PixelFormat format, uint32_t layerCount, uint64_t usage,
-        std::vector<ui::PlaneLayout>* outPlaneLayouts) const {
-    return getDefault(width, height, format, layerCount, usage, gralloc4::MetadataType_PlaneLayouts,
-                      gralloc4::decodePlaneLayouts, outPlaneLayouts);
-}
-
 std::vector<MetadataTypeDescription> Gralloc4Mapper::listSupportedMetadataTypes() const {
     hidl_vec<MetadataTypeDescription> descriptions;
     Error error;
@@ -1242,7 +1086,10 @@
 
     if (mAidlAllocator) {
         AllocationResult result;
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
         auto status = mAidlAllocator->allocate(descriptor, bufferCount, &result);
+#pragma clang diagnostic pop // deprecation
         if (!status.isOk()) {
             error = status.getExceptionCode();
             if (error == EX_SERVICE_SPECIFIC) {
diff --git a/libs/ui/Gralloc5.cpp b/libs/ui/Gralloc5.cpp
new file mode 100644
index 0000000..c3b2d3d
--- /dev/null
+++ b/libs/ui/Gralloc5.cpp
@@ -0,0 +1,835 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Gralloc5"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include <ui/Gralloc5.h>
+
+#include <aidlcommonsupport/NativeHandle.h>
+#include <android/binder_manager.h>
+#include <android/hardware/graphics/mapper/utils/IMapperMetadataTypes.h>
+#include <binder/IPCThreadState.h>
+#include <dlfcn.h>
+#include <ui/FatVector.h>
+#include <vndksupport/linker.h>
+
+using namespace aidl::android::hardware::graphics::allocator;
+using namespace aidl::android::hardware::graphics::common;
+using namespace ::android::hardware::graphics::mapper;
+
+namespace android {
+
+static const auto kIAllocatorServiceName = IAllocator::descriptor + std::string("/default");
+static const auto kIAllocatorMinimumVersion = 2;
+
+// TODO(b/72323293, b/72703005): Remove these invalid bits from callers
+static constexpr uint64_t kRemovedUsageBits = static_cast<uint64_t>((1 << 10) | (1 << 13));
+
+typedef AIMapper_Error (*AIMapper_loadIMapperFn)(AIMapper *_Nullable *_Nonnull outImplementation);
+
+struct Gralloc5 {
+    std::shared_ptr<IAllocator> allocator;
+    AIMapper *mapper = nullptr;
+};
+
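+// Waits for the AIDL IAllocator service and returns it, or nullptr if the service is not
+// declared or its interface version is below kIAllocatorMinimumVersion.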
+static std::shared_ptr<IAllocator> waitForAllocator() {
+    if (__builtin_available(android 31, *)) {
+        if (!AServiceManager_isDeclared(kIAllocatorServiceName.c_str())) {
+            return nullptr;
+        }
+        auto allocator = IAllocator::fromBinder(
+                ndk::SpAIBinder(AServiceManager_waitForService(kIAllocatorServiceName.c_str())));
+        if (!allocator) {
+            ALOGE("AIDL IAllocator declared but failed to get service");
+            return nullptr;
+        }
+
+        int32_t version = 0;
+        if (!allocator->getInterfaceVersion(&version).isOk()) {
+            ALOGE("Failed to query interface version");
+            return nullptr;
+        }
+        if (version < kIAllocatorMinimumVersion) {
+            return nullptr;
+        }
+        return allocator;
+    } else {
+        // TODO: LOG_ALWAYS_FATAL("libui is not backwards compatible");
+        return nullptr;
+    }
+}
+
+static void *loadIMapperLibrary() {
+    static void *imapperLibrary = []() -> void * {
+        auto allocator = waitForAllocator();
+        if (!allocator) {
+            ALOGE("Failed to get IAllocator, cannot determine the IMapper library suffix");
+            return nullptr;
+        }
+        std::string mapperSuffix;
+        auto status = allocator->getIMapperLibrarySuffix(&mapperSuffix);
+        if (!status.isOk()) {
+            ALOGE("Failed to get IMapper library suffix");
+            return nullptr;
+        }
+
+        std::string lib_name = "mapper." + mapperSuffix + ".so";
+        void *so = android_load_sphal_library(lib_name.c_str(), RTLD_LOCAL | RTLD_NOW);
+        if (!so) {
+            ALOGE("Failed to load %s", lib_name.c_str());
+        }
+        return so;
+    }();
+    return imapperLibrary;
+}
+
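+// Lazily initializes the process-wide allocator/mapper pair. On any failure an empty Gralloc5
+// is returned, so the corresponding isLoaded() checks report false.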
+static const Gralloc5 &getInstance() {
+    static Gralloc5 instance = []() {
+        auto allocator = waitForAllocator();
+        if (!allocator) {
+            return Gralloc5{};
+        }
+        void *so = loadIMapperLibrary();
+        if (!so) {
+            return Gralloc5{};
+        }
+        auto loadIMapper = (AIMapper_loadIMapperFn)dlsym(so, "AIMapper_loadIMapper");
+        if (!loadIMapper) {
+            ALOGE("AIMapper_loadIMapper symbol not found in the mapper library");
+            return Gralloc5{};
+        }
+        AIMapper *mapper = nullptr;
+        AIMapper_Error error = loadIMapper(&mapper);
+        if (error != AIMAPPER_ERROR_NONE) {
+            ALOGE("AIMapper_loadIMapper failed %d", error);
+            return Gralloc5{};
+        }
+        return Gralloc5{std::move(allocator), mapper};
+    }();
+    return instance;
+}
+
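+// Reads a standard metadata blob through the stable-C mapper and decodes it into its typed
+// value. A small inline buffer is tried first; the query is repeated with the exact size if
+// the metadata does not fit.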
+template <StandardMetadataType T>
+static auto getStandardMetadata(AIMapper *mapper, buffer_handle_t bufferHandle)
+        -> decltype(StandardMetadata<T>::value::decode(nullptr, 0)) {
+    using Value = typename StandardMetadata<T>::value;
+    // TODO: Tune the inline buffer size for the common case
+    FatVector<uint8_t, 128> buffer;
+    int32_t sizeRequired = mapper->v5.getStandardMetadata(bufferHandle, static_cast<int64_t>(T),
+                                                          buffer.data(), buffer.size());
+    if (sizeRequired < 0) {
+        ALOGW_IF(-AIMAPPER_ERROR_UNSUPPORTED != sizeRequired,
+                 "Unexpected error %d from valid getStandardMetadata call", -sizeRequired);
+        return std::nullopt;
+    }
+    if ((size_t)sizeRequired > buffer.size()) {
+        buffer.resize(sizeRequired);
+        sizeRequired = mapper->v5.getStandardMetadata(bufferHandle, static_cast<int64_t>(T),
+                                                      buffer.data(), buffer.size());
+    }
+    if (sizeRequired < 0 || (size_t)sizeRequired > buffer.size()) {
+        ALOGW("getStandardMetadata failed, received %d with buffer size %zd", sizeRequired,
+              buffer.size());
+        // Return an empty value to signal failure
+        return std::nullopt;
+    }
+    return Value::decode(buffer.data(), sizeRequired);
+}
+
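+// Encodes the typed value into a buffer sized by a dry-run encode, then hands it to the
+// stable-C mapper's setStandardMetadata entry point.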
+template <StandardMetadataType T>
+static AIMapper_Error setStandardMetadata(AIMapper *mapper, buffer_handle_t bufferHandle,
+                                          const typename StandardMetadata<T>::value_type &value) {
+    using Value = typename StandardMetadata<T>::value;
+    int32_t sizeRequired = Value::encode(value, nullptr, 0);
+    if (sizeRequired < 0) {
+        ALOGW("Failed to calculate required size");
+        return static_cast<AIMapper_Error>(-sizeRequired);
+    }
+    FatVector<uint8_t, 128> buffer;
+    buffer.resize(sizeRequired);
+    sizeRequired = Value::encode(value, buffer.data(), buffer.size());
+    if (sizeRequired < 0 || (size_t)sizeRequired > buffer.size()) {
+        ALOGW("Failed to encode with calculated size %d; buffer size %zd", sizeRequired,
+              buffer.size());
+        return static_cast<AIMapper_Error>(-sizeRequired);
+    }
+    return mapper->v5.setStandardMetadata(bufferHandle, static_cast<int64_t>(T), buffer.data(),
+                                          sizeRequired);
+}
+
+Gralloc5Allocator::Gralloc5Allocator(const Gralloc5Mapper &mapper) : mMapper(mapper) {
+    mAllocator = getInstance().allocator;
+}
+
+bool Gralloc5Allocator::isLoaded() const {
+    return mAllocator != nullptr;
+}
+
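+// Union of every BufferUsage bit plus the legacy bits in kRemovedUsageBits that are still
+// tolerated from callers.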
+static uint64_t getValidUsageBits() {
+    static const uint64_t validUsageBits = []() -> uint64_t {
+        uint64_t bits = 0;
+        for (const auto bit : ndk::enum_range<BufferUsage>{}) {
+            bits |= static_cast<int64_t>(bit);
+        }
+        return bits;
+    }();
+    return validUsageBits | kRemovedUsageBits;
+}
+
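+// Translates the legacy width/height/format/layerCount/usage arguments into an AIDL
+// BufferDescriptorInfo, rejecting usage values that contain unknown bits.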
+static std::optional<BufferDescriptorInfo> makeDescriptor(std::string requestorName, uint32_t width,
+                                                          uint32_t height, PixelFormat format,
+                                                          uint32_t layerCount, uint64_t usage) {
+    uint64_t validUsageBits = getValidUsageBits();
+    if (usage & ~validUsageBits) {
+        ALOGE("buffer descriptor contains invalid usage bits 0x%" PRIx64, usage & ~validUsageBits);
+        return std::nullopt;
+    }
+
+    BufferDescriptorInfo descriptorInfo{
+            .width = static_cast<int32_t>(width),
+            .height = static_cast<int32_t>(height),
+            .layerCount = static_cast<int32_t>(layerCount),
+            .format = static_cast<::aidl::android::hardware::graphics::common::PixelFormat>(format),
+            .usage = static_cast<BufferUsage>(usage),
+    };
+    auto nameLength = std::min(requestorName.length(), descriptorInfo.name.size() - 1);
+    memcpy(descriptorInfo.name.data(), requestorName.data(), nameLength);
+    descriptorInfo.name.data()[nameLength] = 0;
+    return descriptorInfo;
+}
+
+std::string Gralloc5Allocator::dumpDebugInfo(bool less) const {
+    return mMapper.dumpBuffers(less);
+}
+
+status_t Gralloc5Allocator::allocate(std::string requestorName, uint32_t width, uint32_t height,
+                                     android::PixelFormat format, uint32_t layerCount,
+                                     uint64_t usage, uint32_t bufferCount, uint32_t *outStride,
+                                     buffer_handle_t *outBufferHandles, bool importBuffers) const {
+    auto descriptorInfo = makeDescriptor(requestorName, width, height, format, layerCount, usage);
+    if (!descriptorInfo) {
+        return BAD_VALUE;
+    }
+
+    AllocationResult result;
+    auto status = mAllocator->allocate2(*descriptorInfo, bufferCount, &result);
+    if (!status.isOk()) {
+        auto error = status.getExceptionCode();
+        if (error == EX_SERVICE_SPECIFIC) {
+            error = status.getServiceSpecificError();
+        }
+        if (error == OK) {
+            error = UNKNOWN_ERROR;
+        }
+        return error;
+    }
+
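+    // Either import each returned buffer through the mapper or hand back duplicated raw
+    // handles, unwinding any buffers already produced if a later one fails.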
+    if (importBuffers) {
+        for (uint32_t i = 0; i < bufferCount; i++) {
+            auto handle = makeFromAidl(result.buffers[i]);
+            auto error = mMapper.importBuffer(handle, &outBufferHandles[i]);
+            native_handle_delete(handle);
+            if (error != NO_ERROR) {
+                for (uint32_t j = 0; j < i; j++) {
+                    mMapper.freeBuffer(outBufferHandles[j]);
+                    outBufferHandles[j] = nullptr;
+                }
+                return error;
+            }
+        }
+    } else {
+        for (uint32_t i = 0; i < bufferCount; i++) {
+            outBufferHandles[i] = dupFromAidl(result.buffers[i]);
+            if (!outBufferHandles[i]) {
+                for (uint32_t j = 0; j < i; j++) {
+                    auto buffer = const_cast<native_handle_t *>(outBufferHandles[j]);
+                    native_handle_close(buffer);
+                    native_handle_delete(buffer);
+                    outBufferHandles[j] = nullptr;
+                }
+                return NO_MEMORY;
+            }
+        }
+    }
+
+    *outStride = result.stride;
+
+    // Release all the resources held by AllocationResult (specifically any remaining FDs)
+    result = {};
+    // Make sure the kernel driver sees BC_FREE_BUFFER and closes the fds now.
+    // TODO: Re-enable this if it turns out to be necessary. We can't do it now because libui
+    // is marked apex_available (b/214400477) while libbinder (correctly) is not.
+    // IPCThreadState::self()->flushCommands();
+
+    return OK;
+}
+
+void Gralloc5Mapper::preload() {
+    // TODO(b/261858155): Implement. We can't bounce off of IAllocator for this because zygote can't
+    // use binder. So when an alternate strategy of retrieving the library prefix is available,
+    // use that here.
+}
+
+Gralloc5Mapper::Gralloc5Mapper() {
+    mMapper = getInstance().mapper;
+}
+
+bool Gralloc5Mapper::isLoaded() const {
+    return mMapper != nullptr && mMapper->version >= AIMAPPER_VERSION_5;
+}
+
+std::string Gralloc5Mapper::dumpBuffer(buffer_handle_t bufferHandle, bool less) const {
+    // TODO(b/261858392): Implement
+    (void)bufferHandle;
+    (void)less;
+    return {};
+}
+
+std::string Gralloc5Mapper::dumpBuffers(bool less) const {
+    // TODO(b/261858392): Implement
+    (void)less;
+    return {};
+}
+
+status_t Gralloc5Mapper::importBuffer(const native_handle_t *rawHandle,
+                                      buffer_handle_t *outBufferHandle) const {
+    return mMapper->v5.importBuffer(rawHandle, outBufferHandle);
+}
+
+void Gralloc5Mapper::freeBuffer(buffer_handle_t bufferHandle) const {
+    mMapper->v5.freeBuffer(bufferHandle);
+}
+
+status_t Gralloc5Mapper::validateBufferSize(buffer_handle_t bufferHandle, uint32_t width,
+                                            uint32_t height, PixelFormat format,
+                                            uint32_t layerCount, uint64_t usage,
+                                            uint32_t stride) const {
+    {
+        auto value = getStandardMetadata<StandardMetadataType::WIDTH>(mMapper, bufferHandle);
+        if (width != value) {
+            ALOGW("Width didn't match, expected %d got %" PRId64, width, value.value_or(-1));
+            return BAD_VALUE;
+        }
+    }
+    {
+        auto value = getStandardMetadata<StandardMetadataType::HEIGHT>(mMapper, bufferHandle);
+        if (height != value) {
+            ALOGW("Height didn't match, expected %d got %" PRId64, height, value.value_or(-1));
+            return BAD_VALUE;
+        }
+    }
+    {
+        auto value =
+                getStandardMetadata<StandardMetadataType::PIXEL_FORMAT_REQUESTED>(mMapper,
+                                                                                  bufferHandle);
+        if (static_cast<::aidl::android::hardware::graphics::common::PixelFormat>(format) !=
+            value) {
+            ALOGW("Format didn't match, expected %d got %s", format,
+                  value.has_value() ? toString(*value).c_str() : "<null>");
+            return BAD_VALUE;
+        }
+    }
+    {
+        auto value = getStandardMetadata<StandardMetadataType::LAYER_COUNT>(mMapper, bufferHandle);
+        if (layerCount != value) {
+            ALOGW("Layer count didn't match, expected %d got %" PRId64, layerCount,
+                  value.value_or(-1));
+            return BAD_VALUE;
+        }
+    }
+    // TODO: This can false-positive fail if the allocator adjusted the USAGE bits internally
+    //       Investigate further & re-enable or remove, but for now ignoring usage should be OK
+    (void)usage;
+    // {
+    //     auto value = getStandardMetadata<StandardMetadataType::USAGE>(mMapper, bufferHandle);
+    //     if (static_cast<BufferUsage>(usage) != value) {
+    //         ALOGW("Usage didn't match, expected %" PRIu64 " got %" PRId64, usage,
+    //               static_cast<int64_t>(value.value_or(BufferUsage::CPU_READ_NEVER)));
+    //         return BAD_VALUE;
+    //     }
+    // }
+    {
+        auto value = getStandardMetadata<StandardMetadataType::STRIDE>(mMapper, bufferHandle);
+        if (stride != value) {
+            ALOGW("Stride didn't match, expected %" PRIu32 " got %" PRId32, stride,
+                  value.value_or(-1));
+            return BAD_VALUE;
+        }
+    }
+    return OK;
+}
+
+void Gralloc5Mapper::getTransportSize(buffer_handle_t bufferHandle, uint32_t *outNumFds,
+                                      uint32_t *outNumInts) const {
+    mMapper->v5.getTransportSize(bufferHandle, outNumFds, outNumInts);
+}
+
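+// Locks the buffer for CPU access. Bytes-per-pixel and bytes-per-stride are only reported when
+// every plane agrees on them; otherwise -1 is returned through those out-parameters.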
+status_t Gralloc5Mapper::lock(buffer_handle_t bufferHandle, uint64_t usage, const Rect &bounds,
+                              int acquireFence, void **outData, int32_t *outBytesPerPixel,
+                              int32_t *outBytesPerStride) const {
+    std::vector<ui::PlaneLayout> planeLayouts;
+    status_t err = getPlaneLayouts(bufferHandle, &planeLayouts);
+
+    if (err == NO_ERROR && !planeLayouts.empty()) {
+        if (outBytesPerPixel) {
+            int32_t bitsPerPixel = planeLayouts.front().sampleIncrementInBits;
+            for (const auto &planeLayout : planeLayouts) {
+                if (bitsPerPixel != planeLayout.sampleIncrementInBits) {
+                    bitsPerPixel = -1;
+                }
+            }
+            if (bitsPerPixel >= 0 && bitsPerPixel % 8 == 0) {
+                *outBytesPerPixel = bitsPerPixel / 8;
+            } else {
+                *outBytesPerPixel = -1;
+            }
+        }
+        if (outBytesPerStride) {
+            int32_t bytesPerStride = planeLayouts.front().strideInBytes;
+            for (const auto &planeLayout : planeLayouts) {
+                if (bytesPerStride != planeLayout.strideInBytes) {
+                    bytesPerStride = -1;
+                }
+            }
+            if (bytesPerStride >= 0) {
+                *outBytesPerStride = bytesPerStride;
+            } else {
+                *outBytesPerStride = -1;
+            }
+        }
+    }
+
+    auto status = mMapper->v5.lock(bufferHandle, usage, bounds, acquireFence, outData);
+
+    ALOGW_IF(status != AIMAPPER_ERROR_NONE, "lock(%p, ...) failed: %d", bufferHandle, status);
+    return static_cast<status_t>(status);
+}
+
+status_t Gralloc5Mapper::lock(buffer_handle_t bufferHandle, uint64_t usage, const Rect &bounds,
+                              int acquireFence, android_ycbcr *outYcbcr) const {
+    if (!outYcbcr) {
+        return BAD_VALUE;
+    }
+
+    // TODO(b/262279301): Change the return type of ::unlock to unique_fd instead of int so that
+    // ignoring the return value "just works".
+    auto unlock = [this](buffer_handle_t bufferHandle) {
+        int fence = this->unlock(bufferHandle);
+        if (fence != -1) {
+            ::close(fence);
+        }
+    };
+
+    std::vector<ui::PlaneLayout> planeLayouts;
+    status_t error = getPlaneLayouts(bufferHandle, &planeLayouts);
+    if (error != NO_ERROR) {
+        return error;
+    }
+
+    void *data = nullptr;
+    error = lock(bufferHandle, usage, bounds, acquireFence, &data, nullptr, nullptr);
+    if (error != NO_ERROR) {
+        return error;
+    }
+
+    android_ycbcr ycbcr;
+
+    ycbcr.y = nullptr;
+    ycbcr.cb = nullptr;
+    ycbcr.cr = nullptr;
+    ycbcr.ystride = 0;
+    ycbcr.cstride = 0;
+    ycbcr.chroma_step = 0;
+
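+    // Map each standard plane layout component onto the legacy android_ycbcr description,
+    // rejecting layouts that are not byte-aligned or whose chroma planes disagree.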
+    for (const auto &planeLayout : planeLayouts) {
+        for (const auto &planeLayoutComponent : planeLayout.components) {
+            if (!gralloc4::isStandardPlaneLayoutComponentType(planeLayoutComponent.type)) {
+                continue;
+            }
+
+            uint8_t *tmpData = static_cast<uint8_t *>(data) + planeLayout.offsetInBytes;
+
+            // Note that `offsetInBits` may not be a multiple of 8 for packed formats (e.g. P010)
+            // but we still want to point to the start of the first byte.
+            tmpData += (planeLayoutComponent.offsetInBits / 8);
+
+            uint64_t sampleIncrementInBytes;
+
+            auto type = static_cast<PlaneLayoutComponentType>(planeLayoutComponent.type.value);
+            switch (type) {
+                case PlaneLayoutComponentType::Y:
+                    if ((ycbcr.y != nullptr) || (planeLayout.sampleIncrementInBits % 8 != 0)) {
+                        unlock(bufferHandle);
+                        return BAD_VALUE;
+                    }
+                    ycbcr.y = tmpData;
+                    ycbcr.ystride = planeLayout.strideInBytes;
+                    break;
+
+                case PlaneLayoutComponentType::CB:
+                case PlaneLayoutComponentType::CR:
+                    if (planeLayout.sampleIncrementInBits % 8 != 0) {
+                        unlock(bufferHandle);
+                        return BAD_VALUE;
+                    }
+
+                    sampleIncrementInBytes = planeLayout.sampleIncrementInBits / 8;
+                    if ((sampleIncrementInBytes != 1) && (sampleIncrementInBytes != 2) &&
+                        (sampleIncrementInBytes != 4)) {
+                        unlock(bufferHandle);
+                        return BAD_VALUE;
+                    }
+
+                    if (ycbcr.cstride == 0 && ycbcr.chroma_step == 0) {
+                        ycbcr.cstride = planeLayout.strideInBytes;
+                        ycbcr.chroma_step = sampleIncrementInBytes;
+                    } else {
+                        if ((static_cast<int64_t>(ycbcr.cstride) != planeLayout.strideInBytes) ||
+                            (ycbcr.chroma_step != sampleIncrementInBytes)) {
+                            unlock(bufferHandle);
+                            return BAD_VALUE;
+                        }
+                    }
+
+                    if (type == PlaneLayoutComponentType::CB) {
+                        if (ycbcr.cb != nullptr) {
+                            unlock(bufferHandle);
+                            return BAD_VALUE;
+                        }
+                        ycbcr.cb = tmpData;
+                    } else {
+                        if (ycbcr.cr != nullptr) {
+                            unlock(bufferHandle);
+                            return BAD_VALUE;
+                        }
+                        ycbcr.cr = tmpData;
+                    }
+                    break;
+                default:
+                    break;
+            };
+        }
+    }
+
+    *outYcbcr = ycbcr;
+    return OK;
+}
+
+int Gralloc5Mapper::unlock(buffer_handle_t bufferHandle) const {
+    int fence = -1;
+    AIMapper_Error error = mMapper->v5.unlock(bufferHandle, &fence);
+    if (error != AIMAPPER_ERROR_NONE) {
+        ALOGW("unlock failed with error %d", error);
+    }
+    return fence;
+}
+
+status_t Gralloc5Mapper::isSupported(uint32_t width, uint32_t height, PixelFormat format,
+                                     uint32_t layerCount, uint64_t usage,
+                                     bool *outSupported) const {
+    auto descriptorInfo = makeDescriptor("", width, height, format, layerCount, usage);
+    if (!descriptorInfo) {
+        *outSupported = false;
+        return OK;
+    }
+    auto status = getInstance().allocator->isSupported(*descriptorInfo, outSupported);
+    if (!status.isOk()) {
+        ALOGW("IAllocator::isSupported error %d (%s)", status.getStatus(), status.getMessage());
+        *outSupported = false;
+    }
+    return OK;
+}
+
+status_t Gralloc5Mapper::getBufferId(buffer_handle_t bufferHandle, uint64_t *outBufferId) const {
+    auto value = getStandardMetadata<StandardMetadataType::BUFFER_ID>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outBufferId = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getName(buffer_handle_t bufferHandle, std::string *outName) const {
+    auto value = getStandardMetadata<StandardMetadataType::NAME>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outName = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getWidth(buffer_handle_t bufferHandle, uint64_t *outWidth) const {
+    auto value = getStandardMetadata<StandardMetadataType::WIDTH>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outWidth = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getHeight(buffer_handle_t bufferHandle, uint64_t *outHeight) const {
+    auto value = getStandardMetadata<StandardMetadataType::HEIGHT>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outHeight = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getLayerCount(buffer_handle_t bufferHandle,
+                                       uint64_t *outLayerCount) const {
+    auto value = getStandardMetadata<StandardMetadataType::LAYER_COUNT>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outLayerCount = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getPixelFormatRequested(buffer_handle_t bufferHandle,
+                                                 ui::PixelFormat *outPixelFormatRequested) const {
+    auto value = getStandardMetadata<StandardMetadataType::PIXEL_FORMAT_REQUESTED>(mMapper,
+                                                                                   bufferHandle);
+    if (value.has_value()) {
+        *outPixelFormatRequested = static_cast<ui::PixelFormat>(*value);
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getPixelFormatFourCC(buffer_handle_t bufferHandle,
+                                              uint32_t *outPixelFormatFourCC) const {
+    auto value =
+            getStandardMetadata<StandardMetadataType::PIXEL_FORMAT_FOURCC>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outPixelFormatFourCC = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getPixelFormatModifier(buffer_handle_t bufferHandle,
+                                                uint64_t *outPixelFormatModifier) const {
+    auto value =
+            getStandardMetadata<StandardMetadataType::PIXEL_FORMAT_MODIFIER>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outPixelFormatModifier = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getUsage(buffer_handle_t bufferHandle, uint64_t *outUsage) const {
+    auto value = getStandardMetadata<StandardMetadataType::USAGE>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outUsage = static_cast<uint64_t>(*value);
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getAllocationSize(buffer_handle_t bufferHandle,
+                                           uint64_t *outAllocationSize) const {
+    auto value = getStandardMetadata<StandardMetadataType::ALLOCATION_SIZE>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outAllocationSize = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getProtectedContent(buffer_handle_t bufferHandle,
+                                             uint64_t *outProtectedContent) const {
+    auto value =
+            getStandardMetadata<StandardMetadataType::PROTECTED_CONTENT>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outProtectedContent = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getCompression(
+        buffer_handle_t bufferHandle,
+        aidl::android::hardware::graphics::common::ExtendableType *outCompression) const {
+    auto value = getStandardMetadata<StandardMetadataType::COMPRESSION>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outCompression = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getCompression(buffer_handle_t bufferHandle,
+                                        ui::Compression *outCompression) const {
+    auto value = getStandardMetadata<StandardMetadataType::COMPRESSION>(mMapper, bufferHandle);
+    if (!value.has_value()) {
+        return UNKNOWN_TRANSACTION;
+    }
+    if (!gralloc4::isStandardCompression(*value)) {
+        return BAD_TYPE;
+    }
+    *outCompression = gralloc4::getStandardCompressionValue(*value);
+    return OK;
+}
+
+status_t Gralloc5Mapper::getInterlaced(
+        buffer_handle_t bufferHandle,
+        aidl::android::hardware::graphics::common::ExtendableType *outInterlaced) const {
+    auto value = getStandardMetadata<StandardMetadataType::INTERLACED>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outInterlaced = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getInterlaced(buffer_handle_t bufferHandle,
+                                       ui::Interlaced *outInterlaced) const {
+    if (!outInterlaced) {
+        return BAD_VALUE;
+    }
+    ExtendableType interlaced;
+    status_t error = getInterlaced(bufferHandle, &interlaced);
+    if (error) {
+        return error;
+    }
+    if (!gralloc4::isStandardInterlaced(interlaced)) {
+        return BAD_TYPE;
+    }
+    *outInterlaced = gralloc4::getStandardInterlacedValue(interlaced);
+    return NO_ERROR;
+}
+
+status_t Gralloc5Mapper::getChromaSiting(
+        buffer_handle_t bufferHandle,
+        aidl::android::hardware::graphics::common::ExtendableType *outChromaSiting) const {
+    auto value = getStandardMetadata<StandardMetadataType::CHROMA_SITING>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outChromaSiting = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getChromaSiting(buffer_handle_t bufferHandle,
+                                         ui::ChromaSiting *outChromaSiting) const {
+    if (!outChromaSiting) {
+        return BAD_VALUE;
+    }
+    ExtendableType chromaSiting;
+    status_t error = getChromaSiting(bufferHandle, &chromaSiting);
+    if (error) {
+        return error;
+    }
+    if (!gralloc4::isStandardChromaSiting(chromaSiting)) {
+        return BAD_TYPE;
+    }
+    *outChromaSiting = gralloc4::getStandardChromaSitingValue(chromaSiting);
+    return NO_ERROR;
+}
+
+status_t Gralloc5Mapper::getPlaneLayouts(buffer_handle_t bufferHandle,
+                                         std::vector<ui::PlaneLayout> *outPlaneLayouts) const {
+    auto value = getStandardMetadata<StandardMetadataType::PLANE_LAYOUTS>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outPlaneLayouts = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getDataspace(buffer_handle_t bufferHandle,
+                                      ui::Dataspace *outDataspace) const {
+    auto value = getStandardMetadata<StandardMetadataType::DATASPACE>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outDataspace = static_cast<ui::Dataspace>(*value);
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::setDataspace(buffer_handle_t bufferHandle, ui::Dataspace dataspace) const {
+    return setStandardMetadata<StandardMetadataType::DATASPACE>(mMapper, bufferHandle,
+                                                                static_cast<Dataspace>(dataspace));
+}
+
+status_t Gralloc5Mapper::getBlendMode(buffer_handle_t bufferHandle,
+                                      ui::BlendMode *outBlendMode) const {
+    auto value = getStandardMetadata<StandardMetadataType::BLEND_MODE>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outBlendMode = static_cast<ui::BlendMode>(*value);
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::getSmpte2086(buffer_handle_t bufferHandle,
+                                      std::optional<ui::Smpte2086> *outSmpte2086) const {
+    auto value = getStandardMetadata<StandardMetadataType::SMPTE2086>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outSmpte2086 = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::setSmpte2086(buffer_handle_t bufferHandle,
+                                      std::optional<ui::Smpte2086> smpte2086) const {
+    return setStandardMetadata<StandardMetadataType::SMPTE2086>(mMapper, bufferHandle, smpte2086);
+}
+
+status_t Gralloc5Mapper::getCta861_3(buffer_handle_t bufferHandle,
+                                     std::optional<ui::Cta861_3> *outCta861_3) const {
+    auto value = getStandardMetadata<StandardMetadataType::CTA861_3>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outCta861_3 = *value;
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::setCta861_3(buffer_handle_t bufferHandle,
+                                     std::optional<ui::Cta861_3> cta861_3) const {
+    return setStandardMetadata<StandardMetadataType::CTA861_3>(mMapper, bufferHandle, cta861_3);
+}
+
+status_t Gralloc5Mapper::getSmpte2094_40(
+        buffer_handle_t bufferHandle, std::optional<std::vector<uint8_t>> *outSmpte2094_40) const {
+    auto value = getStandardMetadata<StandardMetadataType::SMPTE2094_40>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outSmpte2094_40 = std::move(*value);
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::setSmpte2094_40(buffer_handle_t bufferHandle,
+                                         std::optional<std::vector<uint8_t>> smpte2094_40) const {
+    return setStandardMetadata<StandardMetadataType::SMPTE2094_40>(mMapper, bufferHandle,
+                                                                   smpte2094_40);
+}
+
+status_t Gralloc5Mapper::getSmpte2094_10(
+        buffer_handle_t bufferHandle, std::optional<std::vector<uint8_t>> *outSmpte2094_10) const {
+    auto value = getStandardMetadata<StandardMetadataType::SMPTE2094_10>(mMapper, bufferHandle);
+    if (value.has_value()) {
+        *outSmpte2094_10 = std::move(*value);
+        return OK;
+    }
+    return UNKNOWN_TRANSACTION;
+}
+
+status_t Gralloc5Mapper::setSmpte2094_10(buffer_handle_t bufferHandle,
+                                         std::optional<std::vector<uint8_t>> smpte2094_10) const {
+    return setStandardMetadata<StandardMetadataType::SMPTE2094_10>(mMapper, bufferHandle,
+                                                                   smpte2094_10);
+}
+
+} // namespace android
\ No newline at end of file
diff --git a/libs/ui/GraphicBufferAllocator.cpp b/libs/ui/GraphicBufferAllocator.cpp
index 3f958ba..c0abec2 100644
--- a/libs/ui/GraphicBufferAllocator.cpp
+++ b/libs/ui/GraphicBufferAllocator.cpp
@@ -34,6 +34,7 @@
 #include <ui/Gralloc2.h>
 #include <ui/Gralloc3.h>
 #include <ui/Gralloc4.h>
+#include <ui/Gralloc5.h>
 #include <ui/GraphicBufferMapper.h>
 
 namespace android {
@@ -48,23 +49,27 @@
     GraphicBufferAllocator::alloc_rec_t> GraphicBufferAllocator::sAllocList;
 
 GraphicBufferAllocator::GraphicBufferAllocator() : mMapper(GraphicBufferMapper::getInstance()) {
-    mAllocator = std::make_unique<const Gralloc4Allocator>(
-            reinterpret_cast<const Gralloc4Mapper&>(mMapper.getGrallocMapper()));
-    if (mAllocator->isLoaded()) {
-        return;
+    switch (mMapper.getMapperVersion()) {
+        case GraphicBufferMapper::GRALLOC_5:
+            mAllocator = std::make_unique<const Gralloc5Allocator>(
+                    reinterpret_cast<const Gralloc5Mapper&>(mMapper.getGrallocMapper()));
+            break;
+        case GraphicBufferMapper::GRALLOC_4:
+            mAllocator = std::make_unique<const Gralloc4Allocator>(
+                    reinterpret_cast<const Gralloc4Mapper&>(mMapper.getGrallocMapper()));
+            break;
+        case GraphicBufferMapper::GRALLOC_3:
+            mAllocator = std::make_unique<const Gralloc3Allocator>(
+                    reinterpret_cast<const Gralloc3Mapper&>(mMapper.getGrallocMapper()));
+            break;
+        case GraphicBufferMapper::GRALLOC_2:
+            mAllocator = std::make_unique<const Gralloc2Allocator>(
+                    reinterpret_cast<const Gralloc2Mapper&>(mMapper.getGrallocMapper()));
+            break;
     }
-    mAllocator = std::make_unique<const Gralloc3Allocator>(
-            reinterpret_cast<const Gralloc3Mapper&>(mMapper.getGrallocMapper()));
-    if (mAllocator->isLoaded()) {
-        return;
-    }
-    mAllocator = std::make_unique<const Gralloc2Allocator>(
-            reinterpret_cast<const Gralloc2Mapper&>(mMapper.getGrallocMapper()));
-    if (mAllocator->isLoaded()) {
-        return;
-    }
-
-    LOG_ALWAYS_FATAL("gralloc-allocator is missing");
+    LOG_ALWAYS_FATAL_IF(!mAllocator->isLoaded(),
+                        "Failed to load matching allocator for mapper version %d",
+                        mMapper.getMapperVersion());
 }
 
 GraphicBufferAllocator::~GraphicBufferAllocator() {}
diff --git a/libs/ui/GraphicBufferMapper.cpp b/libs/ui/GraphicBufferMapper.cpp
index f582423..7086e04 100644
--- a/libs/ui/GraphicBufferMapper.cpp
+++ b/libs/ui/GraphicBufferMapper.cpp
@@ -36,6 +36,7 @@
 #include <ui/Gralloc2.h>
 #include <ui/Gralloc3.h>
 #include <ui/Gralloc4.h>
+#include <ui/Gralloc5.h>
 #include <ui/GraphicBuffer.h>
 
 #include <system/graphics.h>
@@ -49,9 +50,15 @@
     Gralloc2Mapper::preload();
     Gralloc3Mapper::preload();
     Gralloc4Mapper::preload();
+    Gralloc5Mapper::preload();
 }
 
 GraphicBufferMapper::GraphicBufferMapper() {
+    mMapper = std::make_unique<const Gralloc5Mapper>();
+    if (mMapper->isLoaded()) {
+        mMapperVersion = Version::GRALLOC_5;
+        return;
+    }
     mMapper = std::make_unique<const Gralloc4Mapper>();
     if (mMapper->isLoaded()) {
         mMapperVersion = Version::GRALLOC_4;
@@ -82,15 +89,14 @@
     ALOGD("%s", s.c_str());
 }
 
-status_t GraphicBufferMapper::importBuffer(buffer_handle_t rawHandle,
-        uint32_t width, uint32_t height, uint32_t layerCount,
-        PixelFormat format, uint64_t usage, uint32_t stride,
-        buffer_handle_t* outHandle)
-{
+status_t GraphicBufferMapper::importBuffer(const native_handle_t* rawHandle, uint32_t width,
+                                           uint32_t height, uint32_t layerCount, PixelFormat format,
+                                           uint64_t usage, uint32_t stride,
+                                           buffer_handle_t* outHandle) {
     ATRACE_CALL();
 
     buffer_handle_t bufferHandle;
-    status_t error = mMapper->importBuffer(hardware::hidl_handle(rawHandle), &bufferHandle);
+    status_t error = mMapper->importBuffer(rawHandle, &bufferHandle);
     if (error != NO_ERROR) {
         ALOGW("importBuffer(%p) failed: %d", rawHandle, error);
         return error;
@@ -335,84 +341,5 @@
     return mMapper->setSmpte2094_10(bufferHandle, smpte2094_10);
 }
 
-status_t GraphicBufferMapper::getDefaultPixelFormatFourCC(uint32_t width, uint32_t height,
-                                                          PixelFormat format, uint32_t layerCount,
-                                                          uint64_t usage,
-                                                          uint32_t* outPixelFormatFourCC) {
-    return mMapper->getDefaultPixelFormatFourCC(width, height, format, layerCount, usage,
-                                                outPixelFormatFourCC);
-}
-
-status_t GraphicBufferMapper::getDefaultPixelFormatModifier(uint32_t width, uint32_t height,
-                                                            PixelFormat format, uint32_t layerCount,
-                                                            uint64_t usage,
-                                                            uint64_t* outPixelFormatModifier) {
-    return mMapper->getDefaultPixelFormatModifier(width, height, format, layerCount, usage,
-                                                  outPixelFormatModifier);
-}
-
-status_t GraphicBufferMapper::getDefaultAllocationSize(uint32_t width, uint32_t height,
-                                                       PixelFormat format, uint32_t layerCount,
-                                                       uint64_t usage,
-                                                       uint64_t* outAllocationSize) {
-    return mMapper->getDefaultAllocationSize(width, height, format, layerCount, usage,
-                                             outAllocationSize);
-}
-
-status_t GraphicBufferMapper::getDefaultProtectedContent(uint32_t width, uint32_t height,
-                                                         PixelFormat format, uint32_t layerCount,
-                                                         uint64_t usage,
-                                                         uint64_t* outProtectedContent) {
-    return mMapper->getDefaultProtectedContent(width, height, format, layerCount, usage,
-                                               outProtectedContent);
-}
-
-status_t GraphicBufferMapper::getDefaultCompression(
-        uint32_t width, uint32_t height, PixelFormat format, uint32_t layerCount, uint64_t usage,
-        aidl::android::hardware::graphics::common::ExtendableType* outCompression) {
-    return mMapper->getDefaultCompression(width, height, format, layerCount, usage, outCompression);
-}
-
-status_t GraphicBufferMapper::getDefaultCompression(uint32_t width, uint32_t height,
-                                                    PixelFormat format, uint32_t layerCount,
-                                                    uint64_t usage,
-                                                    ui::Compression* outCompression) {
-    return mMapper->getDefaultCompression(width, height, format, layerCount, usage, outCompression);
-}
-
-status_t GraphicBufferMapper::getDefaultInterlaced(
-        uint32_t width, uint32_t height, PixelFormat format, uint32_t layerCount, uint64_t usage,
-        aidl::android::hardware::graphics::common::ExtendableType* outInterlaced) {
-    return mMapper->getDefaultInterlaced(width, height, format, layerCount, usage, outInterlaced);
-}
-
-status_t GraphicBufferMapper::getDefaultInterlaced(uint32_t width, uint32_t height,
-                                                   PixelFormat format, uint32_t layerCount,
-                                                   uint64_t usage, ui::Interlaced* outInterlaced) {
-    return mMapper->getDefaultInterlaced(width, height, format, layerCount, usage, outInterlaced);
-}
-
-status_t GraphicBufferMapper::getDefaultChromaSiting(
-        uint32_t width, uint32_t height, PixelFormat format, uint32_t layerCount, uint64_t usage,
-        aidl::android::hardware::graphics::common::ExtendableType* outChromaSiting) {
-    return mMapper->getDefaultChromaSiting(width, height, format, layerCount, usage,
-                                           outChromaSiting);
-}
-
-status_t GraphicBufferMapper::getDefaultChromaSiting(uint32_t width, uint32_t height,
-                                                     PixelFormat format, uint32_t layerCount,
-                                                     uint64_t usage,
-                                                     ui::ChromaSiting* outChromaSiting) {
-    return mMapper->getDefaultChromaSiting(width, height, format, layerCount, usage,
-                                           outChromaSiting);
-}
-
-status_t GraphicBufferMapper::getDefaultPlaneLayouts(
-        uint32_t width, uint32_t height, PixelFormat format, uint32_t layerCount, uint64_t usage,
-        std::vector<ui::PlaneLayout>* outPlaneLayouts) {
-    return mMapper->getDefaultPlaneLayouts(width, height, format, layerCount, usage,
-                                           outPlaneLayouts);
-}
-
 // ---------------------------------------------------------------------------
 }; // namespace android
diff --git a/libs/ui/HdrCapabilities.cpp b/libs/ui/HdrCapabilities.cpp
deleted file mode 100644
index aec2fac..0000000
--- a/libs/ui/HdrCapabilities.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <ui/HdrCapabilities.h>
-
-namespace android {
-
-#if defined(__clang__)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wundefined-reinterpret-cast"
-#endif
-
-size_t HdrCapabilities::getFlattenedSize() const {
-    return  sizeof(mMaxLuminance) +
-            sizeof(mMaxAverageLuminance) +
-            sizeof(mMinLuminance) +
-            sizeof(int32_t) +
-            mSupportedHdrTypes.size() * sizeof(ui::Hdr);
-}
-
-status_t HdrCapabilities::flatten(void* buffer, size_t size) const {
-
-    if (size < getFlattenedSize()) {
-        return NO_MEMORY;
-    }
-
-    int32_t* const buf = static_cast<int32_t*>(buffer);
-    reinterpret_cast<float&>(buf[0]) = mMaxLuminance;
-    reinterpret_cast<float&>(buf[1]) = mMaxAverageLuminance;
-    reinterpret_cast<float&>(buf[2]) = mMinLuminance;
-    buf[3] = static_cast<int32_t>(mSupportedHdrTypes.size());
-    for (size_t i = 0, c = mSupportedHdrTypes.size(); i < c; ++i) {
-        buf[4 + i] = static_cast<int32_t>(mSupportedHdrTypes[i]);
-    }
-    return NO_ERROR;
-}
-
-status_t HdrCapabilities::unflatten(void const* buffer, size_t size) {
-
-    size_t minSize = sizeof(mMaxLuminance) +
-                     sizeof(mMaxAverageLuminance) +
-                     sizeof(mMinLuminance) +
-                     sizeof(int32_t);
-
-    if (size < minSize) {
-        return NO_MEMORY;
-    }
-
-    int32_t const * const buf = static_cast<int32_t const *>(buffer);
-    const size_t itemCount = size_t(buf[3]);
-
-    // check the buffer is large enough
-    if (size < minSize + itemCount * sizeof(int32_t)) {
-        return BAD_VALUE;
-    }
-
-    mMaxLuminance        = reinterpret_cast<float const&>(buf[0]);
-    mMaxAverageLuminance = reinterpret_cast<float const&>(buf[1]);
-    mMinLuminance        = reinterpret_cast<float const&>(buf[2]);
-    if (itemCount) {
-        mSupportedHdrTypes.resize(itemCount);
-        for (size_t i = 0; i < itemCount; ++i) {
-            mSupportedHdrTypes[i] = static_cast<ui::Hdr>(buf[4 + i]);
-        }
-    }
-    return NO_ERROR;
-}
-
-#if defined(__clang__)
-#pragma clang diagnostic pop
-#endif
-
-} // namespace android
diff --git a/libs/ui/PublicFormat.cpp b/libs/ui/PublicFormat.cpp
index 78e82da..c9663ed 100644
--- a/libs/ui/PublicFormat.cpp
+++ b/libs/ui/PublicFormat.cpp
@@ -14,14 +14,15 @@
  * limitations under the License.
  */
 
-#include <ui/GraphicTypes.h> // ui::Dataspace
+#include "aidl/android/hardware/graphics/common/Dataspace.h"
 #include <ui/PublicFormat.h>
 
+
 // ----------------------------------------------------------------------------
 namespace android {
 // ----------------------------------------------------------------------------
 
-using ui::Dataspace;
+using ::aidl::android::hardware::graphics::common::Dataspace;
 
 int mapPublicFormatToHalFormat(PublicFormat f) {
     switch (f) {
@@ -29,6 +30,7 @@
         case PublicFormat::DEPTH_POINT_CLOUD:
         case PublicFormat::DEPTH_JPEG:
         case PublicFormat::HEIC:
+        case PublicFormat::JPEG_R:
             return HAL_PIXEL_FORMAT_BLOB;
         case PublicFormat::DEPTH16:
             return HAL_PIXEL_FORMAT_Y16;
@@ -47,7 +49,7 @@
     Dataspace dataspace;
     switch (f) {
         case PublicFormat::JPEG:
-            dataspace = Dataspace::V0_JFIF;
+            dataspace = Dataspace::JFIF;
             break;
         case PublicFormat::DEPTH_POINT_CLOUD:
         case PublicFormat::DEPTH16:
@@ -64,7 +66,7 @@
         case PublicFormat::YUV_420_888:
         case PublicFormat::NV21:
         case PublicFormat::YV12:
-            dataspace = Dataspace::V0_JFIF;
+            dataspace = Dataspace::JFIF;
             break;
         case PublicFormat::DEPTH_JPEG:
             dataspace = Dataspace::DYNAMIC_DEPTH;
@@ -72,6 +74,9 @@
         case PublicFormat::HEIC:
             dataspace = Dataspace::HEIF;
             break;
+        case PublicFormat::JPEG_R:
+            dataspace = Dataspace::JPEG_R;
+            break;
         default:
             // Most formats map to UNKNOWN
             dataspace = Dataspace::UNKNOWN;
@@ -139,14 +144,16 @@
             switch (ds) {
                 case Dataspace::DEPTH:
                     return PublicFormat::DEPTH_POINT_CLOUD;
-                case Dataspace::V0_JFIF:
+                case Dataspace::JFIF:
                     return PublicFormat::JPEG;
                 case Dataspace::HEIF:
                     return PublicFormat::HEIC;
                 default:
                     if (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_DYNAMIC_DEPTH)) {
                         return PublicFormat::DEPTH_JPEG;
-                    } else {
+                    } else if (dataSpace == static_cast<android_dataspace>(Dataspace::JPEG_R)) {
+                        return PublicFormat::JPEG_R;
+                    } else {
                         // Assume otherwise-marked blobs are also JPEG
                         return PublicFormat::JPEG;
                     }
diff --git a/libs/ui/StaticDisplayInfo.cpp b/libs/ui/StaticDisplayInfo.cpp
deleted file mode 100644
index 03d15e4..0000000
--- a/libs/ui/StaticDisplayInfo.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <ui/StaticDisplayInfo.h>
-
-#include <cstdint>
-
-#include <ui/FlattenableHelpers.h>
-
-#define RETURN_IF_ERROR(op) \
-    if (const status_t status = (op); status != OK) return status;
-
-namespace android::ui {
-
-size_t StaticDisplayInfo::getFlattenedSize() const {
-    return FlattenableHelpers::getFlattenedSize(connectionType) +
-            FlattenableHelpers::getFlattenedSize(density) +
-            FlattenableHelpers::getFlattenedSize(secure) +
-            FlattenableHelpers::getFlattenedSize(deviceProductInfo) +
-            FlattenableHelpers::getFlattenedSize(installOrientation);
-}
-
-status_t StaticDisplayInfo::flatten(void* buffer, size_t size) const {
-    if (size < getFlattenedSize()) {
-        return NO_MEMORY;
-    }
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, connectionType));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, density));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, secure));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, deviceProductInfo));
-    RETURN_IF_ERROR(FlattenableHelpers::flatten(&buffer, &size, installOrientation));
-    return OK;
-}
-
-status_t StaticDisplayInfo::unflatten(void const* buffer, size_t size) {
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &connectionType));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &density));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &secure));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &deviceProductInfo));
-    RETURN_IF_ERROR(FlattenableHelpers::unflatten(&buffer, &size, &installOrientation));
-    return OK;
-}
-
-} // namespace android::ui
diff --git a/libs/ui/include/ui/ColorMode.h b/libs/ui/include/ui/ColorMode.h
new file mode 100644
index 0000000..a47eaed
--- /dev/null
+++ b/libs/ui/include/ui/ColorMode.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <vector>
+
+#include <ui/GraphicTypes.h>
+
+namespace android::ui {
+
+using ColorModes = std::vector<ColorMode>;
+
+inline bool isWideColorMode(ColorMode colorMode) {
+    switch (colorMode) {
+        case ColorMode::DISPLAY_P3:
+        case ColorMode::ADOBE_RGB:
+        case ColorMode::DCI_P3:
+        case ColorMode::BT2020:
+        case ColorMode::DISPLAY_BT2020:
+        case ColorMode::BT2100_PQ:
+        case ColorMode::BT2100_HLG:
+            return true;
+        case ColorMode::NATIVE:
+        case ColorMode::STANDARD_BT601_625:
+        case ColorMode::STANDARD_BT601_625_UNADJUSTED:
+        case ColorMode::STANDARD_BT601_525:
+        case ColorMode::STANDARD_BT601_525_UNADJUSTED:
+        case ColorMode::STANDARD_BT709:
+        case ColorMode::SRGB:
+            return false;
+    }
+}
+
+inline Dataspace pickDataspaceFor(ColorMode colorMode) {
+    switch (colorMode) {
+        case ColorMode::DISPLAY_P3:
+        case ColorMode::BT2100_PQ:
+        case ColorMode::BT2100_HLG:
+        case ColorMode::DISPLAY_BT2020:
+            return Dataspace::DISPLAY_P3;
+        default:
+            return Dataspace::V0_SRGB;
+    }
+}
+
+} // namespace android::ui
diff --git a/libs/ui/include/ui/DeviceProductInfo.h b/libs/ui/include/ui/DeviceProductInfo.h
index 807a5d9..4229cf1 100644
--- a/libs/ui/include/ui/DeviceProductInfo.h
+++ b/libs/ui/include/ui/DeviceProductInfo.h
@@ -24,8 +24,6 @@
 #include <variant>
 #include <vector>
 
-#include <utils/Flattenable.h>
-
 namespace android {
 
 // NUL-terminated plug and play ID.
@@ -34,7 +32,7 @@
 // Product-specific information about the display or the directly connected device on the
 // display chain. For example, if the display is transitively connected, this field may contain
 // product information about the intermediate device.
-struct DeviceProductInfo : LightFlattenable<DeviceProductInfo> {
+struct DeviceProductInfo {
     struct ModelYear {
         uint32_t year;
     };
@@ -63,13 +61,8 @@
     // address is unavailable.
     // For example, for HDMI connected device this will be the physical address.
     std::vector<uint8_t> relativeAddress;
-
-    bool isFixedSize() const { return false; }
-    size_t getFlattenedSize() const;
-    status_t flatten(void* buffer, size_t size) const;
-    status_t unflatten(void const* buffer, size_t size);
-
-    void dump(std::string& result) const;
 };
 
+std::string to_string(const DeviceProductInfo&);
+
 } // namespace android
diff --git a/libs/ui/include/ui/DisplayId.h b/libs/ui/include/ui/DisplayId.h
index 9120972..3a31fa0 100644
--- a/libs/ui/include/ui/DisplayId.h
+++ b/libs/ui/include/ui/DisplayId.h
@@ -17,9 +17,11 @@
 #pragma once
 
 #include <cstdint>
-#include <optional>
+#include <ostream>
 #include <string>
 
+#include <ftl/optional.h>
+
 namespace android {
 
 // ID of a physical or a virtual display. This class acts as a type safe wrapper around uint64_t.
@@ -66,9 +68,14 @@
     return std::to_string(displayId.value);
 }
 
+// For tests.
+inline std::ostream& operator<<(std::ostream& stream, DisplayId displayId) {
+    return stream << "DisplayId{" << displayId.value << '}';
+}
+
 // DisplayId of a physical display, such as the internal display or externally connected display.
 struct PhysicalDisplayId : DisplayId {
-    static constexpr std::optional<PhysicalDisplayId> tryCast(DisplayId id) {
+    static constexpr ftl::Optional<PhysicalDisplayId> tryCast(DisplayId id) {
         if (id.value & FLAG_VIRTUAL) {
             return std::nullopt;
         }
diff --git a/libs/ui/include/ui/DisplayMode.h b/libs/ui/include/ui/DisplayMode.h
index 56f68e7..65a8769 100644
--- a/libs/ui/include/ui/DisplayMode.h
+++ b/libs/ui/include/ui/DisplayMode.h
@@ -19,6 +19,7 @@
 #include <cstdint>
 #include <type_traits>
 
+#include <ui/GraphicTypes.h>
 #include <ui/Size.h>
 #include <utils/Flattenable.h>
 #include <utils/Timers.h>
@@ -29,22 +30,18 @@
 using DisplayModeId = int32_t;
 
 // Mode supported by physical display.
-struct DisplayMode : LightFlattenable<DisplayMode> {
+struct DisplayMode {
     DisplayModeId id;
     ui::Size resolution;
     float xDpi = 0;
     float yDpi = 0;
+    std::vector<ui::Hdr> supportedHdrTypes;
 
     float refreshRate = 0;
     nsecs_t appVsyncOffset = 0;
     nsecs_t sfVsyncOffset = 0;
     nsecs_t presentationDeadline = 0;
     int32_t group = -1;
-
-    bool isFixedSize() const { return false; }
-    size_t getFlattenedSize() const;
-    status_t flatten(void* buffer, size_t size) const;
-    status_t unflatten(const void* buffer, size_t size);
 };
 
 } // namespace android::ui
diff --git a/libs/ui/include/ui/DynamicDisplayInfo.h b/libs/ui/include/ui/DynamicDisplayInfo.h
index ce75a65..0b77754 100644
--- a/libs/ui/include/ui/DynamicDisplayInfo.h
+++ b/libs/ui/include/ui/DynamicDisplayInfo.h
@@ -24,18 +24,18 @@
 
 #include <ui/GraphicTypes.h>
 #include <ui/HdrCapabilities.h>
-#include <utils/Flattenable.h>
 
 namespace android::ui {
 
 // Information about a physical display which may change on hotplug reconnect.
-struct DynamicDisplayInfo : LightFlattenable<DynamicDisplayInfo> {
+struct DynamicDisplayInfo {
     std::vector<ui::DisplayMode> supportedDisplayModes;
 
     // This struct is going to be serialized over binder, so
     // we can't use size_t because it may have different width
     // in the client process.
     ui::DisplayModeId activeDisplayModeId;
+    float renderFrameRate;
 
     std::vector<ui::ColorMode> supportedColorModes;
     ui::ColorMode activeColorMode;
@@ -53,11 +53,6 @@
     ui::DisplayModeId preferredBootDisplayMode;
 
     std::optional<ui::DisplayMode> getActiveDisplayMode() const;
-
-    bool isFixedSize() const { return false; }
-    size_t getFlattenedSize() const;
-    status_t flatten(void* buffer, size_t size) const;
-    status_t unflatten(const void* buffer, size_t size);
 };
 
 } // namespace android::ui
diff --git a/libs/ui/include/ui/FenceResult.h b/libs/ui/include/ui/FenceResult.h
new file mode 100644
index 0000000..6d63fc9
--- /dev/null
+++ b/libs/ui/include/ui/FenceResult.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/expected.h>
+#include <utils/Errors.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+class Fence;
+
+using FenceResult = base::expected<sp<Fence>, status_t>;
+
+inline status_t fenceStatus(const FenceResult& fenceResult) {
+    return fenceResult.ok() ? NO_ERROR : fenceResult.error();
+}
+
+} // namespace android
diff --git a/libs/ui/include/ui/Gralloc.h b/libs/ui/include/ui/Gralloc.h
index 6101d4b..496ba57 100644
--- a/libs/ui/include/ui/Gralloc.h
+++ b/libs/ui/include/ui/Gralloc.h
@@ -39,14 +39,11 @@
         return "";
     }
 
-    virtual status_t createDescriptor(void* bufferDescriptorInfo,
-                                      void* outBufferDescriptor) const = 0;
-
     // Import a buffer that is from another HAL, another process, or is
     // cloned.
     //
     // The returned handle must be freed with freeBuffer.
-    virtual status_t importBuffer(const hardware::hidl_handle& rawHandle,
+    virtual status_t importBuffer(const native_handle_t* rawHandle,
                                   buffer_handle_t* outBufferHandle) const = 0;
 
     virtual void freeBuffer(buffer_handle_t bufferHandle) const = 0;
@@ -203,77 +200,6 @@
                                      std::optional<std::vector<uint8_t>> /*smpte2094_10*/) const {
         return INVALID_OPERATION;
     }
-    virtual status_t getDefaultPixelFormatFourCC(uint32_t /*width*/, uint32_t /*height*/,
-                                                 PixelFormat /*format*/, uint32_t /*layerCount*/,
-                                                 uint64_t /*usage*/,
-                                                 uint32_t* /*outPixelFormatFourCC*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultPixelFormatModifier(uint32_t /*width*/, uint32_t /*height*/,
-                                                   PixelFormat /*format*/, uint32_t /*layerCount*/,
-                                                   uint64_t /*usage*/,
-                                                   uint64_t* /*outPixelFormatModifier*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultAllocationSize(uint32_t /*width*/, uint32_t /*height*/,
-                                              PixelFormat /*format*/, uint32_t /*layerCount*/,
-                                              uint64_t /*usage*/,
-                                              uint64_t* /*outAllocationSize*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultProtectedContent(uint32_t /*width*/, uint32_t /*height*/,
-                                                PixelFormat /*format*/, uint32_t /*layerCount*/,
-                                                uint64_t /*usage*/,
-                                                uint64_t* /*outProtectedContent*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultCompression(
-            uint32_t /*width*/, uint32_t /*height*/, PixelFormat /*format*/,
-            uint32_t /*layerCount*/, uint64_t /*usage*/,
-            aidl::android::hardware::graphics::common::ExtendableType* /*outCompression*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultCompression(uint32_t /*width*/, uint32_t /*height*/,
-                                           PixelFormat /*format*/, uint32_t /*layerCount*/,
-                                           uint64_t /*usage*/,
-                                           ui::Compression* /*outCompression*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultInterlaced(
-            uint32_t /*width*/, uint32_t /*height*/, PixelFormat /*format*/,
-            uint32_t /*layerCount*/, uint64_t /*usage*/,
-            aidl::android::hardware::graphics::common::ExtendableType* /*outInterlaced*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultInterlaced(uint32_t /*width*/, uint32_t /*height*/,
-                                          PixelFormat /*format*/, uint32_t /*layerCount*/,
-                                          uint64_t /*usage*/,
-                                          ui::Interlaced* /*outInterlaced*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultChromaSiting(
-            uint32_t /*width*/, uint32_t /*height*/, PixelFormat /*format*/,
-            uint32_t /*layerCount*/, uint64_t /*usage*/,
-            aidl::android::hardware::graphics::common::ExtendableType* /*outChromaSiting*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultChromaSiting(uint32_t /*width*/, uint32_t /*height*/,
-                                            PixelFormat /*format*/, uint32_t /*layerCount*/,
-                                            uint64_t /*usage*/,
-                                            ui::ChromaSiting* /*outChromaSiting*/) const {
-        return INVALID_OPERATION;
-    }
-    virtual status_t getDefaultPlaneLayouts(
-            uint32_t /*width*/, uint32_t /*height*/, PixelFormat /*format*/,
-            uint32_t /*layerCount*/, uint64_t /*usage*/,
-            std::vector<ui::PlaneLayout>* /*outPlaneLayouts*/) const {
-        return INVALID_OPERATION;
-    }
-
-    virtual std::vector<android::hardware::graphics::mapper::V4_0::IMapper::MetadataTypeDescription>
-    listSupportedMetadataTypes() const {
-        return {};
-    }
 };
 
 // A wrapper to IAllocator
diff --git a/libs/ui/include/ui/Gralloc2.h b/libs/ui/include/ui/Gralloc2.h
index f570c42..a7b6f492 100644
--- a/libs/ui/include/ui/Gralloc2.h
+++ b/libs/ui/include/ui/Gralloc2.h
@@ -38,9 +38,9 @@
 
     bool isLoaded() const override;
 
-    status_t createDescriptor(void* bufferDescriptorInfo, void* outBufferDescriptor) const override;
+    status_t createDescriptor(void* bufferDescriptorInfo, void* outBufferDescriptor) const;
 
-    status_t importBuffer(const hardware::hidl_handle& rawHandle,
+    status_t importBuffer(const native_handle_t* rawHandle,
                           buffer_handle_t* outBufferHandle) const override;
 
     void freeBuffer(buffer_handle_t bufferHandle) const override;
diff --git a/libs/ui/include/ui/Gralloc3.h b/libs/ui/include/ui/Gralloc3.h
index 93a5077..7367549 100644
--- a/libs/ui/include/ui/Gralloc3.h
+++ b/libs/ui/include/ui/Gralloc3.h
@@ -37,9 +37,9 @@
 
     bool isLoaded() const override;
 
-    status_t createDescriptor(void* bufferDescriptorInfo, void* outBufferDescriptor) const override;
+    status_t createDescriptor(void* bufferDescriptorInfo, void* outBufferDescriptor) const;
 
-    status_t importBuffer(const hardware::hidl_handle& rawHandle,
+    status_t importBuffer(const native_handle_t* rawHandle,
                           buffer_handle_t* outBufferHandle) const override;
 
     void freeBuffer(buffer_handle_t bufferHandle) const override;
diff --git a/libs/ui/include/ui/Gralloc4.h b/libs/ui/include/ui/Gralloc4.h
index cf023c9..df43be8 100644
--- a/libs/ui/include/ui/Gralloc4.h
+++ b/libs/ui/include/ui/Gralloc4.h
@@ -42,9 +42,9 @@
     std::string dumpBuffer(buffer_handle_t bufferHandle, bool less = true) const override;
     std::string dumpBuffers(bool less = true) const;
 
-    status_t createDescriptor(void* bufferDescriptorInfo, void* outBufferDescriptor) const override;
+    status_t createDescriptor(void* bufferDescriptorInfo, void* outBufferDescriptor) const;
 
-    status_t importBuffer(const hardware::hidl_handle& rawHandle,
+    status_t importBuffer(const native_handle_t* rawHandle,
                           buffer_handle_t* outBufferHandle) const override;
 
     void freeBuffer(buffer_handle_t bufferHandle) const override;
@@ -120,42 +120,6 @@
                              std::optional<std::vector<uint8_t>>* outSmpte2094_10) const override;
     status_t setSmpte2094_10(buffer_handle_t bufferHandle,
                              std::optional<std::vector<uint8_t>> smpte2094_10) const override;
-    status_t getDefaultPixelFormatFourCC(uint32_t width, uint32_t height, PixelFormat format,
-                                         uint32_t layerCount, uint64_t usage,
-                                         uint32_t* outPixelFormatFourCC) const override;
-    status_t getDefaultPixelFormatModifier(uint32_t width, uint32_t height, PixelFormat format,
-                                           uint32_t layerCount, uint64_t usage,
-                                           uint64_t* outPixelFormatModifier) const override;
-    status_t getDefaultAllocationSize(uint32_t width, uint32_t height, PixelFormat format,
-                                      uint32_t layerCount, uint64_t usage,
-                                      uint64_t* outAllocationSize) const override;
-    status_t getDefaultProtectedContent(uint32_t width, uint32_t height, PixelFormat format,
-                                        uint32_t layerCount, uint64_t usage,
-                                        uint64_t* outProtectedContent) const override;
-    status_t getDefaultCompression(uint32_t width, uint32_t height, PixelFormat format,
-                                   uint32_t layerCount, uint64_t usage,
-                                   aidl::android::hardware::graphics::common::ExtendableType*
-                                           outCompression) const override;
-    status_t getDefaultCompression(uint32_t width, uint32_t height, PixelFormat format,
-                                   uint32_t layerCount, uint64_t usage,
-                                   ui::Compression* outCompression) const override;
-    status_t getDefaultInterlaced(uint32_t width, uint32_t height, PixelFormat format,
-                                  uint32_t layerCount, uint64_t usage,
-                                  aidl::android::hardware::graphics::common::ExtendableType*
-                                          outInterlaced) const override;
-    status_t getDefaultInterlaced(uint32_t width, uint32_t height, PixelFormat format,
-                                  uint32_t layerCount, uint64_t usage,
-                                  ui::Interlaced* outInterlaced) const override;
-    status_t getDefaultChromaSiting(uint32_t width, uint32_t height, PixelFormat format,
-                                    uint32_t layerCount, uint64_t usage,
-                                    aidl::android::hardware::graphics::common::ExtendableType*
-                                            outChromaSiting) const override;
-    status_t getDefaultChromaSiting(uint32_t width, uint32_t height, PixelFormat format,
-                                    uint32_t layerCount, uint64_t usage,
-                                    ui::ChromaSiting* outChromaSiting) const override;
-    status_t getDefaultPlaneLayouts(uint32_t width, uint32_t height, PixelFormat format,
-                                    uint32_t layerCount, uint64_t usage,
-                                    std::vector<ui::PlaneLayout>* outPlaneLayouts) const override;
 
     std::vector<android::hardware::graphics::mapper::V4_0::IMapper::MetadataTypeDescription>
     listSupportedMetadataTypes() const;
diff --git a/libs/ui/include/ui/Gralloc5.h b/libs/ui/include/ui/Gralloc5.h
new file mode 100644
index 0000000..44b97d1
--- /dev/null
+++ b/libs/ui/include/ui/Gralloc5.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/graphics/allocator/IAllocator.h>
+#include <android/hardware/graphics/mapper/IMapper.h>
+#include <ui/Gralloc.h>
+
+namespace android {
+
+class Gralloc5Mapper : public GrallocMapper {
+public:
+    static void preload();
+
+    Gralloc5Mapper();
+
+    [[nodiscard]] bool isLoaded() const override;
+
+    [[nodiscard]] std::string dumpBuffer(buffer_handle_t bufferHandle, bool less) const override;
+
+    [[nodiscard]] std::string dumpBuffers(bool less = true) const;
+
+    [[nodiscard]] status_t importBuffer(const native_handle_t *rawHandle,
+                                        buffer_handle_t *outBufferHandle) const override;
+
+    void freeBuffer(buffer_handle_t bufferHandle) const override;
+
+    [[nodiscard]] status_t validateBufferSize(buffer_handle_t bufferHandle, uint32_t width,
+                                              uint32_t height, PixelFormat format,
+                                              uint32_t layerCount, uint64_t usage,
+                                              uint32_t stride) const override;
+
+    void getTransportSize(buffer_handle_t bufferHandle, uint32_t *outNumFds,
+                          uint32_t *outNumInts) const override;
+
+    [[nodiscard]] status_t lock(buffer_handle_t bufferHandle, uint64_t usage, const Rect &bounds,
+                                int acquireFence, void **outData, int32_t *outBytesPerPixel,
+                                int32_t *outBytesPerStride) const override;
+
+    [[nodiscard]] status_t lock(buffer_handle_t bufferHandle, uint64_t usage, const Rect &bounds,
+                                int acquireFence, android_ycbcr *ycbcr) const override;
+
+    [[nodiscard]] int unlock(buffer_handle_t bufferHandle) const override;
+
+    [[nodiscard]] status_t isSupported(uint32_t width, uint32_t height, PixelFormat format,
+                                       uint32_t layerCount, uint64_t usage,
+                                       bool *outSupported) const override;
+
+    [[nodiscard]] status_t getBufferId(buffer_handle_t bufferHandle,
+                                       uint64_t *outBufferId) const override;
+
+    [[nodiscard]] status_t getName(buffer_handle_t bufferHandle,
+                                   std::string *outName) const override;
+
+    [[nodiscard]] status_t getWidth(buffer_handle_t bufferHandle,
+                                    uint64_t *outWidth) const override;
+
+    [[nodiscard]] status_t getHeight(buffer_handle_t bufferHandle,
+                                     uint64_t *outHeight) const override;
+
+    [[nodiscard]] status_t getLayerCount(buffer_handle_t bufferHandle,
+                                         uint64_t *outLayerCount) const override;
+
+    [[nodiscard]] status_t getPixelFormatRequested(
+            buffer_handle_t bufferHandle, ui::PixelFormat *outPixelFormatRequested) const override;
+
+    [[nodiscard]] status_t getPixelFormatFourCC(buffer_handle_t bufferHandle,
+                                                uint32_t *outPixelFormatFourCC) const override;
+
+    [[nodiscard]] status_t getPixelFormatModifier(buffer_handle_t bufferHandle,
+                                                  uint64_t *outPixelFormatModifier) const override;
+
+    [[nodiscard]] status_t getUsage(buffer_handle_t bufferHandle,
+                                    uint64_t *outUsage) const override;
+
+    [[nodiscard]] status_t getAllocationSize(buffer_handle_t bufferHandle,
+                                             uint64_t *outAllocationSize) const override;
+
+    [[nodiscard]] status_t getProtectedContent(buffer_handle_t bufferHandle,
+                                               uint64_t *outProtectedContent) const override;
+
+    [[nodiscard]] status_t getCompression(buffer_handle_t bufferHandle,
+                                          aidl::android::hardware::graphics::common::ExtendableType
+                                                  *outCompression) const override;
+
+    [[nodiscard]] status_t getCompression(buffer_handle_t bufferHandle,
+                                          ui::Compression *outCompression) const override;
+
+    [[nodiscard]] status_t getInterlaced(buffer_handle_t bufferHandle,
+                                         aidl::android::hardware::graphics::common::ExtendableType
+                                                 *outInterlaced) const override;
+
+    [[nodiscard]] status_t getInterlaced(buffer_handle_t bufferHandle,
+                                         ui::Interlaced *outInterlaced) const override;
+
+    [[nodiscard]] status_t getChromaSiting(buffer_handle_t bufferHandle,
+                                           aidl::android::hardware::graphics::common::ExtendableType
+                                                   *outChromaSiting) const override;
+
+    [[nodiscard]] status_t getChromaSiting(buffer_handle_t bufferHandle,
+                                           ui::ChromaSiting *outChromaSiting) const override;
+
+    [[nodiscard]] status_t getPlaneLayouts(
+            buffer_handle_t bufferHandle,
+            std::vector<ui::PlaneLayout> *outPlaneLayouts) const override;
+
+    [[nodiscard]] status_t getDataspace(buffer_handle_t bufferHandle,
+                                        ui::Dataspace *outDataspace) const override;
+
+    [[nodiscard]] status_t setDataspace(buffer_handle_t bufferHandle,
+                                        ui::Dataspace dataspace) const override;
+
+    [[nodiscard]] status_t getBlendMode(buffer_handle_t bufferHandle,
+                                        ui::BlendMode *outBlendMode) const override;
+
+    [[nodiscard]] status_t getSmpte2086(buffer_handle_t bufferHandle,
+                                        std::optional<ui::Smpte2086> *outSmpte2086) const override;
+
+    [[nodiscard]] status_t setSmpte2086(buffer_handle_t bufferHandle,
+                                        std::optional<ui::Smpte2086> smpte2086) const override;
+
+    [[nodiscard]] status_t getCta861_3(buffer_handle_t bufferHandle,
+                                       std::optional<ui::Cta861_3> *outCta861_3) const override;
+
+    [[nodiscard]] status_t setCta861_3(buffer_handle_t bufferHandle,
+                                       std::optional<ui::Cta861_3> cta861_3) const override;
+
+    [[nodiscard]] status_t getSmpte2094_40(
+            buffer_handle_t bufferHandle,
+            std::optional<std::vector<uint8_t>> *outSmpte2094_40) const override;
+
+    [[nodiscard]] status_t setSmpte2094_40(
+            buffer_handle_t bufferHandle,
+            std::optional<std::vector<uint8_t>> smpte2094_40) const override;
+
+    [[nodiscard]] status_t getSmpte2094_10(
+            buffer_handle_t bufferHandle,
+            std::optional<std::vector<uint8_t>> *outSmpte2094_10) const override;
+
+    [[nodiscard]] status_t setSmpte2094_10(
+            buffer_handle_t bufferHandle,
+            std::optional<std::vector<uint8_t>> smpte2094_10) const override;
+
+private:
+    void unlockBlocking(buffer_handle_t bufferHandle) const;
+
+    AIMapper *mMapper = nullptr;
+};
+
+class Gralloc5Allocator : public GrallocAllocator {
+public:
+    Gralloc5Allocator(const Gralloc5Mapper &mapper);
+
+    [[nodiscard]] bool isLoaded() const override;
+
+    [[nodiscard]] std::string dumpDebugInfo(bool less) const override;
+
+    [[nodiscard]] status_t allocate(std::string requestorName, uint32_t width, uint32_t height,
+                                    PixelFormat format, uint32_t layerCount, uint64_t usage,
+                                    uint32_t bufferCount, uint32_t *outStride,
+                                    buffer_handle_t *outBufferHandles,
+                                    bool importBuffers) const override;
+
+private:
+    const Gralloc5Mapper &mMapper;
+    std::shared_ptr<aidl::android::hardware::graphics::allocator::IAllocator> mAllocator;
+};
+
+} // namespace android
diff --git a/libs/ui/include/ui/GraphicBuffer.h b/libs/ui/include/ui/GraphicBuffer.h
index 57be686..dbe475b 100644
--- a/libs/ui/include/ui/GraphicBuffer.h
+++ b/libs/ui/include/ui/GraphicBuffer.h
@@ -270,7 +270,7 @@
 
     // Send a callback when a GraphicBuffer dies.
     //
-    // This is used for BufferStateLayer caching. GraphicBuffers are refcounted per process. When
+    // This is used for layer caching. GraphicBuffers are refcounted per process. When
     // A GraphicBuffer doesn't have any more sp<> in a process, it is destroyed. This causes
     // problems when trying to implicitcly cache across process boundaries. Ideally, both sides
     // of the cache would hold onto wp<> references. When an app dropped its sp<>, the GraphicBuffer
diff --git a/libs/ui/include/ui/GraphicBufferMapper.h b/libs/ui/include/ui/GraphicBufferMapper.h
index 37a2e1c..3a5167a 100644
--- a/libs/ui/include/ui/GraphicBufferMapper.h
+++ b/libs/ui/include/ui/GraphicBufferMapper.h
@@ -42,9 +42,10 @@
 {
 public:
     enum Version {
-        GRALLOC_2,
+        GRALLOC_2 = 2,
         GRALLOC_3,
         GRALLOC_4,
+        GRALLOC_5,
     };
     static void preloadHal();
     static inline GraphicBufferMapper& get() { return getInstance(); }
@@ -54,10 +55,9 @@
 
     // The imported outHandle must be freed with freeBuffer when no longer
     // needed. rawHandle is owned by the caller.
-    status_t importBuffer(buffer_handle_t rawHandle,
-            uint32_t width, uint32_t height, uint32_t layerCount,
-            PixelFormat format, uint64_t usage, uint32_t stride,
-            buffer_handle_t* outHandle);
+    status_t importBuffer(const native_handle_t* rawHandle, uint32_t width, uint32_t height,
+                          uint32_t layerCount, PixelFormat format, uint64_t usage, uint32_t stride,
+                          buffer_handle_t* outHandle);
 
     status_t importBufferNoValidate(const native_handle_t* rawHandle, buffer_handle_t* outHandle);
 
@@ -138,48 +138,6 @@
     status_t setSmpte2094_10(buffer_handle_t bufferHandle,
                              std::optional<std::vector<uint8_t>> smpte2094_10);
 
-    /**
-     * Gets the default metadata for a gralloc buffer allocated with the given parameters.
-     *
-     * These functions are supported by gralloc 4.0+.
-     */
-    status_t getDefaultPixelFormatFourCC(uint32_t width, uint32_t height, PixelFormat format,
-                                         uint32_t layerCount, uint64_t usage,
-                                         uint32_t* outPixelFormatFourCC);
-    status_t getDefaultPixelFormatModifier(uint32_t width, uint32_t height, PixelFormat format,
-                                           uint32_t layerCount, uint64_t usage,
-                                           uint64_t* outPixelFormatModifier);
-    status_t getDefaultAllocationSize(uint32_t width, uint32_t height, PixelFormat format,
-                                      uint32_t layerCount, uint64_t usage,
-                                      uint64_t* outAllocationSize);
-    status_t getDefaultProtectedContent(uint32_t width, uint32_t height, PixelFormat format,
-                                        uint32_t layerCount, uint64_t usage,
-                                        uint64_t* outProtectedContent);
-    status_t getDefaultCompression(
-            uint32_t width, uint32_t height, PixelFormat format, uint32_t layerCount,
-            uint64_t usage,
-            aidl::android::hardware::graphics::common::ExtendableType* outCompression);
-    status_t getDefaultCompression(uint32_t width, uint32_t height, PixelFormat format,
-                                   uint32_t layerCount, uint64_t usage,
-                                   ui::Compression* outCompression);
-    status_t getDefaultInterlaced(
-            uint32_t width, uint32_t height, PixelFormat format, uint32_t layerCount,
-            uint64_t usage,
-            aidl::android::hardware::graphics::common::ExtendableType* outInterlaced);
-    status_t getDefaultInterlaced(uint32_t width, uint32_t height, PixelFormat format,
-                                  uint32_t layerCount, uint64_t usage,
-                                  ui::Interlaced* outInterlaced);
-    status_t getDefaultChromaSiting(
-            uint32_t width, uint32_t height, PixelFormat format, uint32_t layerCount,
-            uint64_t usage,
-            aidl::android::hardware::graphics::common::ExtendableType* outChromaSiting);
-    status_t getDefaultChromaSiting(uint32_t width, uint32_t height, PixelFormat format,
-                                    uint32_t layerCount, uint64_t usage,
-                                    ui::ChromaSiting* outChromaSiting);
-    status_t getDefaultPlaneLayouts(uint32_t width, uint32_t height, PixelFormat format,
-                                    uint32_t layerCount, uint64_t usage,
-                                    std::vector<ui::PlaneLayout>* outPlaneLayouts);
-
     const GrallocMapper& getGrallocMapper() const {
         return reinterpret_cast<const GrallocMapper&>(*mMapper);
     }
diff --git a/libs/ui/include/ui/GraphicTypes.h b/libs/ui/include/ui/GraphicTypes.h
index 8661c36..1775d39 100644
--- a/libs/ui/include/ui/GraphicTypes.h
+++ b/libs/ui/include/ui/GraphicTypes.h
@@ -20,6 +20,7 @@
 #include <aidl/android/hardware/graphics/common/ChromaSiting.h>
 #include <aidl/android/hardware/graphics/common/Compression.h>
 #include <aidl/android/hardware/graphics/common/Cta861_3.h>
+#include <aidl/android/hardware/graphics/common/Hdr.h>
 #include <aidl/android/hardware/graphics/common/Interlaced.h>
 #include <aidl/android/hardware/graphics/common/PlaneLayout.h>
 #include <aidl/android/hardware/graphics/common/Smpte2086.h>
@@ -42,7 +43,6 @@
 using android::hardware::graphics::common::V1_1::RenderIntent;
 using android::hardware::graphics::common::V1_2::ColorMode;
 using android::hardware::graphics::common::V1_2::Dataspace;
-using android::hardware::graphics::common::V1_2::Hdr;
 using android::hardware::graphics::common::V1_2::PixelFormat;
 
 /**
@@ -50,6 +50,7 @@
  */
 using aidl::android::hardware::graphics::common::BlendMode;
 using aidl::android::hardware::graphics::common::Cta861_3;
+using aidl::android::hardware::graphics::common::Hdr;
 using aidl::android::hardware::graphics::common::PlaneLayout;
 using aidl::android::hardware::graphics::common::Smpte2086;
 
diff --git a/libs/ui/include/ui/HdrCapabilities.h b/libs/ui/include/ui/HdrCapabilities.h
index 813adde..ae54223 100644
--- a/libs/ui/include/ui/HdrCapabilities.h
+++ b/libs/ui/include/ui/HdrCapabilities.h
@@ -22,12 +22,10 @@
 #include <vector>
 
 #include <ui/GraphicTypes.h>
-#include <utils/Flattenable.h>
 
 namespace android {
 
-class HdrCapabilities : public LightFlattenable<HdrCapabilities>
-{
+class HdrCapabilities {
 public:
     HdrCapabilities(const std::vector<ui::Hdr>& types,
             float maxLuminance, float maxAverageLuminance, float minLuminance)
@@ -49,12 +47,6 @@
     float getDesiredMaxAverageLuminance() const { return mMaxAverageLuminance; }
     float getDesiredMinLuminance() const { return mMinLuminance; }
 
-    // Flattenable protocol
-    bool isFixedSize() const { return false; }
-    size_t getFlattenedSize() const;
-    status_t flatten(void* buffer, size_t size) const;
-    status_t unflatten(void const* buffer, size_t size);
-
 private:
     std::vector<ui::Hdr> mSupportedHdrTypes;
     float mMaxLuminance;
diff --git a/libs/ui/include/ui/PublicFormat.h b/libs/ui/include/ui/PublicFormat.h
index aa58805..2248cca 100644
--- a/libs/ui/include/ui/PublicFormat.h
+++ b/libs/ui/include/ui/PublicFormat.h
@@ -57,6 +57,7 @@
     YCBCR_P010 = 0x36,
     DEPTH16 = 0x44363159,
     DEPTH_JPEG = 0x69656963,
+    JPEG_R = 0x1005,
     HEIC = 0x48454946,
 };
 
diff --git a/libs/ui/include/ui/Rotation.h b/libs/ui/include/ui/Rotation.h
index 83d431d..c1d60f4 100644
--- a/libs/ui/include/ui/Rotation.h
+++ b/libs/ui/include/ui/Rotation.h
@@ -20,7 +20,14 @@
 
 namespace android::ui {
 
-enum class Rotation { Rotation0 = 0, Rotation90 = 1, Rotation180 = 2, Rotation270 = 3 };
+enum class Rotation {
+    Rotation0 = 0,
+    Rotation90 = 1,
+    Rotation180 = 2,
+    Rotation270 = 3,
+
+    ftl_last = Rotation270
+};
 
 // Equivalent to Surface.java constants.
 constexpr auto ROTATION_0 = Rotation::Rotation0;
diff --git a/libs/ui/include/ui/StaticDisplayInfo.h b/libs/ui/include/ui/StaticDisplayInfo.h
index cc7c869..83da821 100644
--- a/libs/ui/include/ui/StaticDisplayInfo.h
+++ b/libs/ui/include/ui/StaticDisplayInfo.h
@@ -20,24 +20,18 @@
 
 #include <ui/DeviceProductInfo.h>
 #include <ui/Rotation.h>
-#include <utils/Flattenable.h>
 
 namespace android::ui {
 
-enum class DisplayConnectionType { Internal, External };
+enum class DisplayConnectionType { Internal, External, ftl_last = External };
 
 // Immutable information about physical display.
-struct StaticDisplayInfo : LightFlattenable<StaticDisplayInfo> {
+struct StaticDisplayInfo {
     DisplayConnectionType connectionType = DisplayConnectionType::Internal;
     float density = 0.f;
     bool secure = false;
     std::optional<DeviceProductInfo> deviceProductInfo;
     Rotation installOrientation = ROTATION_0;
-
-    bool isFixedSize() const { return false; }
-    size_t getFlattenedSize() const;
-    status_t flatten(void* buffer, size_t size) const;
-    status_t unflatten(void const* buffer, size_t size);
 };
 
 } // namespace android::ui
diff --git a/libs/ultrahdr/Android.bp b/libs/ultrahdr/Android.bp
new file mode 100644
index 0000000..e3f709b
--- /dev/null
+++ b/libs/ultrahdr/Android.bp
@@ -0,0 +1,83 @@
+// Copyright 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_native_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_native_license"],
+}
+
+cc_library {
+    name: "libultrahdr",
+    host_supported: true,
+    vendor_available: true,
+    export_include_dirs: ["include"],
+    local_include_dirs: ["include"],
+
+    srcs: [
+        "icc.cpp",
+        "jpegr.cpp",
+        "gainmapmath.cpp",
+        "jpegrutils.cpp",
+        "multipictureformat.cpp",
+    ],
+
+    shared_libs: [
+        "libimage_io",
+        "libjpeg",
+        "libjpegencoder",
+        "libjpegdecoder",
+        "liblog",
+        "libutils",
+    ],
+}
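+
+// Usage note (illustrative, not prescriptive): clients typically pick this
+// library up by adding "libultrahdr" to the shared_libs (or static_libs) of
+// their own module; the libjpegencoder/libjpegdecoder helpers below can be
+// depended on the same way when direct JPEG access is needed.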
+
+cc_library {
+    name: "libjpegencoder",
+    host_supported: true,
+    vendor_available: true,
+
+    shared_libs: [
+        "libjpeg",
+        "liblog",
+        "libutils",
+    ],
+
+    export_include_dirs: ["include"],
+
+    srcs: [
+        "jpegencoderhelper.cpp",
+    ],
+}
+
+cc_library {
+    name: "libjpegdecoder",
+    host_supported: true,
+    vendor_available: true,
+
+    shared_libs: [
+        "libjpeg",
+        "liblog",
+        "libutils",
+    ],
+
+    export_include_dirs: ["include"],
+
+    srcs: [
+        "jpegdecoderhelper.cpp",
+    ],
+}
diff --git a/libs/ultrahdr/OWNERS b/libs/ultrahdr/OWNERS
new file mode 100644
index 0000000..6ace354
--- /dev/null
+++ b/libs/ultrahdr/OWNERS
@@ -0,0 +1,3 @@
+arifdikici@google.com
+dichenzhang@google.com
+kyslov@google.com
\ No newline at end of file
diff --git a/libs/ultrahdr/fuzzer/Android.bp b/libs/ultrahdr/fuzzer/Android.bp
new file mode 100644
index 0000000..6c0a2f5
--- /dev/null
+++ b/libs/ultrahdr/fuzzer/Android.bp
@@ -0,0 +1,69 @@
+// Copyright 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_native_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_native_license"],
+}
+
+cc_defaults {
+    name: "ultrahdr_fuzzer_defaults",
+    host_supported: true,
+    shared_libs: [
+        "libimage_io",
+        "libjpeg",
+    ],
+    static_libs: [
+        "libjpegdecoder",
+        "libjpegencoder",
+        "libultrahdr",
+        "libutils",
+        "liblog",
+    ],
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        description: "The fuzzers target the APIs of jpeg hdr",
+        service_privilege: "constrained",
+        users: "multi_user",
+        fuzzed_code_usage: "future_version",
+        vector: "local_no_privileges_required",
+    },
+}
+
+cc_fuzz {
+    name: "ultrahdr_enc_fuzzer",
+    defaults: ["ultrahdr_fuzzer_defaults"],
+    srcs: [
+        "ultrahdr_enc_fuzzer.cpp",
+    ],
+}
+
+cc_fuzz {
+    name: "ultrahdr_dec_fuzzer",
+    defaults: ["ultrahdr_fuzzer_defaults"],
+    srcs: [
+        "ultrahdr_dec_fuzzer.cpp",
+    ],
+}
diff --git a/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp b/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp
new file mode 100644
index 0000000..ad1d57a
--- /dev/null
+++ b/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// System include files
+#include <fuzzer/FuzzedDataProvider.h>
+#include <iostream>
+#include <vector>
+
+// User include files
+#include "ultrahdr/jpegr.h"
+
+using namespace android::ultrahdr;
+
+// Output formats for image data, sync with ultrahdr.h
+const int kOfMin = ULTRAHDR_OUTPUT_UNSPECIFIED + 1;
+const int kOfMax = ULTRAHDR_OUTPUT_MAX;
+
+class UltraHdrDecFuzzer {
+public:
+    UltraHdrDecFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+    void process();
+
+private:
+    FuzzedDataProvider mFdp;
+};
+
+void UltraHdrDecFuzzer::process() {
+    // hdr_of
+    auto of = static_cast<ultrahdr_output_format>(mFdp.ConsumeIntegralInRange<int>(kOfMin, kOfMax));
+    auto buffer = mFdp.ConsumeRemainingBytes<uint8_t>();
+    jpegr_compressed_struct jpegImgR{buffer.data(), (int)buffer.size(), (int)buffer.size(),
+                                     ULTRAHDR_COLORGAMUT_UNSPECIFIED};
+
+    std::vector<uint8_t> iccData(0);
+    std::vector<uint8_t> exifData(0);
+    jpegr_info_struct info{0, 0, &iccData, &exifData};
+    JpegR jpegHdr;
+    (void)jpegHdr.getJPEGRInfo(&jpegImgR, &info);
+//#define DUMP_PARAM
+#ifdef DUMP_PARAM
+    std::cout << "input buffer size " << jpegImgR.length << std::endl;
+    std::cout << "image dimensions " << info.width << " x " << info.width << std::endl;
+#endif
+    size_t outSize = info.width * info.height * ((of == ULTRAHDR_OUTPUT_SDR) ? 4 : 8);
+    jpegr_uncompressed_struct decodedJpegR;
+    auto decodedRaw = std::make_unique<uint8_t[]>(outSize);
+    decodedJpegR.data = decodedRaw.get();
+    ultrahdr_metadata_struct metadata;
+    jpegr_uncompressed_struct decodedGainMap{};
+    (void)jpegHdr.decodeJPEGR(&jpegImgR, &decodedJpegR,
+                              mFdp.ConsumeFloatingPointInRange<float>(1.0, FLT_MAX), nullptr, of,
+                              &decodedGainMap, &metadata);
+    if (decodedGainMap.data) free(decodedGainMap.data);
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    UltraHdrDecFuzzer fuzzHandle(data, size);
+    fuzzHandle.process();
+    return 0;
+}
diff --git a/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp b/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp
new file mode 100644
index 0000000..bbe58e0
--- /dev/null
+++ b/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// System include files
+#include <fuzzer/FuzzedDataProvider.h>
+#include <algorithm>
+#include <iostream>
+#include <random>
+#include <vector>
+
+// User include files
+#include "ultrahdr/gainmapmath.h"
+#include "ultrahdr/jpegencoderhelper.h"
+#include "utils/Log.h"
+
+using namespace android::ultrahdr;
+
+// constants
+const int kMinWidth = 8;
+const int kMaxWidth = 7680;
+
+const int kMinHeight = 8;
+const int kMaxHeight = 4320;
+
+const int kScaleFactor = 4;
+
+const int kJpegBlock = 16;
+
+// Color gamuts for image data, sync with ultrahdr.h
+const int kCgMin = ULTRAHDR_COLORGAMUT_UNSPECIFIED + 1;
+const int kCgMax = ULTRAHDR_COLORGAMUT_MAX;
+
+// Transfer functions for image data, sync with ultrahdr.h
+const int kTfMin = ULTRAHDR_TF_UNSPECIFIED + 1;
+const int kTfMax = ULTRAHDR_TF_PQ;
+
+// Output formats for image data, sync with ultrahdr.h
+const int kOfMin = ULTRAHDR_OUTPUT_UNSPECIFIED + 1;
+const int kOfMax = ULTRAHDR_OUTPUT_MAX;
+
+// quality factor
+const int kQfMin = 0;
+const int kQfMax = 100;
+
+class UltraHdrEncFuzzer {
+public:
+    UltraHdrEncFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+    void process();
+    void fillP010Buffer(uint16_t* data, int width, int height, int stride);
+    void fill420Buffer(uint8_t* data, int size);
+
+private:
+    FuzzedDataProvider mFdp;
+};
+
+void UltraHdrEncFuzzer::fillP010Buffer(uint16_t* data, int width, int height, int stride) {
+    uint16_t* tmp = data;
+    std::vector<uint16_t> buffer(16);
+    for (int i = 0; i < buffer.size(); i++) {
+        buffer[i] = mFdp.ConsumeIntegralInRange<int>(0, (1 << 10) - 1);
+    }
+    for (int j = 0; j < height; j++) {
+        for (int i = 0; i < width; i += buffer.size()) {
+            memcpy(tmp + i, buffer.data(),
+                   std::min((int)buffer.size(), (width - i)) * sizeof(uint16_t));
+            std::shuffle(buffer.begin(), buffer.end(),
+                         std::default_random_engine(std::random_device{}()));
+        }
+        tmp += stride;
+    }
+}
+
+void UltraHdrEncFuzzer::fill420Buffer(uint8_t* data, int size) {
+    std::vector<uint8_t> buffer(16);
+    mFdp.ConsumeData(buffer.data(), buffer.size());
+    for (int i = 0; i < size; i += buffer.size()) {
+        memcpy(data + i, buffer.data(), std::min((int)buffer.size(), (size - i)));
+        std::shuffle(buffer.begin(), buffer.end(),
+                     std::default_random_engine(std::random_device{}()));
+    }
+}
+
+void UltraHdrEncFuzzer::process() {
+    while (mFdp.remaining_bytes()) {
+        struct jpegr_uncompressed_struct p010Img {};
+        struct jpegr_uncompressed_struct yuv420Img {};
+        struct jpegr_uncompressed_struct grayImg {};
+        struct jpegr_compressed_struct jpegImgR {};
+        struct jpegr_compressed_struct jpegImg {};
+        struct jpegr_compressed_struct jpegGainMap {};
+
+        // which encode api to select
+        int muxSwitch = mFdp.ConsumeIntegralInRange<int>(0, 4);
+
+        // quality factor
+        int quality = mFdp.ConsumeIntegralInRange<int>(kQfMin, kQfMax);
+
+        // hdr_tf
+        auto tf = static_cast<ultrahdr_transfer_function>(
+                mFdp.ConsumeIntegralInRange<int>(kTfMin, kTfMax));
+
+        // p010 Cg
+        auto p010Cg =
+                static_cast<ultrahdr_color_gamut>(mFdp.ConsumeIntegralInRange<int>(kCgMin, kCgMax));
+
+        // 420 Cg
+        auto yuv420Cg =
+                static_cast<ultrahdr_color_gamut>(mFdp.ConsumeIntegralInRange<int>(kCgMin, kCgMax));
+
+        // hdr_of
+        auto of = static_cast<ultrahdr_output_format>(
+                mFdp.ConsumeIntegralInRange<int>(kOfMin, kOfMax));
+
+        int width = mFdp.ConsumeIntegralInRange<int>(kMinWidth, kMaxWidth);
+        width = (width >> 1) << 1;
+
+        int height = mFdp.ConsumeIntegralInRange<int>(kMinHeight, kMaxHeight);
+        height = (height >> 1) << 1;
+
+        std::unique_ptr<uint16_t[]> bufferY = nullptr;
+        std::unique_ptr<uint16_t[]> bufferUV = nullptr;
+        std::unique_ptr<uint8_t[]> yuv420ImgRaw = nullptr;
+        std::unique_ptr<uint8_t[]> grayImgRaw = nullptr;
+        if (muxSwitch != 4) {
+            // init p010 image
+            bool isUVContiguous = mFdp.ConsumeBool();
+            bool hasYStride = mFdp.ConsumeBool();
+            int yStride = hasYStride ? mFdp.ConsumeIntegralInRange<int>(width, width + 128) : width;
+            p010Img.width = width;
+            p010Img.height = height;
+            p010Img.colorGamut = p010Cg;
+            p010Img.luma_stride = hasYStride ? yStride : 0;
+            int bppP010 = 2;
+            if (isUVContiguous) {
+                size_t p010Size = yStride * height * 3 / 2;
+                bufferY = std::make_unique<uint16_t[]>(p010Size);
+                p010Img.data = bufferY.get();
+                p010Img.chroma_data = nullptr;
+                p010Img.chroma_stride = 0;
+                fillP010Buffer(bufferY.get(), width, height, yStride);
+                fillP010Buffer(bufferY.get() + yStride * height, width, height / 2, yStride);
+            } else {
+                int uvStride = mFdp.ConsumeIntegralInRange<int>(width, width + 128);
+                size_t p010YSize = yStride * height;
+                bufferY = std::make_unique<uint16_t[]>(p010YSize);
+                p010Img.data = bufferY.get();
+                fillP010Buffer(bufferY.get(), width, height, yStride);
+                size_t p010UVSize = uvStride * p010Img.height / 2;
+                bufferUV = std::make_unique<uint16_t[]>(p010UVSize);
+                p010Img.chroma_data = bufferUV.get();
+                p010Img.chroma_stride = uvStride;
+                fillP010Buffer(bufferUV.get(), width, height / 2, uvStride);
+            }
+        } else {
+            int map_width = width / kScaleFactor;
+            int map_height = height / kScaleFactor;
+            map_width = static_cast<size_t>(floor((map_width + kJpegBlock - 1) / kJpegBlock)) *
+                    kJpegBlock;
+            map_height = ((map_height + 1) >> 1) << 1;
+            // init 400 image
+            grayImg.width = map_width;
+            grayImg.height = map_height;
+            grayImg.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+
+            const size_t graySize = map_width * map_height;
+            grayImgRaw = std::make_unique<uint8_t[]>(graySize);
+            grayImg.data = grayImgRaw.get();
+            fill420Buffer(grayImgRaw.get(), graySize);
+            grayImg.chroma_data = nullptr;
+            grayImg.luma_stride = 0;
+            grayImg.chroma_stride = 0;
+        }
+
+        if (muxSwitch > 0) {
+            // init 420 image
+            yuv420Img.width = width;
+            yuv420Img.height = height;
+            yuv420Img.colorGamut = yuv420Cg;
+
+            const size_t yuv420Size = (yuv420Img.width * yuv420Img.height * 3) / 2;
+            yuv420ImgRaw = std::make_unique<uint8_t[]>(yuv420Size);
+            yuv420Img.data = yuv420ImgRaw.get();
+            fill420Buffer(yuv420ImgRaw.get(), yuv420Size);
+            yuv420Img.chroma_data = nullptr;
+            yuv420Img.luma_stride = 0;
+            yuv420Img.chroma_stride = 0;
+        }
+
+        // dest
+        // Allocate 2 * the p010 size: the input data is random, so DCT compression might not
+        // shrink it as expected.
+        jpegImgR.maxLength = std::max(8 * 1024 /* min size 8kb */, width * height * 3 * 2);
+        auto jpegImgRaw = std::make_unique<uint8_t[]>(jpegImgR.maxLength);
+        jpegImgR.data = jpegImgRaw.get();
+
+//#define DUMP_PARAM
+#ifdef DUMP_PARAM
+        std::cout << "Api Select " << muxSwitch << std::endl;
+        std::cout << "image dimensions " << width << " x " << height << std::endl;
+        std::cout << "p010 color gamut " << p010Img.colorGamut << std::endl;
+        std::cout << "p010 luma stride " << p010Img.luma_stride << std::endl;
+        std::cout << "p010 chroma stride " << p010Img.chroma_stride << std::endl;
+        std::cout << "420 color gamut " << yuv420Img.colorGamut << std::endl;
+        std::cout << "quality factor " << quality << std::endl;
+#endif
+
+        JpegR jpegHdr;
+        android::status_t status = android::UNKNOWN_ERROR;
+        if (muxSwitch == 0) { // api 0
+            jpegImgR.length = 0;
+            status = jpegHdr.encodeJPEGR(&p010Img, tf, &jpegImgR, quality, nullptr);
+        } else if (muxSwitch == 1) { // api 1
+            jpegImgR.length = 0;
+            status = jpegHdr.encodeJPEGR(&p010Img, &yuv420Img, tf, &jpegImgR, quality, nullptr);
+        } else {
+            // compressed img
+            JpegEncoderHelper encoder;
+            if (encoder.compressImage(yuv420Img.data, yuv420Img.width, yuv420Img.height, quality,
+                                      nullptr, 0)) {
+                jpegImg.length = encoder.getCompressedImageSize();
+                jpegImg.maxLength = jpegImg.length;
+                jpegImg.data = encoder.getCompressedImagePtr();
+                jpegImg.colorGamut = yuv420Cg;
+
+                if (muxSwitch == 2) { // api 2
+                    jpegImgR.length = 0;
+                    status = jpegHdr.encodeJPEGR(&p010Img, &yuv420Img, &jpegImg, tf, &jpegImgR);
+                } else if (muxSwitch == 3) { // api 3
+                    jpegImgR.length = 0;
+                    status = jpegHdr.encodeJPEGR(&p010Img, &jpegImg, tf, &jpegImgR);
+                } else if (muxSwitch == 4) { // api 4
+                    jpegImgR.length = 0;
+                    JpegEncoderHelper gainMapEncoder;
+                    if (gainMapEncoder.compressImage(grayImg.data, grayImg.width, grayImg.height,
+                                                     quality, nullptr, 0, true)) {
+                        jpegGainMap.length = gainMapEncoder.getCompressedImageSize();
+                        jpegGainMap.maxLength = jpegGainMap.length;
+                        jpegGainMap.data = gainMapEncoder.getCompressedImagePtr();
+                        jpegGainMap.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+                        ultrahdr_metadata_struct metadata;
+                        metadata.version = "1.0";
+                        if (tf == ULTRAHDR_TF_HLG) {
+                            metadata.maxContentBoost = kHlgMaxNits / kSdrWhiteNits;
+                        } else if (tf == ULTRAHDR_TF_PQ) {
+                            metadata.maxContentBoost = kPqMaxNits / kSdrWhiteNits;
+                        } else {
+                            metadata.maxContentBoost = 1.0f;
+                        }
+                        metadata.minContentBoost = 1.0f;
+                        metadata.gamma = 1.0f;
+                        metadata.offsetSdr = 0.0f;
+                        metadata.offsetHdr = 0.0f;
+                        metadata.hdrCapacityMin = 1.0f;
+                        metadata.hdrCapacityMax = metadata.maxContentBoost;
+                        status = jpegHdr.encodeJPEGR(&jpegImg, &jpegGainMap, &metadata, &jpegImgR);
+                    }
+                }
+            }
+        }
+        if (status == android::OK) {
+            std::vector<uint8_t> iccData(0);
+            std::vector<uint8_t> exifData(0);
+            jpegr_info_struct info{0, 0, &iccData, &exifData};
+            status = jpegHdr.getJPEGRInfo(&jpegImgR, &info);
+            if (status == android::OK) {
+                size_t outSize = info.width * info.height * ((of == ULTRAHDR_OUTPUT_SDR) ? 4 : 8);
+                jpegr_uncompressed_struct decodedJpegR;
+                auto decodedRaw = std::make_unique<uint8_t[]>(outSize);
+                decodedJpegR.data = decodedRaw.get();
+                ultrahdr_metadata_struct metadata;
+                jpegr_uncompressed_struct decodedGainMap{};
+                status = jpegHdr.decodeJPEGR(&jpegImgR, &decodedJpegR,
+                                             mFdp.ConsumeFloatingPointInRange<float>(1.0, FLT_MAX),
+                                             nullptr, of, &decodedGainMap, &metadata);
+                if (status != android::OK) {
+                    ALOGE("encountered error during decoding %d", status);
+                }
+                if (decodedGainMap.data) free(decodedGainMap.data);
+            } else {
+                ALOGE("encountered error during get jpeg info %d", status);
+            }
+        } else {
+            ALOGE("encountered error during encoding %d", status);
+        }
+    }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    UltraHdrEncFuzzer fuzzHandle(data, size);
+    fuzzHandle.process();
+    return 0;
+}
diff --git a/libs/ultrahdr/gainmapmath.cpp b/libs/ultrahdr/gainmapmath.cpp
new file mode 100644
index 0000000..ee15363
--- /dev/null
+++ b/libs/ultrahdr/gainmapmath.cpp
@@ -0,0 +1,781 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cmath>
+#include <vector>
+#include <ultrahdr/gainmapmath.h>
+
+namespace android::ultrahdr {
+
+static const std::vector<float> kPqOETF = [] {
+    std::vector<float> result;
+    for (int idx = 0; idx < kPqOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kPqOETFNumEntries - 1);
+      result.push_back(pqOetf(value));
+    }
+    return result;
+}();
+
+static const std::vector<float> kPqInvOETF = [] {
+    std::vector<float> result;
+    for (int idx = 0; idx < kPqInvOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kPqInvOETFNumEntries - 1);
+      result.push_back(pqInvOetf(value));
+    }
+    return result;
+}();
+
+static const std::vector<float> kHlgOETF = [] {
+    std::vector<float> result;
+    for (int idx = 0; idx < kHlgOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kHlgOETFNumEntries - 1);
+      result.push_back(hlgOetf(value));
+    }
+    return result;
+}();
+
+static const std::vector<float> kHlgInvOETF = [] {
+    std::vector<float> result;
+    for (int idx = 0; idx < kHlgInvOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kHlgInvOETFNumEntries - 1);
+      result.push_back(hlgInvOetf(value));
+    }
+    return result;
+}();
+
+static const std::vector<float> kSrgbInvOETF = [] {
+    std::vector<float> result;
+    for (int idx = 0; idx < kSrgbInvOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kSrgbInvOETFNumEntries - 1);
+      result.push_back(srgbInvOetf(value));
+    }
+    return result;
+}();
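+
+// Note (descriptive only): the tables above precompute each transfer function
+// on a uniform grid over [0, 1]; the corresponding *LUT() helpers below scale
+// the input by the table size and clamp the index, trading a small
+// quantization error for avoiding pow()/log()/exp() per pixel.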
+
+// Use Shepard's method for inverse distance weighting. For more information:
+// en.wikipedia.org/wiki/Inverse_distance_weighting#Shepard's_method
+
+float ShepardsIDW::euclideanDistance(float x1, float x2, float y1, float y2) {
+  return sqrt(((y2 - y1) * (y2 - y1)) + (x2 - x1) * (x2 - x1));
+}
+
+void ShepardsIDW::fillShepardsIDW(float *weights, int incR, int incB) {
+  for (int y = 0; y < mMapScaleFactor; y++) {
+    for (int x = 0; x < mMapScaleFactor; x++) {
+      float pos_x = ((float)x) / mMapScaleFactor;
+      float pos_y = ((float)y) / mMapScaleFactor;
+      int curr_x = floor(pos_x);
+      int curr_y = floor(pos_y);
+      int next_x = curr_x + incR;
+      int next_y = curr_y + incB;
+      float e1_distance = euclideanDistance(pos_x, curr_x, pos_y, curr_y);
+      int index = y * mMapScaleFactor * 4 + x * 4;
+      if (e1_distance == 0) {
+        weights[index++] = 1.f;
+        weights[index++] = 0.f;
+        weights[index++] = 0.f;
+        weights[index++] = 0.f;
+      } else {
+        float e1_weight = 1.f / e1_distance;
+
+        float e2_distance = euclideanDistance(pos_x, curr_x, pos_y, next_y);
+        float e2_weight = 1.f / e2_distance;
+
+        float e3_distance = euclideanDistance(pos_x, next_x, pos_y, curr_y);
+        float e3_weight = 1.f / e3_distance;
+
+        float e4_distance = euclideanDistance(pos_x, next_x, pos_y, next_y);
+        float e4_weight = 1.f / e4_distance;
+
+        float total_weight = e1_weight + e2_weight + e3_weight + e4_weight;
+
+        weights[index++] = e1_weight / total_weight;
+        weights[index++] = e2_weight / total_weight;
+        weights[index++] = e3_weight / total_weight;
+        weights[index++] = e4_weight / total_weight;
+      }
+    }
+  }
+}
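+
+// Layout note (descriptive only): fillShepardsIDW() fills a table with one
+// entry per sub-sample offset inside a map cell (mMapScaleFactor x
+// mMapScaleFactor entries), each entry holding 4 normalized weights for the
+// current, next-row, next-column and diagonal map samples; sampleMap() later
+// indexes this table by (offset_x, offset_y) instead of recomputing distances.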
+
+////////////////////////////////////////////////////////////////////////////////
+// sRGB transformations
+
+static const float kMaxPixelFloat = 1.0f;
+static float clampPixelFloat(float value) {
+    return (value < 0.0f) ? 0.0f : (value > kMaxPixelFloat) ? kMaxPixelFloat : value;
+}
+
+// See IEC 61966-2-1/Amd 1:2003, Equation F.7.
+static const float kSrgbR = 0.2126f, kSrgbG = 0.7152f, kSrgbB = 0.0722f;
+
+float srgbLuminance(Color e) {
+  return kSrgbR * e.r + kSrgbG * e.g + kSrgbB * e.b;
+}
+
+// See ITU-R BT.709-6, Section 3.
+// Uses the same coefficients for deriving luma signal as
+// IEC 61966-2-1/Amd 1:2003 states for luminance, so we reuse the luminance
+// function above.
+static const float kSrgbCb = 1.8556f, kSrgbCr = 1.5748f;
+
+Color srgbRgbToYuv(Color e_gamma) {
+  float y_gamma = srgbLuminance(e_gamma);
+  return {{{ y_gamma,
+             (e_gamma.b - y_gamma) / kSrgbCb,
+             (e_gamma.r - y_gamma) / kSrgbCr }}};
+}
+
+// See ITU-R BT.709-6, Section 3.
+// Same derivation as BT.2100's YUV->RGB, below. Similar to srgbRgbToYuv, we
+// can reuse the luminance coefficients since they are the same.
+static const float kSrgbGCb = kSrgbB * kSrgbCb / kSrgbG;
+static const float kSrgbGCr = kSrgbR * kSrgbCr / kSrgbG;
+
+Color srgbYuvToRgb(Color e_gamma) {
+  return {{{ clampPixelFloat(e_gamma.y + kSrgbCr * e_gamma.v),
+             clampPixelFloat(e_gamma.y - kSrgbGCb * e_gamma.u - kSrgbGCr * e_gamma.v),
+             clampPixelFloat(e_gamma.y + kSrgbCb * e_gamma.u) }}};
+}
+
+// See IEC 61966-2-1/Amd 1:2003, Equations F.5 and F.6.
+float srgbInvOetf(float e_gamma) {
+  if (e_gamma <= 0.04045f) {
+    return e_gamma / 12.92f;
+  } else {
+    return pow((e_gamma + 0.055f) / 1.055f, 2.4);
+  }
+}
+
+Color srgbInvOetf(Color e_gamma) {
+  return {{{ srgbInvOetf(e_gamma.r),
+             srgbInvOetf(e_gamma.g),
+             srgbInvOetf(e_gamma.b) }}};
+}
+
+// See IEC 61966-2-1, Equations F.5 and F.6.
+float srgbInvOetfLUT(float e_gamma) {
+  uint32_t value = static_cast<uint32_t>(e_gamma * kSrgbInvOETFNumEntries);
+  // TODO: Remove once conversion modules have appropriate clamping in place
+  value = CLIP3(value, 0, kSrgbInvOETFNumEntries - 1);
+  return kSrgbInvOETF[value];
+}
+
+Color srgbInvOetfLUT(Color e_gamma) {
+  return {{{ srgbInvOetfLUT(e_gamma.r),
+             srgbInvOetfLUT(e_gamma.g),
+             srgbInvOetfLUT(e_gamma.b) }}};
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Display-P3 transformations
+
+// See SMPTE EG 432-1, Equation 7-8.
+static const float kP3R = 0.20949f, kP3G = 0.72160f, kP3B = 0.06891f;
+
+float p3Luminance(Color e) {
+  return kP3R * e.r + kP3G * e.g + kP3B * e.b;
+}
+
+// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2.
+// Unfortunately, calculation of luma signal differs from calculation of
+// luminance for Display-P3, so we can't reuse p3Luminance here.
+static const float kP3YR = 0.299f, kP3YG = 0.587f, kP3YB = 0.114f;
+static const float kP3Cb = 1.772f, kP3Cr = 1.402f;
+
+Color p3RgbToYuv(Color e_gamma) {
+  float y_gamma = kP3YR * e_gamma.r + kP3YG * e_gamma.g + kP3YB * e_gamma.b;
+  return {{{ y_gamma,
+             (e_gamma.b - y_gamma) / kP3Cb,
+             (e_gamma.r - y_gamma) / kP3Cr }}};
+}
+
+// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2.
+// Same derivation as BT.2100's YUV->RGB, below. Similar to p3RgbToYuv, we must
+// use luma signal coefficients rather than the luminance coefficients.
+static const float kP3GCb = kP3YB * kP3Cb / kP3YG;
+static const float kP3GCr = kP3YR * kP3Cr / kP3YG;
+
+Color p3YuvToRgb(Color e_gamma) {
+  return {{{ clampPixelFloat(e_gamma.y + kP3Cr * e_gamma.v),
+             clampPixelFloat(e_gamma.y - kP3GCb * e_gamma.u - kP3GCr * e_gamma.v),
+             clampPixelFloat(e_gamma.y + kP3Cb * e_gamma.u) }}};
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// BT.2100 transformations - according to ITU-R BT.2100-2
+
+// See ITU-R BT.2100-2, Table 5, HLG Reference OOTF
+static const float kBt2100R = 0.2627f, kBt2100G = 0.6780f, kBt2100B = 0.0593f;
+
+float bt2100Luminance(Color e) {
+  return kBt2100R * e.r + kBt2100G * e.g + kBt2100B * e.b;
+}
+
+// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals.
+// BT.2100 uses the same coefficients for calculating luma signal and luminance,
+// so we reuse the luminance function here.
+static const float kBt2100Cb = 1.8814f, kBt2100Cr = 1.4746f;
+
+Color bt2100RgbToYuv(Color e_gamma) {
+  float y_gamma = bt2100Luminance(e_gamma);
+  return {{{ y_gamma,
+             (e_gamma.b - y_gamma) / kBt2100Cb,
+             (e_gamma.r - y_gamma) / kBt2100Cr }}};
+}
+
+// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals.
+//
+// Similar to bt2100RgbToYuv above, we can reuse the luminance coefficients.
+//
+// Derived by inverting bt2100RgbToYuv. The derivations for R and B are pretty
+// straightforward; we just invert the formulas for U and V above. But deriving
+// the formula for G is a bit more complicated:
+//
+// Start with the equation for luminance:
+//   Y = kBt2100R * R + kBt2100G * G + kBt2100B * B
+// Solve for G:
+//   G = (Y - kBt2100R * R - kBt2100B * B) / kBt2100G
+// Substitute the equations for R and B in terms of YUV:
+//   G = (Y - kBt2100R * (Y + kBt2100Cr * V) - kBt2100B * (Y + kBt2100Cb * U)) / kBt2100G
+// Simplify:
+//   G = Y * ((1 - kBt2100R - kBt2100B) / kBt2100G)
+//     - U * (kBt2100B * kBt2100Cb / kBt2100G)
+//     - V * (kBt2100R * kBt2100Cr / kBt2100G)
+//
+// We then get the following coefficients for calculating G from YUV:
+//
+// Coef for Y = (1 - kBt2100R - kBt2100B) / kBt2100G = 1
+// Coef for U = -(kBt2100B * kBt2100Cb / kBt2100G) = -kBt2100GCb, kBt2100GCb = ~0.1645
+// Coef for V = -(kBt2100R * kBt2100Cr / kBt2100G) = -kBt2100GCr, kBt2100GCr = ~0.5713
+
+static const float kBt2100GCb = kBt2100B * kBt2100Cb / kBt2100G;
+static const float kBt2100GCr = kBt2100R * kBt2100Cr / kBt2100G;
+
+Color bt2100YuvToRgb(Color e_gamma) {
+  return {{{ clampPixelFloat(e_gamma.y + kBt2100Cr * e_gamma.v),
+             clampPixelFloat(e_gamma.y - kBt2100GCb * e_gamma.u - kBt2100GCr * e_gamma.v),
+             clampPixelFloat(e_gamma.y + kBt2100Cb * e_gamma.u) }}};
+}
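+
+// Quick numeric check of the derivation above (illustrative only): with
+// kBt2100R = 0.2627, kBt2100G = 0.6780, kBt2100B = 0.0593,
+//   (1 - kBt2100R - kBt2100B) / kBt2100G = 0.6780 / 0.6780 = 1,
+//   kBt2100GCb = 0.0593 * 1.8814 / 0.6780 ~= 0.1645,
+//   kBt2100GCr = 0.2627 * 1.4746 / 0.6780 ~= 0.5713,
+// which matches the coefficients used for G in bt2100YuvToRgb() above.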
+
+// See ITU-R BT.2100-2, Table 5, HLG Reference OETF.
+static const float kHlgA = 0.17883277f, kHlgB = 0.28466892f, kHlgC = 0.55991073;
+
+float hlgOetf(float e) {
+  if (e <= 1.0f/12.0f) {
+    return sqrt(3.0f * e);
+  } else {
+    return kHlgA * log(12.0f * e - kHlgB) + kHlgC;
+  }
+}
+
+Color hlgOetf(Color e) {
+  return {{{ hlgOetf(e.r), hlgOetf(e.g), hlgOetf(e.b) }}};
+}
+
+float hlgOetfLUT(float e) {
+  uint32_t value = static_cast<uint32_t>(e * kHlgOETFNumEntries);
+  // TODO: Remove once conversion modules have appropriate clamping in place
+  value = CLIP3(value, 0, kHlgOETFNumEntries - 1);
+
+  return kHlgOETF[value];
+}
+
+Color hlgOetfLUT(Color e) {
+  return {{{ hlgOetfLUT(e.r), hlgOetfLUT(e.g), hlgOetfLUT(e.b) }}};
+}
+
+// See ITU-R BT.2100-2, Table 5, HLG Reference EOTF.
+float hlgInvOetf(float e_gamma) {
+  if (e_gamma <= 0.5f) {
+    return pow(e_gamma, 2.0f) / 3.0f;
+  } else {
+    return (exp((e_gamma - kHlgC) / kHlgA) + kHlgB) / 12.0f;
+  }
+}
+
+Color hlgInvOetf(Color e_gamma) {
+  return {{{ hlgInvOetf(e_gamma.r),
+             hlgInvOetf(e_gamma.g),
+             hlgInvOetf(e_gamma.b) }}};
+}
+
+float hlgInvOetfLUT(float e_gamma) {
+  uint32_t value = static_cast<uint32_t>(e_gamma * kHlgInvOETFNumEntries);
+  // TODO: Remove once conversion modules have appropriate clamping in place
+  value = CLIP3(value, 0, kHlgInvOETFNumEntries - 1);
+
+  return kHlgInvOETF[value];
+}
+
+Color hlgInvOetfLUT(Color e_gamma) {
+  return {{{ hlgInvOetfLUT(e_gamma.r),
+             hlgInvOetfLUT(e_gamma.g),
+             hlgInvOetfLUT(e_gamma.b) }}};
+}
+
+// See ITU-R BT.2100-2, Table 4, Reference PQ OETF.
+static const float kPqM1 = 2610.0f / 16384.0f, kPqM2 = 2523.0f / 4096.0f * 128.0f;
+static const float kPqC1 = 3424.0f / 4096.0f, kPqC2 = 2413.0f / 4096.0f * 32.0f,
+                   kPqC3 = 2392.0f / 4096.0f * 32.0f;
+
+float pqOetf(float e) {
+  if (e <= 0.0f) return 0.0f;
+  return pow((kPqC1 + kPqC2 * pow(e, kPqM1)) / (1 + kPqC3 * pow(e, kPqM1)),
+             kPqM2);
+}
+
+Color pqOetf(Color e) {
+  return {{{ pqOetf(e.r), pqOetf(e.g), pqOetf(e.b) }}};
+}
+
+float pqOetfLUT(float e) {
+  uint32_t value = static_cast<uint32_t>(e * kPqOETFNumEntries);
+  // TODO: Remove once conversion modules have appropriate clamping in place
+  value = CLIP3(value, 0, kPqOETFNumEntries - 1);
+
+  return kPqOETF[value];
+}
+
+Color pqOetfLUT(Color e) {
+  return {{{ pqOetfLUT(e.r), pqOetfLUT(e.g), pqOetfLUT(e.b) }}};
+}
+
+// Derived from the inverse of the Reference PQ OETF.
+static const float kPqInvA = 128.0f, kPqInvB = 107.0f, kPqInvC = 2413.0f, kPqInvD = 2392.0f,
+                   kPqInvE = 6.2773946361f, kPqInvF = 0.0126833f;
+
+float pqInvOetf(float e_gamma) {
+  // This equation blows up if e_gamma is 0.0, and a plain <= 0.0 check does
+  // not reliably catch values that are effectively zero. So check against
+  // 0.0001 instead, since anything this small is crushed to zero anyway.
+  if (e_gamma <= 0.0001f) return 0.0f;
+  return pow((kPqInvA * pow(e_gamma, kPqInvF) - kPqInvB)
+           / (kPqInvC - kPqInvD * pow(e_gamma, kPqInvF)),
+             kPqInvE);
+}
+
+Color pqInvOetf(Color e_gamma) {
+  return {{{ pqInvOetf(e_gamma.r),
+             pqInvOetf(e_gamma.g),
+             pqInvOetf(e_gamma.b) }}};
+}
+
+float pqInvOetfLUT(float e_gamma) {
+  uint32_t value = static_cast<uint32_t>(e_gamma * kPqInvOETFNumEntries);
+  // TODO: Remove once conversion modules have appropriate clamping in place
+  value = CLIP3(value, 0, kPqInvOETFNumEntries - 1);
+
+  return kPqInvOETF[value];
+}
+
+Color pqInvOetfLUT(Color e_gamma) {
+  return {{{ pqInvOetfLUT(e_gamma.r),
+             pqInvOetfLUT(e_gamma.g),
+             pqInvOetfLUT(e_gamma.b) }}};
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Color conversions
+
+Color bt709ToP3(Color e) {
+ return {{{ 0.82254f * e.r + 0.17755f * e.g + 0.00006f * e.b,
+            0.03312f * e.r + 0.96684f * e.g + -0.00001f * e.b,
+            0.01706f * e.r + 0.07240f * e.g + 0.91049f * e.b }}};
+}
+
+Color bt709ToBt2100(Color e) {
+ return {{{ 0.62740f * e.r + 0.32930f * e.g + 0.04332f * e.b,
+            0.06904f * e.r + 0.91958f * e.g + 0.01138f * e.b,
+            0.01636f * e.r + 0.08799f * e.g + 0.89555f * e.b }}};
+}
+
+Color p3ToBt709(Color e) {
+ return {{{ 1.22482f * e.r + -0.22490f * e.g + -0.00007f * e.b,
+            -0.04196f * e.r + 1.04199f * e.g + 0.00001f * e.b,
+            -0.01961f * e.r + -0.07865f * e.g + 1.09831f * e.b }}};
+}
+
+Color p3ToBt2100(Color e) {
+ return {{{ 0.75378f * e.r + 0.19862f * e.g + 0.04754f * e.b,
+            0.04576f * e.r + 0.94177f * e.g + 0.01250f * e.b,
+            -0.00121f * e.r + 0.01757f * e.g + 0.98359f * e.b }}};
+}
+
+Color bt2100ToBt709(Color e) {
+ return {{{ 1.66045f * e.r + -0.58764f * e.g + -0.07286f * e.b,
+            -0.12445f * e.r + 1.13282f * e.g + -0.00837f * e.b,
+            -0.01811f * e.r + -0.10057f * e.g + 1.11878f * e.b }}};
+}
+
+Color bt2100ToP3(Color e) {
+ return {{{ 1.34369f * e.r + -0.28223f * e.g + -0.06135f * e.b,
+            -0.06533f * e.r + 1.07580f * e.g + -0.01051f * e.b,
+            0.00283f * e.r + -0.01957f * e.g + 1.01679f * e.b
+ }}};
+}
+
+// TODO: confirm we always want to convert like this before calculating
+// luminance.
+ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut,
+                                    ultrahdr_color_gamut hdr_gamut) {
+  switch (sdr_gamut) {
+    case ULTRAHDR_COLORGAMUT_BT709:
+      switch (hdr_gamut) {
+        case ULTRAHDR_COLORGAMUT_BT709:
+          return identityConversion;
+        case ULTRAHDR_COLORGAMUT_P3:
+          return p3ToBt709;
+        case ULTRAHDR_COLORGAMUT_BT2100:
+          return bt2100ToBt709;
+        case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
+          return nullptr;
+      }
+      break;
+    case ULTRAHDR_COLORGAMUT_P3:
+      switch (hdr_gamut) {
+        case ULTRAHDR_COLORGAMUT_BT709:
+          return bt709ToP3;
+        case ULTRAHDR_COLORGAMUT_P3:
+          return identityConversion;
+        case ULTRAHDR_COLORGAMUT_BT2100:
+          return bt2100ToP3;
+        case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
+          return nullptr;
+      }
+      break;
+    case ULTRAHDR_COLORGAMUT_BT2100:
+      switch (hdr_gamut) {
+        case ULTRAHDR_COLORGAMUT_BT709:
+          return bt709ToBt2100;
+        case ULTRAHDR_COLORGAMUT_P3:
+          return p3ToBt2100;
+        case ULTRAHDR_COLORGAMUT_BT2100:
+          return identityConversion;
+        case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
+          return nullptr;
+      }
+      break;
+    case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
+      return nullptr;
+  }
+  return nullptr;
+}
+
+// All of these conversions are derived from the respective input YUV->RGB conversion followed by
+// the RGB->YUV for the receiving encoding. They are consistent with the RGB<->YUV functions in this
+// file, given that we use BT.709 encoding for sRGB and BT.601 encoding for Display-P3, to match
+// DataSpace.
+
+Color yuv709To601(Color e_gamma) {
+  return {{{ 1.0f * e_gamma.y +  0.101579f * e_gamma.u +  0.196076f * e_gamma.v,
+             0.0f * e_gamma.y +  0.989854f * e_gamma.u + -0.110653f * e_gamma.v,
+             0.0f * e_gamma.y + -0.072453f * e_gamma.u +  0.983398f * e_gamma.v }}};
+}
+
+Color yuv709To2100(Color e_gamma) {
+  return {{{ 1.0f * e_gamma.y + -0.016969f * e_gamma.u +  0.096312f * e_gamma.v,
+             0.0f * e_gamma.y +  0.995306f * e_gamma.u + -0.051192f * e_gamma.v,
+             0.0f * e_gamma.y +  0.011507f * e_gamma.u +  1.002637f * e_gamma.v }}};
+}
+
+Color yuv601To709(Color e_gamma) {
+  return {{{ 1.0f * e_gamma.y + -0.118188f * e_gamma.u + -0.212685f * e_gamma.v,
+             0.0f * e_gamma.y +  1.018640f * e_gamma.u +  0.114618f * e_gamma.v,
+             0.0f * e_gamma.y +  0.075049f * e_gamma.u +  1.025327f * e_gamma.v }}};
+}
+
+Color yuv601To2100(Color e_gamma) {
+  return {{{ 1.0f * e_gamma.y + -0.128245f * e_gamma.u + -0.115879f * e_gamma.v,
+             0.0f * e_gamma.y +  1.010016f * e_gamma.u +  0.061592f * e_gamma.v,
+             0.0f * e_gamma.y +  0.086969f * e_gamma.u +  1.029350f * e_gamma.v }}};
+}
+
+Color yuv2100To709(Color e_gamma) {
+  return {{{ 1.0f * e_gamma.y +  0.018149f * e_gamma.u + -0.095132f * e_gamma.v,
+             0.0f * e_gamma.y +  1.004123f * e_gamma.u +  0.051267f * e_gamma.v,
+             0.0f * e_gamma.y + -0.011524f * e_gamma.u +  0.996782f * e_gamma.v }}};
+}
+
+Color yuv2100To601(Color e_gamma) {
+  return {{{ 1.0f * e_gamma.y +  0.117887f * e_gamma.u +  0.105521f * e_gamma.v,
+             0.0f * e_gamma.y +  0.995211f * e_gamma.u + -0.059549f * e_gamma.v,
+             0.0f * e_gamma.y + -0.084085f * e_gamma.u +  0.976518f * e_gamma.v }}};
+}
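+
+// Sanity check (illustrative only): because each pair above is derived from a
+// forward and an inverse conversion, composing a pair such as
+// yuv601To709(yuv709To601(e)) should recover e up to the rounding of the
+// printed six-decimal coefficients.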
+
+void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma,
+                     ColorTransformFn fn) {
+  Color yuv1 = getYuv420Pixel(image, x_chroma * 2,     y_chroma * 2    );
+  Color yuv2 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2    );
+  Color yuv3 = getYuv420Pixel(image, x_chroma * 2,     y_chroma * 2 + 1);
+  Color yuv4 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2 + 1);
+
+  yuv1 = fn(yuv1);
+  yuv2 = fn(yuv2);
+  yuv3 = fn(yuv3);
+  yuv4 = fn(yuv4);
+
+  Color new_uv = (yuv1 + yuv2 + yuv3 + yuv4) / 4.0f;
+
+  size_t pixel_y1_idx =  x_chroma * 2      +  y_chroma * 2      * image->width;
+  size_t pixel_y2_idx = (x_chroma * 2 + 1) +  y_chroma * 2      * image->width;
+  size_t pixel_y3_idx =  x_chroma * 2      + (y_chroma * 2 + 1) * image->width;
+  size_t pixel_y4_idx = (x_chroma * 2 + 1) + (y_chroma * 2 + 1) * image->width;
+
+  uint8_t& y1_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y1_idx];
+  uint8_t& y2_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y2_idx];
+  uint8_t& y3_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y3_idx];
+  uint8_t& y4_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y4_idx];
+
+  size_t pixel_count = image->width * image->height;
+  size_t pixel_uv_idx = x_chroma + y_chroma * (image->width / 2);
+
+  uint8_t& u_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count + pixel_uv_idx];
+  uint8_t& v_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count * 5 / 4 + pixel_uv_idx];
+
+  y1_uint = static_cast<uint8_t>(floor(yuv1.y * 255.0f + 0.5f));
+  y2_uint = static_cast<uint8_t>(floor(yuv2.y * 255.0f + 0.5f));
+  y3_uint = static_cast<uint8_t>(floor(yuv3.y * 255.0f + 0.5f));
+  y4_uint = static_cast<uint8_t>(floor(yuv4.y * 255.0f + 0.5f));
+
+  u_uint = static_cast<uint8_t>(floor(new_uv.u * 255.0f + 128.0f + 0.5f));
+  v_uint = static_cast<uint8_t>(floor(new_uv.v * 255.0f + 128.0f + 0.5f));
+}
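+
+// Layout note (descriptive only): the 420 helpers in this file assume a fully
+// packed I420 buffer: a width x height Y plane followed by quarter-size U and
+// V planes, which is why U is read at an offset of pixel_count and V at
+// pixel_count * 5 / 4 from the start of the buffer.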
+
+////////////////////////////////////////////////////////////////////////////////
+// Gain map calculations
+uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata) {
+  return encodeGain(y_sdr, y_hdr, metadata,
+                    log2(metadata->minContentBoost), log2(metadata->maxContentBoost));
+}
+
+uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata,
+                   float log2MinContentBoost, float log2MaxContentBoost) {
+  float gain = 1.0f;
+  if (y_sdr > 0.0f) {
+    gain = y_hdr / y_sdr;
+  }
+
+  if (gain < metadata->minContentBoost) gain = metadata->minContentBoost;
+  if (gain > metadata->maxContentBoost) gain = metadata->maxContentBoost;
+
+  return static_cast<uint8_t>((log2(gain) - log2MinContentBoost)
+                            / (log2MaxContentBoost - log2MinContentBoost)
+                            * 255.0f);
+}
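+
+// Worked example (illustrative only): with metadata->minContentBoost = 1.0f
+// and metadata->maxContentBoost = 4.0f (so log2 bounds of 0 and 2), a pixel
+// with y_hdr / y_sdr = 2.0f encodes as (1 - 0) / (2 - 0) * 255 = 127.5, which
+// the uint8_t cast truncates to 127.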
+
+Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata) {
+  float logBoost = log2(metadata->minContentBoost) * (1.0f - gain)
+                 + log2(metadata->maxContentBoost) * gain;
+  float gainFactor = exp2(logBoost);
+  return e * gainFactor;
+}
+
+Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata, float displayBoost) {
+  float logBoost = log2(metadata->minContentBoost) * (1.0f - gain)
+                 + log2(metadata->maxContentBoost) * gain;
+  float gainFactor = exp2(logBoost * displayBoost / metadata->maxContentBoost);
+  return e * gainFactor;
+}
+
+Color applyGainLUT(Color e, float gain, GainLUT& gainLUT) {
+  float gainFactor = gainLUT.getGainFactor(gain);
+  return e * gainFactor;
+}
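+
+// Continuing the example above (illustrative only): decoding the stored value
+// 127 gives gain = 127 / 255 ~= 0.498, so applyGain() computes
+// logBoost = log2(1) * (1 - 0.498) + log2(4) * 0.498 ~= 0.996 and scales the
+// pixel by exp2(0.996) ~= 1.99, i.e. the original 2x boost up to quantization.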
+
+Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y) {
+  size_t pixel_count = image->width * image->height;
+
+  size_t pixel_y_idx = x + y * image->width;
+  size_t pixel_uv_idx = x / 2 + (y / 2) * (image->width / 2);
+
+  uint8_t y_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y_idx];
+  uint8_t u_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count + pixel_uv_idx];
+  uint8_t v_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count * 5 / 4 + pixel_uv_idx];
+
+  // 128 bias for UV given we are using jpeglib; see:
+  // https://github.com/kornelski/libjpeg/blob/master/structure.doc
+  return {{{ static_cast<float>(y_uint) / 255.0f,
+             (static_cast<float>(u_uint) - 128.0f) / 255.0f,
+             (static_cast<float>(v_uint) - 128.0f) / 255.0f }}};
+}
+
+Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y) {
+  size_t luma_stride = image->luma_stride;
+  size_t chroma_stride = image->chroma_stride;
+  uint16_t* luma_data = reinterpret_cast<uint16_t*>(image->data);
+  uint16_t* chroma_data = reinterpret_cast<uint16_t*>(image->chroma_data);
+
+  if (luma_stride == 0) {
+    luma_stride = image->width;
+  }
+  if (chroma_stride == 0) {
+    chroma_stride = luma_stride;
+  }
+  if (chroma_data == nullptr) {
+    chroma_data = &reinterpret_cast<uint16_t*>(image->data)[luma_stride * image->height];
+  }
+
+  size_t pixel_y_idx = y * luma_stride + x;
+  size_t pixel_u_idx = (y >> 1) * chroma_stride + (x & ~0x1);
+  size_t pixel_v_idx = pixel_u_idx + 1;
+
+  uint16_t y_uint = luma_data[pixel_y_idx] >> 6;
+  uint16_t u_uint = chroma_data[pixel_u_idx] >> 6;
+  uint16_t v_uint = chroma_data[pixel_v_idx] >> 6;
+
+  // Conversions include taking narrow-range into account.
+  return {{{ (static_cast<float>(y_uint) - 64.0f) / 876.0f,
+             (static_cast<float>(u_uint) - 64.0f) / 896.0f - 0.5f,
+             (static_cast<float>(v_uint) - 64.0f) / 896.0f - 0.5f }}};
+}
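+
+// Range note (descriptive only): after the >> 6 shift the samples are 10-bit
+// narrow-range values, nominally [64, 940] for luma and [64, 960] for chroma,
+// so (y - 64) / 876 maps luma to [0, 1] and (c - 64) / 896 - 0.5 maps chroma
+// to [-0.5, 0.5].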
+
+typedef Color (*getPixelFn)(jr_uncompressed_ptr, size_t, size_t);
+
+static Color samplePixels(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y,
+                          getPixelFn get_pixel_fn) {
+  Color e = {{{ 0.0f, 0.0f, 0.0f }}};
+  for (size_t dy = 0; dy < map_scale_factor; ++dy) {
+    for (size_t dx = 0; dx < map_scale_factor; ++dx) {
+      e += get_pixel_fn(image, x * map_scale_factor + dx, y * map_scale_factor + dy);
+    }
+  }
+
+  return e / static_cast<float>(map_scale_factor * map_scale_factor);
+}
+
+Color sampleYuv420(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y) {
+  return samplePixels(image, map_scale_factor, x, y, getYuv420Pixel);
+}
+
+Color sampleP010(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y) {
+  return samplePixels(image, map_scale_factor, x, y, getP010Pixel);
+}
+
+// TODO: do we need something more clever for filtering either the map or images
+// to generate the map?
+
+static size_t clamp(const size_t& val, const size_t& low, const size_t& high) {
+  return val < low ? low : (high < val ? high : val);
+}
+
+static float mapUintToFloat(uint8_t map_uint) {
+  return static_cast<float>(map_uint) / 255.0f;
+}
+
+static float pythDistance(float x_diff, float y_diff) {
+  return sqrt(pow(x_diff, 2.0f) + pow(y_diff, 2.0f));
+}
+
+// TODO: If map_scale_factor is guaranteed to be an integer, then remove the following.
+float sampleMap(jr_uncompressed_ptr map, float map_scale_factor, size_t x, size_t y) {
+  float x_map = static_cast<float>(x) / map_scale_factor;
+  float y_map = static_cast<float>(y) / map_scale_factor;
+
+  size_t x_lower = static_cast<size_t>(floor(x_map));
+  size_t x_upper = x_lower + 1;
+  size_t y_lower = static_cast<size_t>(floor(y_map));
+  size_t y_upper = y_lower + 1;
+
+  x_lower = clamp(x_lower, 0, map->width - 1);
+  x_upper = clamp(x_upper, 0, map->width - 1);
+  y_lower = clamp(y_lower, 0, map->height - 1);
+  y_upper = clamp(y_upper, 0, map->height - 1);
+
+  // Use Shepard's method for inverse distance weighting. For more information:
+  // en.wikipedia.org/wiki/Inverse_distance_weighting#Shepard's_method
+
+  float e1 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_lower * map->width]);
+  float e1_dist = pythDistance(x_map - static_cast<float>(x_lower),
+                               y_map - static_cast<float>(y_lower));
+  if (e1_dist == 0.0f) return e1;
+
+  float e2 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_upper * map->width]);
+  float e2_dist = pythDistance(x_map - static_cast<float>(x_lower),
+                               y_map - static_cast<float>(y_upper));
+  if (e2_dist == 0.0f) return e2;
+
+  float e3 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_lower * map->width]);
+  float e3_dist = pythDistance(x_map - static_cast<float>(x_upper),
+                               y_map - static_cast<float>(y_lower));
+  if (e3_dist == 0.0f) return e3;
+
+  float e4 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_upper * map->width]);
+  float e4_dist = pythDistance(x_map - static_cast<float>(x_upper),
+                               y_map - static_cast<float>(y_upper));
+  if (e4_dist == 0.0f) return e4;
+
+  float e1_weight = 1.0f / e1_dist;
+  float e2_weight = 1.0f / e2_dist;
+  float e3_weight = 1.0f / e3_dist;
+  float e4_weight = 1.0f / e4_dist;
+  float total_weight = e1_weight + e2_weight + e3_weight + e4_weight;
+
+  return e1 * (e1_weight / total_weight)
+       + e2 * (e2_weight / total_weight)
+       + e3 * (e3_weight / total_weight)
+       + e4 * (e4_weight / total_weight);
+}
+
+float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y,
+                ShepardsIDW& weightTables) {
+  // TODO: If map_scale_factor is guaranteed to be an integer power of 2, then optimize the
+  // following by computing log2(map_scale_factor) once and then using >> log2(map_scale_factor)
+  int x_lower = x / map_scale_factor;
+  int x_upper = x_lower + 1;
+  int y_lower = y / map_scale_factor;
+  int y_upper = y_lower + 1;
+
+  x_lower = std::min(x_lower, map->width - 1);
+  x_upper = std::min(x_upper, map->width - 1);
+  y_lower = std::min(y_lower, map->height - 1);
+  y_upper = std::min(y_upper, map->height - 1);
+
+  float e1 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_lower * map->width]);
+  float e2 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_upper * map->width]);
+  float e3 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_lower * map->width]);
+  float e4 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_upper * map->width]);
+
+  // TODO: If map_scale_factor is guaranteed to be an integer power of 2, then optimize the
+  // following by using & (map_scale_factor - 1)
+  int offset_x = x % map_scale_factor;
+  int offset_y = y % map_scale_factor;
+
+  float* weights = weightTables.mWeights;
+  if (x_lower == x_upper && y_lower == y_upper) weights = weightTables.mWeightsC;
+  else if (x_lower == x_upper) weights = weightTables.mWeightsNR;
+  else if (y_lower == y_upper) weights = weightTables.mWeightsNB;
+  weights += offset_y * map_scale_factor * 4 + offset_x * 4;
+
+  return e1 * weights[0] + e2 * weights[1] + e3 * weights[2] + e4 * weights[3];
+}
+
+uint32_t colorToRgba1010102(Color e_gamma) {
+  return (0x3ff & static_cast<uint32_t>(e_gamma.r * 1023.0f))
+       | ((0x3ff & static_cast<uint32_t>(e_gamma.g * 1023.0f)) << 10)
+       | ((0x3ff & static_cast<uint32_t>(e_gamma.b * 1023.0f)) << 20)
+       | (0x3 << 30);  // Set alpha to 1.0
+}
+
+uint64_t colorToRgbaF16(Color e_gamma) {
+  return (uint64_t) floatToHalf(e_gamma.r)
+       | (((uint64_t) floatToHalf(e_gamma.g)) << 16)
+       | (((uint64_t) floatToHalf(e_gamma.b)) << 32)
+       | (((uint64_t) floatToHalf(1.0f)) << 48);
+}
+
+} // namespace android::ultrahdr
diff --git a/libs/ultrahdr/icc.cpp b/libs/ultrahdr/icc.cpp
new file mode 100644
index 0000000..1ab3c7c
--- /dev/null
+++ b/libs/ultrahdr/icc.cpp
@@ -0,0 +1,677 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef USE_BIG_ENDIAN
+#define USE_BIG_ENDIAN true
+#endif
+
+#include <ultrahdr/icc.h>
+#include <ultrahdr/gainmapmath.h>
+#include <vector>
+#include <utils/Log.h>
+
+#ifndef FLT_MAX
+#define FLT_MAX 0x1.fffffep127f
+#endif
+
+namespace android::ultrahdr {
+static void Matrix3x3_apply(const Matrix3x3* m, float* x) {
+    float y0 = x[0] * m->vals[0][0] + x[1] * m->vals[0][1] + x[2] * m->vals[0][2];
+    float y1 = x[0] * m->vals[1][0] + x[1] * m->vals[1][1] + x[2] * m->vals[1][2];
+    float y2 = x[0] * m->vals[2][0] + x[1] * m->vals[2][1] + x[2] * m->vals[2][2];
+    x[0] = y0;
+    x[1] = y1;
+    x[2] = y2;
+}
+
+bool Matrix3x3_invert(const Matrix3x3* src, Matrix3x3* dst) {
+    double a00 = src->vals[0][0],
+           a01 = src->vals[1][0],
+           a02 = src->vals[2][0],
+           a10 = src->vals[0][1],
+           a11 = src->vals[1][1],
+           a12 = src->vals[2][1],
+           a20 = src->vals[0][2],
+           a21 = src->vals[1][2],
+           a22 = src->vals[2][2];
+
+    double b0 = a00*a11 - a01*a10,
+           b1 = a00*a12 - a02*a10,
+           b2 = a01*a12 - a02*a11,
+           b3 = a20,
+           b4 = a21,
+           b5 = a22;
+
+    double determinant = b0*b5
+                       - b1*b4
+                       + b2*b3;
+
+    if (determinant == 0) {
+        return false;
+    }
+
+    double invdet = 1.0 / determinant;
+    if (invdet > +FLT_MAX || invdet < -FLT_MAX || !isfinitef_((float)invdet)) {
+        return false;
+    }
+
+    b0 *= invdet;
+    b1 *= invdet;
+    b2 *= invdet;
+    b3 *= invdet;
+    b4 *= invdet;
+    b5 *= invdet;
+
+    dst->vals[0][0] = (float)( a11*b5 - a12*b4 );
+    dst->vals[1][0] = (float)( a02*b4 - a01*b5 );
+    dst->vals[2][0] = (float)(        +     b2 );
+    dst->vals[0][1] = (float)( a12*b3 - a10*b5 );
+    dst->vals[1][1] = (float)( a00*b5 - a02*b3 );
+    dst->vals[2][1] = (float)(        -     b1 );
+    dst->vals[0][2] = (float)( a10*b4 - a11*b3 );
+    dst->vals[1][2] = (float)( a01*b3 - a00*b4 );
+    dst->vals[2][2] = (float)(        +     b0 );
+
+    for (int r = 0; r < 3; ++r)
+    for (int c = 0; c < 3; ++c) {
+        if (!isfinitef_(dst->vals[r][c])) {
+            return false;
+        }
+    }
+    return true;
+}
+
+static Matrix3x3 Matrix3x3_concat(const Matrix3x3* A, const Matrix3x3* B) {
+    Matrix3x3 m = { { { 0,0,0 },{ 0,0,0 },{ 0,0,0 } } };
+    for (int r = 0; r < 3; r++)
+        for (int c = 0; c < 3; c++) {
+            m.vals[r][c] = A->vals[r][0] * B->vals[0][c]
+                         + A->vals[r][1] * B->vals[1][c]
+                         + A->vals[r][2] * B->vals[2][c];
+        }
+    return m;
+}
+
+static void float_XYZD50_to_grid16_lab(const float* xyz_float, uint8_t* grid16_lab) {
+    float v[3] = {
+            xyz_float[0] / kD50_x,
+            xyz_float[1] / kD50_y,
+            xyz_float[2] / kD50_z,
+    };
+    for (size_t i = 0; i < 3; ++i) {
+        v[i] = v[i] > 0.008856f ? cbrtf(v[i]) : v[i] * 7.787f + (16 / 116.0f);
+    }
+    const float L = v[1] * 116.0f - 16.0f;
+    const float a = (v[0] - v[1]) * 500.0f;
+    const float b = (v[1] - v[2]) * 200.0f;
+    const float Lab_unorm[3] = {
+            L * (1 / 100.f),
+            (a + 128.0f) * (1 / 255.0f),
+            (b + 128.0f) * (1 / 255.0f),
+    };
+    // This will encode L=1 as 0xFFFF. This matches how skcms will interpret the
+    // table, but the spec appears to indicate that the value should be 0xFF00.
+    // https://crbug.com/skia/13807
+    for (size_t i = 0; i < 3; ++i) {
+        reinterpret_cast<uint16_t*>(grid16_lab)[i] =
+                Endian_SwapBE16(float_round_to_unorm16(Lab_unorm[i]));
+    }
+}
+
+std::string IccHelper::get_desc_string(const ultrahdr_transfer_function tf,
+                                       const ultrahdr_color_gamut gamut) {
+    std::string result;
+    switch (gamut) {
+        case ULTRAHDR_COLORGAMUT_BT709:
+            result += "sRGB";
+            break;
+        case ULTRAHDR_COLORGAMUT_P3:
+            result += "Display P3";
+            break;
+        case ULTRAHDR_COLORGAMUT_BT2100:
+            result += "Rec2020";
+            break;
+        default:
+            result += "Unknown";
+            break;
+    }
+    result += " Gamut with ";
+    switch (tf) {
+        case ULTRAHDR_TF_SRGB:
+            result += "sRGB";
+            break;
+        case ULTRAHDR_TF_LINEAR:
+            result += "Linear";
+            break;
+        case ULTRAHDR_TF_PQ:
+            result += "PQ";
+            break;
+        case ULTRAHDR_TF_HLG:
+            result += "HLG";
+            break;
+        default:
+            result += "Unknown";
+            break;
+    }
+    result += " Transfer";
+    return result;
+}
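+// Illustrative outputs (not part of the library): get_desc_string(ULTRAHDR_TF_HLG,
+// ULTRAHDR_COLORGAMUT_P3) yields "Display P3 Gamut with HLG Transfer", and
+// get_desc_string(ULTRAHDR_TF_PQ, ULTRAHDR_COLORGAMUT_BT2100) yields
+// "Rec2020 Gamut with PQ Transfer".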
+
+sp<DataStruct> IccHelper::write_text_tag(const char* text) {
+    uint32_t text_length = strlen(text);
+    uint32_t header[] = {
+            Endian_SwapBE32(kTAG_TextType),                         // Type signature
+            0,                                                      // Reserved
+            Endian_SwapBE32(1),                                     // Number of records
+            Endian_SwapBE32(12),                                    // Record size (must be 12)
+            Endian_SwapBE32(SetFourByteTag('e', 'n', 'U', 'S')),    // English USA
+            Endian_SwapBE32(2 * text_length),                       // Length of string in bytes
+            Endian_SwapBE32(28),                                    // Offset of string
+    };
+
+    uint32_t total_length = text_length * 2 + sizeof(header);
+    total_length = (((total_length + 2) >> 2) << 2);  // 4 aligned
+    sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
+
+    if (!dataStruct->write(header, sizeof(header))) {
+        ALOGE("write_text_tag(): error in writing data");
+        return dataStruct;
+    }
+
+    for (size_t i = 0; i < text_length; i++) {
+        // Convert ASCII to big-endian UTF-16.
+        dataStruct->write8(0);
+        dataStruct->write8(text[i]);
+    }
+
+    return dataStruct;
+}
+
+sp<DataStruct> IccHelper::write_xyz_tag(float x, float y, float z) {
+    uint32_t data[] = {
+            Endian_SwapBE32(kXYZ_PCSSpace),
+            0,
+            static_cast<uint32_t>(Endian_SwapBE32(float_round_to_fixed(x))),
+            static_cast<uint32_t>(Endian_SwapBE32(float_round_to_fixed(y))),
+            static_cast<uint32_t>(Endian_SwapBE32(float_round_to_fixed(z))),
+    };
+    sp<DataStruct> dataStruct = sp<DataStruct>::make(sizeof(data));
+    dataStruct->write(&data, sizeof(data));
+    return dataStruct;
+}
+
+sp<DataStruct> IccHelper::write_trc_tag(const int table_entries, const void* table_16) {
+    int total_length = 4 + 4 + 4 + table_entries * 2;
+    total_length = (((total_length + 2) >> 2) << 2);  // 4 aligned
+    sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
+    dataStruct->write32(Endian_SwapBE32(kTAG_CurveType));     // Type
+    dataStruct->write32(0);                                     // Reserved
+    dataStruct->write32(Endian_SwapBE32(table_entries));  // Value count
+    for (size_t i = 0; i < table_entries; ++i) {
+        uint16_t value = reinterpret_cast<const uint16_t*>(table_16)[i];
+        dataStruct->write16(value);
+    }
+    return dataStruct;
+}
+
+sp<DataStruct> IccHelper::write_trc_tag_for_linear() {
+    int total_length = 16;
+    sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
+    dataStruct->write32(Endian_SwapBE32(kTAG_ParaCurveType));  // Type
+    dataStruct->write32(0);                                      // Reserved
+    dataStruct->write32(Endian_SwapBE16(kExponential_ParaCurveType));
+    dataStruct->write32(Endian_SwapBE32(float_round_to_fixed(1.0)));
+
+    return dataStruct;
+}
+
+float IccHelper::compute_tone_map_gain(const ultrahdr_transfer_function tf, float L) {
+    if (L <= 0.f) {
+        return 1.f;
+    }
+    if (tf == ULTRAHDR_TF_PQ) {
+        // The PQ transfer function will map to the range [0, 1]. Linearly scale
+        // it up to the range [0, 10,000/203]. We will then tone map that back
+        // down to [0, 1].
+        constexpr float kInputMaxLuminance = 10000 / 203.f;
+        constexpr float kOutputMaxLuminance = 1.0;
+        L *= kInputMaxLuminance;
+
+        // Compute the tone map gain which will tone map from 10,000/203 to 1.0.
+        constexpr float kToneMapA = kOutputMaxLuminance / (kInputMaxLuminance * kInputMaxLuminance);
+        constexpr float kToneMapB = 1.f / kOutputMaxLuminance;
+        return kInputMaxLuminance * (1.f + kToneMapA * L) / (1.f + kToneMapB * L);
+    }
+    if (tf == ULTRAHDR_TF_HLG) {
+        // Let Lw be the brightness of the display in nits.
+        constexpr float Lw = 203.f;
+        const float gamma = 1.2f + 0.42f * std::log(Lw / 1000.f) / std::log(10.f);
+        return std::pow(L, gamma - 1.f);
+    }
+    return 1.f;
+}
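+// Sanity check (illustrative, not part of the library): for L = 1.0 both HDR branches return a
+// gain of 1.0. In the PQ branch L is scaled to K = 10000/203 and the expression reduces to
+// K * (1 + 1/K) / (1 + K) = 1; in the HLG branch pow(1.0, gamma - 1) = 1. A full-scale signal is
+// therefore left unchanged by the tone map.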
+
+sp<DataStruct> IccHelper::write_cicp_tag(uint32_t color_primaries,
+                                         uint32_t transfer_characteristics) {
+    int total_length = 12;  // 4 + 4 + 1 + 1 + 1 + 1
+    sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
+    dataStruct->write32(Endian_SwapBE32(kTAG_cicp));    // Type signature
+    dataStruct->write32(0);                             // Reserved
+    dataStruct->write8(color_primaries);                // Color primaries
+    dataStruct->write8(transfer_characteristics);       // Transfer characteristics
+    dataStruct->write8(0);                              // RGB matrix
+    dataStruct->write8(1);                              // Full range
+    return dataStruct;
+}
+
+void IccHelper::compute_lut_entry(const Matrix3x3& src_to_XYZD50, float rgb[3]) {
+    // Compute the matrices to convert from source to Rec2020, and from Rec2020 to XYZD50.
+    Matrix3x3 src_to_rec2020;
+    const Matrix3x3 rec2020_to_XYZD50 = kRec2020;
+    {
+        Matrix3x3 XYZD50_to_rec2020;
+        Matrix3x3_invert(&rec2020_to_XYZD50, &XYZD50_to_rec2020);
+        src_to_rec2020 = Matrix3x3_concat(&XYZD50_to_rec2020, &src_to_XYZD50);
+    }
+
+    // Convert the source signal to linear.
+    for (size_t i = 0; i < kNumChannels; ++i) {
+        rgb[i] = pqOetf(rgb[i]);
+    }
+
+    // Convert source gamut to Rec2020.
+    Matrix3x3_apply(&src_to_rec2020, rgb);
+
+    // Compute the luminance of the signal.
+    float L = bt2100Luminance({{{rgb[0], rgb[1], rgb[2]}}});
+
+    // Compute the tone map gain based on the luminance.
+    float tone_map_gain = compute_tone_map_gain(ULTRAHDR_TF_PQ, L);
+
+    // Apply the tone map gain.
+    for (size_t i = 0; i < kNumChannels; ++i) {
+        rgb[i] *= tone_map_gain;
+    }
+
+    // Convert from Rec2020-linear to XYZD50.
+    Matrix3x3_apply(&rec2020_to_XYZD50, rgb);
+}
+
+sp<DataStruct> IccHelper::write_clut(const uint8_t* grid_points, const uint8_t* grid_16) {
+    uint32_t value_count = kNumChannels;
+    for (uint32_t i = 0; i < kNumChannels; ++i) {
+        value_count *= grid_points[i];
+    }
+
+    int total_length = 20 + 2 * value_count;
+    total_length = (((total_length + 2) >> 2) << 2);  // 4 aligned
+    sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
+
+    for (size_t i = 0; i < 16; ++i) {
+        dataStruct->write8(i < kNumChannels ? grid_points[i] : 0);  // Grid size
+    }
+    dataStruct->write8(2);  // Grid byte width (always 16-bit)
+    dataStruct->write8(0);  // Reserved
+    dataStruct->write8(0);  // Reserved
+    dataStruct->write8(0);  // Reserved
+
+    for (uint32_t i = 0; i < value_count; ++i) {
+        uint16_t value = reinterpret_cast<const uint16_t*>(grid_16)[i];
+        dataStruct->write16(value);
+    }
+
+    return dataStruct;
+}
+
+sp<DataStruct> IccHelper::write_mAB_or_mBA_tag(uint32_t type,
+                                               bool has_a_curves,
+                                               const uint8_t* grid_points,
+                                               const uint8_t* grid_16) {
+    const size_t b_curves_offset = 32;
+    sp<DataStruct> b_curves_data[kNumChannels];
+    sp<DataStruct> a_curves_data[kNumChannels];
+    size_t clut_offset = 0;
+    sp<DataStruct> clut;
+    size_t a_curves_offset = 0;
+
+    // The "B" curve is required.
+    for (size_t i = 0; i < kNumChannels; ++i) {
+        b_curves_data[i] = write_trc_tag_for_linear();
+    }
+
+    // The "A" curve and CLUT are optional.
+    if (has_a_curves) {
+        clut_offset = b_curves_offset;
+        for (size_t i = 0; i < kNumChannels; ++i) {
+            clut_offset += b_curves_data[i]->getLength();
+        }
+        clut = write_clut(grid_points, grid_16);
+
+        a_curves_offset = clut_offset + clut->getLength();
+        for (size_t i = 0; i < kNumChannels; ++i) {
+            a_curves_data[i] = write_trc_tag_for_linear();
+        }
+    }
+
+    int total_length = b_curves_offset;
+    for (size_t i = 0; i < kNumChannels; ++i) {
+        total_length += b_curves_data[i]->getLength();
+    }
+    if (has_a_curves) {
+        total_length += clut->getLength();
+        for (size_t i = 0; i < kNumChannels; ++i) {
+            total_length += a_curves_data[i]->getLength();
+        }
+    }
+    sp<DataStruct> dataStruct = sp<DataStruct>::make(total_length);
+    dataStruct->write32(Endian_SwapBE32(type));             // Type signature
+    dataStruct->write32(0);                                 // Reserved
+    dataStruct->write8(kNumChannels);                       // Input channels
+    dataStruct->write8(kNumChannels);                       // Output channels
+    dataStruct->write16(0);                                 // Reserved
+    dataStruct->write32(Endian_SwapBE32(b_curves_offset));  // B curve offset
+    dataStruct->write32(Endian_SwapBE32(0));                // Matrix offset (ignored)
+    dataStruct->write32(Endian_SwapBE32(0));                // M curve offset (ignored)
+    dataStruct->write32(Endian_SwapBE32(clut_offset));      // CLUT offset
+    dataStruct->write32(Endian_SwapBE32(a_curves_offset));  // A curve offset
+    for (size_t i = 0; i < kNumChannels; ++i) {
+        // Only bail out if the write fails; otherwise keep appending the remaining channels.
+        if (!dataStruct->write(b_curves_data[i]->getData(), b_curves_data[i]->getLength())) {
+            return dataStruct;
+        }
+    }
+    if (has_a_curves) {
+        dataStruct->write(clut->getData(), clut->getLength());
+        for (size_t i = 0; i < kNumChannels; ++i) {
+            dataStruct->write(a_curves_data[i]->getData(), a_curves_data[i]->getLength());
+        }
+    }
+    return dataStruct;
+}
+
+sp<DataStruct> IccHelper::writeIccProfile(ultrahdr_transfer_function tf,
+                                          ultrahdr_color_gamut gamut) {
+    ICCHeader header;
+
+    std::vector<std::pair<uint32_t, sp<DataStruct>>> tags;
+
+    // Compute profile description tag
+    std::string desc = get_desc_string(tf, gamut);
+
+    tags.emplace_back(kTAG_desc, write_text_tag(desc.c_str()));
+
+    Matrix3x3 toXYZD50;
+    switch (gamut) {
+        case ULTRAHDR_COLORGAMUT_BT709:
+            toXYZD50 = kSRGB;
+            break;
+        case ULTRAHDR_COLORGAMUT_P3:
+            toXYZD50 = kDisplayP3;
+            break;
+        case ULTRAHDR_COLORGAMUT_BT2100:
+            toXYZD50 = kRec2020;
+            break;
+        default:
+            // Should not reach here.
+            return nullptr;
+    }
+
+    // Compute primaries.
+    {
+        tags.emplace_back(kTAG_rXYZ,
+                write_xyz_tag(toXYZD50.vals[0][0], toXYZD50.vals[1][0], toXYZD50.vals[2][0]));
+        tags.emplace_back(kTAG_gXYZ,
+                write_xyz_tag(toXYZD50.vals[0][1], toXYZD50.vals[1][1], toXYZD50.vals[2][1]));
+        tags.emplace_back(kTAG_bXYZ,
+                write_xyz_tag(toXYZD50.vals[0][2], toXYZD50.vals[1][2], toXYZD50.vals[2][2]));
+    }
+
+    // Compute white point tag (must be D50)
+    tags.emplace_back(kTAG_wtpt, write_xyz_tag(kD50_x, kD50_y, kD50_z));
+
+    // Compute transfer curves.
+    if (tf != ULTRAHDR_TF_PQ) {
+        if (tf == ULTRAHDR_TF_HLG) {
+            std::vector<uint8_t> trc_table;
+            trc_table.resize(kTrcTableSize * 2);
+            for (uint32_t i = 0; i < kTrcTableSize; ++i) {
+                float x = i / (kTrcTableSize - 1.f);
+                float y = hlgOetf(x);
+                y *= compute_tone_map_gain(tf, y);
+                float_to_table16(y, &trc_table[2 * i]);
+            }
+
+            tags.emplace_back(kTAG_rTRC,
+                    write_trc_tag(kTrcTableSize, reinterpret_cast<uint8_t*>(trc_table.data())));
+            tags.emplace_back(kTAG_gTRC,
+                    write_trc_tag(kTrcTableSize, reinterpret_cast<uint8_t*>(trc_table.data())));
+            tags.emplace_back(kTAG_bTRC,
+                    write_trc_tag(kTrcTableSize, reinterpret_cast<uint8_t*>(trc_table.data())));
+        } else {
+            tags.emplace_back(kTAG_rTRC, write_trc_tag_for_linear());
+            tags.emplace_back(kTAG_gTRC, write_trc_tag_for_linear());
+            tags.emplace_back(kTAG_bTRC, write_trc_tag_for_linear());
+        }
+    }
+
+    // Compute CICP.
+    if (tf == ULTRAHDR_TF_HLG || tf == ULTRAHDR_TF_PQ) {
+        // The CICP tag is present in ICC 4.4, so update the header's version.
+        header.version = Endian_SwapBE32(0x04400000);
+
+        uint32_t color_primaries = 0;
+        if (gamut == ULTRAHDR_COLORGAMUT_BT709) {
+            color_primaries = kCICPPrimariesSRGB;
+        } else if (gamut == ULTRAHDR_COLORGAMUT_P3) {
+            color_primaries = kCICPPrimariesP3;
+        }
+
+        uint32_t transfer_characteristics = 0;
+        if (tf == ULTRAHDR_TF_SRGB) {
+            transfer_characteristics = kCICPTrfnSRGB;
+        } else if (tf == ULTRAHDR_TF_LINEAR) {
+            transfer_characteristics = kCICPTrfnLinear;
+        } else if (tf == ULTRAHDR_TF_PQ) {
+            transfer_characteristics = kCICPTrfnPQ;
+        } else if (tf == ULTRAHDR_TF_HLG) {
+            transfer_characteristics = kCICPTrfnHLG;
+        }
+        tags.emplace_back(kTAG_cicp, write_cicp_tag(color_primaries, transfer_characteristics));
+    }
+
+    // Compute A2B0.
+    if (tf == ULTRAHDR_TF_PQ) {
+        std::vector<uint8_t> a2b_grid;
+        a2b_grid.resize(kGridSize * kGridSize * kGridSize * kNumChannels * 2);
+        size_t a2b_grid_index = 0;
+        for (uint32_t r_index = 0; r_index < kGridSize; ++r_index) {
+            for (uint32_t g_index = 0; g_index < kGridSize; ++g_index) {
+                for (uint32_t b_index = 0; b_index < kGridSize; ++b_index) {
+                    float rgb[3] = {
+                            r_index / (kGridSize - 1.f),
+                            g_index / (kGridSize - 1.f),
+                            b_index / (kGridSize - 1.f),
+                    };
+                    compute_lut_entry(toXYZD50, rgb);
+                    float_XYZD50_to_grid16_lab(rgb, &a2b_grid[a2b_grid_index]);
+                    a2b_grid_index += 6;
+                }
+            }
+        }
+        const uint8_t* grid_16 = reinterpret_cast<const uint8_t*>(a2b_grid.data());
+
+        uint8_t grid_points[kNumChannels];
+        for (size_t i = 0; i < kNumChannels; ++i) {
+            grid_points[i] = kGridSize;
+        }
+
+        auto a2b_data = write_mAB_or_mBA_tag(kTAG_mABType,
+                                             /* has_a_curves */ true,
+                                             grid_points,
+                                             grid_16);
+        tags.emplace_back(kTAG_A2B0, std::move(a2b_data));
+    }
+
+    // Compute B2A0.
+    if (tf == ULTRAHDR_TF_PQ) {
+        auto b2a_data = write_mAB_or_mBA_tag(kTAG_mBAType,
+                                             /* has_a_curves */ false,
+                                             /* grid_points */ nullptr,
+                                             /* grid_16 */ nullptr);
+        tags.emplace_back(kTAG_B2A0, std::move(b2a_data));
+    }
+
+    // Compute copyright tag
+    tags.emplace_back(kTAG_cprt, write_text_tag("Google Inc. 2022"));
+
+    // Compute the size of the profile.
+    size_t tag_data_size = 0;
+    for (const auto& tag : tags) {
+        tag_data_size += tag.second->getLength();
+    }
+    size_t tag_table_size = kICCTagTableEntrySize * tags.size();
+    size_t profile_size = kICCHeaderSize + tag_table_size + tag_data_size;
+
+    sp<DataStruct> dataStruct = sp<DataStruct>::make(profile_size + kICCIdentifierSize);
+
+    // Write identifier, chunk count, and chunk ID
+    if (!dataStruct->write(kICCIdentifier, sizeof(kICCIdentifier)) ||
+        !dataStruct->write8(1) || !dataStruct->write8(1)) {
+        ALOGE("writeIccProfile(): error in identifier");
+        return dataStruct;
+    }
+
+    // Write the header.
+    header.data_color_space = Endian_SwapBE32(Signature_RGB);
+    header.pcs = Endian_SwapBE32(tf == ULTRAHDR_TF_PQ ? Signature_Lab : Signature_XYZ);
+    header.size = Endian_SwapBE32(profile_size);
+    header.tag_count = Endian_SwapBE32(tags.size());
+
+    if (!dataStruct->write(&header, sizeof(header))) {
+        ALOGE("writeIccProfile(): error in header");
+        return dataStruct;
+    }
+
+    // Write the tag table. Track the offset and size of the previous tag to
+    // compute each tag's offset. An empty tag data entry would indicate that
+    // the previous tag is to be reused.
+    uint32_t last_tag_offset = sizeof(header) + tag_table_size;
+    uint32_t last_tag_size = 0;
+    for (const auto& tag : tags) {
+        last_tag_offset = last_tag_offset + last_tag_size;
+        last_tag_size = tag.second->getLength();
+        uint32_t tag_table_entry[3] = {
+                Endian_SwapBE32(tag.first),
+                Endian_SwapBE32(last_tag_offset),
+                Endian_SwapBE32(last_tag_size),
+        };
+        if (!dataStruct->write(tag_table_entry, sizeof(tag_table_entry))) {
+            ALOGE("writeIccProfile(): error in writing tag table");
+            return dataStruct;
+        }
+    }
+
+    // Write the tags.
+    for (const auto& tag : tags) {
+        if (!dataStruct->write(tag.second->getData(), tag.second->getLength())) {
+            ALOGE("writeIccProfile(): error in writing tags");
+            return dataStruct;
+        }
+    }
+
+    return dataStruct;
+}
+
+bool IccHelper::tagsEqualToMatrix(const Matrix3x3& matrix,
+                                  const uint8_t* red_tag,
+                                  const uint8_t* green_tag,
+                                  const uint8_t* blue_tag) {
+    sp<DataStruct> red_tag_test = write_xyz_tag(matrix.vals[0][0], matrix.vals[1][0],
+                                                matrix.vals[2][0]);
+    sp<DataStruct> green_tag_test = write_xyz_tag(matrix.vals[0][1], matrix.vals[1][1],
+                                                  matrix.vals[2][1]);
+    sp<DataStruct> blue_tag_test = write_xyz_tag(matrix.vals[0][2], matrix.vals[1][2],
+                                                 matrix.vals[2][2]);
+    return memcmp(red_tag, red_tag_test->getData(), kColorantTagSize) == 0 &&
+           memcmp(green_tag, green_tag_test->getData(), kColorantTagSize) == 0 &&
+           memcmp(blue_tag, blue_tag_test->getData(), kColorantTagSize) == 0;
+}
+
+ultrahdr_color_gamut IccHelper::readIccColorGamut(void* icc_data, size_t icc_size) {
+    // Each tag table entry consists of 3 fields of 4 bytes each.
+    static const size_t kTagTableEntrySize = 12;
+
+    if (icc_data == nullptr || icc_size < sizeof(ICCHeader) + kICCIdentifierSize) {
+        return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+    }
+
+    if (memcmp(icc_data, kICCIdentifier, sizeof(kICCIdentifier)) != 0) {
+        return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+    }
+
+    uint8_t* icc_bytes = reinterpret_cast<uint8_t*>(icc_data) + kICCIdentifierSize;
+
+    ICCHeader* header = reinterpret_cast<ICCHeader*>(icc_bytes);
+
+    // Use 0 to indicate not found, since offsets are always relative to start
+    // of ICC data and therefore a tag offset of zero would never be valid.
+    size_t red_primary_offset = 0, green_primary_offset = 0, blue_primary_offset = 0;
+    size_t red_primary_size = 0, green_primary_size = 0, blue_primary_size = 0;
+    for (size_t tag_idx = 0; tag_idx < Endian_SwapBE32(header->tag_count); ++tag_idx) {
+        uint32_t* tag_entry_start = reinterpret_cast<uint32_t*>(
+            icc_bytes + sizeof(ICCHeader) + tag_idx * kTagTableEntrySize);
+        // first 4 bytes are the tag signature, next 4 bytes are the tag offset,
+        // last 4 bytes are the tag length in bytes.
+        if (red_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_rXYZ)) {
+            red_primary_offset = Endian_SwapBE32(*(tag_entry_start+1));
+            red_primary_size = Endian_SwapBE32(*(tag_entry_start+2));
+        } else if (green_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_gXYZ)) {
+            green_primary_offset = Endian_SwapBE32(*(tag_entry_start+1));
+            green_primary_size = Endian_SwapBE32(*(tag_entry_start+2));
+        } else if (blue_primary_offset == 0 && *tag_entry_start == Endian_SwapBE32(kTAG_bXYZ)) {
+            blue_primary_offset = Endian_SwapBE32(*(tag_entry_start+1));
+            blue_primary_size = Endian_SwapBE32(*(tag_entry_start+2));
+        }
+    }
+
+    if (red_primary_offset == 0 || red_primary_size != kColorantTagSize ||
+        kICCIdentifierSize + red_primary_offset + red_primary_size > icc_size ||
+        green_primary_offset == 0 || green_primary_size != kColorantTagSize ||
+        kICCIdentifierSize + green_primary_offset + green_primary_size > icc_size ||
+        blue_primary_offset == 0 || blue_primary_size != kColorantTagSize ||
+        kICCIdentifierSize + blue_primary_offset + blue_primary_size > icc_size) {
+        return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+    }
+
+    uint8_t* red_tag = icc_bytes + red_primary_offset;
+    uint8_t* green_tag = icc_bytes + green_primary_offset;
+    uint8_t* blue_tag = icc_bytes + blue_primary_offset;
+
+    // Serialize tags as we do on encode and compare what we find to that to
+    // determine the gamut (since we don't have a need yet for full deserialize).
+    if (tagsEqualToMatrix(kSRGB, red_tag, green_tag, blue_tag)) {
+        return ULTRAHDR_COLORGAMUT_BT709;
+    } else if (tagsEqualToMatrix(kDisplayP3, red_tag, green_tag, blue_tag)) {
+        return ULTRAHDR_COLORGAMUT_P3;
+    } else if (tagsEqualToMatrix(kRec2020, red_tag, green_tag, blue_tag)) {
+        return ULTRAHDR_COLORGAMUT_BT2100;
+    }
+
+    // Didn't find a match to one of the profiles we write; indicate the gamut
+    // is unspecified since we don't understand it.
+    return ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+}
+
+} // namespace android::ultrahdr
diff --git a/libs/ultrahdr/include/ultrahdr/gainmapmath.h b/libs/ultrahdr/include/ultrahdr/gainmapmath.h
new file mode 100644
index 0000000..edf152d
--- /dev/null
+++ b/libs/ultrahdr/include/ultrahdr/gainmapmath.h
@@ -0,0 +1,488 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ULTRAHDR_RECOVERYMAPMATH_H
+#define ANDROID_ULTRAHDR_RECOVERYMAPMATH_H
+
+#include <cmath>
+#include <stdint.h>
+
+#include <ultrahdr/jpegr.h>
+
+namespace android::ultrahdr {
+
+#define CLIP3(x, min, max) (((x) < (min)) ? (min) : (((x) > (max)) ? (max) : (x)))
+
+////////////////////////////////////////////////////////////////////////////////
+// Framework
+
+const float kSdrWhiteNits = 100.0f;
+const float kHlgMaxNits = 1000.0f;
+const float kPqMaxNits = 10000.0f;
+
+struct Color {
+  union {
+    struct {
+      float r;
+      float g;
+      float b;
+    };
+    struct {
+      float y;
+      float u;
+      float v;
+    };
+  };
+};
+
+typedef Color (*ColorTransformFn)(Color);
+typedef float (*ColorCalculationFn)(Color);
+
+inline Color operator+=(Color& lhs, const Color& rhs) {
+  lhs.r += rhs.r;
+  lhs.g += rhs.g;
+  lhs.b += rhs.b;
+  return lhs;
+}
+inline Color operator-=(Color& lhs, const Color& rhs) {
+  lhs.r -= rhs.r;
+  lhs.g -= rhs.g;
+  lhs.b -= rhs.b;
+  return lhs;
+}
+
+inline Color operator+(const Color& lhs, const Color& rhs) {
+  Color temp = lhs;
+  return temp += rhs;
+}
+inline Color operator-(const Color& lhs, const Color& rhs) {
+  Color temp = lhs;
+  return temp -= rhs;
+}
+
+inline Color operator+=(Color& lhs, const float rhs) {
+  lhs.r += rhs;
+  lhs.g += rhs;
+  lhs.b += rhs;
+  return lhs;
+}
+inline Color operator-=(Color& lhs, const float rhs) {
+  lhs.r -= rhs;
+  lhs.g -= rhs;
+  lhs.b -= rhs;
+  return lhs;
+}
+inline Color operator*=(Color& lhs, const float rhs) {
+  lhs.r *= rhs;
+  lhs.g *= rhs;
+  lhs.b *= rhs;
+  return lhs;
+}
+inline Color operator/=(Color& lhs, const float rhs) {
+  lhs.r /= rhs;
+  lhs.g /= rhs;
+  lhs.b /= rhs;
+  return lhs;
+}
+
+inline Color operator+(const Color& lhs, const float rhs) {
+  Color temp = lhs;
+  return temp += rhs;
+}
+inline Color operator-(const Color& lhs, const float rhs) {
+  Color temp = lhs;
+  return temp -= rhs;
+}
+inline Color operator*(const Color& lhs, const float rhs) {
+  Color temp = lhs;
+  return temp *= rhs;
+}
+inline Color operator/(const Color& lhs, const float rhs) {
+  Color temp = lhs;
+  return temp /= rhs;
+}
+
+inline uint16_t floatToHalf(float f) {
+  // round-to-nearest-even: add last bit after truncated mantissa
+  const uint32_t b = *((uint32_t*)&f) + 0x00001000;
+
+  const uint32_t e = (b & 0x7F800000) >> 23; // exponent
+  const uint32_t m = b & 0x007FFFFF; // mantissa
+
+  // sign : normalized : denormalized : saturate
+  return (b & 0x80000000) >> 16
+            | (e > 112) * ((((e - 112) << 10) & 0x7C00) | m >> 13)
+            | ((e < 113) & (e > 101)) * ((((0x007FF000 + m) >> (125 - e)) + 1) >> 1)
+            | (e > 143) * 0x7FFF;
+}
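+// Illustrative values (not part of the library): floatToHalf(0.0f) == 0x0000,
+// floatToHalf(1.0f) == 0x3C00 and floatToHalf(-2.0f) == 0xC000; magnitudes whose biased exponent
+// exceeds 143 saturate to 0x7FFF plus the sign bit.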
+
+constexpr size_t kGainFactorPrecision = 10;
+constexpr size_t kGainFactorNumEntries = 1 << kGainFactorPrecision;
+struct GainLUT {
+  GainLUT(ultrahdr_metadata_ptr metadata) {
+    for (int idx = 0; idx < kGainFactorNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kGainFactorNumEntries - 1);
+      float logBoost = log2(metadata->minContentBoost) * (1.0f - value)
+                     + log2(metadata->maxContentBoost) * value;
+      mGainTable[idx] = exp2(logBoost);
+    }
+  }
+
+  GainLUT(ultrahdr_metadata_ptr metadata, float displayBoost) {
+    float boostFactor = displayBoost > 0 ? displayBoost / metadata->maxContentBoost : 1.0f;
+    for (int idx = 0; idx < kGainFactorNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kGainFactorNumEntries - 1);
+      float logBoost = log2(metadata->minContentBoost) * (1.0f - value)
+                     + log2(metadata->maxContentBoost) * value;
+      mGainTable[idx] = exp2(logBoost * boostFactor);
+    }
+  }
+
+  ~GainLUT() {
+  }
+
+  float getGainFactor(float gain) {
+    uint32_t idx = static_cast<uint32_t>(gain * (kGainFactorNumEntries - 1));
+    // TODO: Remove once conversion modules have appropriate clamping in place.
+    idx = CLIP3(idx, 0, kGainFactorNumEntries - 1);
+    return mGainTable[idx];
+  }
+
+private:
+  float mGainTable[kGainFactorNumEntries];
+};
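+// Usage sketch (illustrative; `metadata` is a hypothetical ultrahdr_metadata_ptr with
+// minContentBoost = 1.0f and maxContentBoost = 4.0f):
+//   GainLUT lut(metadata);
+//   float gain = lut.getGainFactor(0.5f);  // ~2.0, i.e. exp2(0.5 * log2(4.0))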
+
+struct ShepardsIDW {
+  ShepardsIDW(int mapScaleFactor) : mMapScaleFactor{mapScaleFactor} {
+    const int size = mMapScaleFactor * mMapScaleFactor * 4;
+    mWeights = new float[size];
+    mWeightsNR = new float[size];
+    mWeightsNB = new float[size];
+    mWeightsC = new float[size];
+    fillShepardsIDW(mWeights, 1, 1);
+    fillShepardsIDW(mWeightsNR, 0, 1);
+    fillShepardsIDW(mWeightsNB, 1, 0);
+    fillShepardsIDW(mWeightsC, 0, 0);
+  }
+  ~ShepardsIDW() {
+    delete[] mWeights;
+    delete[] mWeightsNR;
+    delete[] mWeightsNB;
+    delete[] mWeightsC;
+  }
+
+  int mMapScaleFactor;
+  // Image :-
+  // p00 p01 p02 p03 p04 p05 p06 p07
+  // p10 p11 p12 p13 p14 p15 p16 p17
+  // p20 p21 p22 p23 p24 p25 p26 p27
+  // p30 p31 p32 p33 p34 p35 p36 p37
+  // p40 p41 p42 p43 p44 p45 p46 p47
+  // p50 p51 p52 p53 p54 p55 p56 p57
+  // p60 p61 p62 p63 p64 p65 p66 p67
+  // p70 p71 p72 p73 p74 p75 p76 p77
+
+  // Gain map (for a scale factor of 4) :-
+  // m00 m01
+  // m10 m11
+
+  // Gain samples of the current 4x4, right 4x4, bottom 4x4, and bottom-right 4x4 blocks are used
+  // during reconstruction; hence each table entry holds 4 weights.
+  float* mWeights;
+  // TODO: check if it is ok to reuse mWeights in these places
+  float* mWeightsNR;  // no right
+  float* mWeightsNB;  // no bottom
+  float* mWeightsC;  // no right & bottom
+
+  float euclideanDistance(float x1, float x2, float y1, float y2);
+  void fillShepardsIDW(float *weights, int incR, int incB);
+};
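+// Layout note (descriptive; see sampleMap() in gainmapmath.cpp): each table stores
+// mMapScaleFactor x mMapScaleFactor groups of 4 floats, one group per (offset_x, offset_y)
+// position inside a gain-map cell; the 4 floats weight the current, right, bottom and
+// bottom-right gain-map samples respectively.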
+
+////////////////////////////////////////////////////////////////////////////////
+// sRGB transformations
+// NOTE: sRGB has the same color primaries as BT.709, but different transfer
+// function. For this reason, all sRGB transformations here apply to BT.709,
+// except for those concerning transfer functions.
+
+/*
+ * Calculate the luminance of a linear RGB sRGB pixel, according to
+ * IEC 61966-2-1/Amd 1:2003.
+ *
+ * [0.0, 1.0] range in and out.
+ */
+float srgbLuminance(Color e);
+
+/*
+ * Convert from OETF'd srgb RGB to YUV, according to ITU-R BT.709-6.
+ *
+ * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color srgbRgbToYuv(Color e_gamma);
+
+
+/*
+ * Convert from OETF'd srgb YUV to RGB, according to ITU-R BT.709-6.
+ *
+ * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color srgbYuvToRgb(Color e_gamma);
+
+/*
+ * Convert from srgb to linear, according to IEC 61966-2-1/Amd 1:2003.
+ *
+ * [0.0, 1.0] range in and out.
+ */
+float srgbInvOetf(float e_gamma);
+Color srgbInvOetf(Color e_gamma);
+float srgbInvOetfLUT(float e_gamma);
+Color srgbInvOetfLUT(Color e_gamma);
+
+constexpr size_t kSrgbInvOETFPrecision = 10;
+constexpr size_t kSrgbInvOETFNumEntries = 1 << kSrgbInvOETFPrecision;
+
+////////////////////////////////////////////////////////////////////////////////
+// Display-P3 transformations
+
+/*
+ * Calculate the luminance of a linear RGB P3 pixel, according to SMPTE EG 432-1.
+ *
+ * [0.0, 1.0] range in and out.
+ */
+float p3Luminance(Color e);
+
+/*
+ * Convert from OETF'd P3 RGB to YUV, according to ITU-R BT.601-7.
+ *
+ * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color p3RgbToYuv(Color e_gamma);
+
+/*
+ * Convert from OETF'd P3 YUV to RGB, according to ITU-R BT.601-7.
+ *
+ * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color p3YuvToRgb(Color e_gamma);
+
+
+////////////////////////////////////////////////////////////////////////////////
+// BT.2100 transformations - according to ITU-R BT.2100-2
+
+/*
+ * Calculate the luminance of a linear RGB BT.2100 pixel.
+ *
+ * [0.0, 1.0] range in and out.
+ */
+float bt2100Luminance(Color e);
+
+/*
+ * Convert from OETF'd BT.2100 RGB to YUV, according to ITU-R BT.2100-2.
+ *
+ * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color bt2100RgbToYuv(Color e_gamma);
+
+/*
+ * Convert from OETF'd BT.2100 YUV to RGB, according to ITU-R BT.2100-2.
+ *
+ * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace.
+ */
+Color bt2100YuvToRgb(Color e_gamma);
+
+/*
+ * Convert from scene luminance to HLG.
+ *
+ * [0.0, 1.0] range in and out.
+ */
+float hlgOetf(float e);
+Color hlgOetf(Color e);
+float hlgOetfLUT(float e);
+Color hlgOetfLUT(Color e);
+
+constexpr size_t kHlgOETFPrecision = 10;
+constexpr size_t kHlgOETFNumEntries = 1 << kHlgOETFPrecision;
+
+/*
+ * Convert from HLG to scene luminance.
+ *
+ * [0.0, 1.0] range in and out.
+ */
+float hlgInvOetf(float e_gamma);
+Color hlgInvOetf(Color e_gamma);
+float hlgInvOetfLUT(float e_gamma);
+Color hlgInvOetfLUT(Color e_gamma);
+
+constexpr size_t kHlgInvOETFPrecision = 10;
+constexpr size_t kHlgInvOETFNumEntries = 1 << kHlgInvOETFPrecision;
+
+/*
+ * Convert from scene luminance to PQ.
+ *
+ * [0.0, 1.0] range in and out.
+ */
+float pqOetf(float e);
+Color pqOetf(Color e);
+float pqOetfLUT(float e);
+Color pqOetfLUT(Color e);
+
+constexpr size_t kPqOETFPrecision = 10;
+constexpr size_t kPqOETFNumEntries = 1 << kPqOETFPrecision;
+
+/*
+ * Convert from PQ to scene luminance.
+ *
+ * [0.0, 1.0] range in and out.
+ */
+float pqInvOetf(float e_gamma);
+Color pqInvOetf(Color e_gamma);
+float pqInvOetfLUT(float e_gamma);
+Color pqInvOetfLUT(Color e_gamma);
+
+constexpr size_t kPqInvOETFPrecision = 10;
+constexpr size_t kPqInvOETFNumEntries = 1 << kPqInvOETFPrecision;
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Color space conversions
+
+/*
+ * Convert between color spaces with linear RGB data, according to ITU-R BT.2407 and EG 432-1.
+ *
+ * All conversions are derived from multiplying the matrix for XYZ to output RGB color gamut by the
+ * matrix for input RGB color gamut to XYZ. The matrix for converting from XYZ to an RGB gamut is
+ * always the inverse of the RGB gamut to XYZ matrix.
+ */
+Color bt709ToP3(Color e);
+Color bt709ToBt2100(Color e);
+Color p3ToBt709(Color e);
+Color p3ToBt2100(Color e);
+Color bt2100ToBt709(Color e);
+Color bt2100ToP3(Color e);
+
+/*
+ * Identity conversion.
+ */
+inline Color identityConversion(Color e) { return e; }
+
+/*
+ * Get the conversion to apply to the HDR image for gain map generation
+ */
+ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut, ultrahdr_color_gamut hdr_gamut);
+
+/*
+ * Convert between YUV encodings, according to ITU-R BT.709-6, ITU-R BT.601-7, and ITU-R BT.2100-2.
+ *
+ * BT.709 and BT.2100 have well-defined YUV encodings; Display-P3's is less well defined, but is
+ * treated as BT.601 by DataSpace, hence we do the same.
+ */
+Color yuv709To601(Color e_gamma);
+Color yuv709To2100(Color e_gamma);
+Color yuv601To709(Color e_gamma);
+Color yuv601To2100(Color e_gamma);
+Color yuv2100To709(Color e_gamma);
+Color yuv2100To601(Color e_gamma);
+
+/*
+ * Performs a transformation at the chroma x and y coordinates provided on a YUV420 image.
+ *
+ * Apply the transformation by determining transformed YUV for each of the 4 Y + 1 UV; each Y gets
+ * this result, and UV gets the averaged result.
+ *
+ * x_chroma and y_chroma should be less than or equal to half the image's width and height
+ * respectively, since the input is 4:2:0 subsampled.
+ */
+void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma,
+                     ColorTransformFn fn);
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Gain map calculations
+
+/*
+ * Calculate the 8-bit unsigned integer gain value for the given SDR and HDR
+ * luminances in linear space, and the hdr ratio to encode against.
+ *
+ * Note: since this library always uses gamma of 1.0, offsetSdr of 0.0, and
+ * offsetHdr of 0.0, this function doesn't handle different metadata values for
+ * these fields.
+ */
+uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata);
+uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata,
+                   float log2MinContentBoost, float log2MaxContentBoost);
+
+/*
+ * Calculates the linear luminance in nits after applying the given gain
+ * value, with the given hdr ratio, to the given sdr input in the range [0, 1].
+ *
+ * Note: similar to encodeGain(), this function only supports gamma 1.0,
+ * offsetSdr 0.0, offsetHdr 0.0, hdrCapacityMin 1.0, and hdrCapacityMax equal to
+ * gainMapMax, as this library encodes.
+ */
+Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata);
+Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata, float displayBoost);
+Color applyGainLUT(Color e, float gain, GainLUT& gainLUT);
+
+/*
+ * Helper for sampling from YUV 420 images.
+ */
+Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y);
+
+/*
+ * Helper for sampling from P010 images.
+ *
+ * Expect narrow-range image data for P010.
+ */
+Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y);
+
+/*
+ * Sample the image at the provided location, with a weighting based on nearby
+ * pixels and the map scale factor.
+ */
+Color sampleYuv420(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);
+
+/*
+ * Sample the image at the provided location, with a weighting based on nearby
+ * pixels and the map scale factor.
+ *
+ * Expect narrow-range image data for P010.
+ */
+Color sampleP010(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);
+
+/*
+ * Sample the gain value for the map from a given x,y coordinate on a scale
+ * that is map scale factor larger than the map size.
+ */
+float sampleMap(jr_uncompressed_ptr map, float map_scale_factor, size_t x, size_t y);
+float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y,
+                ShepardsIDW& weightTables);
+
+/*
+ * Convert from Color to RGBA1010102.
+ *
+ * Alpha always set to 1.0.
+ */
+uint32_t colorToRgba1010102(Color e_gamma);
+
+/*
+ * Convert from Color to F16.
+ *
+ * Alpha always set to 1.0.
+ */
+uint64_t colorToRgbaF16(Color e_gamma);
+
+} // namespace android::ultrahdr
+
+#endif // ANDROID_ULTRAHDR_RECOVERYMAPMATH_H
diff --git a/libs/ultrahdr/include/ultrahdr/icc.h b/libs/ultrahdr/include/ultrahdr/icc.h
new file mode 100644
index 0000000..7f047f8
--- /dev/null
+++ b/libs/ultrahdr/include/ultrahdr/icc.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ULTRAHDR_ICC_H
+#define ANDROID_ULTRAHDR_ICC_H
+
+#include <ultrahdr/jpegr.h>
+#include <ultrahdr/jpegrutils.h>
+#include <utils/RefBase.h>
+#include <cmath>
+#include <string>
+
+#ifdef USE_BIG_ENDIAN
+#undef USE_BIG_ENDIAN
+#define USE_BIG_ENDIAN true
+#endif
+
+namespace android::ultrahdr {
+
+typedef int32_t              Fixed;
+#define Fixed1               (1 << 16)
+#define MaxS32FitsInFloat    2147483520
+#define MinS32FitsInFloat    (-MaxS32FitsInFloat)
+#define FixedToFloat(x)      ((x) * 1.52587890625e-5f)
+
+typedef struct Matrix3x3 {
+    float vals[3][3];
+} Matrix3x3;
+
+// The D50 illuminant.
+constexpr float kD50_x = 0.9642f;
+constexpr float kD50_y = 1.0000f;
+constexpr float kD50_z = 0.8249f;
+
+enum {
+    // data_color_space
+    Signature_CMYK = 0x434D594B,
+    Signature_Gray = 0x47524159,
+    Signature_RGB  = 0x52474220,
+
+    // pcs
+    Signature_Lab  = 0x4C616220,
+    Signature_XYZ  = 0x58595A20,
+};
+
+typedef uint32_t FourByteTag;
+static inline constexpr FourByteTag SetFourByteTag(char a, char b, char c, char d) {
+    return (((uint32_t)a << 24) | ((uint32_t)b << 16) | ((uint32_t)c << 8) | (uint32_t)d);
+}
+
+static constexpr char kICCIdentifier[] = "ICC_PROFILE";
+// 12 for the actual identifier, +2 for the chunk count and chunk index which
+// will always follow.
+static constexpr size_t kICCIdentifierSize = 14;
+
+// This is equal to the header size according to the ICC specification (128)
+// plus the size of the tag count (4).  We include the tag count since we
+// always require it to be present anyway.
+static constexpr size_t kICCHeaderSize = 132;
+
+// Contains a signature (4), offset (4), and size (4).
+static constexpr size_t kICCTagTableEntrySize = 12;
+
+// size should be 20; 4 bytes for type descriptor, 4 bytes reserved, 12
+// bytes for a single XYZ number type (4 bytes per coordinate).
+static constexpr size_t kColorantTagSize = 20;
+
+static constexpr uint32_t kDisplay_Profile    = SetFourByteTag('m', 'n', 't', 'r');
+static constexpr uint32_t kRGB_ColorSpace     = SetFourByteTag('R', 'G', 'B', ' ');
+static constexpr uint32_t kXYZ_PCSSpace       = SetFourByteTag('X', 'Y', 'Z', ' ');
+static constexpr uint32_t kACSP_Signature     = SetFourByteTag('a', 'c', 's', 'p');
+
+static constexpr uint32_t kTAG_desc           = SetFourByteTag('d', 'e', 's', 'c');
+static constexpr uint32_t kTAG_TextType       = SetFourByteTag('m', 'l', 'u', 'c');
+static constexpr uint32_t kTAG_rXYZ           = SetFourByteTag('r', 'X', 'Y', 'Z');
+static constexpr uint32_t kTAG_gXYZ           = SetFourByteTag('g', 'X', 'Y', 'Z');
+static constexpr uint32_t kTAG_bXYZ           = SetFourByteTag('b', 'X', 'Y', 'Z');
+static constexpr uint32_t kTAG_wtpt           = SetFourByteTag('w', 't', 'p', 't');
+static constexpr uint32_t kTAG_rTRC           = SetFourByteTag('r', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_gTRC           = SetFourByteTag('g', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_bTRC           = SetFourByteTag('b', 'T', 'R', 'C');
+static constexpr uint32_t kTAG_cicp           = SetFourByteTag('c', 'i', 'c', 'p');
+static constexpr uint32_t kTAG_cprt           = SetFourByteTag('c', 'p', 'r', 't');
+static constexpr uint32_t kTAG_A2B0           = SetFourByteTag('A', '2', 'B', '0');
+static constexpr uint32_t kTAG_B2A0           = SetFourByteTag('B', '2', 'A', '0');
+
+static constexpr uint32_t kTAG_CurveType      = SetFourByteTag('c', 'u', 'r', 'v');
+static constexpr uint32_t kTAG_mABType        = SetFourByteTag('m', 'A', 'B', ' ');
+static constexpr uint32_t kTAG_mBAType        = SetFourByteTag('m', 'B', 'A', ' ');
+static constexpr uint32_t kTAG_ParaCurveType  = SetFourByteTag('p', 'a', 'r', 'a');
+
+
+static constexpr Matrix3x3 kSRGB = {{
+    // ICC fixed-point (16.16) representation, taken from skcms. Please keep them exactly in sync.
+    // 0.436065674f, 0.385147095f, 0.143066406f,
+    // 0.222488403f, 0.716873169f, 0.060607910f,
+    // 0.013916016f, 0.097076416f, 0.714096069f,
+    { FixedToFloat(0x6FA2), FixedToFloat(0x6299), FixedToFloat(0x24A0) },
+    { FixedToFloat(0x38F5), FixedToFloat(0xB785), FixedToFloat(0x0F84) },
+    { FixedToFloat(0x0390), FixedToFloat(0x18DA), FixedToFloat(0xB6CF) },
+}};
+
+static constexpr Matrix3x3 kDisplayP3 = {{
+    {  0.515102f,   0.291965f,  0.157153f  },
+    {  0.241182f,   0.692236f,  0.0665819f },
+    { -0.00104941f, 0.0418818f, 0.784378f  },
+}};
+
+static constexpr Matrix3x3 kRec2020 = {{
+    {  0.673459f,   0.165661f,  0.125100f  },
+    {  0.279033f,   0.675338f,  0.0456288f },
+    { -0.00193139f, 0.0299794f, 0.797162f  },
+}};
+
+static constexpr uint32_t kCICPPrimariesSRGB = 1;
+static constexpr uint32_t kCICPPrimariesP3 = 12;
+static constexpr uint32_t kCICPPrimariesRec2020 = 9;
+
+static constexpr uint32_t kCICPTrfnSRGB = 1;
+static constexpr uint32_t kCICPTrfnLinear = 8;
+static constexpr uint32_t kCICPTrfnPQ = 16;
+static constexpr uint32_t kCICPTrfnHLG = 18;
+
+enum ParaCurveType {
+    kExponential_ParaCurveType = 0,
+    kGAB_ParaCurveType         = 1,
+    kGABC_ParaCurveType        = 2,
+    kGABDE_ParaCurveType       = 3,
+    kGABCDEF_ParaCurveType     = 4,
+};
+
+/**
+ *  Return the closest int for the given float. Returns MaxS32FitsInFloat for NaN.
+ */
+static inline int float_saturate2int(float x) {
+    x = x < MaxS32FitsInFloat ? x : MaxS32FitsInFloat;
+    x = x > MinS32FitsInFloat ? x : MinS32FitsInFloat;
+    return (int)x;
+}
+
+static Fixed float_round_to_fixed(float x) {
+    return float_saturate2int((float)floor((double)x * Fixed1 + 0.5));
+}
+
+static uint16_t float_round_to_unorm16(float x) {
+    x = x * 65535.f + 0.5;
+    if (x > 65535) return 65535;
+    if (x < 0) return 0;
+    return static_cast<uint16_t>(x);
+}
+
+static void float_to_table16(const float f, uint8_t* table_16) {
+    *reinterpret_cast<uint16_t*>(table_16) = Endian_SwapBE16(float_round_to_unorm16(f));
+}
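+// Illustrative values (not part of the library): float_round_to_fixed(1.0f) == Fixed1 (0x10000)
+// and float_round_to_unorm16(1.0f) == 0xFFFF; float_to_table16() stores that unorm16 value
+// big-endian into the table.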
+
+static bool isfinitef_(float x) { return 0 == x*0; }
+
+struct ICCHeader {
+    // Size of the profile (computed)
+    uint32_t size;
+    // Preferred CMM type (ignored)
+    uint32_t cmm_type = 0;
+    // Version 4.3 or 4.4 if CICP is included.
+    uint32_t version = Endian_SwapBE32(0x04300000);
+    // Display device profile
+    uint32_t profile_class = Endian_SwapBE32(kDisplay_Profile);
+    // RGB input color space;
+    uint32_t data_color_space = Endian_SwapBE32(kRGB_ColorSpace);
+    // Profile connection space.
+    uint32_t pcs = Endian_SwapBE32(kXYZ_PCSSpace);
+    // Date and time (ignored)
+    uint8_t creation_date_time[12] = {0};
+    // Profile signature
+    uint32_t signature = Endian_SwapBE32(kACSP_Signature);
+    // Platform target (ignored)
+    uint32_t platform = 0;
+    // Flags: not embedded, can be used independently
+    uint32_t flags = 0x00000000;
+    // Device manufacturer (ignored)
+    uint32_t device_manufacturer = 0;
+    // Device model (ignored)
+    uint32_t device_model = 0;
+    // Device attributes (ignored)
+    uint8_t device_attributes[8] = {0};
+    // Relative colorimetric rendering intent
+    uint32_t rendering_intent = Endian_SwapBE32(1);
+    // D50 standard illuminant (X, Y, Z)
+    uint32_t illuminant_X = Endian_SwapBE32(float_round_to_fixed(kD50_x));
+    uint32_t illuminant_Y = Endian_SwapBE32(float_round_to_fixed(kD50_y));
+    uint32_t illuminant_Z = Endian_SwapBE32(float_round_to_fixed(kD50_z));
+    // Profile creator (ignored)
+    uint32_t creator = 0;
+    // Profile id checksum (ignored)
+    uint8_t profile_id[16] = {0};
+    // Reserved (ignored)
+    uint8_t reserved[28] = {0};
+    // Technically not part of header, but required
+    uint32_t tag_count = 0;
+};
+
+class IccHelper {
+private:
+    static constexpr uint32_t kTrcTableSize = 65;
+    static constexpr uint32_t kGridSize = 17;
+    static constexpr size_t kNumChannels = 3;
+
+    static sp<DataStruct> write_text_tag(const char* text);
+    static std::string get_desc_string(const ultrahdr_transfer_function tf,
+                                       const ultrahdr_color_gamut gamut);
+    static sp<DataStruct> write_xyz_tag(float x, float y, float z);
+    static sp<DataStruct> write_trc_tag(const int table_entries, const void* table_16);
+    static sp<DataStruct> write_trc_tag_for_linear();
+    static float compute_tone_map_gain(const ultrahdr_transfer_function tf, float L);
+    static sp<DataStruct> write_cicp_tag(uint32_t color_primaries,
+                                         uint32_t transfer_characteristics);
+    static sp<DataStruct> write_mAB_or_mBA_tag(uint32_t type,
+                                               bool has_a_curves,
+                                               const uint8_t* grid_points,
+                                               const uint8_t* grid_16);
+    static void compute_lut_entry(const Matrix3x3& src_to_XYZD50, float rgb[3]);
+    static sp<DataStruct> write_clut(const uint8_t* grid_points, const uint8_t* grid_16);
+
+    // Checks if a set of xyz tags is equivalent to a 3x3 Matrix. Each input
+    // tag buffer assumed to be at least kColorantTagSize in size.
+    static bool tagsEqualToMatrix(const Matrix3x3& matrix,
+                                  const uint8_t* red_tag,
+                                  const uint8_t* green_tag,
+                                  const uint8_t* blue_tag);
+
+public:
+    // Output includes JPEG embedding identifier and chunk information, but not
+    // APPx information.
+    static sp<DataStruct> writeIccProfile(const ultrahdr_transfer_function tf,
+                                          const ultrahdr_color_gamut gamut);
+    // NOTE: this function is not robust; it can infer gamuts that IccHelper
+    // writes out but should not be considered a reference implementation for
+    // robust parsing of ICC profiles or their gamuts.
+    static ultrahdr_color_gamut readIccColorGamut(void* icc_data, size_t icc_size);
+};
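+// Usage sketch (illustrative, not part of this header):
+//   sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_HLG, ULTRAHDR_COLORGAMUT_P3);
+//   // icc->getData() / icc->getLength() can be handed to the JPEG encoder as the ICC segment.
+//   // Feeding that buffer back through readIccColorGamut(icc->getData(), icc->getLength()) is
+//   // expected to return ULTRAHDR_COLORGAMUT_P3.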
+}  // namespace android::ultrahdr
+
+#endif //ANDROID_ULTRAHDR_ICC_H
diff --git a/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h b/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h
new file mode 100644
index 0000000..8b5499a
--- /dev/null
+++ b/libs/ultrahdr/include/ultrahdr/jpegdecoderhelper.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ULTRAHDR_JPEGDECODERHELPER_H
+#define ANDROID_ULTRAHDR_JPEGDECODERHELPER_H
+
+// We must include cstdio before jpeglib.h. It is a requirement of libjpeg.
+#include <cstdio>
+extern "C" {
+#include <jerror.h>
+#include <jpeglib.h>
+}
+#include <utils/Errors.h>
+#include <vector>
+
+static const int kMaxWidth = 8192;
+static const int kMaxHeight = 8192;
+
+namespace android::ultrahdr {
+/*
+ * Encapsulates a converter from JPEG to raw image (YUV420 planar or grayscale) format.
+ * This class is not thread-safe.
+ */
+class JpegDecoderHelper {
+public:
+    JpegDecoderHelper();
+    ~JpegDecoderHelper();
+    /*
+     * Decompresses a JPEG image to raw image (YUV420 planar, grayscale or RGBA) format. After
+     * calling this method, call getDecompressedImagePtr() to get the image.
+     * Returns false if decompressing the image fails.
+     */
+    bool decompressImage(const void* image, int length, bool decodeToRGBA = false);
+    /*
+     * Returns the decompressed raw image buffer pointer. This method must be called only after
+     * calling decompressImage().
+     */
+    void* getDecompressedImagePtr();
+    /*
+     * Returns the decompressed raw image buffer size. This method must be called only after
+     * calling decompressImage().
+     */
+    size_t getDecompressedImageSize();
+    /*
+     * Returns the image width in pixels. This method must be called only after calling
+     * decompressImage().
+     */
+    size_t getDecompressedImageWidth();
+    /*
+     * Returns the image height in pixels. This method must be called only after calling
+     * decompressImage().
+     */
+    size_t getDecompressedImageHeight();
+    /*
+     * Returns the XMP data from the image.
+     */
+    void* getXMPPtr();
+    /*
+     * Returns the decompressed XMP buffer size. This method must be called only after
+     * calling decompressImage() or getCompressedImageParameters().
+     */
+    size_t getXMPSize();
+    /*
+     * Returns the EXIF data from the image.
+     */
+    void* getEXIFPtr();
+    /*
+     * Returns the decompressed EXIF buffer size. This method must be called only after
+     * calling decompressImage() or getCompressedImageParameters().
+     */
+    size_t getEXIFSize();
+    /*
+     * Returns the ICC data from the image.
+     */
+    void* getICCPtr();
+    /*
+     * Returns the decompressed ICC buffer size. This method must be called only after
+     * calling decompressImage() or getCompressedImageParameters().
+     */
+    size_t getICCSize();
+    /*
+     * Decompresses metadata of the image. All vectors are owned by the caller.
+     */
+    bool getCompressedImageParameters(const void* image, int length,
+                                      size_t* pWidth, size_t* pHeight,
+                                      std::vector<uint8_t>* iccData,
+                                      std::vector<uint8_t>* exifData);
+
+private:
+    bool decode(const void* image, int length, bool decodeToRGBA);
+    // Returns false if errors occur.
+    bool decompress(jpeg_decompress_struct* cinfo, const uint8_t* dest, bool isSingleChannel);
+    bool decompressYUV(jpeg_decompress_struct* cinfo, const uint8_t* dest);
+    bool decompressRGBA(jpeg_decompress_struct* cinfo, const uint8_t* dest);
+    bool decompressSingleChannel(jpeg_decompress_struct* cinfo, const uint8_t* dest);
+    // Process 16 lines of Y and 16 lines of U/V each time.
+    // We must pass at least 16 scanlines according to libjpeg documentation.
+    static const int kCompressBatchSize = 16;
+    // The buffer that holds the decompressed result.
+    std::vector<JOCTET> mResultBuffer;
+    // The buffer that holds XMP Data.
+    std::vector<JOCTET> mXMPBuffer;
+    // The buffer that holds EXIF Data.
+    std::vector<JOCTET> mEXIFBuffer;
+    // The buffer that holds ICC Data.
+    std::vector<JOCTET> mICCBuffer;
+
+    // Resolution of the decompressed image.
+    size_t mWidth;
+    size_t mHeight;
+};
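+// Usage sketch (illustrative; jpeg_data and jpeg_size are hypothetical caller-provided values):
+//   JpegDecoderHelper decoder;
+//   if (decoder.decompressImage(jpeg_data, jpeg_size)) {
+//       void* yuv = decoder.getDecompressedImagePtr();
+//       size_t width = decoder.getDecompressedImageWidth();
+//       size_t height = decoder.getDecompressedImageHeight();
+//       // yuv points at the YUV420 planar result; getDecompressedImageSize() gives its size.
+//   }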
+} /* namespace android::ultrahdr  */
+
+#endif // ANDROID_ULTRAHDR_JPEGDECODERHELPER_H
diff --git a/libs/ultrahdr/include/ultrahdr/jpegencoderhelper.h b/libs/ultrahdr/include/ultrahdr/jpegencoderhelper.h
new file mode 100644
index 0000000..2c6778e
--- /dev/null
+++ b/libs/ultrahdr/include/ultrahdr/jpegencoderhelper.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ULTRAHDR_JPEGENCODERHELPER_H
+#define ANDROID_ULTRAHDR_JPEGENCODERHELPER_H
+
+// We must include cstdio before jpeglib.h. It is a requirement of libjpeg.
+#include <cstdio>
+
+extern "C" {
+#include <jerror.h>
+#include <jpeglib.h>
+}
+
+#include <utils/Errors.h>
+#include <vector>
+
+namespace android::ultrahdr {
+
+/*
+ * Encapsulates a converter from raw images (YUV420 planar or grayscale) to JPEG format.
+ * This class is not thread-safe.
+ */
+class JpegEncoderHelper {
+public:
+    JpegEncoderHelper();
+    ~JpegEncoderHelper();
+
+    /*
+     * Compresses a YUV420 planar image to JPEG format. After calling this method, call
+     * getCompressedImagePtr() and getCompressedImageSize() to access the result. |quality| is the
+     * JPEG quality parameter; it ranges from 1 (poorest quality) to 100 (highest quality).
+     * |iccBuffer| is the buffer of the ICC segment which will be added to the compressed image.
+     * Returns false if errors occur during compression.
+     */
+    bool compressImage(const void* image, int width, int height, int quality,
+                       const void* iccBuffer, unsigned int iccSize, bool isSingleChannel = false);
+
+    /*
+     * Returns the compressed JPEG buffer pointer. This method must be called only after calling
+     * compressImage().
+     */
+    void* getCompressedImagePtr();
+
+    /*
+     * Returns the compressed JPEG buffer size. This method must be called only after calling
+     * compressImage().
+     */
+    size_t getCompressedImageSize();
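+
+    /*
+     * Usage sketch (illustrative only): |yuv|, |width| and |height| are placeholders for a
+     * caller-provided YUV420 planar buffer; 85 is just an example quality value.
+     *
+     *   JpegEncoderHelper encoder;
+     *   if (encoder.compressImage(yuv, width, height, 85, nullptr, 0)) {  // no ICC segment
+     *       void* jpeg = encoder.getCompressedImagePtr();
+     *       size_t jpegSize = encoder.getCompressedImageSize();
+     *   }
+     */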
+
+    /*
+     * Process 16 lines of Y and 16 lines of U/V each time.
+     * We must pass at least 16 scanlines according to libjpeg documentation.
+     */
+    static const int kCompressBatchSize = 16;
+private:
+    // initDestination(), emptyOutputBuffer(), terminateDestination() and outputErrorMessage()
+    // are callback functions to be passed into the jpeg library.
+    static void initDestination(j_compress_ptr cinfo);
+    static boolean emptyOutputBuffer(j_compress_ptr cinfo);
+    static void terminateDestination(j_compress_ptr cinfo);
+    static void outputErrorMessage(j_common_ptr cinfo);
+
+    // Returns false if errors occur.
+    bool encode(const void* inYuv, int width, int height, int jpegQuality,
+                const void* iccBuffer, unsigned int iccSize, bool isSingleChannel);
+    void setJpegDestination(jpeg_compress_struct* cinfo);
+    void setJpegCompressStruct(int width, int height, int quality, jpeg_compress_struct* cinfo,
+                               bool isSingleChannel);
+    // Returns false if errors occur.
+    bool compress(jpeg_compress_struct* cinfo, const uint8_t* image, bool isSingleChannel);
+    bool compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yuv);
+    bool compressSingleChannel(jpeg_compress_struct* cinfo, const uint8_t* image);
+
+    // The block size for encoded jpeg image buffer.
+    static const int kBlockSize = 16384;
+
+    // The buffer that holds the compressed result.
+    std::vector<JOCTET> mResultBuffer;
+};
+
+} /* namespace android::ultrahdr  */
+
+#endif // ANDROID_ULTRAHDR_JPEGENCODERHELPER_H
diff --git a/libs/ultrahdr/include/ultrahdr/jpegr.h b/libs/ultrahdr/include/ultrahdr/jpegr.h
new file mode 100644
index 0000000..a35fd30
--- /dev/null
+++ b/libs/ultrahdr/include/ultrahdr/jpegr.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ULTRAHDR_JPEGR_H
+#define ANDROID_ULTRAHDR_JPEGR_H
+
+#include "jpegencoderhelper.h"
+#include "jpegrerrorcode.h"
+#include "ultrahdr.h"
+
+#ifndef FLT_MAX
+#define FLT_MAX 0x1.fffffep127f
+#endif
+
+namespace android::ultrahdr {
+
+struct jpegr_info_struct {
+    size_t width;
+    size_t height;
+    std::vector<uint8_t>* iccData;
+    std::vector<uint8_t>* exifData;
+};
+
+/*
+ * Holds information for uncompressed image or gain map.
+ */
+struct jpegr_uncompressed_struct {
+    // Pointer to the data location.
+    void* data;
+    // Width of the gain map or the luma plane of the image in pixels.
+    int width;
+    // Height of the gain map or the luma plane of the image in pixels.
+    int height;
+    // Color gamut.
+    ultrahdr_color_gamut colorGamut;
+
+    // Values below are optional
+    // Pointer to chroma data. If this is null, the chroma plane is assumed to immediately
+    // follow the luma plane.
+    // Note: currently this feature is only supported for P010 image (HDR input).
+    void* chroma_data = nullptr;
+    // Stride of the Y plane in pixels. 0 means uninitialized; otherwise it must be greater
+    // than or equal to the luma width.
+    // Note: currently this feature is only supported for P010 image (HDR input).
+    int luma_stride = 0;
+    // Stride of the UV plane in pixels. 0 means uninitialized; otherwise it must be greater
+    // than or equal to the chroma width.
+    // Note: currently this feature is only supported for P010 image (HDR input).
+    int chroma_stride = 0;
+};
+
+/*
+ * Holds information for compressed image or gain map.
+ */
+struct jpegr_compressed_struct {
+    // Pointer to the data location.
+    void* data;
+    // Used data length in bytes.
+    int length;
+    // Maximum available data length in bytes.
+    int maxLength;
+    // Color gamut.
+    ultrahdr_color_gamut colorGamut;
+};
+
+/*
+ * Holds information for EXIF metadata.
+ */
+struct jpegr_exif_struct {
+    // Pointer to the data location.
+    void* data;
+    // Data length in bytes.
+    int length;
+};
+
+typedef struct jpegr_uncompressed_struct* jr_uncompressed_ptr;
+typedef struct jpegr_compressed_struct* jr_compressed_ptr;
+typedef struct jpegr_exif_struct* jr_exif_ptr;
+typedef struct jpegr_info_struct* jr_info_ptr;
+
+class JpegR {
+public:
+    /*
+     * Experimental only
+     *
+     * Encode API-0
+     * Compress JPEGR image from 10-bit HDR YUV.
+     *
+     * Tone map the HDR input to an SDR image, generate a gain map from the HDR and SDR images,
+     * compress SDR YUV to 8-bit JPEG and append the gain map to the end of the compressed
+     * JPEG.
+     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
+     * @param hdr_tf transfer function of the HDR image
+     * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
+     *             represents the maximum available size of the destination buffer, and it must be
+     *             set before calling this method. If the encoded JPEGR size exceeds
+     *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
+     * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is
+     *                the highest quality
+     * @param exif pointer to the exif metadata.
+     * @return NO_ERROR if encoding succeeds, error code if error occurs.
+     */
+    status_t encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+                         ultrahdr_transfer_function hdr_tf,
+                         jr_compressed_ptr dest,
+                         int quality,
+                         jr_exif_ptr exif);
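+
+    /*
+     * Usage sketch for Encode API-0 (illustrative only): |p010Buf|, |w|, |h| and |maxOutSize| are
+     * placeholders chosen by the caller; the quality value of 95 is just an example.
+     *
+     *   jpegr_uncompressed_struct p010;
+     *   p010.data = p010Buf;
+     *   p010.width = w;
+     *   p010.height = h;
+     *   p010.colorGamut = ULTRAHDR_COLORGAMUT_BT2100;
+     *
+     *   jpegr_compressed_struct dest;
+     *   dest.maxLength = maxOutSize;   // must be set before calling encodeJPEGR()
+     *   dest.data = malloc(dest.maxLength);
+     *
+     *   JpegR jpegr;
+     *   status_t ret = jpegr.encodeJPEGR(&p010, ULTRAHDR_TF_HLG, &dest, 95, nullptr);
+     */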
+
+    /*
+     * Encode API-1
+     * Compress JPEGR image from 10-bit HDR YUV and 8-bit SDR YUV.
+     *
+     * Generate gain map from the HDR and SDR inputs, compress SDR YUV to 8-bit JPEG and append
+     * the gain map to the end of the compressed JPEG. HDR and SDR inputs must be the same
+     * resolution. SDR input is assumed to use the sRGB transfer function.
+     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
+     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
+     * @param hdr_tf transfer function of the HDR image
+     * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
+     *             represents the maximum available size of the destination buffer, and it must be
+     *             set before calling this method. If the encoded JPEGR size exceeds
+     *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
+     * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is
+     *                the highest quality
+     * @param exif pointer to the exif metadata.
+     * @return NO_ERROR if encoding succeeds, error code if error occurs.
+     */
+    status_t encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+                         jr_uncompressed_ptr uncompressed_yuv_420_image,
+                         ultrahdr_transfer_function hdr_tf,
+                         jr_compressed_ptr dest,
+                         int quality,
+                         jr_exif_ptr exif);
+
+    /*
+     * Encode API-2
+     * Compress JPEGR image from 10-bit HDR YUV, 8-bit SDR YUV and compressed 8-bit JPEG.
+     *
+     * This method requires HAL Hardware JPEG encoder.
+     *
+     * Generate gain map from the HDR and SDR inputs, append the gain map to the end of the
+     * compressed JPEG. Adds an ICC profile if one isn't present in the input JPEG image. HDR and
+     * SDR inputs must be the same resolution and color space. SDR image is assumed to use the sRGB
+     * transfer function.
+     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
+     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
+     *                                   Note: the SDR image must be the decoded version of the JPEG
+     *                                         input
+     * @param compressed_jpeg_image compressed 8-bit JPEG image
+     * @param hdr_tf transfer function of the HDR image
+     * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
+     *             represents the maximum available size of the destination buffer, and it must be
+     *             set before calling this method. If the encoded JPEGR size exceeds
+     *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
+     * @return NO_ERROR if encoding succeeds, error code if error occurs.
+     */
+    status_t encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+                         jr_uncompressed_ptr uncompressed_yuv_420_image,
+                         jr_compressed_ptr compressed_jpeg_image,
+                         ultrahdr_transfer_function hdr_tf,
+                         jr_compressed_ptr dest);
+
+    /*
+     * Encode API-3
+     * Compress JPEGR image from 10-bit HDR YUV and 8-bit SDR YUV.
+     *
+     * This method requires HAL Hardware JPEG encoder.
+     *
+     * Decode the compressed 8-bit JPEG image to YUV SDR, generate gain map from the HDR input
+     * and the decoded SDR result, append the gain map to the end of the compressed JPEG. Adds an
+     * ICC profile if one isn't present in the input JPEG image. HDR and SDR inputs must be the same
+     * resolution. JPEG image is assumed to use the sRGB transfer function.
+     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
+     * @param compressed_jpeg_image compressed 8-bit JPEG image
+     * @param hdr_tf transfer function of the HDR image
+     * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
+     *             represents the maximum available size of the destination buffer, and it must be
+     *             set before calling this method. If the encoded JPEGR size exceeds
+     *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
+     * @return NO_ERROR if encoding succeeds, error code if error occurs.
+     */
+    status_t encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+                         jr_compressed_ptr compressed_jpeg_image,
+                         ultrahdr_transfer_function hdr_tf,
+                         jr_compressed_ptr dest);
+
+    /*
+     * Encode API-4
+     * Assemble JPEGR image from SDR JPEG and gainmap JPEG.
+     *
+     * Assemble the primary JPEG image, the gain map and the metadata to JPEG/R format. Adds an ICC
+     * profile if one isn't present in the input JPEG image.
+     * @param compressed_jpeg_image compressed 8-bit JPEG image
+     * @param compressed_gainmap compressed 8-bit JPEG single channel image
+     * @param metadata metadata to be written in XMP of the primary jpeg
+     * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
+     *             represents the maximum available size of the destination buffer, and it must be
+     *             set before calling this method. If the encoded JPEGR size exceeds
+     *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
+     * @return NO_ERROR if encoding succeeds, error code if error occurs.
+     */
+    status_t encodeJPEGR(jr_compressed_ptr compressed_jpeg_image,
+                         jr_compressed_ptr compressed_gainmap,
+                         ultrahdr_metadata_ptr metadata,
+                         jr_compressed_ptr dest);
+
+    /*
+     * Decode API
+     * Decompress JPEGR image.
+     *
+     * This method assumes that the JPEGR image contains an ICC profile with primaries that match
+     * those of a color gamut that this library is aware of; Bt.709, Display-P3, or Bt.2100. It also
+     * assumes the base image uses the sRGB transfer function.
+     *
+     * This method only supports single gain map metadata values for fields that allow multi-channel
+     * metadata values.
+     *
+     * @param compressed_jpegr_image compressed JPEGR image.
+     * @param dest destination of the uncompressed JPEGR image.
+     * @param max_display_boost (optional) the maximum available boost supported by a display,
+     *                          the value must be greater than or equal to 1.0.
+     * @param exif destination of the decoded EXIF metadata. The default value is NULL, in which
+                   case the decoder ignores it. If non-NULL, the decoder writes the EXIF data into
+                   this structure. The format is defined in {@code jpegr_exif_struct}.
+     * @param output_format flag selecting the output color format. The default value is
+                            {@code JPEGR_OUTPUT_HDR_LINEAR}.
+                            ----------------------------------------------------------------------
+                            |      output_format       |    decoded color format to be written   |
+                            ----------------------------------------------------------------------
+                            |     JPEGR_OUTPUT_SDR     |                RGBA_8888                |
+                            ----------------------------------------------------------------------
+                            | JPEGR_OUTPUT_HDR_LINEAR  |        (default)RGBA_F16 linear         |
+                            ----------------------------------------------------------------------
+                            |   JPEGR_OUTPUT_HDR_PQ    |             RGBA_1010102 PQ             |
+                            ----------------------------------------------------------------------
+                            |   JPEGR_OUTPUT_HDR_HLG   |            RGBA_1010102 HLG             |
+                            ----------------------------------------------------------------------
+     * @param gain_map destination of the decoded gain map. The default value is NULL, in which
+                           case the decoder ignores it. If non-NULL, the decoder writes the decoded
+                           gain map data into this structure. The format is defined in
+                           {@code jpegr_uncompressed_struct}.
+     * @param metadata destination of the decoded metadata. The default value is NULL, in which
+                       case the decoder ignores it. If non-NULL, the decoder writes the metadata
+                       into this structure. The format is defined in
+                       {@code ultrahdr_metadata_struct}.
+     * @return NO_ERROR if decoding succeeds, error code if error occurs.
+     */
+    status_t decodeJPEGR(jr_compressed_ptr compressed_jpegr_image,
+                         jr_uncompressed_ptr dest,
+                         float max_display_boost = FLT_MAX,
+                         jr_exif_ptr exif = nullptr,
+                         ultrahdr_output_format output_format = ULTRAHDR_OUTPUT_HDR_LINEAR,
+                         jr_uncompressed_ptr gain_map = nullptr,
+                         ultrahdr_metadata_ptr metadata = nullptr);
+
+    /*
+    * Gets info from a JPEGR file without decoding it.
+    *
+    * This method only supports single gain map metadata values for fields that allow multi-channel
+    * metadata values.
+    *
+    * The output is written into the provided jpegr_info structure.
+    * @param compressed_jpegr_image compressed JPEGR image
+    * @param jpegr_info pointer to output JPEGR info. Members of jpegr_info
+    *         are owned by the caller
+    * @return NO_ERROR if JPEGR parsing succeeds, error code otherwise
+    */
+    status_t getJPEGRInfo(jr_compressed_ptr compressed_jpegr_image,
+                          jr_info_ptr jpegr_info);
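+
+    /*
+     * Decode usage sketch (illustrative only): |jpegrImage| is a placeholder for a populated
+     * jpegr_compressed_struct. The output allocation assumes 8 bytes per pixel for the default
+     * RGBA_F16 (HDR linear) output format.
+     *
+     *   jpegr_info_struct info{0, 0, nullptr, nullptr};
+     *   JpegR jpegr;
+     *   if (jpegr.getJPEGRInfo(&jpegrImage, &info) == NO_ERROR) {
+     *       jpegr_uncompressed_struct dest;
+     *       dest.data = malloc(info.width * info.height * 8);
+     *       status_t ret = jpegr.decodeJPEGR(&jpegrImage, &dest);
+     *   }
+     */
+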
+protected:
+    /*
+     * This method is called in the encoding pipeline. It will take the uncompressed 8-bit and
+     * 10-bit yuv images as input, and calculate the uncompressed gain map. The input images
+     * must be the same resolution. The SDR input is assumed to use the sRGB transfer function.
+     *
+     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
+     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
+     * @param hdr_tf transfer function of the HDR image
+     * @param dest gain map; caller responsible for memory of data
+     * @param metadata max_content_boost is filled in
+     * @param sdr_is_601 if true, then use BT.601 decoding of YUV regardless of SDR image gamut
+     * @return NO_ERROR if calculation succeeds, error code if error occurs.
+     */
+    status_t generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
+                             jr_uncompressed_ptr uncompressed_p010_image,
+                             ultrahdr_transfer_function hdr_tf,
+                             ultrahdr_metadata_ptr metadata,
+                             jr_uncompressed_ptr dest,
+                             bool sdr_is_601 = false);
+
+    /*
+     * This method is called in the decoding pipeline. It will take the uncompressed (decoded)
+     * 8-bit yuv image, the uncompressed (decoded) gain map, and extracted JPEG/R metadata as
+     * input, and calculate the 10-bit recovered image. The recovered output image is the same
+     * color gamut as the SDR image, with HLG transfer function, and is in RGBA1010102 data format.
+     * The SDR image is assumed to use the sRGB transfer function. The SDR image is also assumed to
+     * be a decoded JPEG for the purpose of YUV interpretation.
+     *
+     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
+     * @param uncompressed_gain_map uncompressed gain map
+     * @param metadata JPEG/R metadata extracted from XMP.
+     * @param output_format flag for setting output color format. if set to
+     *                      {@code JPEGR_OUTPUT_SDR}, decoder will only decode the primary image
+     *                      which is SDR. Default value is JPEGR_OUTPUT_HDR_LINEAR.
+     * @param max_display_boost the maximum available boost supported by a display
+     * @param dest reconstructed HDR image
+     * @return NO_ERROR if calculation succeeds, error code if error occurs.
+     */
+    status_t applyGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
+                          jr_uncompressed_ptr uncompressed_gain_map,
+                          ultrahdr_metadata_ptr metadata,
+                          ultrahdr_output_format output_format,
+                          float max_display_boost,
+                          jr_uncompressed_ptr dest);
+
+private:
+    /*
+     * This method is called in the encoding pipeline. It will encode the gain map.
+     *
+     * @param uncompressed_gain_map uncompressed gain map
+     * @param jpeg_encoder the encoder instance used to compress the gain map
+     * @return NO_ERROR if encoding succeeds, error code if error occurs.
+     */
+    status_t compressGainMap(jr_uncompressed_ptr uncompressed_gain_map,
+                             JpegEncoderHelper* jpeg_encoder);
+
+    /*
+     * This method is called to separate the primary image and the gain map image from a JPEGR image.
+     *
+     * @param compressed_jpegr_image compressed JPEGR image
+     * @param primary_image destination of primary image
+     * @param gain_map destination of compressed gain map
+     * @return NO_ERROR if calculation succeeds, error code if error occurs.
+    */
+    status_t extractPrimaryImageAndGainMap(jr_compressed_ptr compressed_jpegr_image,
+                                           jr_compressed_ptr primary_image,
+                                           jr_compressed_ptr gain_map);
+    /*
+     * This method is called in the decoding pipeline. It will read XMP metadata to find the start
+     * position of the compressed gain map, and will extract the compressed gain map.
+     *
+     * @param compressed_jpegr_image compressed JPEGR image
+     * @param dest destination of compressed gain map
+     * @return NO_ERROR if calculation succeeds, error code if error occurs.
+     */
+    status_t extractGainMap(jr_compressed_ptr compressed_jpegr_image,
+                            jr_compressed_ptr dest);
+
+    /*
+     * This method is called in the encoding pipeline. It will take the standard 8-bit JPEG image,
+     * the compressed gain map and optionally the exif package as inputs, and generate the XMP
+     * metadata, and finally append everything in the order of:
+     *     SOI, APP2(EXIF) (if EXIF is from outside), APP2(XMP), primary image, gain map
+     * Note that EXIF package is only available for encoding API-0 and API-1. For encoding API-2 and
+     * API-3 this parameter is null, but the primary image in JPEG/R may still have EXIF as long as
+     * the input JPEG has EXIF.
+     *
+     * @param compressed_jpeg_image compressed 8-bit JPEG image
+     * @param compressed_gain_map compressed gain map
+     * @param (nullable) exif EXIF package
+     * @param (nullable) icc ICC package
+     * @param icc_size length in bytes of ICC package
+     * @param metadata JPEG/R metadata to encode in XMP of the jpeg
+     * @param dest compressed JPEGR image
+     * @return NO_ERROR if calculation succeeds, error code if error occurs.
+     */
+    status_t appendGainMap(jr_compressed_ptr compressed_jpeg_image,
+                           jr_compressed_ptr compressed_gain_map,
+                           jr_exif_ptr exif,
+                           void* icc, size_t icc_size,
+                           ultrahdr_metadata_ptr metadata,
+                           jr_compressed_ptr dest);
+
+    /*
+     * This method will tone map an HDR image to an SDR image.
+     *
+     * @param src (input) uncompressed P010 image
+     * @param dest (output) tone mapping result as a YUV_420 image
+     * @return NO_ERROR if calculation succeeds, error code if error occurs.
+     */
+    status_t toneMap(jr_uncompressed_ptr src,
+                     jr_uncompressed_ptr dest);
+
+    /*
+     * This method will convert a YUV420 image from one YUV encoding to another in-place (e.g.
+     * Bt.709 to Bt.601 YUV encoding).
+     *
+     * src_encoding and dest_encoding indicate the encoding via the YUV conversion defined for that
+     * gamut. P3 indicates Rec.601, since this is how DataSpace encodes Display-P3 YUV data.
+     *
+     * @param image the YUV420 image to convert
+     * @param src_encoding input YUV encoding
+     * @param dest_encoding output YUV encoding
+     * @return NO_ERROR if calculation succeeds, error code if error occurs.
+     */
+    status_t convertYuv(jr_uncompressed_ptr image,
+                        ultrahdr_color_gamut src_encoding,
+                        ultrahdr_color_gamut dest_encoding);
+
+    /*
+     * This method will check the validity of the input arguments.
+     *
+     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
+     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
+     * @param hdr_tf transfer function of the HDR image
+     * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
+     *             represents the maximum available size of the destination buffer, and it must be
+     *             set before calling this method. If the encoded JPEGR size exceeds
+     *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
+     * @return NO_ERROR if the input args are valid, error code otherwise.
+     */
+     status_t areInputArgumentsValid(jr_uncompressed_ptr uncompressed_p010_image,
+                                     jr_uncompressed_ptr uncompressed_yuv_420_image,
+                                     ultrahdr_transfer_function hdr_tf,
+                                     jr_compressed_ptr dest);
+
+    /*
+     * This method will check the validity of the input arguments.
+     *
+     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
+     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
+     * @param hdr_tf transfer function of the HDR image
+     * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
+     *             represents the maximum available size of the destination buffer, and it must be
+     *             set before calling this method. If the encoded JPEGR size exceeds
+     *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
+     * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is
+     *                the highest quality
+     * @return NO_ERROR if the input args are valid, error code otherwise.
+     */
+     status_t areInputArgumentsValid(jr_uncompressed_ptr uncompressed_p010_image,
+                                     jr_uncompressed_ptr uncompressed_yuv_420_image,
+                                     ultrahdr_transfer_function hdr_tf,
+                                     jr_compressed_ptr dest,
+                                     int quality);
+};
+
+} // namespace android::ultrahdr
+
+#endif // ANDROID_ULTRAHDR_JPEGR_H
diff --git a/libs/ultrahdr/include/ultrahdr/jpegrerrorcode.h b/libs/ultrahdr/include/ultrahdr/jpegrerrorcode.h
new file mode 100644
index 0000000..0641232
--- /dev/null
+++ b/libs/ultrahdr/include/ultrahdr/jpegrerrorcode.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ULTRAHDR_JPEGRERRORCODE_H
+#define ANDROID_ULTRAHDR_JPEGRERRORCODE_H
+
+#include <utils/Errors.h>
+
+namespace android::ultrahdr {
+
+enum {
+    // status_t map for errors in the media framework
+    // OK or NO_ERROR or 0 represents no error.
+
+    // See system/core/include/utils/Errors.h
+    // System standard errors from -1 through (possibly) -133
+    //
+    // Errors with special meanings and side effects.
+    // INVALID_OPERATION:  Operation attempted in an illegal state (will try to signal to app).
+    // DEAD_OBJECT:        Signal from CodecBase to MediaCodec that MediaServer has died.
+    // NAME_NOT_FOUND:     Signal from CodecBase to MediaCodec that the component was not found.
+
+    // JPEGR errors
+    JPEGR_IO_ERROR_BASE                 = -10000,
+    ERROR_JPEGR_INVALID_INPUT_TYPE      = JPEGR_IO_ERROR_BASE,
+    ERROR_JPEGR_INVALID_OUTPUT_TYPE     = JPEGR_IO_ERROR_BASE - 1,
+    ERROR_JPEGR_INVALID_NULL_PTR        = JPEGR_IO_ERROR_BASE - 2,
+    ERROR_JPEGR_RESOLUTION_MISMATCH     = JPEGR_IO_ERROR_BASE - 3,
+    ERROR_JPEGR_BUFFER_TOO_SMALL        = JPEGR_IO_ERROR_BASE - 4,
+    ERROR_JPEGR_INVALID_COLORGAMUT      = JPEGR_IO_ERROR_BASE - 5,
+    ERROR_JPEGR_INVALID_TRANS_FUNC      = JPEGR_IO_ERROR_BASE - 6,
+    ERROR_JPEGR_INVALID_METADATA        = JPEGR_IO_ERROR_BASE - 7,
+    ERROR_JPEGR_UNSUPPORTED_METADATA    = JPEGR_IO_ERROR_BASE - 8,
+
+    JPEGR_RUNTIME_ERROR_BASE            = -20000,
+    ERROR_JPEGR_ENCODE_ERROR            = JPEGR_RUNTIME_ERROR_BASE - 1,
+    ERROR_JPEGR_DECODE_ERROR            = JPEGR_RUNTIME_ERROR_BASE - 2,
+    ERROR_JPEGR_CALCULATION_ERROR       = JPEGR_RUNTIME_ERROR_BASE - 3,
+    ERROR_JPEGR_METADATA_ERROR          = JPEGR_RUNTIME_ERROR_BASE - 4,
+    ERROR_JPEGR_TONEMAP_ERROR           = JPEGR_RUNTIME_ERROR_BASE - 5,
+
+    ERROR_JPEGR_UNSUPPORTED_FEATURE     = -20000,
+};
+
+}  // namespace android::ultrahdr
+
+#endif // ANDROID_ULTRAHDR_JPEGRERRORCODE_H
diff --git a/libs/ultrahdr/include/ultrahdr/jpegrutils.h b/libs/ultrahdr/include/ultrahdr/jpegrutils.h
new file mode 100644
index 0000000..4ab664e
--- /dev/null
+++ b/libs/ultrahdr/include/ultrahdr/jpegrutils.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ULTRAHDR_JPEGRUTILS_H
+#define ANDROID_ULTRAHDR_JPEGRUTILS_H
+
+#include <ultrahdr/jpegr.h>
+#include <utils/RefBase.h>
+
+#include <sstream>
+#include <stdint.h>
+#include <string>
+#include <cstdio>
+
+namespace android::ultrahdr {
+
+static constexpr uint32_t EndianSwap32(uint32_t value) {
+    return ((value & 0xFF) << 24) |
+           ((value & 0xFF00) << 8) |
+           ((value & 0xFF0000) >> 8) |
+           (value >> 24);
+}
+static inline uint16_t EndianSwap16(uint16_t value) {
+    return static_cast<uint16_t>((value >> 8) | ((value & 0xFF) << 8));
+}
+
+#if USE_BIG_ENDIAN
+    #define Endian_SwapBE32(n) EndianSwap32(n)
+    #define Endian_SwapBE16(n) EndianSwap16(n)
+#else
+    #define Endian_SwapBE32(n) (n)
+    #define Endian_SwapBE16(n) (n)
+#endif
+
+struct ultrahdr_metadata_struct;
+/*
+ * Mutable data structure. Holds information for metadata.
+ */
+class DataStruct : public RefBase {
+private:
+    void* data;
+    int writePos;
+    int length;
+    ~DataStruct();
+
+public:
+    DataStruct(int s);
+    void* getData();
+    int getLength();
+    int getBytesWritten();
+    bool write8(uint8_t value);
+    bool write16(uint16_t value);
+    bool write32(uint32_t value);
+    bool write(const void* src, int size);
+};
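+
+/*
+ * Usage sketch (illustrative only): DataStruct derives from RefBase and has a private destructor,
+ * so instances are managed through sp<>. The capacity of 4 bytes is just an example value.
+ *
+ *   sp<DataStruct> ds = new DataStruct(4);
+ *   ds->write16(0x1234);
+ *   ds->write16(0x5678);
+ *   // ds->getData() exposes the buffer; getLength() and getBytesWritten() report its capacity
+ *   // and how much has been written.
+ */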
+
+/*
+ * Helper function used for writing data to destination.
+ *
+ * @param destination destination of the data to be written.
+ * @param source source of data being written.
+ * @param length length of the data to be written.
+ * @param position cursor in destination where the data is to be written.
+ * @return NO_ERROR on success, or an error code otherwise.
+ */
+status_t Write(jr_compressed_ptr destination, const void* source, size_t length, int &position);
+
+
+/*
+ * Parses XMP packet and fills metadata with data from XMP
+ *
+ * @param xmp_data pointer to XMP packet
+ * @param xmp_size size of XMP packet
+ * @param metadata place to store HDR metadata values
+ * @return true if metadata is successfully retrieved, false otherwise
+*/
+bool getMetadataFromXMP(uint8_t* xmp_data, size_t xmp_size, ultrahdr_metadata_struct* metadata);
+
+/*
+ * This method generates XMP metadata for the primary image.
+ *
+ * Below is an example of the XMP metadata that this function generates, where
+ * secondary_image_length = 1000
+ *
+ * <x:xmpmeta
+ *   xmlns:x="adobe:ns:meta/"
+ *   x:xmptk="Adobe XMP Core 5.1.2">
+ *   <rdf:RDF
+ *     xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
+ *     <rdf:Description
+ *       xmlns:Container="http://ns.google.com/photos/1.0/container/"
+ *       xmlns:Item="http://ns.google.com/photos/1.0/container/item/"
+ *       xmlns:hdrgm="http://ns.adobe.com/hdr-gain-map/1.0/"
+ *       hdrgm:Version="1">
+ *       <Container:Directory>
+ *         <rdf:Seq>
+ *           <rdf:li
+ *             rdf:parseType="Resource">
+ *             <Container:Item
+ *               Item:Semantic="Primary"
+ *               Item:Mime="image/jpeg"/>
+ *           </rdf:li>
+ *           <rdf:li
+ *             rdf:parseType="Resource">
+ *             <Container:Item
+ *               Item:Semantic="GainMap"
+ *               Item:Mime="image/jpeg"
+ *               Item:Length="1000"/>
+ *           </rdf:li>
+ *         </rdf:Seq>
+ *       </Container:Directory>
+ *     </rdf:Description>
+ *   </rdf:RDF>
+ * </x:xmpmeta>
+ *
+ * @param secondary_image_length length of secondary image
+ * @return XMP metadata as a string
+ */
+std::string generateXmpForPrimaryImage(int secondary_image_length,
+                                       ultrahdr_metadata_struct& metadata);
+
+/*
+ * This method generates XMP metadata for the gain map image.
+ *
+ * Below is an example of the XMP metadata that this function generates, where
+ * max_content_boost = 8.0 and min_content_boost = 0.5 (stored in XMP in log2 space, giving
+ * hdrgm:GainMapMax = 3 and hdrgm:GainMapMin = -1):
+ *
+ * <x:xmpmeta
+ *   xmlns:x="adobe:ns:meta/"
+ *   x:xmptk="Adobe XMP Core 5.1.2">
+ *   <rdf:RDF
+ *     xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
+ *     <rdf:Description
+ *       xmlns:hdrgm="http://ns.adobe.com/hdr-gain-map/1.0/"
+ *       hdrgm:Version="1"
+ *       hdrgm:GainMapMin="-1"
+ *       hdrgm:GainMapMax="3"
+ *       hdrgm:Gamma="1"
+ *       hdrgm:OffsetSDR="0"
+ *       hdrgm:OffsetHDR="0"
+ *       hdrgm:HDRCapacityMin="0"
+ *       hdrgm:HDRCapacityMax="3"
+ *       hdrgm:BaseRenditionIsHDR="False"/>
+ *   </rdf:RDF>
+ * </x:xmpmeta>
+ *
+ * @param metadata JPEG/R metadata to encode as XMP
+ * @return XMP metadata as a string
+ */
+std::string generateXmpForSecondaryImage(ultrahdr_metadata_struct& metadata);
+
+}  // namespace android::ultrahdr
+
+#endif //ANDROID_ULTRAHDR_JPEGRUTILS_H
diff --git a/libs/ultrahdr/include/ultrahdr/multipictureformat.h b/libs/ultrahdr/include/ultrahdr/multipictureformat.h
new file mode 100644
index 0000000..c5bd09d
--- /dev/null
+++ b/libs/ultrahdr/include/ultrahdr/multipictureformat.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ULTRAHDR_MULTIPICTUREFORMAT_H
+#define ANDROID_ULTRAHDR_MULTIPICTUREFORMAT_H
+
+#include <ultrahdr/jpegrutils.h>
+
+#ifdef USE_BIG_ENDIAN
+#undef USE_BIG_ENDIAN
+#define USE_BIG_ENDIAN true
+#endif
+
+namespace android::ultrahdr {
+
+constexpr size_t kNumPictures = 2;
+constexpr size_t kMpEndianSize = 4;
+constexpr uint16_t kTagSerializedCount = 3;
+constexpr uint32_t kTagSize = 12;
+
+constexpr uint16_t kTypeLong = 0x4;
+constexpr uint16_t kTypeUndefined = 0x7;
+
+static constexpr uint8_t kMpfSig[] = {'M', 'P', 'F', '\0'};
+constexpr uint8_t kMpLittleEndian[kMpEndianSize] = {0x49, 0x49, 0x2A, 0x00};
+constexpr uint8_t kMpBigEndian[kMpEndianSize] = {0x4D, 0x4D, 0x00, 0x2A};
+
+constexpr uint16_t kVersionTag = 0xB000;
+constexpr uint16_t kVersionType = kTypeUndefined;
+constexpr uint32_t kVersionCount = 4;
+constexpr size_t kVersionSize = 4;
+constexpr uint8_t kVersionExpected[kVersionSize] = {'0', '1', '0', '0'};
+
+constexpr uint16_t kNumberOfImagesTag = 0xB001;
+constexpr uint16_t kNumberOfImagesType = kTypeLong;
+constexpr uint32_t kNumberOfImagesCount = 1;
+
+constexpr uint16_t kMPEntryTag = 0xB002;
+constexpr uint16_t kMPEntryType = kTypeUndefined;
+constexpr uint32_t kMPEntrySize = 16;
+
+constexpr uint32_t kMPEntryAttributeFormatJpeg = 0x0000000;
+constexpr uint32_t kMPEntryAttributeTypePrimary = 0x030000;
+
+size_t calculateMpfSize();
+sp<DataStruct> generateMpf(int primary_image_size, int primary_image_offset,
+                           int secondary_image_size, int secondary_image_offset);
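+
+/*
+ * Usage sketch (illustrative only): |primarySize|, |primaryOffset|, |gainMapSize| and
+ * |gainMapOffset| are placeholders computed by the caller when laying out the final file.
+ *
+ *   size_t mpfSize = calculateMpfSize();
+ *   sp<DataStruct> mpf = generateMpf(primarySize, primaryOffset, gainMapSize, gainMapOffset);
+ *   // mpf->getData() / mpf->getLength() yield the serialized MPF segment.
+ */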
+
+}  // namespace android::ultrahdr
+
+#endif //ANDROID_ULTRAHDR_MULTIPICTUREFORMAT_H
diff --git a/libs/ultrahdr/include/ultrahdr/ultrahdr.h b/libs/ultrahdr/include/ultrahdr/ultrahdr.h
new file mode 100644
index 0000000..17cc971
--- /dev/null
+++ b/libs/ultrahdr/include/ultrahdr/ultrahdr.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ULTRAHDR_ULTRAHDR_H
+#define ANDROID_ULTRAHDR_ULTRAHDR_H
+
+namespace android::ultrahdr {
+// Color gamuts for image data
+typedef enum {
+  ULTRAHDR_COLORGAMUT_UNSPECIFIED = -1,
+  ULTRAHDR_COLORGAMUT_BT709,
+  ULTRAHDR_COLORGAMUT_P3,
+  ULTRAHDR_COLORGAMUT_BT2100,
+  ULTRAHDR_COLORGAMUT_MAX = ULTRAHDR_COLORGAMUT_BT2100,
+} ultrahdr_color_gamut;
+
+// Transfer functions for image data
+typedef enum {
+  ULTRAHDR_TF_UNSPECIFIED = -1,
+  ULTRAHDR_TF_LINEAR = 0,
+  ULTRAHDR_TF_HLG = 1,
+  ULTRAHDR_TF_PQ = 2,
+  ULTRAHDR_TF_SRGB = 3,
+  ULTRAHDR_TF_MAX = ULTRAHDR_TF_SRGB,
+} ultrahdr_transfer_function;
+
+// Target output formats for decoder
+typedef enum {
+  ULTRAHDR_OUTPUT_UNSPECIFIED = -1,
+  ULTRAHDR_OUTPUT_SDR,          // SDR in RGBA_8888 color format
+  ULTRAHDR_OUTPUT_HDR_LINEAR,   // HDR in F16 color format (linear)
+  ULTRAHDR_OUTPUT_HDR_PQ,       // HDR in RGBA_1010102 color format (PQ transfer function)
+  ULTRAHDR_OUTPUT_HDR_HLG,      // HDR in RGBA_1010102 color format (HLG transfer function)
+  ULTRAHDR_OUTPUT_MAX = ULTRAHDR_OUTPUT_HDR_HLG,
+} ultrahdr_output_format;
+
+/*
+ * Holds information for gain map related metadata.
+ *
+ * Note: all values are stored in linear space. This differs from the metadata encoding in XMP, where
+ * maxContentBoost (aka gainMapMax), minContentBoost (aka gainMapMin), hdrCapacityMin, and
+ * hdrCapacityMax are stored in log2 space.
+ */
+struct ultrahdr_metadata_struct {
+  // Ultra HDR format version
+  std::string version;
+  // Max Content Boost for the map
+  float maxContentBoost;
+  // Min Content Boost for the map
+  float minContentBoost;
+  // Gamma of the map data
+  float gamma;
+  // Offset for SDR data in map calculations
+  float offsetSdr;
+  // Offset for HDR data in map calculations
+  float offsetHdr;
+  // HDR capacity to apply the map at all
+  float hdrCapacityMin;
+  // HDR capacity to apply the map completely
+  float hdrCapacityMax;
+};
+typedef struct ultrahdr_metadata_struct* ultrahdr_metadata_ptr;
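+
+/*
+ * Illustrative example (the values below are assumptions, not normative defaults): metadata for a
+ * gain map spanning SDR (1x) up to an 8x boost, expressed linearly as described above.
+ *
+ *   ultrahdr_metadata_struct metadata;
+ *   metadata.version = "1.0";
+ *   metadata.maxContentBoost = 8.0f;
+ *   metadata.minContentBoost = 1.0f;
+ *   metadata.gamma = 1.0f;
+ *   metadata.offsetSdr = 0.0f;
+ *   metadata.offsetHdr = 0.0f;
+ *   metadata.hdrCapacityMin = 1.0f;
+ *   metadata.hdrCapacityMax = 8.0f;
+ */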
+
+}  // namespace android::ultrahdr
+
+#endif //ANDROID_ULTRAHDR_ULTRAHDR_H
diff --git a/libs/ultrahdr/jpegdecoderhelper.cpp b/libs/ultrahdr/jpegdecoderhelper.cpp
new file mode 100644
index 0000000..fef5444
--- /dev/null
+++ b/libs/ultrahdr/jpegdecoderhelper.cpp
@@ -0,0 +1,499 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ultrahdr/jpegdecoderhelper.h>
+
+#include <utils/Log.h>
+
+#include <errno.h>
+#include <setjmp.h>
+#include <string>
+
+using namespace std;
+
+namespace android::ultrahdr {
+
+#define ALIGNM(x, m)  ((((x) + ((m) - 1)) / (m)) * (m))
+
+const uint32_t kAPP0Marker = JPEG_APP0;      // JFIF
+const uint32_t kAPP1Marker = JPEG_APP0 + 1;  // EXIF, XMP
+const uint32_t kAPP2Marker = JPEG_APP0 + 2;  // ICC
+
+const std::string kXmpNameSpace = "http://ns.adobe.com/xap/1.0/";
+const std::string kExifIdCode = "Exif";
+constexpr uint32_t kICCMarkerHeaderSize = 14;
+constexpr uint8_t kICCSig[] = {
+        'I', 'C', 'C', '_', 'P', 'R', 'O', 'F', 'I', 'L', 'E', '\0',
+};
+
+struct jpegr_source_mgr : jpeg_source_mgr {
+    jpegr_source_mgr(const uint8_t* ptr, int len);
+    ~jpegr_source_mgr();
+
+    const uint8_t* mBufferPtr;
+    size_t mBufferLength;
+};
+
+struct jpegrerror_mgr {
+    struct jpeg_error_mgr pub;
+    jmp_buf setjmp_buffer;
+};
+
+static void jpegr_init_source(j_decompress_ptr cinfo) {
+    jpegr_source_mgr* src = static_cast<jpegr_source_mgr*>(cinfo->src);
+    src->next_input_byte = static_cast<const JOCTET*>(src->mBufferPtr);
+    src->bytes_in_buffer = src->mBufferLength;
+}
+
+static boolean jpegr_fill_input_buffer(j_decompress_ptr /* cinfo */) {
+    ALOGE("%s : should not get here", __func__);
+    return FALSE;
+}
+
+static void jpegr_skip_input_data(j_decompress_ptr cinfo, long num_bytes) {
+    jpegr_source_mgr* src = static_cast<jpegr_source_mgr*>(cinfo->src);
+
+    if (num_bytes > static_cast<long>(src->bytes_in_buffer)) {
+        ALOGE("jpegr_skip_input_data - num_bytes > (long)src->bytes_in_buffer");
+    } else {
+        src->next_input_byte += num_bytes;
+        src->bytes_in_buffer -= num_bytes;
+    }
+}
+
+static void jpegr_term_source(j_decompress_ptr /*cinfo*/) {}
+
+jpegr_source_mgr::jpegr_source_mgr(const uint8_t* ptr, int len) :
+        mBufferPtr(ptr), mBufferLength(len) {
+    init_source = jpegr_init_source;
+    fill_input_buffer = jpegr_fill_input_buffer;
+    skip_input_data = jpegr_skip_input_data;
+    resync_to_restart = jpeg_resync_to_restart;
+    term_source = jpegr_term_source;
+}
+
+jpegr_source_mgr::~jpegr_source_mgr() {}
+
+static void jpegrerror_exit(j_common_ptr cinfo) {
+    jpegrerror_mgr* err = reinterpret_cast<jpegrerror_mgr*>(cinfo->err);
+    longjmp(err->setjmp_buffer, 1);
+}
+
+JpegDecoderHelper::JpegDecoderHelper() {
+}
+
+JpegDecoderHelper::~JpegDecoderHelper() {
+}
+
+bool JpegDecoderHelper::decompressImage(const void* image, int length, bool decodeToRGBA) {
+    if (image == nullptr || length <= 0) {
+        ALOGE("Image size can not be handled: %d", length);
+        return false;
+    }
+
+    mResultBuffer.clear();
+    mXMPBuffer.clear();
+    if (!decode(image, length, decodeToRGBA)) {
+        return false;
+    }
+
+    return true;
+}
+
+void* JpegDecoderHelper::getDecompressedImagePtr() {
+    return mResultBuffer.data();
+}
+
+size_t JpegDecoderHelper::getDecompressedImageSize() {
+    return mResultBuffer.size();
+}
+
+void* JpegDecoderHelper::getXMPPtr() {
+    return mXMPBuffer.data();
+}
+
+size_t JpegDecoderHelper::getXMPSize() {
+    return mXMPBuffer.size();
+}
+
+void* JpegDecoderHelper::getEXIFPtr() {
+    return mEXIFBuffer.data();
+}
+
+size_t JpegDecoderHelper::getEXIFSize() {
+    return mEXIFBuffer.size();
+}
+
+void* JpegDecoderHelper::getICCPtr() {
+    return mICCBuffer.data();
+}
+
+size_t JpegDecoderHelper::getICCSize() {
+    return mICCBuffer.size();
+}
+
+size_t JpegDecoderHelper::getDecompressedImageWidth() {
+    return mWidth;
+}
+
+size_t JpegDecoderHelper::getDecompressedImageHeight() {
+    return mHeight;
+}
+
+bool JpegDecoderHelper::decode(const void* image, int length, bool decodeToRGBA) {
+    jpeg_decompress_struct cinfo;
+    jpegr_source_mgr mgr(static_cast<const uint8_t*>(image), length);
+    jpegrerror_mgr myerr;
+    bool status = true;
+
+    cinfo.err = jpeg_std_error(&myerr.pub);
+    myerr.pub.error_exit = jpegrerror_exit;
+
+    if (setjmp(myerr.setjmp_buffer)) {
+        jpeg_destroy_decompress(&cinfo);
+        return false;
+    }
+    jpeg_create_decompress(&cinfo);
+
+    jpeg_save_markers(&cinfo, kAPP0Marker, 0xFFFF);
+    jpeg_save_markers(&cinfo, kAPP1Marker, 0xFFFF);
+    jpeg_save_markers(&cinfo, kAPP2Marker, 0xFFFF);
+
+    cinfo.src = &mgr;
+    jpeg_read_header(&cinfo, TRUE);
+
+    // Save XMP data, EXIF data, and ICC data.
+    // Here we only handle the first XMP / EXIF / ICC package.
+    // We assume that each package starts with a two-byte marker (e.g. FF E1 for an EXIF package),
+    // followed by two bytes of package length (stored in marker->original_length), and then the
+    // actual payload (stored in marker->data).
+    bool exifAppears = false;
+    bool xmpAppears = false;
+    bool iccAppears = false;
+    for (jpeg_marker_struct* marker = cinfo.marker_list;
+         marker && !(exifAppears && xmpAppears && iccAppears);
+         marker = marker->next) {
+
+        if (marker->marker != kAPP1Marker && marker->marker != kAPP2Marker) {
+            continue;
+        }
+        const unsigned int len = marker->data_length;
+        if (!xmpAppears &&
+            len > kXmpNameSpace.size() &&
+            !strncmp(reinterpret_cast<const char*>(marker->data),
+                     kXmpNameSpace.c_str(),
+                     kXmpNameSpace.size())) {
+            mXMPBuffer.resize(len+1, 0);
+            memcpy(static_cast<void*>(mXMPBuffer.data()), marker->data, len);
+            xmpAppears = true;
+        } else if (!exifAppears &&
+                   len > kExifIdCode.size() &&
+                   !strncmp(reinterpret_cast<const char*>(marker->data),
+                            kExifIdCode.c_str(),
+                            kExifIdCode.size())) {
+            mEXIFBuffer.resize(len, 0);
+            memcpy(static_cast<void*>(mEXIFBuffer.data()), marker->data, len);
+            exifAppears = true;
+        } else if (!iccAppears &&
+                   len > sizeof(kICCSig) &&
+                   !memcmp(marker->data, kICCSig, sizeof(kICCSig))) {
+            mICCBuffer.resize(len, 0);
+            memcpy(static_cast<void*>(mICCBuffer.data()), marker->data, len);
+            iccAppears = true;
+        }
+    }
+
+    if (cinfo.image_width > kMaxWidth || cinfo.image_height > kMaxHeight) {
+        // The constraint on max width and height is only due to allocation limits;
+        // tune these values based on the target device.
+        status = false;
+        goto CleanUp;
+    }
+
+    mWidth = cinfo.image_width;
+    mHeight = cinfo.image_height;
+
+    if (decodeToRGBA) {
+        if (cinfo.jpeg_color_space == JCS_GRAYSCALE) {
+            // We don't intend to support decoding grayscale to RGBA
+            status = false;
+            ALOGE("%s: decoding grayscale to RGBA is unsupported", __func__);
+            goto CleanUp;
+        }
+        // 4 bytes per pixel
+        mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 4);
+        cinfo.out_color_space = JCS_EXT_RGBA;
+    } else {
+        if (cinfo.jpeg_color_space == JCS_YCbCr) {
+            if (cinfo.comp_info[0].h_samp_factor != 2 ||
+                cinfo.comp_info[1].h_samp_factor != 1 ||
+                cinfo.comp_info[2].h_samp_factor != 1 ||
+                cinfo.comp_info[0].v_samp_factor != 2 ||
+                cinfo.comp_info[1].v_samp_factor != 1 ||
+                cinfo.comp_info[2].v_samp_factor != 1) {
+                status = false;
+                ALOGE("%s: decoding to YUV only supports 4:2:0 subsampling", __func__);
+                goto CleanUp;
+            }
+            mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 3 / 2, 0);
+        } else if (cinfo.jpeg_color_space == JCS_GRAYSCALE) {
+            mResultBuffer.resize(cinfo.image_width * cinfo.image_height, 0);
+        }
+        cinfo.out_color_space = cinfo.jpeg_color_space;
+        cinfo.raw_data_out = TRUE;
+    }
+
+    cinfo.dct_method = JDCT_IFAST;
+
+    jpeg_start_decompress(&cinfo);
+
+    if (!decompress(&cinfo, static_cast<const uint8_t*>(mResultBuffer.data()),
+            cinfo.jpeg_color_space == JCS_GRAYSCALE)) {
+        status = false;
+        goto CleanUp;
+    }
+
+CleanUp:
+    jpeg_finish_decompress(&cinfo);
+    jpeg_destroy_decompress(&cinfo);
+
+    return status;
+}
+
+bool JpegDecoderHelper::decompress(jpeg_decompress_struct* cinfo, const uint8_t* dest,
+        bool isSingleChannel) {
+    if (isSingleChannel) {
+        return decompressSingleChannel(cinfo, dest);
+    }
+    if (cinfo->out_color_space == JCS_EXT_RGBA)
+        return decompressRGBA(cinfo, dest);
+    else
+        return decompressYUV(cinfo, dest);
+}
+
+bool JpegDecoderHelper::getCompressedImageParameters(const void* image, int length,
+                              size_t* pWidth, size_t* pHeight,
+                              std::vector<uint8_t>* iccData, std::vector<uint8_t>* exifData) {
+    jpeg_decompress_struct cinfo;
+    jpegr_source_mgr mgr(static_cast<const uint8_t*>(image), length);
+    jpegrerror_mgr myerr;
+    cinfo.err = jpeg_std_error(&myerr.pub);
+    myerr.pub.error_exit = jpegrerror_exit;
+
+    if (setjmp(myerr.setjmp_buffer)) {
+        jpeg_destroy_decompress(&cinfo);
+        return false;
+    }
+    jpeg_create_decompress(&cinfo);
+
+    jpeg_save_markers(&cinfo, kAPP1Marker, 0xFFFF);
+    jpeg_save_markers(&cinfo, kAPP2Marker, 0xFFFF);
+
+    cinfo.src = &mgr;
+    if (jpeg_read_header(&cinfo, TRUE) != JPEG_HEADER_OK) {
+        jpeg_destroy_decompress(&cinfo);
+        return false;
+    }
+
+    if (pWidth != nullptr) {
+        *pWidth = cinfo.image_width;
+    }
+    if (pHeight != nullptr) {
+        *pHeight = cinfo.image_height;
+    }
+
+    if (iccData != nullptr) {
+        for (jpeg_marker_struct* marker = cinfo.marker_list; marker;
+             marker = marker->next) {
+            if (marker->marker != kAPP2Marker) {
+                continue;
+            }
+            if (marker->data_length <= kICCMarkerHeaderSize ||
+                memcmp(marker->data, kICCSig, sizeof(kICCSig)) != 0) {
+                continue;
+            }
+
+            iccData->insert(iccData->end(), marker->data, marker->data + marker->data_length);
+        }
+    }
+
+    if (exifData != nullptr) {
+        bool exifAppears = false;
+        for (jpeg_marker_struct* marker = cinfo.marker_list; marker && !exifAppears;
+             marker = marker->next) {
+            if (marker->marker != kAPP1Marker) {
+                continue;
+            }
+
+            const unsigned int len = marker->data_length;
+            if (len >= kExifIdCode.size() &&
+                !strncmp(reinterpret_cast<const char*>(marker->data), kExifIdCode.c_str(),
+                         kExifIdCode.size())) {
+                exifData->resize(len, 0);
+                memcpy(static_cast<void*>(exifData->data()), marker->data, len);
+                exifAppears = true;
+            }
+        }
+    }
+
+    jpeg_destroy_decompress(&cinfo);
+    return true;
+}
+
+bool JpegDecoderHelper::decompressRGBA(jpeg_decompress_struct* cinfo, const uint8_t* dest) {
+    JSAMPLE* decodeDst = (JSAMPLE*) dest;
+    uint32_t lines = 0;
+    // TODO: use batches for better efficiency
+    while (lines < cinfo->image_height) {
+        uint32_t ret = jpeg_read_scanlines(cinfo, &decodeDst, 1);
+        if (ret == 0) {
+            break;
+        }
+        decodeDst += cinfo->image_width * 4;
+        lines++;
+    }
+    return lines == cinfo->image_height;
+}
+
+bool JpegDecoderHelper::decompressYUV(jpeg_decompress_struct* cinfo, const uint8_t* dest) {
+    JSAMPROW y[kCompressBatchSize];
+    JSAMPROW cb[kCompressBatchSize / 2];
+    JSAMPROW cr[kCompressBatchSize / 2];
+    JSAMPARRAY planes[3] {y, cb, cr};
+
+    size_t y_plane_size = cinfo->image_width * cinfo->image_height;
+    size_t uv_plane_size = y_plane_size / 4;
+    uint8_t* y_plane = const_cast<uint8_t*>(dest);
+    uint8_t* u_plane = const_cast<uint8_t*>(dest + y_plane_size);
+    uint8_t* v_plane = const_cast<uint8_t*>(dest + y_plane_size + uv_plane_size);
+    std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
+    memset(empty.get(), 0, cinfo->image_width);
+
+    const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
+    bool is_width_aligned = (aligned_width == cinfo->image_width);
+    std::unique_ptr<uint8_t[]> buffer_intrm = nullptr;
+    uint8_t* y_plane_intrm = nullptr;
+    uint8_t* u_plane_intrm = nullptr;
+    uint8_t* v_plane_intrm = nullptr;
+    JSAMPROW y_intrm[kCompressBatchSize];
+    JSAMPROW cb_intrm[kCompressBatchSize / 2];
+    JSAMPROW cr_intrm[kCompressBatchSize / 2];
+    JSAMPARRAY planes_intrm[3] {y_intrm, cb_intrm, cr_intrm};
+    if (!is_width_aligned) {
+        size_t mcu_row_size = aligned_width * kCompressBatchSize * 3 / 2;
+        buffer_intrm = std::make_unique<uint8_t[]>(mcu_row_size);
+        y_plane_intrm = buffer_intrm.get();
+        u_plane_intrm = y_plane_intrm + (aligned_width * kCompressBatchSize);
+        v_plane_intrm = u_plane_intrm + (aligned_width * kCompressBatchSize) / 4;
+        for (int i = 0; i < kCompressBatchSize; ++i) {
+            y_intrm[i] = y_plane_intrm + i * aligned_width;
+        }
+        for (int i = 0; i < kCompressBatchSize / 2; ++i) {
+            int offset_intrm = i * (aligned_width / 2);
+            cb_intrm[i] = u_plane_intrm + offset_intrm;
+            cr_intrm[i] = v_plane_intrm + offset_intrm;
+        }
+    }
+
+    while (cinfo->output_scanline < cinfo->image_height) {
+        for (int i = 0; i < kCompressBatchSize; ++i) {
+            size_t scanline = cinfo->output_scanline + i;
+            if (scanline < cinfo->image_height) {
+                y[i] = y_plane + scanline * cinfo->image_width;
+            } else {
+                y[i] = empty.get();
+            }
+        }
+        // cb, cr only have half scanlines
+        for (int i = 0; i < kCompressBatchSize / 2; ++i) {
+            size_t scanline = cinfo->output_scanline / 2 + i;
+            if (scanline < cinfo->image_height / 2) {
+                int offset = scanline * (cinfo->image_width / 2);
+                cb[i] = u_plane + offset;
+                cr[i] = v_plane + offset;
+            } else {
+                cb[i] = cr[i] = empty.get();
+            }
+        }
+
+        int processed = jpeg_read_raw_data(cinfo, is_width_aligned ? planes : planes_intrm,
+                                           kCompressBatchSize);
+        if (processed != kCompressBatchSize) {
+            ALOGE("Number of processed lines does not equal input lines.");
+            return false;
+        }
+        if (!is_width_aligned) {
+            for (int i = 0; i < kCompressBatchSize; ++i) {
+                memcpy(y[i], y_intrm[i], cinfo->image_width);
+            }
+            for (int i = 0; i < kCompressBatchSize / 2; ++i) {
+                memcpy(cb[i], cb_intrm[i], cinfo->image_width / 2);
+                memcpy(cr[i], cr_intrm[i], cinfo->image_width / 2);
+            }
+        }
+    }
+    return true;
+}
+
+bool JpegDecoderHelper::decompressSingleChannel(jpeg_decompress_struct* cinfo, const uint8_t* dest) {
+    JSAMPROW y[kCompressBatchSize];
+    JSAMPARRAY planes[1] {y};
+
+    uint8_t* y_plane = const_cast<uint8_t*>(dest);
+    std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
+    memset(empty.get(), 0, cinfo->image_width);
+
+    int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
+    bool is_width_aligned = (aligned_width == cinfo->image_width);
+    std::unique_ptr<uint8_t[]> buffer_intrm = nullptr;
+    uint8_t* y_plane_intrm = nullptr;
+    JSAMPROW y_intrm[kCompressBatchSize];
+    JSAMPARRAY planes_intrm[1] {y_intrm};
+    if (!is_width_aligned) {
+        size_t mcu_row_size = aligned_width * kCompressBatchSize;
+        buffer_intrm = std::make_unique<uint8_t[]>(mcu_row_size);
+        y_plane_intrm = buffer_intrm.get();
+        for (int i = 0; i < kCompressBatchSize; ++i) {
+            y_intrm[i] = y_plane_intrm + i * aligned_width;
+        }
+    }
+
+    while (cinfo->output_scanline < cinfo->image_height) {
+        for (int i = 0; i < kCompressBatchSize; ++i) {
+            size_t scanline = cinfo->output_scanline + i;
+            if (scanline < cinfo->image_height) {
+                y[i] = y_plane + scanline * cinfo->image_width;
+            } else {
+                y[i] = empty.get();
+            }
+        }
+
+        int processed = jpeg_read_raw_data(cinfo, is_width_aligned ? planes : planes_intrm,
+                                           kCompressBatchSize);
+        if (processed != kCompressBatchSize / 2) {
+            ALOGE("Number of processed lines does not equal input lines.");
+            return false;
+        }
+        if (!is_width_aligned) {
+            for (int i = 0; i < kCompressBatchSize; ++i) {
+                memcpy(y[i], y_intrm[i], cinfo->image_width);
+            }
+        }
+    }
+    return true;
+}
+
+} // namespace android::ultrahdr
diff --git a/libs/ultrahdr/jpegencoderhelper.cpp b/libs/ultrahdr/jpegencoderhelper.cpp
new file mode 100644
index 0000000..a03547b
--- /dev/null
+++ b/libs/ultrahdr/jpegencoderhelper.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ultrahdr/jpegencoderhelper.h>
+
+#include <utils/Log.h>
+
+#include <errno.h>
+
+namespace android::ultrahdr {
+
+#define ALIGNM(x, m)  ((((x) + ((m) - 1)) / (m)) * (m))
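+// ALIGNM(x, m) rounds x up to the nearest multiple of m, e.g. ALIGNM(100, 16) == 112.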
+
+// The destination manager that can access |mResultBuffer| in JpegEncoderHelper.
+struct destination_mgr {
+public:
+    struct jpeg_destination_mgr mgr;
+    JpegEncoderHelper* encoder;
+};
+
+JpegEncoderHelper::JpegEncoderHelper() {
+}
+
+JpegEncoderHelper::~JpegEncoderHelper() {
+}
+
+bool JpegEncoderHelper::compressImage(const void* image, int width, int height, int quality,
+                                   const void* iccBuffer, unsigned int iccSize,
+                                   bool isSingleChannel) {
+    mResultBuffer.clear();
+    if (!encode(image, width, height, quality, iccBuffer, iccSize, isSingleChannel)) {
+        return false;
+    }
+    ALOGI("Compressed JPEG: %d[%dx%d] -> %zu bytes",
+        (width * height * 12) / 8, width, height, mResultBuffer.size());
+    return true;
+}
+
+void* JpegEncoderHelper::getCompressedImagePtr() {
+    return mResultBuffer.data();
+}
+
+size_t JpegEncoderHelper::getCompressedImageSize() {
+    return mResultBuffer.size();
+}
+
+void JpegEncoderHelper::initDestination(j_compress_ptr cinfo) {
+    destination_mgr* dest = reinterpret_cast<destination_mgr*>(cinfo->dest);
+    std::vector<JOCTET>& buffer = dest->encoder->mResultBuffer;
+    buffer.resize(kBlockSize);
+    dest->mgr.next_output_byte = &buffer[0];
+    dest->mgr.free_in_buffer = buffer.size();
+}
+
+boolean JpegEncoderHelper::emptyOutputBuffer(j_compress_ptr cinfo) {
+    destination_mgr* dest = reinterpret_cast<destination_mgr*>(cinfo->dest);
+    std::vector<JOCTET>& buffer = dest->encoder->mResultBuffer;
+    size_t oldsize = buffer.size();
+    buffer.resize(oldsize + kBlockSize);
+    dest->mgr.next_output_byte = &buffer[oldsize];
+    dest->mgr.free_in_buffer = kBlockSize;
+    return true;
+}
+
+void JpegEncoderHelper::terminateDestination(j_compress_ptr cinfo) {
+    destination_mgr* dest = reinterpret_cast<destination_mgr*>(cinfo->dest);
+    std::vector<JOCTET>& buffer = dest->encoder->mResultBuffer;
+    buffer.resize(buffer.size() - dest->mgr.free_in_buffer);
+}
+
+void JpegEncoderHelper::outputErrorMessage(j_common_ptr cinfo) {
+    char buffer[JMSG_LENGTH_MAX];
+
+    /* Create the message */
+    (*cinfo->err->format_message) (cinfo, buffer);
+    ALOGE("%s\n", buffer);
+}
+
+bool JpegEncoderHelper::encode(const void* image, int width, int height, int jpegQuality,
+                         const void* iccBuffer, unsigned int iccSize, bool isSingleChannel) {
+    jpeg_compress_struct cinfo;
+    jpeg_error_mgr jerr;
+
+    cinfo.err = jpeg_std_error(&jerr);
+    // Override output_message() to print error log with ALOGE().
+    cinfo.err->output_message = &outputErrorMessage;
+    jpeg_create_compress(&cinfo);
+    setJpegDestination(&cinfo);
+
+    setJpegCompressStruct(width, height, jpegQuality, &cinfo, isSingleChannel);
+    jpeg_start_compress(&cinfo, TRUE);
+
+    if (iccBuffer != nullptr && iccSize > 0) {
+        jpeg_write_marker(&cinfo, JPEG_APP0 + 2, static_cast<const JOCTET*>(iccBuffer), iccSize);
+    }
+
+    bool status = compress(&cinfo, static_cast<const uint8_t*>(image), isSingleChannel);
+    jpeg_finish_compress(&cinfo);
+    jpeg_destroy_compress(&cinfo);
+
+    return status;
+}
+
+void JpegEncoderHelper::setJpegDestination(jpeg_compress_struct* cinfo) {
+    destination_mgr* dest = static_cast<struct destination_mgr *>((*cinfo->mem->alloc_small) (
+            (j_common_ptr) cinfo, JPOOL_PERMANENT, sizeof(destination_mgr)));
+    dest->encoder = this;
+    dest->mgr.init_destination = &initDestination;
+    dest->mgr.empty_output_buffer = &emptyOutputBuffer;
+    dest->mgr.term_destination = &terminateDestination;
+    cinfo->dest = reinterpret_cast<struct jpeg_destination_mgr*>(dest);
+}
+
+void JpegEncoderHelper::setJpegCompressStruct(int width, int height, int quality,
+                                        jpeg_compress_struct* cinfo, bool isSingleChannel) {
+    cinfo->image_width = width;
+    cinfo->image_height = height;
+    if (isSingleChannel) {
+        cinfo->input_components = 1;
+        cinfo->in_color_space = JCS_GRAYSCALE;
+    } else {
+        cinfo->input_components = 3;
+        cinfo->in_color_space = JCS_YCbCr;
+    }
+    jpeg_set_defaults(cinfo);
+
+    jpeg_set_quality(cinfo, quality, TRUE);
+    jpeg_set_colorspace(cinfo, isSingleChannel ? JCS_GRAYSCALE : JCS_YCbCr);
+    cinfo->raw_data_in = TRUE;
+    cinfo->dct_method = JDCT_IFAST;
+
+    if (!isSingleChannel) {
+        // Configure sampling factors. The sampling factor is JPEG subsampling 420 because the
+        // source format is YUV420.
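+        // With 2x2 luma and 1x1 chroma factors, one iMCU row covers 16 luma rows and
+        // 8 chroma rows, which is why compressYuv() feeds kCompressBatchSize luma rows
+        // and kCompressBatchSize / 2 chroma rows per jpeg_write_raw_data() call.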
+        cinfo->comp_info[0].h_samp_factor = 2;
+        cinfo->comp_info[0].v_samp_factor = 2;
+        cinfo->comp_info[1].h_samp_factor = 1;
+        cinfo->comp_info[1].v_samp_factor = 1;
+        cinfo->comp_info[2].h_samp_factor = 1;
+        cinfo->comp_info[2].v_samp_factor = 1;
+    }
+}
+
+bool JpegEncoderHelper::compress(
+        jpeg_compress_struct* cinfo, const uint8_t* image, bool isSingleChannel) {
+    if (isSingleChannel) {
+        return compressSingleChannel(cinfo, image);
+    }
+    return compressYuv(cinfo, image);
+}
+
+bool JpegEncoderHelper::compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yuv) {
+    JSAMPROW y[kCompressBatchSize];
+    JSAMPROW cb[kCompressBatchSize / 2];
+    JSAMPROW cr[kCompressBatchSize / 2];
+    JSAMPARRAY planes[3] {y, cb, cr};
+
+    size_t y_plane_size = cinfo->image_width * cinfo->image_height;
+    size_t uv_plane_size = y_plane_size / 4;
+    uint8_t* y_plane = const_cast<uint8_t*>(yuv);
+    uint8_t* u_plane = const_cast<uint8_t*>(yuv + y_plane_size);
+    uint8_t* v_plane = const_cast<uint8_t*>(yuv + y_plane_size + uv_plane_size);
+    std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
+    memset(empty.get(), 0, cinfo->image_width);
+
+    const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
+    const bool is_width_aligned = (aligned_width == cinfo->image_width);
+    std::unique_ptr<uint8_t[]> buffer_intrm = nullptr;
+    uint8_t* y_plane_intrm = nullptr;
+    uint8_t* u_plane_intrm = nullptr;
+    uint8_t* v_plane_intrm = nullptr;
+    JSAMPROW y_intrm[kCompressBatchSize];
+    JSAMPROW cb_intrm[kCompressBatchSize / 2];
+    JSAMPROW cr_intrm[kCompressBatchSize / 2];
+    JSAMPARRAY planes_intrm[3]{y_intrm, cb_intrm, cr_intrm};
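+    // When the width is not a multiple of kCompressBatchSize, stage each batch in an
+    // intermediate buffer whose rows are padded (and zeroed) out to aligned_width, so
+    // jpeg_write_raw_data() never reads past the end of the caller's planes.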
+    if (!is_width_aligned) {
+        size_t mcu_row_size = aligned_width * kCompressBatchSize * 3 / 2;
+        buffer_intrm = std::make_unique<uint8_t[]>(mcu_row_size);
+        y_plane_intrm = buffer_intrm.get();
+        u_plane_intrm = y_plane_intrm + (aligned_width * kCompressBatchSize);
+        v_plane_intrm = u_plane_intrm + (aligned_width * kCompressBatchSize) / 4;
+        for (int i = 0; i < kCompressBatchSize; ++i) {
+            y_intrm[i] = y_plane_intrm + i * aligned_width;
+            memset(y_intrm[i] + cinfo->image_width, 0, aligned_width - cinfo->image_width);
+        }
+        for (int i = 0; i < kCompressBatchSize / 2; ++i) {
+            int offset_intrm = i * (aligned_width / 2);
+            cb_intrm[i] = u_plane_intrm + offset_intrm;
+            cr_intrm[i] = v_plane_intrm + offset_intrm;
+            memset(cb_intrm[i] + cinfo->image_width / 2, 0,
+                   (aligned_width - cinfo->image_width) / 2);
+            memset(cr_intrm[i] + cinfo->image_width / 2, 0,
+                   (aligned_width - cinfo->image_width) / 2);
+        }
+    }
+
+    while (cinfo->next_scanline < cinfo->image_height) {
+        for (int i = 0; i < kCompressBatchSize; ++i) {
+            size_t scanline = cinfo->next_scanline + i;
+            if (scanline < cinfo->image_height) {
+                y[i] = y_plane + scanline * cinfo->image_width;
+            } else {
+                y[i] = empty.get();
+            }
+            if (!is_width_aligned) {
+                memcpy(y_intrm[i], y[i], cinfo->image_width);
+            }
+        }
+        // cb, cr only have half scanlines
+        for (int i = 0; i < kCompressBatchSize / 2; ++i) {
+            size_t scanline = cinfo->next_scanline / 2 + i;
+            if (scanline < cinfo->image_height / 2) {
+                int offset = scanline * (cinfo->image_width / 2);
+                cb[i] = u_plane + offset;
+                cr[i] = v_plane + offset;
+            } else {
+                cb[i] = cr[i] = empty.get();
+            }
+            if (!is_width_aligned) {
+                memcpy(cb_intrm[i], cb[i], cinfo->image_width / 2);
+                memcpy(cr_intrm[i], cr[i], cinfo->image_width / 2);
+            }
+        }
+        int processed = jpeg_write_raw_data(cinfo, is_width_aligned ? planes : planes_intrm,
+                                            kCompressBatchSize);
+        if (processed != kCompressBatchSize) {
+            ALOGE("Number of processed lines does not equal input lines.");
+            return false;
+        }
+    }
+    return true;
+}
+
+bool JpegEncoderHelper::compressSingleChannel(jpeg_compress_struct* cinfo, const uint8_t* image) {
+    JSAMPROW y[kCompressBatchSize];
+    JSAMPARRAY planes[1] {y};
+
+    uint8_t* y_plane = const_cast<uint8_t*>(image);
+    std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
+    memset(empty.get(), 0, cinfo->image_width);
+
+    const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
+    bool is_width_aligned = (aligned_width == cinfo->image_width);
+    std::unique_ptr<uint8_t[]> buffer_intrm = nullptr;
+    uint8_t* y_plane_intrm = nullptr;
+    JSAMPROW y_intrm[kCompressBatchSize];
+    JSAMPARRAY planes_intrm[]{y_intrm};
+    if (!is_width_aligned) {
+        size_t mcu_row_size = aligned_width * kCompressBatchSize;
+        buffer_intrm = std::make_unique<uint8_t[]>(mcu_row_size);
+        y_plane_intrm = buffer_intrm.get();
+        for (int i = 0; i < kCompressBatchSize; ++i) {
+            y_intrm[i] = y_plane_intrm + i * aligned_width;
+            memset(y_intrm[i] + cinfo->image_width, 0, aligned_width - cinfo->image_width);
+        }
+    }
+
+    while (cinfo->next_scanline < cinfo->image_height) {
+        for (int i = 0; i < kCompressBatchSize; ++i) {
+            size_t scanline = cinfo->next_scanline + i;
+            if (scanline < cinfo->image_height) {
+                y[i] = y_plane + scanline * cinfo->image_width;
+            } else {
+                y[i] = empty.get();
+            }
+            if (!is_width_aligned) {
+                memcpy(y_intrm[i], y[i], cinfo->image_width);
+            }
+        }
+        int processed = jpeg_write_raw_data(cinfo, is_width_aligned ? planes : planes_intrm,
+                                            kCompressBatchSize);
+        if (processed != kCompressBatchSize / 2) {
+            ALOGE("Number of processed lines does not equal input lines.");
+            return false;
+        }
+    }
+    return true;
+}
+
+} // namespace android::ultrahdr
diff --git a/libs/ultrahdr/jpegr.cpp b/libs/ultrahdr/jpegr.cpp
new file mode 100644
index 0000000..9c57f34
--- /dev/null
+++ b/libs/ultrahdr/jpegr.cpp
@@ -0,0 +1,1486 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ultrahdr/jpegr.h>
+#include <ultrahdr/jpegencoderhelper.h>
+#include <ultrahdr/jpegdecoderhelper.h>
+#include <ultrahdr/gainmapmath.h>
+#include <ultrahdr/jpegrutils.h>
+#include <ultrahdr/multipictureformat.h>
+#include <ultrahdr/icc.h>
+
+#include <image_io/jpeg/jpeg_marker.h>
+#include <image_io/jpeg/jpeg_info.h>
+#include <image_io/jpeg/jpeg_scanner.h>
+#include <image_io/jpeg/jpeg_info_builder.h>
+#include <image_io/base/data_segment_data_source.h>
+#include <utils/Log.h>
+
+#include <map>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <cmath>
+#include <condition_variable>
+#include <deque>
+#include <mutex>
+#include <thread>
+#include <unistd.h>
+
+using namespace std;
+using namespace photos_editing_formats::image_io;
+
+namespace android::ultrahdr {
+
+#define USE_SRGB_INVOETF_LUT 1
+#define USE_HLG_OETF_LUT 1
+#define USE_PQ_OETF_LUT 1
+#define USE_HLG_INVOETF_LUT 1
+#define USE_PQ_INVOETF_LUT 1
+#define USE_APPLY_GAIN_LUT 1
+
+#define JPEGR_CHECK(x)          \
+  {                             \
+    status_t status = (x);      \
+    if ((status) != NO_ERROR) { \
+      return status;            \
+    }                           \
+  }
+
+// The current JPEGR version that we encode to
+static const char* const kJpegrVersion = "1.0";
+
+// Map is quarter res / sixteenth size
+static const size_t kMapDimensionScaleFactor = 4;
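+// e.g. a 4000x3000 primary image yields a roughly 1000x750 gain map (before the JPEG
+// block alignment described at kJpegBlock below).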
+
+// Gain map width is (image_width / kMapDimensionScaleFactor). If we were to
+// compress a 4:2:0 gain map in JPEG, we would need at least 2 samples. For
+// grayscale, 1 sample is sufficient. We use 2 here anyway.
+static const int kMinWidth = 2 * kMapDimensionScaleFactor;
+static const int kMinHeight = 2 * kMapDimensionScaleFactor;
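+// With kMapDimensionScaleFactor = 4 this makes 8x8 the smallest accepted primary image.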
+
+// JPEG block size.
+// JPEG encoding / decoding uses a block-based DCT transform: 16 x 16 for luma and
+// 8 x 8 for chroma.
+// Width must be divisible by 16 for luma, and by 8 for chroma.
+// If this criterion is not met, each line is zero-padded to the required block size.
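+// For example, a gain map 1000 pixels wide is padded to a 1008-pixel stride, the next
+// multiple of kJpegBlock (16, matching the luma block size above).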
+static const size_t kJpegBlock = JpegEncoderHelper::kCompressBatchSize;
+// JPEG compress quality (0 ~ 100) for gain map
+static const int kMapCompressQuality = 85;
+
+#define CONFIG_MULTITHREAD 1
+int GetCPUCoreCount() {
+  int cpuCoreCount = 1;
+#if CONFIG_MULTITHREAD
+#if defined(_SC_NPROCESSORS_ONLN)
+  cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN);
+#else
+  // _SC_NPROC_ONLN must be defined...
+  cpuCoreCount = sysconf(_SC_NPROC_ONLN);
+#endif
+#endif
+  return cpuCoreCount;
+}
+
+status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr uncompressed_p010_image,
+                                       jr_uncompressed_ptr uncompressed_yuv_420_image,
+                                       ultrahdr_transfer_function hdr_tf,
+                                       jr_compressed_ptr dest) {
+  if (uncompressed_p010_image == nullptr || uncompressed_p010_image->data == nullptr) {
+    ALOGE("received nullptr for uncompressed p010 image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (uncompressed_p010_image->width % 2 != 0
+          || uncompressed_p010_image->height % 2 != 0) {
+    ALOGE("Image dimensions cannot be odd, image dimensions %dx%d",
+          uncompressed_p010_image->width, uncompressed_p010_image->height);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (uncompressed_p010_image->width < kMinWidth
+          || uncompressed_p010_image->height < kMinHeight) {
+    ALOGE("Image dimensions cannot be less than %dx%d, image dimensions %dx%d",
+          kMinWidth, kMinHeight, uncompressed_p010_image->width, uncompressed_p010_image->height);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (uncompressed_p010_image->width > kMaxWidth
+          || uncompressed_p010_image->height > kMaxHeight) {
+    ALOGE("Image dimensions cannot be larger than %dx%d, image dimensions %dx%d",
+          kMaxWidth, kMaxHeight, uncompressed_p010_image->width, uncompressed_p010_image->height);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (uncompressed_p010_image->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED
+          || uncompressed_p010_image->colorGamut > ULTRAHDR_COLORGAMUT_MAX) {
+    ALOGE("Unrecognized p010 color gamut %d", uncompressed_p010_image->colorGamut);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (uncompressed_p010_image->luma_stride != 0
+          && uncompressed_p010_image->luma_stride < uncompressed_p010_image->width) {
+    ALOGE("Luma stride can not be smaller than width, stride=%d, width=%d",
+                uncompressed_p010_image->luma_stride, uncompressed_p010_image->width);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (uncompressed_p010_image->chroma_data != nullptr
+          && uncompressed_p010_image->chroma_stride < uncompressed_p010_image->width) {
+    ALOGE("Chroma stride can not be smaller than width, stride=%d, width=%d",
+          uncompressed_p010_image->chroma_stride,
+          uncompressed_p010_image->width);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (dest == nullptr || dest->data == nullptr) {
+    ALOGE("received nullptr for destination");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (hdr_tf <= ULTRAHDR_TF_UNSPECIFIED || hdr_tf > ULTRAHDR_TF_MAX
+          || hdr_tf == ULTRAHDR_TF_SRGB) {
+    ALOGE("Invalid hdr transfer function %d", hdr_tf);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (uncompressed_yuv_420_image == nullptr) {
+    return NO_ERROR;
+  }
+
+  if (uncompressed_yuv_420_image->data == nullptr) {
+    ALOGE("received nullptr for uncompressed 420 image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (uncompressed_yuv_420_image->luma_stride != 0) {
+    ALOGE("Stride is not supported for YUV420 image");
+    return ERROR_JPEGR_UNSUPPORTED_FEATURE;
+  }
+
+  if (uncompressed_yuv_420_image->chroma_data != nullptr) {
+    ALOGE("Pointer to chroma plane is not supported for YUV420 image, chroma data must"
+          "be immediately after the luma data.");
+    return ERROR_JPEGR_UNSUPPORTED_FEATURE;
+  }
+
+  if (uncompressed_p010_image->width != uncompressed_yuv_420_image->width
+      || uncompressed_p010_image->height != uncompressed_yuv_420_image->height) {
+    ALOGE("Image resolutions mismatch: P010: %dx%d, YUV420: %dx%d",
+              uncompressed_p010_image->width,
+              uncompressed_p010_image->height,
+              uncompressed_yuv_420_image->width,
+              uncompressed_yuv_420_image->height);
+    return ERROR_JPEGR_RESOLUTION_MISMATCH;
+  }
+
+  if (uncompressed_yuv_420_image->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED
+          || uncompressed_yuv_420_image->colorGamut > ULTRAHDR_COLORGAMUT_MAX) {
+    ALOGE("Unrecognized 420 color gamut %d", uncompressed_yuv_420_image->colorGamut);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  return NO_ERROR;
+}
+
+status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr uncompressed_p010_image,
+                                       jr_uncompressed_ptr uncompressed_yuv_420_image,
+                                       ultrahdr_transfer_function hdr_tf,
+                                       jr_compressed_ptr dest,
+                                       int quality) {
+  if (status_t ret = areInputArgumentsValid(
+          uncompressed_p010_image, uncompressed_yuv_420_image, hdr_tf, dest);
+      ret != NO_ERROR) {
+    return ret;
+  }
+
+  if (quality < 0 || quality > 100) {
+    ALOGE("quality factor is out side range [0-100], quality factor : %d", quality);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  return NO_ERROR;
+}
+
+/* Encode API-0 */
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+                            ultrahdr_transfer_function hdr_tf,
+                            jr_compressed_ptr dest,
+                            int quality,
+                            jr_exif_ptr exif) {
+  if (status_t ret = areInputArgumentsValid(
+          uncompressed_p010_image, /* uncompressed_yuv_420_image */ nullptr,
+          hdr_tf, dest, quality);
+      ret != NO_ERROR) {
+    return ret;
+  }
+
+  if (exif != nullptr && exif->data == nullptr) {
+    ALOGE("received nullptr for exif metadata");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  ultrahdr_metadata_struct metadata;
+  metadata.version = kJpegrVersion;
+
+  jpegr_uncompressed_struct uncompressed_yuv_420_image;
+  unique_ptr<uint8_t[]> uncompressed_yuv_420_image_data = make_unique<uint8_t[]>(
+      uncompressed_p010_image->width * uncompressed_p010_image->height * 3 / 2);
+  uncompressed_yuv_420_image.data = uncompressed_yuv_420_image_data.get();
+  JPEGR_CHECK(toneMap(uncompressed_p010_image, &uncompressed_yuv_420_image));
+
+  jpegr_uncompressed_struct map;
+  JPEGR_CHECK(generateGainMap(
+      &uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map));
+  std::unique_ptr<uint8_t[]> map_data;
+  map_data.reset(reinterpret_cast<uint8_t*>(map.data));
+
+  JpegEncoderHelper jpeg_encoder_gainmap;
+  JPEGR_CHECK(compressGainMap(&map, &jpeg_encoder_gainmap));
+  jpegr_compressed_struct compressed_map;
+  compressed_map.maxLength = jpeg_encoder_gainmap.getCompressedImageSize();
+  compressed_map.length = compressed_map.maxLength;
+  compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
+  compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+
+  sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+                                                  uncompressed_yuv_420_image.colorGamut);
+
+  // Convert to Bt601 YUV encoding for JPEG encode
+  JPEGR_CHECK(convertYuv(&uncompressed_yuv_420_image, uncompressed_yuv_420_image.colorGamut,
+                         ULTRAHDR_COLORGAMUT_P3));
+
+  JpegEncoderHelper jpeg_encoder;
+  if (!jpeg_encoder.compressImage(uncompressed_yuv_420_image.data,
+                                  uncompressed_yuv_420_image.width,
+                                  uncompressed_yuv_420_image.height, quality,
+                                  icc->getData(), icc->getLength())) {
+    return ERROR_JPEGR_ENCODE_ERROR;
+  }
+  jpegr_compressed_struct jpeg;
+  jpeg.data = jpeg_encoder.getCompressedImagePtr();
+  jpeg.length = jpeg_encoder.getCompressedImageSize();
+
+  // No ICC since JPEG encode already did it
+  JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0,
+                            &metadata, dest));
+
+  return NO_ERROR;
+}
+
+/* Encode API-1 */
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+                            jr_uncompressed_ptr uncompressed_yuv_420_image,
+                            ultrahdr_transfer_function hdr_tf,
+                            jr_compressed_ptr dest,
+                            int quality,
+                            jr_exif_ptr exif) {
+  if (uncompressed_yuv_420_image == nullptr) {
+    ALOGE("received nullptr for uncompressed 420 image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (exif != nullptr && exif->data == nullptr) {
+    ALOGE("received nullptr for exif metadata");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (status_t ret = areInputArgumentsValid(
+          uncompressed_p010_image, uncompressed_yuv_420_image, hdr_tf,
+          dest, quality);
+      ret != NO_ERROR) {
+    return ret;
+  }
+
+  ultrahdr_metadata_struct metadata;
+  metadata.version = kJpegrVersion;
+
+  jpegr_uncompressed_struct map;
+  JPEGR_CHECK(generateGainMap(
+      uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map));
+  std::unique_ptr<uint8_t[]> map_data;
+  map_data.reset(reinterpret_cast<uint8_t*>(map.data));
+
+  JpegEncoderHelper jpeg_encoder_gainmap;
+  JPEGR_CHECK(compressGainMap(&map, &jpeg_encoder_gainmap));
+  jpegr_compressed_struct compressed_map;
+  compressed_map.maxLength = jpeg_encoder_gainmap.getCompressedImageSize();
+  compressed_map.length = compressed_map.maxLength;
+  compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
+  compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+
+  sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+                                                  uncompressed_yuv_420_image->colorGamut);
+
+  // Convert to Bt601 YUV encoding for JPEG encode; make a copy so as to not clobber client data
+  unique_ptr<uint8_t[]> yuv_420_bt601_data = make_unique<uint8_t[]>(
+      uncompressed_yuv_420_image->width * uncompressed_yuv_420_image->height * 3 / 2);
+  memcpy(yuv_420_bt601_data.get(), uncompressed_yuv_420_image->data,
+         uncompressed_yuv_420_image->width * uncompressed_yuv_420_image->height * 3 / 2);
+
+  jpegr_uncompressed_struct yuv_420_bt601_image = {
+    yuv_420_bt601_data.get(), uncompressed_yuv_420_image->width, uncompressed_yuv_420_image->height,
+    uncompressed_yuv_420_image->colorGamut };
+  JPEGR_CHECK(convertYuv(&yuv_420_bt601_image, yuv_420_bt601_image.colorGamut,
+                         ULTRAHDR_COLORGAMUT_P3));
+
+  JpegEncoderHelper jpeg_encoder;
+  if (!jpeg_encoder.compressImage(yuv_420_bt601_image.data,
+                                  yuv_420_bt601_image.width,
+                                  yuv_420_bt601_image.height, quality,
+                                  icc->getData(), icc->getLength())) {
+    return ERROR_JPEGR_ENCODE_ERROR;
+  }
+  jpegr_compressed_struct jpeg;
+  jpeg.data = jpeg_encoder.getCompressedImagePtr();
+  jpeg.length = jpeg_encoder.getCompressedImageSize();
+
+  // No ICC since jpeg encode already did it
+  JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0,
+                            &metadata, dest));
+
+  return NO_ERROR;
+}
+
+/* Encode API-2 */
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+                            jr_uncompressed_ptr uncompressed_yuv_420_image,
+                            jr_compressed_ptr compressed_jpeg_image,
+                            ultrahdr_transfer_function hdr_tf,
+                            jr_compressed_ptr dest) {
+  if (uncompressed_yuv_420_image == nullptr) {
+    ALOGE("received nullptr for uncompressed 420 image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (compressed_jpeg_image == nullptr || compressed_jpeg_image->data == nullptr) {
+    ALOGE("received nullptr for compressed jpeg image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (status_t ret = areInputArgumentsValid(
+          uncompressed_p010_image, uncompressed_yuv_420_image, hdr_tf, dest);
+      ret != NO_ERROR) {
+    return ret;
+  }
+
+  ultrahdr_metadata_struct metadata;
+  metadata.version = kJpegrVersion;
+
+  jpegr_uncompressed_struct map;
+  JPEGR_CHECK(generateGainMap(
+      uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map));
+  std::unique_ptr<uint8_t[]> map_data;
+  map_data.reset(reinterpret_cast<uint8_t*>(map.data));
+
+  JpegEncoderHelper jpeg_encoder_gainmap;
+  JPEGR_CHECK(compressGainMap(&map, &jpeg_encoder_gainmap));
+  jpegr_compressed_struct compressed_map;
+  compressed_map.maxLength = jpeg_encoder_gainmap.getCompressedImageSize();
+  compressed_map.length = compressed_map.maxLength;
+  compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
+  compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+
+  // We just want to check if ICC is present, so don't do a full decode. Note,
+  // this doesn't verify that the ICC is valid.
+  JpegDecoderHelper decoder;
+  std::vector<uint8_t> icc;
+  decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
+                                       /* pWidth */ nullptr, /* pHeight */ nullptr,
+                                       &icc, /* exifData */ nullptr);
+
+  // Add ICC if not already present.
+  if (icc.size() > 0) {
+      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+                                /* icc */ nullptr, /* icc size */ 0, &metadata, dest));
+  } else {
+      sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+                                                         uncompressed_yuv_420_image->colorGamut);
+      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+                                newIcc->getData(), newIcc->getLength(), &metadata, dest));
+  }
+
+  return NO_ERROR;
+}
+
+/* Encode API-3 */
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+                            jr_compressed_ptr compressed_jpeg_image,
+                            ultrahdr_transfer_function hdr_tf,
+                            jr_compressed_ptr dest) {
+  if (compressed_jpeg_image == nullptr || compressed_jpeg_image->data == nullptr) {
+    ALOGE("received nullptr for compressed jpeg image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (status_t ret = areInputArgumentsValid(
+          uncompressed_p010_image, /* uncompressed_yuv_420_image */ nullptr,
+          hdr_tf, dest);
+      ret != NO_ERROR) {
+    return ret;
+  }
+
+  // Note: output is Bt.601 YUV encoded regardless of gamut, due to jpeg decode.
+  JpegDecoderHelper jpeg_decoder;
+  if (!jpeg_decoder.decompressImage(compressed_jpeg_image->data, compressed_jpeg_image->length)) {
+    return ERROR_JPEGR_DECODE_ERROR;
+  }
+  jpegr_uncompressed_struct uncompressed_yuv_420_image;
+  uncompressed_yuv_420_image.data = jpeg_decoder.getDecompressedImagePtr();
+  uncompressed_yuv_420_image.width = jpeg_decoder.getDecompressedImageWidth();
+  uncompressed_yuv_420_image.height = jpeg_decoder.getDecompressedImageHeight();
+  uncompressed_yuv_420_image.colorGamut = compressed_jpeg_image->colorGamut;
+
+  if (uncompressed_p010_image->width != uncompressed_yuv_420_image.width
+   || uncompressed_p010_image->height != uncompressed_yuv_420_image.height) {
+    return ERROR_JPEGR_RESOLUTION_MISMATCH;
+  }
+
+  ultrahdr_metadata_struct metadata;
+  metadata.version = kJpegrVersion;
+
+  jpegr_uncompressed_struct map;
+  // Indicate that the SDR image is Bt.601 YUV encoded.
+  JPEGR_CHECK(generateGainMap(
+      &uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map,
+      true /* sdr_is_601 */ ));
+  std::unique_ptr<uint8_t[]> map_data;
+  map_data.reset(reinterpret_cast<uint8_t*>(map.data));
+
+  JpegEncoderHelper jpeg_encoder_gainmap;
+  JPEGR_CHECK(compressGainMap(&map, &jpeg_encoder_gainmap));
+  jpegr_compressed_struct compressed_map;
+  compressed_map.maxLength = jpeg_encoder_gainmap.getCompressedImageSize();
+  compressed_map.length = compressed_map.maxLength;
+  compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
+  compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+
+  // We just want to check if ICC is present, so don't do a full decode. Note,
+  // this doesn't verify that the ICC is valid.
+  JpegDecoderHelper decoder;
+  std::vector<uint8_t> icc;
+  decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
+                                       /* pWidth */ nullptr, /* pHeight */ nullptr,
+                                       &icc, /* exifData */ nullptr);
+
+  // Add ICC if not already present.
+  if (icc.size() > 0) {
+      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+                                /* icc */ nullptr, /* icc size */ 0, &metadata, dest));
+  } else {
+      sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+                                                         uncompressed_yuv_420_image.colorGamut);
+      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
+                                newIcc->getData(), newIcc->getLength(), &metadata, dest));
+  }
+
+  return NO_ERROR;
+}
+
+/* Encode API-4 */
+status_t JpegR::encodeJPEGR(jr_compressed_ptr compressed_jpeg_image,
+                            jr_compressed_ptr compressed_gainmap,
+                            ultrahdr_metadata_ptr metadata,
+                            jr_compressed_ptr dest) {
+  if (compressed_jpeg_image == nullptr || compressed_jpeg_image->data == nullptr) {
+    ALOGE("received nullptr for compressed jpeg image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (compressed_gainmap == nullptr || compressed_gainmap->data == nullptr) {
+    ALOGE("received nullptr for compressed gain map");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (dest == nullptr || dest->data == nullptr) {
+    ALOGE("received nullptr for destination");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  // We just want to check if ICC is present, so don't do a full decode. Note,
+  // this doesn't verify that the ICC is valid.
+  JpegDecoderHelper decoder;
+  std::vector<uint8_t> icc;
+  decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
+                                       /* pWidth */ nullptr, /* pHeight */ nullptr,
+                                       &icc, /* exifData */ nullptr);
+
+  // Add ICC if not already present.
+  if (icc.size() > 0) {
+      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
+                                /* icc */ nullptr, /* icc size */ 0, metadata, dest));
+  } else {
+      sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+                                                         compressed_jpeg_image->colorGamut);
+      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
+                                newIcc->getData(), newIcc->getLength(), metadata, dest));
+  }
+
+  return NO_ERROR;
+}
+
+status_t JpegR::getJPEGRInfo(jr_compressed_ptr compressed_jpegr_image, jr_info_ptr jpegr_info) {
+  if (compressed_jpegr_image == nullptr || compressed_jpegr_image->data == nullptr) {
+    ALOGE("received nullptr for compressed jpegr image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (jpegr_info == nullptr) {
+    ALOGE("received nullptr for compressed jpegr info struct");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  jpegr_compressed_struct primary_image, gain_map;
+  JPEGR_CHECK(extractPrimaryImageAndGainMap(compressed_jpegr_image,
+                                            &primary_image, &gain_map));
+
+  JpegDecoderHelper jpeg_decoder;
+  if (!jpeg_decoder.getCompressedImageParameters(primary_image.data, primary_image.length,
+                                                 &jpegr_info->width, &jpegr_info->height,
+                                                 jpegr_info->iccData, jpegr_info->exifData)) {
+    return ERROR_JPEGR_DECODE_ERROR;
+  }
+
+  return NO_ERROR;
+}
+
+/* Decode API */
+status_t JpegR::decodeJPEGR(jr_compressed_ptr compressed_jpegr_image,
+                            jr_uncompressed_ptr dest,
+                            float max_display_boost,
+                            jr_exif_ptr exif,
+                            ultrahdr_output_format output_format,
+                            jr_uncompressed_ptr gain_map,
+                            ultrahdr_metadata_ptr metadata) {
+  if (compressed_jpegr_image == nullptr || compressed_jpegr_image->data == nullptr) {
+    ALOGE("received nullptr for compressed jpegr image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (dest == nullptr || dest->data == nullptr) {
+    ALOGE("received nullptr for dest image");
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (max_display_boost < 1.0f) {
+    ALOGE("received bad value for max_display_boost %f", max_display_boost);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (exif != nullptr && exif->data == nullptr) {
+    ALOGE("received nullptr address for exif data");
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (output_format <= ULTRAHDR_OUTPUT_UNSPECIFIED || output_format > ULTRAHDR_OUTPUT_MAX) {
+    ALOGE("received bad value for output format %d", output_format);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (output_format == ULTRAHDR_OUTPUT_SDR) {
+    JpegDecoderHelper jpeg_decoder;
+    if (!jpeg_decoder.decompressImage(compressed_jpegr_image->data, compressed_jpegr_image->length,
+                                      true)) {
+        return ERROR_JPEGR_DECODE_ERROR;
+    }
+    jpegr_uncompressed_struct uncompressed_rgba_image;
+    uncompressed_rgba_image.data = jpeg_decoder.getDecompressedImagePtr();
+    uncompressed_rgba_image.width = jpeg_decoder.getDecompressedImageWidth();
+    uncompressed_rgba_image.height = jpeg_decoder.getDecompressedImageHeight();
+    memcpy(dest->data, uncompressed_rgba_image.data,
+           uncompressed_rgba_image.width * uncompressed_rgba_image.height * 4);
+    dest->width = uncompressed_rgba_image.width;
+    dest->height = uncompressed_rgba_image.height;
+
+    if (gain_map == nullptr && exif == nullptr) {
+      return NO_ERROR;
+    }
+
+    if (exif != nullptr) {
+      if (exif->data == nullptr) {
+        return ERROR_JPEGR_INVALID_NULL_PTR;
+      }
+      if (exif->length < jpeg_decoder.getEXIFSize()) {
+        return ERROR_JPEGR_BUFFER_TOO_SMALL;
+      }
+      memcpy(exif->data, jpeg_decoder.getEXIFPtr(), jpeg_decoder.getEXIFSize());
+      exif->length = jpeg_decoder.getEXIFSize();
+    }
+    if (gain_map == nullptr) {
+      return NO_ERROR;
+    }
+  }
+
+  jpegr_compressed_struct compressed_map;
+  JPEGR_CHECK(extractGainMap(compressed_jpegr_image, &compressed_map));
+
+  JpegDecoderHelper gain_map_decoder;
+  if (!gain_map_decoder.decompressImage(compressed_map.data, compressed_map.length)) {
+    return ERROR_JPEGR_DECODE_ERROR;
+  }
+  if ((gain_map_decoder.getDecompressedImageWidth() *
+       gain_map_decoder.getDecompressedImageHeight()) >
+      gain_map_decoder.getDecompressedImageSize()) {
+    return ERROR_JPEGR_CALCULATION_ERROR;
+  }
+
+  if (gain_map != nullptr) {
+    gain_map->width = gain_map_decoder.getDecompressedImageWidth();
+    gain_map->height = gain_map_decoder.getDecompressedImageHeight();
+    int size = gain_map->width * gain_map->height;
+    gain_map->data = malloc(size);
+    memcpy(gain_map->data, gain_map_decoder.getDecompressedImagePtr(), size);
+  }
+
+  ultrahdr_metadata_struct uhdr_metadata;
+  if (!getMetadataFromXMP(static_cast<uint8_t*>(gain_map_decoder.getXMPPtr()),
+                          gain_map_decoder.getXMPSize(), &uhdr_metadata)) {
+    return ERROR_JPEGR_INVALID_METADATA;
+  }
+
+  if (metadata != nullptr) {
+      metadata->version = uhdr_metadata.version;
+      metadata->minContentBoost = uhdr_metadata.minContentBoost;
+      metadata->maxContentBoost = uhdr_metadata.maxContentBoost;
+      metadata->gamma = uhdr_metadata.gamma;
+      metadata->offsetSdr = uhdr_metadata.offsetSdr;
+      metadata->offsetHdr = uhdr_metadata.offsetHdr;
+      metadata->hdrCapacityMin = uhdr_metadata.hdrCapacityMin;
+      metadata->hdrCapacityMax = uhdr_metadata.hdrCapacityMax;
+  }
+
+  if (output_format == ULTRAHDR_OUTPUT_SDR) {
+    return NO_ERROR;
+  }
+
+  JpegDecoderHelper jpeg_decoder;
+  if (!jpeg_decoder.decompressImage(compressed_jpegr_image->data, compressed_jpegr_image->length)) {
+    return ERROR_JPEGR_DECODE_ERROR;
+  }
+  if ((jpeg_decoder.getDecompressedImageWidth() *
+       jpeg_decoder.getDecompressedImageHeight() * 3 / 2) >
+      jpeg_decoder.getDecompressedImageSize()) {
+    return ERROR_JPEGR_CALCULATION_ERROR;
+  }
+
+  if (exif != nullptr) {
+    if (exif->data == nullptr) {
+      return ERROR_JPEGR_INVALID_NULL_PTR;
+    }
+    if (exif->length < jpeg_decoder.getEXIFSize()) {
+      return ERROR_JPEGR_BUFFER_TOO_SMALL;
+    }
+    memcpy(exif->data, jpeg_decoder.getEXIFPtr(), jpeg_decoder.getEXIFSize());
+    exif->length = jpeg_decoder.getEXIFSize();
+  }
+
+  jpegr_uncompressed_struct map;
+  map.data = gain_map_decoder.getDecompressedImagePtr();
+  map.width = gain_map_decoder.getDecompressedImageWidth();
+  map.height = gain_map_decoder.getDecompressedImageHeight();
+
+  jpegr_uncompressed_struct uncompressed_yuv_420_image;
+  uncompressed_yuv_420_image.data = jpeg_decoder.getDecompressedImagePtr();
+  uncompressed_yuv_420_image.width = jpeg_decoder.getDecompressedImageWidth();
+  uncompressed_yuv_420_image.height = jpeg_decoder.getDecompressedImageHeight();
+  uncompressed_yuv_420_image.colorGamut = IccHelper::readIccColorGamut(
+      jpeg_decoder.getICCPtr(), jpeg_decoder.getICCSize());
+
+  JPEGR_CHECK(applyGainMap(&uncompressed_yuv_420_image, &map, &uhdr_metadata, output_format,
+                           max_display_boost, dest));
+  return NO_ERROR;
+}
+
+status_t JpegR::compressGainMap(jr_uncompressed_ptr uncompressed_gain_map,
+                                JpegEncoderHelper* jpeg_encoder) {
+  if (uncompressed_gain_map == nullptr || jpeg_encoder == nullptr) {
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  // Don't need to convert YUV to Bt601 since single channel
+  if (!jpeg_encoder->compressImage(uncompressed_gain_map->data,
+                                   uncompressed_gain_map->width,
+                                   uncompressed_gain_map->height,
+                                   kMapCompressQuality,
+                                   nullptr,
+                                   0,
+                                   true /* isSingleChannel */)) {
+    return ERROR_JPEGR_ENCODE_ERROR;
+  }
+
+  return NO_ERROR;
+}
+
+const int kJobSzInRows = 16;
+static_assert(kJobSzInRows > 0 && kJobSzInRows % kMapDimensionScaleFactor == 0,
+              "align job size to kMapDimensionScaleFactor");
+
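+// Minimal work queue shared between the calling thread and the worker threads spawned in
+// generateGainMap() / applyGainMap(): the caller enqueues [rowStart, rowEnd) row ranges and
+// workers dequeue until the queue drains after markQueueForEnd() is called.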
+class JobQueue {
+ public:
+  bool dequeueJob(size_t& rowStart, size_t& rowEnd);
+  void enqueueJob(size_t rowStart, size_t rowEnd);
+  void markQueueForEnd();
+  void reset();
+
+ private:
+  bool mQueuedAllJobs = false;
+  std::deque<std::tuple<size_t, size_t>> mJobs;
+  std::mutex mMutex;
+  std::condition_variable mCv;
+};
+
+bool JobQueue::dequeueJob(size_t& rowStart, size_t& rowEnd) {
+  std::unique_lock<std::mutex> lock{mMutex};
+  while (true) {
+    if (mJobs.empty()) {
+      if (mQueuedAllJobs) {
+        return false;
+      } else {
+        mCv.wait_for(lock, std::chrono::milliseconds(100));
+      }
+    } else {
+      auto it = mJobs.begin();
+      rowStart = std::get<0>(*it);
+      rowEnd = std::get<1>(*it);
+      mJobs.erase(it);
+      return true;
+    }
+  }
+  return false;
+}
+
+void JobQueue::enqueueJob(size_t rowStart, size_t rowEnd) {
+  std::unique_lock<std::mutex> lock{mMutex};
+  mJobs.push_back(std::make_tuple(rowStart, rowEnd));
+  lock.unlock();
+  mCv.notify_one();
+}
+
+void JobQueue::markQueueForEnd() {
+  std::unique_lock<std::mutex> lock{mMutex};
+  mQueuedAllJobs = true;
+  lock.unlock();
+  mCv.notify_all();
+}
+
+void JobQueue::reset() {
+  std::unique_lock<std::mutex> lock{mMutex};
+  mJobs.clear();
+  mQueuedAllJobs = false;
+}
+
+status_t JpegR::generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
+                                jr_uncompressed_ptr uncompressed_p010_image,
+                                ultrahdr_transfer_function hdr_tf,
+                                ultrahdr_metadata_ptr metadata,
+                                jr_uncompressed_ptr dest,
+                                bool sdr_is_601) {
+  if (uncompressed_yuv_420_image == nullptr
+   || uncompressed_p010_image == nullptr
+   || metadata == nullptr
+   || dest == nullptr) {
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (uncompressed_yuv_420_image->width != uncompressed_p010_image->width
+   || uncompressed_yuv_420_image->height != uncompressed_p010_image->height) {
+    return ERROR_JPEGR_RESOLUTION_MISMATCH;
+  }
+
+  if (uncompressed_yuv_420_image->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED
+   || uncompressed_p010_image->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED) {
+    return ERROR_JPEGR_INVALID_COLORGAMUT;
+  }
+
+  size_t image_width = uncompressed_yuv_420_image->width;
+  size_t image_height = uncompressed_yuv_420_image->height;
+  size_t map_width = image_width / kMapDimensionScaleFactor;
+  size_t map_height = image_height / kMapDimensionScaleFactor;
+  size_t map_stride = static_cast<size_t>(
+          floor((map_width + kJpegBlock - 1) / kJpegBlock)) * kJpegBlock;
+  size_t map_height_aligned = ((map_height + 1) >> 1) << 1;
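+  // e.g. a 1920x1080 primary image gives a 480x270 map; 480 is already a multiple of
+  // kJpegBlock and 270 is already even, so no extra padding is needed in that case.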
+
+  dest->width = map_stride;
+  dest->height = map_height_aligned;
+  dest->colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  dest->data = new uint8_t[map_stride * map_height_aligned];
+  std::unique_ptr<uint8_t[]> map_data;
+  map_data.reset(reinterpret_cast<uint8_t*>(dest->data));
+
+  ColorTransformFn hdrInvOetf = nullptr;
+  float hdr_white_nits = kSdrWhiteNits;
+  switch (hdr_tf) {
+    case ULTRAHDR_TF_LINEAR:
+      hdrInvOetf = identityConversion;
+      break;
+    case ULTRAHDR_TF_HLG:
+#if USE_HLG_INVOETF_LUT
+      hdrInvOetf = hlgInvOetfLUT;
+#else
+      hdrInvOetf = hlgInvOetf;
+#endif
+      hdr_white_nits = kHlgMaxNits;
+      break;
+    case ULTRAHDR_TF_PQ:
+#if USE_PQ_INVOETF_LUT
+      hdrInvOetf = pqInvOetfLUT;
+#else
+      hdrInvOetf = pqInvOetf;
+#endif
+      hdr_white_nits = kPqMaxNits;
+      break;
+    default:
+      // Should be impossible to hit after input validation.
+      return ERROR_JPEGR_INVALID_TRANS_FUNC;
+  }
+
+  metadata->maxContentBoost = hdr_white_nits / kSdrWhiteNits;
+  metadata->minContentBoost = 1.0f;
+  metadata->gamma = 1.0f;
+  metadata->offsetSdr = 0.0f;
+  metadata->offsetHdr = 0.0f;
+  metadata->hdrCapacityMin = 1.0f;
+  metadata->hdrCapacityMax = metadata->maxContentBoost;
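+  // e.g. for HLG input, maxContentBoost is kHlgMaxNits / kSdrWhiteNits (10, assuming the
+  // usual 1000-nit HLG peak and 100-nit SDR reference white), and hdrCapacityMax matches it.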
+
+  float log2MinBoost = log2(metadata->minContentBoost);
+  float log2MaxBoost = log2(metadata->maxContentBoost);
+
+  ColorTransformFn hdrGamutConversionFn = getHdrConversionFn(
+      uncompressed_yuv_420_image->colorGamut, uncompressed_p010_image->colorGamut);
+
+  ColorCalculationFn luminanceFn = nullptr;
+  ColorTransformFn sdrYuvToRgbFn = nullptr;
+  switch (uncompressed_yuv_420_image->colorGamut) {
+    case ULTRAHDR_COLORGAMUT_BT709:
+      luminanceFn = srgbLuminance;
+      sdrYuvToRgbFn = srgbYuvToRgb;
+      break;
+    case ULTRAHDR_COLORGAMUT_P3:
+      luminanceFn = p3Luminance;
+      sdrYuvToRgbFn = p3YuvToRgb;
+      break;
+    case ULTRAHDR_COLORGAMUT_BT2100:
+      luminanceFn = bt2100Luminance;
+      sdrYuvToRgbFn = bt2100YuvToRgb;
+      break;
+    case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
+      // Should be impossible to hit after input validation.
+      return ERROR_JPEGR_INVALID_COLORGAMUT;
+  }
+  if (sdr_is_601) {
+    sdrYuvToRgbFn = p3YuvToRgb;
+  }
+
+  ColorTransformFn hdrYuvToRgbFn = nullptr;
+  switch (uncompressed_p010_image->colorGamut) {
+    case ULTRAHDR_COLORGAMUT_BT709:
+      hdrYuvToRgbFn = srgbYuvToRgb;
+      break;
+    case ULTRAHDR_COLORGAMUT_P3:
+      hdrYuvToRgbFn = p3YuvToRgb;
+      break;
+    case ULTRAHDR_COLORGAMUT_BT2100:
+      hdrYuvToRgbFn = bt2100YuvToRgb;
+      break;
+    case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
+      // Should be impossible to hit after input validation.
+      return ERROR_JPEGR_INVALID_COLORGAMUT;
+  }
+
+  std::mutex mutex;
+  const int threads = std::clamp(GetCPUCoreCount(), 1, 4);
+  size_t rowStep = threads == 1 ? image_height : kJobSzInRows;
+  JobQueue jobQueue;
+
+  std::function<void()> generateMap = [uncompressed_yuv_420_image, uncompressed_p010_image,
+                                       metadata, dest, hdrInvOetf, hdrGamutConversionFn,
+                                       luminanceFn, sdrYuvToRgbFn, hdrYuvToRgbFn, hdr_white_nits,
+                                       log2MinBoost, log2MaxBoost, &jobQueue]() -> void {
+    size_t rowStart, rowEnd;
+    size_t dest_map_width = uncompressed_yuv_420_image->width / kMapDimensionScaleFactor;
+    size_t dest_map_stride = dest->width;
+    while (jobQueue.dequeueJob(rowStart, rowEnd)) {
+      for (size_t y = rowStart; y < rowEnd; ++y) {
+        for (size_t x = 0; x < dest_map_width; ++x) {
+          Color sdr_yuv_gamma =
+              sampleYuv420(uncompressed_yuv_420_image, kMapDimensionScaleFactor, x, y);
+          Color sdr_rgb_gamma = sdrYuvToRgbFn(sdr_yuv_gamma);
+          // We are assuming the SDR input is always sRGB transfer.
+#if USE_SRGB_INVOETF_LUT
+          Color sdr_rgb = srgbInvOetfLUT(sdr_rgb_gamma);
+#else
+          Color sdr_rgb = srgbInvOetf(sdr_rgb_gamma);
+#endif
+          float sdr_y_nits = luminanceFn(sdr_rgb) * kSdrWhiteNits;
+
+          Color hdr_yuv_gamma = sampleP010(uncompressed_p010_image, kMapDimensionScaleFactor, x, y);
+          Color hdr_rgb_gamma = hdrYuvToRgbFn(hdr_yuv_gamma);
+          Color hdr_rgb = hdrInvOetf(hdr_rgb_gamma);
+          hdr_rgb = hdrGamutConversionFn(hdr_rgb);
+          float hdr_y_nits = luminanceFn(hdr_rgb) * hdr_white_nits;
+
+          size_t pixel_idx = x + y * dest_map_stride;
+          reinterpret_cast<uint8_t*>(dest->data)[pixel_idx] =
+              encodeGain(sdr_y_nits, hdr_y_nits, metadata, log2MinBoost, log2MaxBoost);
+        }
+      }
+    }
+  };
+
+  // generate map
+  std::vector<std::thread> workers;
+  for (int th = 0; th < threads - 1; th++) {
+    workers.push_back(std::thread(generateMap));
+  }
+
+  rowStep = (threads == 1 ? image_height : kJobSzInRows) / kMapDimensionScaleFactor;
+  for (size_t rowStart = 0; rowStart < map_height;) {
+    size_t rowEnd = std::min(rowStart + rowStep, map_height);
+    jobQueue.enqueueJob(rowStart, rowEnd);
+    rowStart = rowEnd;
+  }
+  jobQueue.markQueueForEnd();
+  generateMap();
+  std::for_each(workers.begin(), workers.end(), [](std::thread& t) { t.join(); });
+
+  map_data.release();
+  return NO_ERROR;
+}
+
+status_t JpegR::applyGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
+                             jr_uncompressed_ptr uncompressed_gain_map,
+                             ultrahdr_metadata_ptr metadata,
+                             ultrahdr_output_format output_format,
+                             float max_display_boost,
+                             jr_uncompressed_ptr dest) {
+  if (uncompressed_yuv_420_image == nullptr
+   || uncompressed_gain_map == nullptr
+   || metadata == nullptr
+   || dest == nullptr) {
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (metadata->version.compare("1.0")) {
+      ALOGE("Unsupported metadata version: %s", metadata->version.c_str());
+      return ERROR_JPEGR_UNSUPPORTED_METADATA;
+  }
+  if (metadata->gamma != 1.0f) {
+      ALOGE("Unsupported metadata gamma: %f", metadata->gamma);
+      return ERROR_JPEGR_UNSUPPORTED_METADATA;
+  }
+  if (metadata->offsetSdr != 0.0f || metadata->offsetHdr != 0.0f) {
+      ALOGE("Unsupported metadata offset sdr, hdr: %f, %f", metadata->offsetSdr,
+            metadata->offsetHdr);
+      return ERROR_JPEGR_UNSUPPORTED_METADATA;
+  }
+  if (metadata->hdrCapacityMin != metadata->minContentBoost
+   || metadata->hdrCapacityMax != metadata->maxContentBoost) {
+      ALOGE("Unsupported metadata hdr capacity min, max: %f, %f", metadata->hdrCapacityMin,
+            metadata->hdrCapacityMax);
+      return ERROR_JPEGR_UNSUPPORTED_METADATA;
+  }
+
+  // TODO: remove once map scaling factor is computed based on actual map dims
+  size_t image_width = uncompressed_yuv_420_image->width;
+  size_t image_height = uncompressed_yuv_420_image->height;
+  size_t map_width = image_width / kMapDimensionScaleFactor;
+  size_t map_height = image_height / kMapDimensionScaleFactor;
+  map_width = static_cast<size_t>(
+          floor((map_width + kJpegBlock - 1) / kJpegBlock)) * kJpegBlock;
+  map_height = ((map_height + 1) >> 1) << 1;
+  if (map_width != uncompressed_gain_map->width
+   || map_height != uncompressed_gain_map->height) {
+    ALOGE("gain map dimensions and primary image dimensions are not to scale");
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  dest->width = uncompressed_yuv_420_image->width;
+  dest->height = uncompressed_yuv_420_image->height;
+  ShepardsIDW idwTable(kMapDimensionScaleFactor);
+  float display_boost = std::min(max_display_boost, metadata->maxContentBoost);
+  GainLUT gainLUT(metadata, display_boost);
+
+  JobQueue jobQueue;
+  std::function<void()> applyRecMap = [uncompressed_yuv_420_image, uncompressed_gain_map,
+                                       metadata, dest, &jobQueue, &idwTable, output_format,
+                                       &gainLUT, display_boost]() -> void {
+    size_t width = uncompressed_yuv_420_image->width;
+    size_t height = uncompressed_yuv_420_image->height;
+
+    size_t rowStart, rowEnd;
+    while (jobQueue.dequeueJob(rowStart, rowEnd)) {
+      for (size_t y = rowStart; y < rowEnd; ++y) {
+        for (size_t x = 0; x < width; ++x) {
+          Color yuv_gamma_sdr = getYuv420Pixel(uncompressed_yuv_420_image, x, y);
+          // Assuming the sdr image is a decoded JPEG, we should always use Rec.601 YUV coefficients
+          Color rgb_gamma_sdr = p3YuvToRgb(yuv_gamma_sdr);
+          // We are assuming the SDR base image is always sRGB transfer.
+#if USE_SRGB_INVOETF_LUT
+          Color rgb_sdr = srgbInvOetfLUT(rgb_gamma_sdr);
+#else
+          Color rgb_sdr = srgbInvOetf(rgb_gamma_sdr);
+#endif
+          float gain;
+          // TODO: determine map scaling factor based on actual map dims
+          size_t map_scale_factor = kMapDimensionScaleFactor;
+          // TODO: If map_scale_factor is guaranteed to be an integer, then remove the following.
+          // Currently map_scale_factor is of type size_t, but it could be changed to a float
+          // later.
+          if (map_scale_factor != floorf(map_scale_factor)) {
+            gain = sampleMap(uncompressed_gain_map, map_scale_factor, x, y);
+          } else {
+            gain = sampleMap(uncompressed_gain_map, map_scale_factor, x, y, idwTable);
+          }
+
+#if USE_APPLY_GAIN_LUT
+          Color rgb_hdr = applyGainLUT(rgb_sdr, gain, gainLUT);
+#else
+          Color rgb_hdr = applyGain(rgb_sdr, gain, metadata, display_boost);
+#endif
+          rgb_hdr = rgb_hdr / display_boost;
+          size_t pixel_idx = x + y * width;
+
+          switch (output_format) {
+            case ULTRAHDR_OUTPUT_HDR_LINEAR:
+            {
+              uint64_t rgba_f16 = colorToRgbaF16(rgb_hdr);
+              reinterpret_cast<uint64_t*>(dest->data)[pixel_idx] = rgba_f16;
+              break;
+            }
+            case ULTRAHDR_OUTPUT_HDR_HLG:
+            {
+#if USE_HLG_OETF_LUT
+              ColorTransformFn hdrOetf = hlgOetfLUT;
+#else
+              ColorTransformFn hdrOetf = hlgOetf;
+#endif
+              Color rgb_gamma_hdr = hdrOetf(rgb_hdr);
+              uint32_t rgba_1010102 = colorToRgba1010102(rgb_gamma_hdr);
+              reinterpret_cast<uint32_t*>(dest->data)[pixel_idx] = rgba_1010102;
+              break;
+            }
+            case ULTRAHDR_OUTPUT_HDR_PQ:
+            {
+#if USE_PQ_OETF_LUT
+              ColorTransformFn hdrOetf = pqOetfLUT;
+#else
+              ColorTransformFn hdrOetf = pqOetf;
+#endif
+              Color rgb_gamma_hdr = hdrOetf(rgb_hdr);
+              uint32_t rgba_1010102 = colorToRgba1010102(rgb_gamma_hdr);
+              reinterpret_cast<uint32_t*>(dest->data)[pixel_idx] = rgba_1010102;
+              break;
+            }
+            default:
+              // Should be impossible to hit after input validation.
+              break;
+          }
+        }
+      }
+    }
+  };
+
+  const int threads = std::clamp(GetCPUCoreCount(), 1, 4);
+  std::vector<std::thread> workers;
+  for (int th = 0; th < threads - 1; th++) {
+    workers.push_back(std::thread(applyRecMap));
+  }
+  const int rowStep = threads == 1 ? uncompressed_yuv_420_image->height : kJobSzInRows;
+  for (int rowStart = 0; rowStart < uncompressed_yuv_420_image->height;) {
+    int rowEnd = std::min(rowStart + rowStep, uncompressed_yuv_420_image->height);
+    jobQueue.enqueueJob(rowStart, rowEnd);
+    rowStart = rowEnd;
+  }
+  jobQueue.markQueueForEnd();
+  applyRecMap();
+  std::for_each(workers.begin(), workers.end(), [](std::thread& t) { t.join(); });
+  return NO_ERROR;
+}
+
+status_t JpegR::extractPrimaryImageAndGainMap(jr_compressed_ptr compressed_jpegr_image,
+                                              jr_compressed_ptr primary_image,
+                                              jr_compressed_ptr gain_map) {
+  if (compressed_jpegr_image == nullptr) {
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
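+  // Scan the compressed stream to locate the byte ranges of the embedded JPEG images; the
+  // primary image and gain map outputs then point into the original buffer without copying.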
+  MessageHandler msg_handler;
+  std::shared_ptr<DataSegment> seg =
+                  DataSegment::Create(DataRange(0, compressed_jpegr_image->length),
+                                      static_cast<const uint8_t*>(compressed_jpegr_image->data),
+                                      DataSegment::BufferDispositionPolicy::kDontDelete);
+  DataSegmentDataSource data_source(seg);
+  JpegInfoBuilder jpeg_info_builder;
+  jpeg_info_builder.SetImageLimit(2);
+  JpegScanner jpeg_scanner(&msg_handler);
+  jpeg_scanner.Run(&data_source, &jpeg_info_builder);
+  data_source.Reset();
+
+  if (jpeg_scanner.HasError()) {
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  const auto& jpeg_info = jpeg_info_builder.GetInfo();
+  const auto& image_ranges = jpeg_info.GetImageRanges();
+  if (image_ranges.empty()) {
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (image_ranges.size() != 2) {
+    // A JPEG/R stream must contain exactly two JPEG images: the primary image and the gain map.
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (primary_image != nullptr) {
+    primary_image->data = static_cast<uint8_t*>(compressed_jpegr_image->data) +
+                                               image_ranges[0].GetBegin();
+    primary_image->length = image_ranges[0].GetLength();
+  }
+
+  if (gain_map != nullptr) {
+    gain_map->data = static_cast<uint8_t*>(compressed_jpegr_image->data) +
+                                              image_ranges[1].GetBegin();
+    gain_map->length = image_ranges[1].GetLength();
+  }
+
+  return NO_ERROR;
+}
+
+
+status_t JpegR::extractGainMap(jr_compressed_ptr compressed_jpegr_image,
+                               jr_compressed_ptr dest) {
+  if (compressed_jpegr_image == nullptr || dest == nullptr) {
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  return extractPrimaryImageAndGainMap(compressed_jpegr_image, nullptr, dest);
+}
+
+// JPEG/R structure:
+// SOI (ff d8)
+//
+// (Optional, only if EXIF package is from outside)
+// APP1 (ff e1)
+// 2 bytes of length (2 + length of exif package)
+// EXIF package (this includes the first two bytes representing the package length)
+//
+// (Required, XMP package) APP1 (ff e1)
+// 2 bytes of length (2 + 29 + length of xmp package)
+// name space ("http://ns.adobe.com/xap/1.0/\0")
+// XMP
+//
+// (Required, MPF package) APP2 (ff e2)
+// 2 bytes of length
+// MPF
+//
+// (Required) primary image (without the first two bytes (SOI), may have other packages)
+//
+// SOI (ff d8)
+//
+// (Required, XMP package) APP1 (ff e1)
+// 2 bytes of length (2 + 29 + length of xmp package)
+// name space ("http://ns.adobe.com/xap/1.0/\0")
+// XMP
+//
+// (Required) secondary image (the gain map, without the first two bytes (SOI))
+//
+// Metadata versions we are using:
+// ECMA TR-98 for JFIF marker
+// Exif 2.2 spec for EXIF marker
+// Adobe XMP spec part 3 for XMP marker
+// ICC v4.3 spec for ICC
+status_t JpegR::appendGainMap(jr_compressed_ptr compressed_jpeg_image,
+                              jr_compressed_ptr compressed_gain_map,
+                              jr_exif_ptr exif,
+                              void* icc, size_t icc_size,
+                              ultrahdr_metadata_ptr metadata,
+                              jr_compressed_ptr dest) {
+  if (compressed_jpeg_image == nullptr
+   || compressed_gain_map == nullptr
+   || metadata == nullptr
+   || dest == nullptr) {
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (metadata->version.compare("1.0")) {
+    ALOGE("received bad value for version: %s", metadata->version.c_str());
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+  if (metadata->maxContentBoost < metadata->minContentBoost) {
+    ALOGE("received bad value for content boost min %f, max %f", metadata->minContentBoost,
+           metadata->maxContentBoost);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (metadata->hdrCapacityMax < metadata->hdrCapacityMin || metadata->hdrCapacityMin < 1.0f) {
+    ALOGE("received bad value for hdr capacity min %f, max %f", metadata->hdrCapacityMin,
+           metadata->hdrCapacityMax);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (metadata->offsetSdr < 0.0f || metadata->offsetHdr < 0.0f) {
+    ALOGE("received bad value for offset sdr %f, hdr %f", metadata->offsetSdr,
+           metadata->offsetHdr);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  if (metadata->gamma <= 0.0f) {
+    ALOGE("received bad value for gamma %f", metadata->gamma);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+
+  const string nameSpace = "http://ns.adobe.com/xap/1.0/";
+  const int nameSpaceLength = nameSpace.size() + 1;  // need to count the null terminator
+
+  // calculate secondary image length first, because the length will be written into the primary
+  // image xmp
+  const string xmp_secondary = generateXmpForSecondaryImage(*metadata);
+  const int xmp_secondary_length = 2 /* 2 bytes representing the length of the package */
+                                 + nameSpaceLength /* 29 bytes length of name space including \0 */
+                                 + xmp_secondary.size(); /* length of xmp packet */
+  const int secondary_image_size = 2 /* 2 bytes length of APP1 sign */
+                                 + xmp_secondary_length
+                                 + compressed_gain_map->length;
+  // primary image
+  const string xmp_primary = generateXmpForPrimaryImage(secondary_image_size, *metadata);
+  // same as primary
+  const int xmp_primary_length = 2 + nameSpaceLength + xmp_primary.size();
+
+  int pos = 0;
+  // Begin primary image
+  // Write SOI
+  JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+  JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kSOI, 1, pos));
+
+  // Write EXIF
+  if (exif != nullptr) {
+    const int length = 2 + exif->length;
+    const uint8_t lengthH = ((length >> 8) & 0xff);
+    const uint8_t lengthL = (length & 0xff);
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP1, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
+    JPEGR_CHECK(Write(dest, exif->data, exif->length, pos));
+  }
+
+  // Prepare and write XMP
+  {
+    const int length = xmp_primary_length;
+    const uint8_t lengthH = ((length >> 8) & 0xff);
+    const uint8_t lengthL = (length & 0xff);
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP1, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
+    JPEGR_CHECK(Write(dest, (void*)nameSpace.c_str(), nameSpaceLength, pos));
+    JPEGR_CHECK(Write(dest, (void*)xmp_primary.c_str(), xmp_primary.size(), pos));
+  }
+
+  // Write ICC
+  if (icc != nullptr && icc_size > 0) {
+      const int length = icc_size + 2;
+      const uint8_t lengthH = ((length >> 8) & 0xff);
+      const uint8_t lengthL = (length & 0xff);
+      JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+      JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos));
+      JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
+      JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
+      JPEGR_CHECK(Write(dest, icc, icc_size, pos));
+  }
+
+  // Prepare and write MPF
+  {
+      const int length = 2 + calculateMpfSize();
+      const uint8_t lengthH = ((length >> 8) & 0xff);
+      const uint8_t lengthL = (length & 0xff);
+      int primary_image_size = pos + length + compressed_jpeg_image->length;
+      // The MPF offset is measured from the byte after the APP2 marker, segment length, and
+      // MPF signature (ff e2 00 58 4d 50 46 00, i.e. 2 + 2 + 4 = 8 bytes) up to the
+      // ff d8 (SOI) marker of the secondary image.
+      int secondary_image_offset = primary_image_size - pos - 8;
+      sp<DataStruct> mpf = generateMpf(primary_image_size,
+                                       0, /* primary_image_offset */
+                                       secondary_image_size,
+                                       secondary_image_offset);
+      JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+      JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos));
+      JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
+      JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
+      JPEGR_CHECK(Write(dest, (void*)mpf->getData(), mpf->getLength(), pos));
+  }
+
+  // Write primary image
+  JPEGR_CHECK(Write(dest,
+      (uint8_t*)compressed_jpeg_image->data + 2, compressed_jpeg_image->length - 2, pos));
+  // Finish primary image
+
+  // Begin secondary image (gain map)
+  // Write SOI
+  JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+  JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kSOI, 1, pos));
+
+  // Prepare and write XMP
+  {
+    const int length = xmp_secondary_length;
+    const uint8_t lengthH = ((length >> 8) & 0xff);
+    const uint8_t lengthL = (length & 0xff);
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP1, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
+    JPEGR_CHECK(Write(dest, (void*)nameSpace.c_str(), nameSpaceLength, pos));
+    JPEGR_CHECK(Write(dest, (void*)xmp_secondary.c_str(), xmp_secondary.size(), pos));
+  }
+
+  // Write secondary image
+  JPEGR_CHECK(Write(dest,
+        (uint8_t*)compressed_gain_map->data + 2, compressed_gain_map->length - 2, pos));
+
+  // Record the total number of bytes written in the destination length.
+  dest->length = pos;
+
+  // Done!
+  return NO_ERROR;
+}
+
+status_t JpegR::toneMap(jr_uncompressed_ptr src, jr_uncompressed_ptr dest) {
+  if (src == nullptr || dest == nullptr) {
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  uint16_t* src_luma_data = reinterpret_cast<uint16_t*>(src->data);
+  size_t src_luma_stride = src->luma_stride == 0 ? src->width : src->luma_stride;
+
+  uint16_t* src_chroma_data;
+  size_t src_chroma_stride;
+  if (src->chroma_data == nullptr) {
+     src_chroma_stride = src_luma_stride;
+     src_chroma_data = &reinterpret_cast<uint16_t*>(src->data)[src_luma_stride * src->height];
+  } else {
+     src_chroma_stride = src->chroma_stride;
+     src_chroma_data = reinterpret_cast<uint16_t*>(src->chroma_data);
+  }
+  dest->width = src->width;
+  dest->height = src->height;
+
+  size_t dest_luma_pixel_count = dest->width * dest->height;
+
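+  // Convert P010 (10-bit, semi-planar) to planar YUV420 by keeping the 8 most significant
+  // bits of each 10-bit sample.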
+  for (size_t y = 0; y < src->height; ++y) {
+    for (size_t x = 0; x < src->width; ++x) {
+      size_t src_y_idx = y * src_luma_stride + x;
+      size_t src_u_idx = (y >> 1) * src_chroma_stride + (x & ~0x1);
+      size_t src_v_idx = src_u_idx + 1;
+
+      uint16_t y_uint = src_luma_data[src_y_idx] >> 6;
+      uint16_t u_uint = src_chroma_data[src_u_idx] >> 6;
+      uint16_t v_uint = src_chroma_data[src_v_idx] >> 6;
+
+      size_t dest_y_idx = x + y * dest->width;
+      size_t dest_uv_idx = x / 2 + (y / 2) * (dest->width / 2);
+
+      uint8_t* out_y = &reinterpret_cast<uint8_t*>(dest->data)[dest_y_idx];
+      uint8_t* out_u = &reinterpret_cast<uint8_t*>(
+              dest->data)[dest_luma_pixel_count + dest_uv_idx];
+      uint8_t* out_v = &reinterpret_cast<uint8_t*>(
+              dest->data)[dest_luma_pixel_count * 5 / 4 + dest_uv_idx];
+
+      *out_y = static_cast<uint8_t>((y_uint >> 2) & 0xff);
+      *out_u = static_cast<uint8_t>((u_uint >> 2) & 0xff);
+      *out_v = static_cast<uint8_t>((v_uint >> 2) & 0xff);
+    }
+  }
+
+  dest->colorGamut = src->colorGamut;
+
+  return NO_ERROR;
+}
+
+status_t JpegR::convertYuv(jr_uncompressed_ptr image,
+                           ultrahdr_color_gamut src_encoding,
+                           ultrahdr_color_gamut dest_encoding) {
+  if (image == nullptr) {
+    return ERROR_JPEGR_INVALID_NULL_PTR;
+  }
+
+  if (src_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED
+   || dest_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED) {
+    return ERROR_JPEGR_INVALID_COLORGAMUT;
+  }
+
+  ColorTransformFn conversionFn = nullptr;
+  switch (src_encoding) {
+    case ULTRAHDR_COLORGAMUT_BT709:
+      switch (dest_encoding) {
+        case ULTRAHDR_COLORGAMUT_BT709:
+          return NO_ERROR;
+        case ULTRAHDR_COLORGAMUT_P3:
+          conversionFn = yuv709To601;
+          break;
+        case ULTRAHDR_COLORGAMUT_BT2100:
+          conversionFn = yuv709To2100;
+          break;
+        default:
+          // Should be impossible to hit after input validation
+          return ERROR_JPEGR_INVALID_COLORGAMUT;
+      }
+      break;
+    case ULTRAHDR_COLORGAMUT_P3:
+      switch (dest_encoding) {
+        case ULTRAHDR_COLORGAMUT_BT709:
+          conversionFn = yuv601To709;
+          break;
+        case ULTRAHDR_COLORGAMUT_P3:
+          return NO_ERROR;
+        case ULTRAHDR_COLORGAMUT_BT2100:
+          conversionFn = yuv601To2100;
+          break;
+        default:
+          // Should be impossible to hit after input validation
+          return ERROR_JPEGR_INVALID_COLORGAMUT;
+      }
+      break;
+    case ULTRAHDR_COLORGAMUT_BT2100:
+      switch (dest_encoding) {
+        case ULTRAHDR_COLORGAMUT_BT709:
+          conversionFn = yuv2100To709;
+          break;
+        case ULTRAHDR_COLORGAMUT_P3:
+          conversionFn = yuv2100To601;
+          break;
+        case ULTRAHDR_COLORGAMUT_BT2100:
+          return NO_ERROR;
+        default:
+          // Should be impossible to hit after input validation
+          return ERROR_JPEGR_INVALID_COLORGAMUT;
+      }
+      break;
+    default:
+      // Should be impossible to hit after input validation
+      return ERROR_JPEGR_INVALID_COLORGAMUT;
+  }
+
+  if (conversionFn == nullptr) {
+    // Should be impossible to hit after input validation
+    return ERROR_JPEGR_INVALID_COLORGAMUT;
+  }
+
+  for (size_t y = 0; y < image->height / 2; ++y) {
+    for (size_t x = 0; x < image->width / 2; ++x) {
+      transformYuv420(image, x, y, conversionFn);
+    }
+  }
+
+  return NO_ERROR;
+}
+
+} // namespace android::ultrahdr
diff --git a/libs/ultrahdr/jpegrutils.cpp b/libs/ultrahdr/jpegrutils.cpp
new file mode 100644
index 0000000..c434eb6
--- /dev/null
+++ b/libs/ultrahdr/jpegrutils.cpp
@@ -0,0 +1,600 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ultrahdr/jpegrutils.h>
+
+#include <algorithm>
+#include <cmath>
+
+#include <image_io/xml/xml_reader.h>
+#include <image_io/xml/xml_writer.h>
+#include <image_io/base/message_handler.h>
+#include <image_io/xml/xml_element_rules.h>
+#include <image_io/xml/xml_handler.h>
+#include <image_io/xml/xml_rule.h>
+#include <utils/Log.h>
+
+using namespace photos_editing_formats::image_io;
+using namespace std;
+
+namespace android::ultrahdr {
+/*
+ * Helper function used for generating XMP metadata.
+ *
+ * @param prefix The prefix part of the name.
+ * @param suffix The suffix part of the name.
+ * @return A name of the form "prefix:suffix".
+ */
+static inline string Name(const string &prefix, const string &suffix) {
+  std::stringstream ss;
+  ss << prefix << ":" << suffix;
+  return ss.str();
+}
+
+DataStruct::DataStruct(int s) {
+    data = malloc(s);
+    length = s;
+    memset(data, 0, s);
+    writePos = 0;
+}
+
+DataStruct::~DataStruct() {
+    if (data != nullptr) {
+        free(data);
+    }
+}
+
+void* DataStruct::getData() {
+    return data;
+}
+
+int DataStruct::getLength() {
+    return length;
+}
+
+int DataStruct::getBytesWritten() {
+    return writePos;
+}
+
+bool DataStruct::write8(uint8_t value) {
+    uint8_t v = value;
+    return write(&v, 1);
+}
+
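+// write16/write32 copy the value in host byte order; callers perform any required endianness
+// conversion themselves (see the Endian_SwapBE16/Endian_SwapBE32 calls in generateMpf()).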
+bool DataStruct::write16(uint16_t value) {
+    uint16_t v = value;
+    return write(&v, 2);
+}
+bool DataStruct::write32(uint32_t value) {
+    uint32_t v = value;
+    return write(&v, 4);
+}
+
+bool DataStruct::write(const void* src, int size) {
+    if (writePos + size > length) {
+        ALOGE("Writing out of boundary: write position: %d, size: %d, capacity: %d",
+                writePos, size, length);
+        return false;
+    }
+    memcpy((uint8_t*) data + writePos, src, size);
+    writePos += size;
+    return true;
+}
+
+/*
+ * Helper function used for writing data to destination.
+ */
+status_t Write(jr_compressed_ptr destination, const void* source, size_t length, int &position) {
+  if (position + length > destination->maxLength) {
+    return ERROR_JPEGR_BUFFER_TOO_SMALL;
+  }
+
+  memcpy((uint8_t*)destination->data + sizeof(uint8_t) * position, source, length);
+  position += length;
+  return NO_ERROR;
+}
+
+// Extremely simple XML Handler - just searches for interesting elements
+class XMPXmlHandler : public XmlHandler {
+public:
+
+    XMPXmlHandler() : XmlHandler() {
+        state = NotStarted;
+        versionFound = false;
+        minContentBoostFound = false;
+        maxContentBoostFound = false;
+        gammaFound = false;
+        offsetSdrFound = false;
+        offsetHdrFound = false;
+        hdrCapacityMinFound = false;
+        hdrCapacityMaxFound = false;
+        baseRenditionIsHdrFound = false;
+    }
+
+    enum ParseState {
+        NotStarted,
+        Started,
+        Done
+    };
+
+    virtual DataMatchResult StartElement(const XmlTokenContext& context) {
+        string val;
+        if (context.BuildTokenValue(&val)) {
+            if (!val.compare(containerName)) {
+                state = Started;
+            } else {
+                if (state != Done) {
+                    state = NotStarted;
+                }
+            }
+        }
+        return context.GetResult();
+    }
+
+    virtual DataMatchResult FinishElement(const XmlTokenContext& context) {
+        if (state == Started) {
+            state = Done;
+            lastAttributeName = "";
+        }
+        return context.GetResult();
+    }
+
+    virtual DataMatchResult AttributeName(const XmlTokenContext& context) {
+        string val;
+        if (state == Started) {
+            if (context.BuildTokenValue(&val)) {
+                if (!val.compare(versionAttrName)) {
+                    lastAttributeName = versionAttrName;
+                } else if (!val.compare(maxContentBoostAttrName)) {
+                    lastAttributeName = maxContentBoostAttrName;
+                } else if (!val.compare(minContentBoostAttrName)) {
+                    lastAttributeName = minContentBoostAttrName;
+                } else if (!val.compare(gammaAttrName)) {
+                    lastAttributeName = gammaAttrName;
+                } else if (!val.compare(offsetSdrAttrName)) {
+                    lastAttributeName = offsetSdrAttrName;
+                } else if (!val.compare(offsetHdrAttrName)) {
+                    lastAttributeName = offsetHdrAttrName;
+                } else if (!val.compare(hdrCapacityMinAttrName)) {
+                    lastAttributeName = hdrCapacityMinAttrName;
+                } else if (!val.compare(hdrCapacityMaxAttrName)) {
+                    lastAttributeName = hdrCapacityMaxAttrName;
+                } else if (!val.compare(baseRenditionIsHdrAttrName)) {
+                    lastAttributeName = baseRenditionIsHdrAttrName;
+                } else {
+                    lastAttributeName = "";
+                }
+            }
+        }
+        return context.GetResult();
+    }
+
+    virtual DataMatchResult AttributeValue(const XmlTokenContext& context) {
+        string val;
+        if (state == Started) {
+            if (context.BuildTokenValue(&val, true)) {
+                if (!lastAttributeName.compare(versionAttrName)) {
+                    versionStr = val;
+                    versionFound = true;
+                } else if (!lastAttributeName.compare(maxContentBoostAttrName)) {
+                    maxContentBoostStr = val;
+                    maxContentBoostFound = true;
+                } else if (!lastAttributeName.compare(minContentBoostAttrName)) {
+                    minContentBoostStr = val;
+                    minContentBoostFound = true;
+                } else if (!lastAttributeName.compare(gammaAttrName)) {
+                    gammaStr = val;
+                    gammaFound = true;
+                } else if (!lastAttributeName.compare(offsetSdrAttrName)) {
+                    offsetSdrStr = val;
+                    offsetSdrFound = true;
+                } else if (!lastAttributeName.compare(offsetHdrAttrName)) {
+                    offsetHdrStr = val;
+                    offsetHdrFound = true;
+                } else if (!lastAttributeName.compare(hdrCapacityMinAttrName)) {
+                    hdrCapacityMinStr = val;
+                    hdrCapacityMinFound = true;
+                } else if (!lastAttributeName.compare(hdrCapacityMaxAttrName)) {
+                    hdrCapacityMaxStr = val;
+                    hdrCapacityMaxFound = true;
+                } else if (!lastAttributeName.compare(baseRenditionIsHdrAttrName)) {
+                    baseRenditionIsHdrStr = val;
+                    baseRenditionIsHdrFound = true;
+                }
+            }
+        }
+        return context.GetResult();
+    }
+
+    bool getVersion(string* version, bool* present) {
+        if (state == Done) {
+            *version = versionStr;
+            *present = versionFound;
+            return true;
+        } else {
+            return false;
+        }
+    }
+
+    bool getMaxContentBoost(float* max_content_boost, bool* present) {
+        if (state == Done) {
+            *present = maxContentBoostFound;
+            stringstream ss(maxContentBoostStr);
+            float val;
+            if (ss >> val) {
+                *max_content_boost = exp2(val);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+    bool getMinContentBoost(float* min_content_boost, bool* present) {
+        if (state == Done) {
+            *present = minContentBoostFound;
+            stringstream ss(minContentBoostStr);
+            float val;
+            if (ss >> val) {
+                *min_content_boost = exp2(val);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+    bool getGamma(float* gamma, bool* present) {
+        if (state == Done) {
+            *present = gammaFound;
+            stringstream ss(gammaStr);
+            float val;
+            if (ss >> val) {
+                *gamma = val;
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+
+    bool getOffsetSdr(float* offset_sdr, bool* present) {
+        if (state == Done) {
+            *present = offsetSdrFound;
+            stringstream ss(offsetSdrStr);
+            float val;
+            if (ss >> val) {
+                *offset_sdr = val;
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+
+    bool getOffsetHdr(float* offset_hdr, bool* present) {
+        if (state == Done) {
+            *present = offsetHdrFound;
+            stringstream ss(offsetHdrStr);
+            float val;
+            if (ss >> val) {
+                *offset_hdr = val;
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+
+    bool getHdrCapacityMin(float* hdr_capacity_min, bool* present) {
+        if (state == Done) {
+            *present = hdrCapacityMinFound;
+            stringstream ss(hdrCapacityMinStr);
+            float val;
+            if (ss >> val) {
+                *hdr_capacity_min = exp2(val);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+
+    bool getHdrCapacityMax(float* hdr_capacity_max, bool* present) {
+        if (state == Done) {
+            *present = hdrCapacityMaxFound;
+            stringstream ss(hdrCapacityMaxStr);
+            float val;
+            if (ss >> val) {
+                *hdr_capacity_max = exp2(val);
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+
+    bool getBaseRenditionIsHdr(bool* base_rendition_is_hdr, bool* present) {
+        if (state == Done) {
+            *present = baseRenditionIsHdrFound;
+            if (!baseRenditionIsHdrStr.compare("False")) {
+                *base_rendition_is_hdr = false;
+                return true;
+            } else if (!baseRenditionIsHdrStr.compare("True")) {
+                *base_rendition_is_hdr = true;
+                return true;
+            } else {
+                return false;
+            }
+        } else {
+            return false;
+        }
+    }
+
+
+
+private:
+    static const string containerName;
+
+    static const string versionAttrName;
+    string              versionStr;
+    bool                versionFound;
+    static const string maxContentBoostAttrName;
+    string              maxContentBoostStr;
+    bool                maxContentBoostFound;
+    static const string minContentBoostAttrName;
+    string              minContentBoostStr;
+    bool                minContentBoostFound;
+    static const string gammaAttrName;
+    string              gammaStr;
+    bool                gammaFound;
+    static const string offsetSdrAttrName;
+    string              offsetSdrStr;
+    bool                offsetSdrFound;
+    static const string offsetHdrAttrName;
+    string              offsetHdrStr;
+    bool                offsetHdrFound;
+    static const string hdrCapacityMinAttrName;
+    string              hdrCapacityMinStr;
+    bool                hdrCapacityMinFound;
+    static const string hdrCapacityMaxAttrName;
+    string              hdrCapacityMaxStr;
+    bool                hdrCapacityMaxFound;
+    static const string baseRenditionIsHdrAttrName;
+    string              baseRenditionIsHdrStr;
+    bool                baseRenditionIsHdrFound;
+
+    string              lastAttributeName;
+    ParseState          state;
+};
+
+// GContainer XMP constants - URI and namespace prefix
+const string kContainerUri        = "http://ns.google.com/photos/1.0/container/";
+const string kContainerPrefix     = "Container";
+
+// GContainer XMP constants - element and attribute names
+const string kConDirectory            = Name(kContainerPrefix, "Directory");
+const string kConItem                 = Name(kContainerPrefix, "Item");
+
+// GContainer XMP constants - names for XMP handlers
+const string XMPXmlHandler::containerName = "rdf:Description";
+// Item XMP constants - URI and namespace prefix
+const string kItemUri        = "http://ns.google.com/photos/1.0/container/item/";
+const string kItemPrefix     = "Item";
+
+// Item XMP constants - element and attribute names
+const string kItemLength           = Name(kItemPrefix, "Length");
+const string kItemMime             = Name(kItemPrefix, "Mime");
+const string kItemSemantic         = Name(kItemPrefix, "Semantic");
+
+// Item XMP constants - element and attribute values
+const string kSemanticPrimary = "Primary";
+const string kSemanticGainMap = "GainMap";
+const string kMimeImageJpeg   = "image/jpeg";
+
+// GainMap XMP constants - URI and namespace prefix
+const string kGainMapUri      = "http://ns.adobe.com/hdr-gain-map/1.0/";
+const string kGainMapPrefix   = "hdrgm";
+
+// GainMap XMP constants - element and attribute names
+const string kMapVersion            = Name(kGainMapPrefix, "Version");
+const string kMapGainMapMin         = Name(kGainMapPrefix, "GainMapMin");
+const string kMapGainMapMax         = Name(kGainMapPrefix, "GainMapMax");
+const string kMapGamma              = Name(kGainMapPrefix, "Gamma");
+const string kMapOffsetSdr          = Name(kGainMapPrefix, "OffsetSDR");
+const string kMapOffsetHdr          = Name(kGainMapPrefix, "OffsetHDR");
+const string kMapHDRCapacityMin     = Name(kGainMapPrefix, "HDRCapacityMin");
+const string kMapHDRCapacityMax     = Name(kGainMapPrefix, "HDRCapacityMax");
+const string kMapBaseRenditionIsHDR = Name(kGainMapPrefix, "BaseRenditionIsHDR");
+
+// GainMap XMP constants - names for XMP handlers
+const string XMPXmlHandler::versionAttrName = kMapVersion;
+const string XMPXmlHandler::minContentBoostAttrName = kMapGainMapMin;
+const string XMPXmlHandler::maxContentBoostAttrName = kMapGainMapMax;
+const string XMPXmlHandler::gammaAttrName = kMapGamma;
+const string XMPXmlHandler::offsetSdrAttrName = kMapOffsetSdr;
+const string XMPXmlHandler::offsetHdrAttrName = kMapOffsetHdr;
+const string XMPXmlHandler::hdrCapacityMinAttrName = kMapHDRCapacityMin;
+const string XMPXmlHandler::hdrCapacityMaxAttrName = kMapHDRCapacityMax;
+const string XMPXmlHandler::baseRenditionIsHdrAttrName = kMapBaseRenditionIsHDR;
+
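+// Parses gain map metadata from the XMP payload of an APP1 segment. xmp_data is expected to
+// start with the XMP namespace string and its null terminator, followed by the XMP packet.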
+bool getMetadataFromXMP(uint8_t* xmp_data, size_t xmp_size, ultrahdr_metadata_struct* metadata) {
+    string nameSpace = "http://ns.adobe.com/xap/1.0/\0";
+
+    if (xmp_size < nameSpace.size()+2) {
+        // Data too short
+        return false;
+    }
+
+    if (strncmp(reinterpret_cast<char*>(xmp_data), nameSpace.c_str(), nameSpace.size())) {
+        // Not correct namespace
+        return false;
+    }
+
+    // Position the pointers to the start of XMP XML portion
+    xmp_data += nameSpace.size()+1;
+    xmp_size -= nameSpace.size()+1;
+    XMPXmlHandler handler;
+
+    // Trim trailing data after the closing '>' of the XMP packet; otherwise the parser
+    // reports an error.
+    while (xmp_size > 1 && xmp_data[xmp_size - 1] != '>') {
+        xmp_size--;
+    }
+
+    string str(reinterpret_cast<const char*>(xmp_data), xmp_size);
+    MessageHandler msg_handler;
+    unique_ptr<XmlRule> rule(new XmlElementRule);
+    XmlReader reader(&handler, &msg_handler);
+    reader.StartParse(std::move(rule));
+    reader.Parse(str);
+    reader.FinishParse();
+    if (reader.HasErrors()) {
+        // Parse error
+        return false;
+    }
+
+    // Apply default values to any fields that are not present, except for version,
+    // maxContentBoost, and hdrCapacityMax, which are required. Return false if
+    // we encounter a present field that could not be parsed, since this
+    // indicates it is invalid (e.g. a string where there should be a float).
+    bool present = false;
+    if (!handler.getVersion(&metadata->version, &present) || !present) {
+        return false;
+    }
+    if (!handler.getMaxContentBoost(&metadata->maxContentBoost, &present) || !present) {
+        return false;
+    }
+    if (!handler.getHdrCapacityMax(&metadata->hdrCapacityMax, &present) || !present) {
+        return false;
+    }
+    if (!handler.getMinContentBoost(&metadata->minContentBoost, &present)) {
+        if (present) return false;
+        metadata->minContentBoost = 1.0f;
+    }
+    if (!handler.getGamma(&metadata->gamma, &present)) {
+        if (present) return false;
+        metadata->gamma = 1.0f;
+    }
+    if (!handler.getOffsetSdr(&metadata->offsetSdr, &present)) {
+        if (present) return false;
+        metadata->offsetSdr = 1.0f / 64.0f;
+    }
+    if (!handler.getOffsetHdr(&metadata->offsetHdr, &present)) {
+        if (present) return false;
+        metadata->offsetHdr = 1.0f / 64.0f;
+    }
+    if (!handler.getHdrCapacityMin(&metadata->hdrCapacityMin, &present)) {
+        if (present) return false;
+        metadata->hdrCapacityMin = 1.0f;
+    }
+
+    bool base_rendition_is_hdr;
+    if (!handler.getBaseRenditionIsHdr(&base_rendition_is_hdr, &present)) {
+        if (present) return false;
+        base_rendition_is_hdr = false;
+    }
+    if (base_rendition_is_hdr) {
+        ALOGE("Base rendition of HDR is not supported!");
+        return false;
+    }
+
+    return true;
+}
+
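+// Builds the XMP packet for the primary image: a GContainer directory listing the primary image
+// and the gain map, recording the gain map's byte length so that a parser can locate it.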
+string generateXmpForPrimaryImage(int secondary_image_length, ultrahdr_metadata_struct& metadata) {
+  const vector<string> kConDirSeq({kConDirectory, string("rdf:Seq")});
+  const vector<string> kLiItem({string("rdf:li"), kConItem});
+
+  std::stringstream ss;
+  photos_editing_formats::image_io::XmlWriter writer(ss);
+  writer.StartWritingElement("x:xmpmeta");
+  writer.WriteXmlns("x", "adobe:ns:meta/");
+  writer.WriteAttributeNameAndValue("x:xmptk", "Adobe XMP Core 5.1.2");
+  writer.StartWritingElement("rdf:RDF");
+  writer.WriteXmlns("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#");
+  writer.StartWritingElement("rdf:Description");
+  writer.WriteXmlns(kContainerPrefix, kContainerUri);
+  writer.WriteXmlns(kItemPrefix, kItemUri);
+  writer.WriteXmlns(kGainMapPrefix, kGainMapUri);
+  writer.WriteAttributeNameAndValue(kMapVersion, metadata.version);
+
+  writer.StartWritingElements(kConDirSeq);
+
+  size_t item_depth = writer.StartWritingElement("rdf:li");
+  writer.WriteAttributeNameAndValue("rdf:parseType", "Resource");
+  writer.StartWritingElement(kConItem);
+  writer.WriteAttributeNameAndValue(kItemSemantic, kSemanticPrimary);
+  writer.WriteAttributeNameAndValue(kItemMime, kMimeImageJpeg);
+  writer.FinishWritingElementsToDepth(item_depth);
+
+  writer.StartWritingElement("rdf:li");
+  writer.WriteAttributeNameAndValue("rdf:parseType", "Resource");
+  writer.StartWritingElement(kConItem);
+  writer.WriteAttributeNameAndValue(kItemSemantic, kSemanticGainMap);
+  writer.WriteAttributeNameAndValue(kItemMime, kMimeImageJpeg);
+  writer.WriteAttributeNameAndValue(kItemLength, secondary_image_length);
+
+  writer.FinishWriting();
+
+  return ss.str();
+}
+
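+// Builds the XMP packet for the gain map image itself. Boost and capacity values are written in
+// log2 form, mirroring the exp2 applied when they are parsed back in getMetadataFromXMP().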
+string generateXmpForSecondaryImage(ultrahdr_metadata_struct& metadata) {
+  const vector<string> kConDirSeq({kConDirectory, string("rdf:Seq")});
+
+  std::stringstream ss;
+  photos_editing_formats::image_io::XmlWriter writer(ss);
+  writer.StartWritingElement("x:xmpmeta");
+  writer.WriteXmlns("x", "adobe:ns:meta/");
+  writer.WriteAttributeNameAndValue("x:xmptk", "Adobe XMP Core 5.1.2");
+  writer.StartWritingElement("rdf:RDF");
+  writer.WriteXmlns("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#");
+  writer.StartWritingElement("rdf:Description");
+  writer.WriteXmlns(kGainMapPrefix, kGainMapUri);
+  writer.WriteAttributeNameAndValue(kMapVersion, metadata.version);
+  writer.WriteAttributeNameAndValue(kMapGainMapMin, log2(metadata.minContentBoost));
+  writer.WriteAttributeNameAndValue(kMapGainMapMax, log2(metadata.maxContentBoost));
+  writer.WriteAttributeNameAndValue(kMapGamma, metadata.gamma);
+  writer.WriteAttributeNameAndValue(kMapOffsetSdr, metadata.offsetSdr);
+  writer.WriteAttributeNameAndValue(kMapOffsetHdr, metadata.offsetHdr);
+  writer.WriteAttributeNameAndValue(kMapHDRCapacityMin, log2(metadata.hdrCapacityMin));
+  writer.WriteAttributeNameAndValue(kMapHDRCapacityMax, log2(metadata.hdrCapacityMax));
+  writer.WriteAttributeNameAndValue(kMapBaseRenditionIsHDR, "False");
+  writer.FinishWriting();
+
+  return ss.str();
+}
+
+} // namespace android::ultrahdr
diff --git a/libs/ultrahdr/multipictureformat.cpp b/libs/ultrahdr/multipictureformat.cpp
new file mode 100644
index 0000000..f1679ef
--- /dev/null
+++ b/libs/ultrahdr/multipictureformat.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <ultrahdr/multipictureformat.h>
+#include <ultrahdr/jpegrutils.h>
+
+namespace android::ultrahdr {
+size_t calculateMpfSize() {
+    return sizeof(kMpfSig) +                 // Signature
+            kMpEndianSize +                   // Endianness
+            sizeof(uint32_t) +                // Index IFD Offset
+            sizeof(uint16_t) +                // Tag count
+            kTagSerializedCount * kTagSize +  // 3 tags at 12 bytes each
+            sizeof(uint32_t) +                // Attribute IFD offset
+            kNumPictures * kMPEntrySize;      // MP Entries for each image
+}
+
+sp<DataStruct> generateMpf(int primary_image_size, int primary_image_offset,
+        int secondary_image_size, int secondary_image_offset) {
+    size_t mpf_size = calculateMpfSize();
+    sp<DataStruct> dataStruct = sp<DataStruct>::make(mpf_size);
+
+    dataStruct->write(static_cast<const void*>(kMpfSig), sizeof(kMpfSig));
+#if USE_BIG_ENDIAN
+    dataStruct->write(static_cast<const void*>(kMpBigEndian), kMpEndianSize);
+#else
+    dataStruct->write(static_cast<const void*>(kMpLittleEndian), kMpEndianSize);
+#endif
+
+    // Set the Index IFD offset be the position after the endianness value and this offset.
+    constexpr uint32_t indexIfdOffset =
+            static_cast<uint32_t>(kMpEndianSize + sizeof(kMpfSig));
+    dataStruct->write32(Endian_SwapBE32(indexIfdOffset));
+
+    // We will write 3 tags (version, number of images, MP entries).
+    dataStruct->write16(Endian_SwapBE16(kTagSerializedCount));
+
+    // Write the version tag.
+    dataStruct->write16(Endian_SwapBE16(kVersionTag));
+    dataStruct->write16(Endian_SwapBE16(kVersionType));
+    dataStruct->write32(Endian_SwapBE32(kVersionCount));
+    dataStruct->write(kVersionExpected, kVersionSize);
+
+    // Write the number of images.
+    dataStruct->write16(Endian_SwapBE16(kNumberOfImagesTag));
+    dataStruct->write16(Endian_SwapBE16(kNumberOfImagesType));
+    dataStruct->write32(Endian_SwapBE32(kNumberOfImagesCount));
+    dataStruct->write32(Endian_SwapBE32(kNumPictures));
+
+    // Write the MP entries.
+    dataStruct->write16(Endian_SwapBE16(kMPEntryTag));
+    dataStruct->write16(Endian_SwapBE16(kMPEntryType));
+    dataStruct->write32(Endian_SwapBE32(kMPEntrySize * kNumPictures));
+    const uint32_t mpEntryOffset =
+            static_cast<uint32_t>(dataStruct->getBytesWritten() -  // The bytes written so far
+                                  sizeof(kMpfSig) +   // Excluding the MPF signature
+                                  sizeof(uint32_t) +  // The 4 bytes for this offset
+                                  sizeof(uint32_t));  // The 4 bytes for the attribute IFD offset.
+    dataStruct->write32(Endian_SwapBE32(mpEntryOffset));
+
+    // Write the attribute IFD offset (zero because we don't write it).
+    dataStruct->write32(0);
+
+    // Write the MP entries for primary image
+    dataStruct->write32(
+            Endian_SwapBE32(kMPEntryAttributeFormatJpeg | kMPEntryAttributeTypePrimary));
+    dataStruct->write32(Endian_SwapBE32(primary_image_size));
+    dataStruct->write32(Endian_SwapBE32(primary_image_offset));
+    dataStruct->write16(0);
+    dataStruct->write16(0);
+
+    // Write the MP entries for secondary image
+    dataStruct->write32(Endian_SwapBE32(kMPEntryAttributeFormatJpeg));
+    dataStruct->write32(Endian_SwapBE32(secondary_image_size));
+    dataStruct->write32(Endian_SwapBE32(secondary_image_offset));
+    dataStruct->write16(0);
+    dataStruct->write16(0);
+
+    return dataStruct;
+}
+
+} // namespace android::ultrahdr
diff --git a/libs/ultrahdr/tests/Android.bp b/libs/ultrahdr/tests/Android.bp
new file mode 100644
index 0000000..5944130
--- /dev/null
+++ b/libs/ultrahdr/tests/Android.bp
@@ -0,0 +1,79 @@
+// Copyright 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_native_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_native_license"],
+}
+
+cc_test {
+    name: "libultrahdr_test",
+    test_suites: ["device-tests"],
+    srcs: [
+        "gainmapmath_test.cpp",
+        "icchelper_test.cpp",
+        "jpegr_test.cpp",
+    ],
+    shared_libs: [
+        "libimage_io",
+        "libjpeg",
+        "liblog",
+    ],
+    static_libs: [
+        "libgmock",
+        "libgtest",
+        "libjpegdecoder",
+        "libjpegencoder",
+        "libultrahdr",
+        "libutils",
+    ],
+}
+
+cc_test {
+    name: "libjpegencoderhelper_test",
+    test_suites: ["device-tests"],
+    srcs: [
+        "jpegencoderhelper_test.cpp",
+    ],
+    shared_libs: [
+        "libjpeg",
+        "liblog",
+    ],
+    static_libs: [
+        "libgtest",
+        "libjpegencoder",
+    ],
+}
+
+cc_test {
+    name: "libjpegdecoderhelper_test",
+    test_suites: ["device-tests"],
+    srcs: [
+        "jpegdecoderhelper_test.cpp",
+    ],
+    shared_libs: [
+        "libjpeg",
+        "liblog",
+    ],
+    static_libs: [
+        "libgtest",
+        "libjpegdecoder",
+        "libultrahdr",
+        "libutils",
+    ],
+}
diff --git a/libs/ultrahdr/tests/data/jpeg_image.jpg b/libs/ultrahdr/tests/data/jpeg_image.jpg
new file mode 100644
index 0000000..e285742
--- /dev/null
+++ b/libs/ultrahdr/tests/data/jpeg_image.jpg
Binary files differ
diff --git a/libs/ultrahdr/tests/data/minnie-318x240.yu12 b/libs/ultrahdr/tests/data/minnie-318x240.yu12
new file mode 100644
index 0000000..7b2fc71
--- /dev/null
+++ b/libs/ultrahdr/tests/data/minnie-318x240.yu12
Binary files differ
diff --git a/libs/ultrahdr/tests/data/minnie-320x240-y.jpg b/libs/ultrahdr/tests/data/minnie-320x240-y.jpg
new file mode 100644
index 0000000..20b5a2c
--- /dev/null
+++ b/libs/ultrahdr/tests/data/minnie-320x240-y.jpg
Binary files differ
diff --git a/libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg b/libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg
new file mode 100644
index 0000000..c7f4538
--- /dev/null
+++ b/libs/ultrahdr/tests/data/minnie-320x240-yuv-icc.jpg
Binary files differ
diff --git a/libs/ultrahdr/tests/data/minnie-320x240-yuv.jpg b/libs/ultrahdr/tests/data/minnie-320x240-yuv.jpg
new file mode 100644
index 0000000..41300f4
--- /dev/null
+++ b/libs/ultrahdr/tests/data/minnie-320x240-yuv.jpg
Binary files differ
diff --git a/libs/ultrahdr/tests/data/minnie-320x240.y b/libs/ultrahdr/tests/data/minnie-320x240.y
new file mode 100644
index 0000000..f9d8371
--- /dev/null
+++ b/libs/ultrahdr/tests/data/minnie-320x240.y
Binary files differ
diff --git a/libs/ultrahdr/tests/data/minnie-320x240.yu12 b/libs/ultrahdr/tests/data/minnie-320x240.yu12
new file mode 100644
index 0000000..0d66f53
--- /dev/null
+++ b/libs/ultrahdr/tests/data/minnie-320x240.yu12
Binary files differ
diff --git a/libs/ultrahdr/tests/data/raw_p010_image.p010 b/libs/ultrahdr/tests/data/raw_p010_image.p010
new file mode 100644
index 0000000..01673bf
--- /dev/null
+++ b/libs/ultrahdr/tests/data/raw_p010_image.p010
Binary files differ
diff --git a/libs/ultrahdr/tests/data/raw_p010_image_with_stride.p010 b/libs/ultrahdr/tests/data/raw_p010_image_with_stride.p010
new file mode 100644
index 0000000..e7a5dc8
--- /dev/null
+++ b/libs/ultrahdr/tests/data/raw_p010_image_with_stride.p010
Binary files differ
diff --git a/libs/ultrahdr/tests/data/raw_yuv420_image.yuv420 b/libs/ultrahdr/tests/data/raw_yuv420_image.yuv420
new file mode 100644
index 0000000..c043da6
--- /dev/null
+++ b/libs/ultrahdr/tests/data/raw_yuv420_image.yuv420
Binary files differ
diff --git a/libs/ultrahdr/tests/gainmapmath_test.cpp b/libs/ultrahdr/tests/gainmapmath_test.cpp
new file mode 100644
index 0000000..af90365
--- /dev/null
+++ b/libs/ultrahdr/tests/gainmapmath_test.cpp
@@ -0,0 +1,1356 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cmath>
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+#include <ultrahdr/gainmapmath.h>
+
+namespace android::ultrahdr {
+
+class GainMapMathTest : public testing::Test {
+public:
+  GainMapMathTest();
+  ~GainMapMathTest();
+
+  float ComparisonEpsilon() { return 1e-4f; }
+  float LuminanceEpsilon() { return 1e-2f; }
+  float YuvConversionEpsilon() { return 1.0f / (255.0f * 2.0f); }
+
+  Color Yuv420(uint8_t y, uint8_t u, uint8_t v) {
+      return {{{ static_cast<float>(y) / 255.0f,
+                 (static_cast<float>(u) - 128.0f) / 255.0f,
+                 (static_cast<float>(v) - 128.0f) / 255.0f }}};
+  }
+
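+  // P010 samples are 10-bit limited range: Y spans [64, 940] and chroma spans [64, 960], hence
+  // the normalization by 876 and 896 below.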
+  Color P010(uint16_t y, uint16_t u, uint16_t v) {
+      return {{{ (static_cast<float>(y) - 64.0f) / 876.0f,
+                 (static_cast<float>(u) - 64.0f) / 896.0f - 0.5f,
+                 (static_cast<float>(v) - 64.0f) / 896.0f - 0.5f }}};
+  }
+
+  float Map(uint8_t e) {
+    return static_cast<float>(e) / 255.0f;
+  }
+
+  Color ColorMin(Color e1, Color e2) {
+    return {{{ fmin(e1.r, e2.r), fmin(e1.g, e2.g), fmin(e1.b, e2.b) }}};
+  }
+
+  Color ColorMax(Color e1, Color e2) {
+    return {{{ fmax(e1.r, e2.r), fmax(e1.g, e2.g), fmax(e1.b, e2.b) }}};
+  }
+
+  Color RgbBlack() { return {{{ 0.0f, 0.0f, 0.0f }}}; }
+  Color RgbWhite() { return {{{ 1.0f, 1.0f, 1.0f }}}; }
+
+  Color RgbRed() { return {{{ 1.0f, 0.0f, 0.0f }}}; }
+  Color RgbGreen() { return {{{ 0.0f, 1.0f, 0.0f }}}; }
+  Color RgbBlue() { return {{{ 0.0f, 0.0f, 1.0f }}}; }
+
+  Color YuvBlack() { return {{{ 0.0f, 0.0f, 0.0f }}}; }
+  Color YuvWhite() { return {{{ 1.0f, 0.0f, 0.0f }}}; }
+
+  Color SrgbYuvRed() { return {{{ 0.2126f, -0.11457f, 0.5f }}}; }
+  Color SrgbYuvGreen() { return {{{ 0.7152f, -0.38543f, -0.45415f }}}; }
+  Color SrgbYuvBlue() { return {{{ 0.0722f, 0.5f, -0.04585f }}}; }
+
+  Color P3YuvRed() { return {{{ 0.299f, -0.16874f, 0.5f }}}; }
+  Color P3YuvGreen() { return {{{ 0.587f, -0.33126f, -0.41869f }}}; }
+  Color P3YuvBlue() { return {{{ 0.114f, 0.5f, -0.08131f }}}; }
+
+  Color Bt2100YuvRed() { return {{{ 0.2627f, -0.13963f, 0.5f }}}; }
+  Color Bt2100YuvGreen() { return {{{ 0.6780f, -0.36037f, -0.45979f }}}; }
+  Color Bt2100YuvBlue() { return {{{ 0.0593f, 0.5f, -0.04021f }}}; }
+
+  float SrgbYuvToLuminance(Color yuv_gamma, ColorCalculationFn luminanceFn) {
+    Color rgb_gamma = srgbYuvToRgb(yuv_gamma);
+    Color rgb = srgbInvOetf(rgb_gamma);
+    float luminance_scaled = luminanceFn(rgb);
+    return luminance_scaled * kSdrWhiteNits;
+  }
+
+  float P3YuvToLuminance(Color yuv_gamma, ColorCalculationFn luminanceFn) {
+    Color rgb_gamma = p3YuvToRgb(yuv_gamma);
+    Color rgb = srgbInvOetf(rgb_gamma);
+    float luminance_scaled = luminanceFn(rgb);
+    return luminance_scaled * kSdrWhiteNits;
+  }
+
+  float Bt2100YuvToLuminance(Color yuv_gamma, ColorTransformFn hdrInvOetf,
+                             ColorTransformFn gamutConversionFn, ColorCalculationFn luminanceFn,
+                             float scale_factor) {
+    Color rgb_gamma = bt2100YuvToRgb(yuv_gamma);
+    Color rgb = hdrInvOetf(rgb_gamma);
+    rgb = gamutConversionFn(rgb);
+    float luminance_scaled = luminanceFn(rgb);
+    return luminance_scaled * scale_factor;
+  }
+
+  Color Recover(Color yuv_gamma, float gain, ultrahdr_metadata_ptr metadata) {
+    Color rgb_gamma = srgbYuvToRgb(yuv_gamma);
+    Color rgb = srgbInvOetf(rgb_gamma);
+    return applyGain(rgb, gain, metadata);
+  }
+
+  jpegr_uncompressed_struct Yuv420Image() {
+    static uint8_t pixels[] = {
+      // Y
+      0x00, 0x10, 0x20, 0x30,
+      0x01, 0x11, 0x21, 0x31,
+      0x02, 0x12, 0x22, 0x32,
+      0x03, 0x13, 0x23, 0x33,
+      // U
+      0xA0, 0xA1,
+      0xA2, 0xA3,
+      // V
+      0xB0, 0xB1,
+      0xB2, 0xB3,
+    };
+    return { pixels, 4, 4, ULTRAHDR_COLORGAMUT_BT709 };
+  }
+
+  Color (*Yuv420Colors())[4] {
+    static Color colors[4][4] = {
+      {
+        Yuv420(0x00, 0xA0, 0xB0), Yuv420(0x10, 0xA0, 0xB0),
+        Yuv420(0x20, 0xA1, 0xB1), Yuv420(0x30, 0xA1, 0xB1),
+      }, {
+        Yuv420(0x01, 0xA0, 0xB0), Yuv420(0x11, 0xA0, 0xB0),
+        Yuv420(0x21, 0xA1, 0xB1), Yuv420(0x31, 0xA1, 0xB1),
+      }, {
+        Yuv420(0x02, 0xA2, 0xB2), Yuv420(0x12, 0xA2, 0xB2),
+        Yuv420(0x22, 0xA3, 0xB3), Yuv420(0x32, 0xA3, 0xB3),
+      }, {
+        Yuv420(0x03, 0xA2, 0xB2), Yuv420(0x13, 0xA2, 0xB2),
+        Yuv420(0x23, 0xA3, 0xB3), Yuv420(0x33, 0xA3, 0xB3),
+      },
+    };
+    return colors;
+  }
+
+  jpegr_uncompressed_struct P010Image() {
+    static uint16_t pixels[] = {
+      // Y
+      0x00 << 6, 0x10 << 6, 0x20 << 6, 0x30 << 6,
+      0x01 << 6, 0x11 << 6, 0x21 << 6, 0x31 << 6,
+      0x02 << 6, 0x12 << 6, 0x22 << 6, 0x32 << 6,
+      0x03 << 6, 0x13 << 6, 0x23 << 6, 0x33 << 6,
+      // UV
+      0xA0 << 6, 0xB0 << 6, 0xA1 << 6, 0xB1 << 6,
+      0xA2 << 6, 0xB2 << 6, 0xA3 << 6, 0xB3 << 6,
+    };
+    return { pixels, 4, 4, ULTRAHDR_COLORGAMUT_BT709 };
+  }
+
+  Color (*P010Colors())[4] {
+    static Color colors[4][4] = {
+      {
+        P010(0x00, 0xA0, 0xB0), P010(0x10, 0xA0, 0xB0),
+        P010(0x20, 0xA1, 0xB1), P010(0x30, 0xA1, 0xB1),
+      }, {
+        P010(0x01, 0xA0, 0xB0), P010(0x11, 0xA0, 0xB0),
+        P010(0x21, 0xA1, 0xB1), P010(0x31, 0xA1, 0xB1),
+      }, {
+        P010(0x02, 0xA2, 0xB2), P010(0x12, 0xA2, 0xB2),
+        P010(0x22, 0xA3, 0xB3), P010(0x32, 0xA3, 0xB3),
+      }, {
+        P010(0x03, 0xA2, 0xB2), P010(0x13, 0xA2, 0xB2),
+        P010(0x23, 0xA3, 0xB3), P010(0x33, 0xA3, 0xB3),
+      },
+    };
+    return colors;
+  }
+
+  jpegr_uncompressed_struct MapImage() {
+    static uint8_t pixels[] = {
+      0x00, 0x10, 0x20, 0x30,
+      0x01, 0x11, 0x21, 0x31,
+      0x02, 0x12, 0x22, 0x32,
+      0x03, 0x13, 0x23, 0x33,
+    };
+    return { pixels, 4, 4, ULTRAHDR_COLORGAMUT_UNSPECIFIED };
+  }
+
+  float (*MapValues())[4] {
+    static float values[4][4] = {
+      {
+        Map(0x00), Map(0x10), Map(0x20), Map(0x30),
+      }, {
+        Map(0x01), Map(0x11), Map(0x21), Map(0x31),
+      }, {
+        Map(0x02), Map(0x12), Map(0x22), Map(0x32),
+      }, {
+        Map(0x03), Map(0x13), Map(0x23), Map(0x33),
+      },
+    };
+    return values;
+  }
+
+protected:
+  virtual void SetUp();
+  virtual void TearDown();
+};
+
+GainMapMathTest::GainMapMathTest() {}
+GainMapMathTest::~GainMapMathTest() {}
+
+void GainMapMathTest::SetUp() {}
+void GainMapMathTest::TearDown() {}
+
+#define EXPECT_RGB_EQ(e1, e2)       \
+  EXPECT_FLOAT_EQ((e1).r, (e2).r);  \
+  EXPECT_FLOAT_EQ((e1).g, (e2).g);  \
+  EXPECT_FLOAT_EQ((e1).b, (e2).b)
+
+#define EXPECT_RGB_NEAR(e1, e2)                     \
+  EXPECT_NEAR((e1).r, (e2).r, ComparisonEpsilon()); \
+  EXPECT_NEAR((e1).g, (e2).g, ComparisonEpsilon()); \
+  EXPECT_NEAR((e1).b, (e2).b, ComparisonEpsilon())
+
+#define EXPECT_RGB_CLOSE(e1, e2)                            \
+  EXPECT_NEAR((e1).r, (e2).r, ComparisonEpsilon() * 10.0f); \
+  EXPECT_NEAR((e1).g, (e2).g, ComparisonEpsilon() * 10.0f); \
+  EXPECT_NEAR((e1).b, (e2).b, ComparisonEpsilon() * 10.0f)
+
+#define EXPECT_YUV_EQ(e1, e2)       \
+  EXPECT_FLOAT_EQ((e1).y, (e2).y);  \
+  EXPECT_FLOAT_EQ((e1).u, (e2).u);  \
+  EXPECT_FLOAT_EQ((e1).v, (e2).v)
+
+#define EXPECT_YUV_NEAR(e1, e2)                     \
+  EXPECT_NEAR((e1).y, (e2).y, ComparisonEpsilon()); \
+  EXPECT_NEAR((e1).u, (e2).u, ComparisonEpsilon()); \
+  EXPECT_NEAR((e1).v, (e2).v, ComparisonEpsilon())
+
+#define EXPECT_YUV_BETWEEN(e, min, max)                                           \
+  EXPECT_THAT((e).y, testing::AllOf(testing::Ge((min).y), testing::Le((max).y))); \
+  EXPECT_THAT((e).u, testing::AllOf(testing::Ge((min).u), testing::Le((max).u))); \
+  EXPECT_THAT((e).v, testing::AllOf(testing::Ge((min).v), testing::Le((max).v)))
+
+// TODO: a bunch of these tests can be parameterized.
+
+TEST_F(GainMapMathTest, ColorConstruct) {
+  Color e1 = {{{ 0.1f, 0.2f, 0.3f }}};
+
+  EXPECT_FLOAT_EQ(e1.r, 0.1f);
+  EXPECT_FLOAT_EQ(e1.g, 0.2f);
+  EXPECT_FLOAT_EQ(e1.b, 0.3f);
+
+  EXPECT_FLOAT_EQ(e1.y, 0.1f);
+  EXPECT_FLOAT_EQ(e1.u, 0.2f);
+  EXPECT_FLOAT_EQ(e1.v, 0.3f);
+}
+
+TEST_F(GainMapMathTest, ColorAddColor) {
+  Color e1 = {{{ 0.1f, 0.2f, 0.3f }}};
+
+  Color e2 = e1 + e1;
+  EXPECT_FLOAT_EQ(e2.r, e1.r * 2.0f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g * 2.0f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b * 2.0f);
+
+  e2 += e1;
+  EXPECT_FLOAT_EQ(e2.r, e1.r * 3.0f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g * 3.0f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b * 3.0f);
+}
+
+TEST_F(GainMapMathTest, ColorAddFloat) {
+  Color e1 = {{{ 0.1f, 0.2f, 0.3f }}};
+
+  Color e2 = e1 + 0.1f;
+  EXPECT_FLOAT_EQ(e2.r, e1.r + 0.1f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g + 0.1f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b + 0.1f);
+
+  e2 += 0.1f;
+  EXPECT_FLOAT_EQ(e2.r, e1.r + 0.2f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g + 0.2f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b + 0.2f);
+}
+
+TEST_F(GainMapMathTest, ColorSubtractColor) {
+  Color e1 = {{{ 0.1f, 0.2f, 0.3f }}};
+
+  Color e2 = e1 - e1;
+  EXPECT_FLOAT_EQ(e2.r, 0.0f);
+  EXPECT_FLOAT_EQ(e2.g, 0.0f);
+  EXPECT_FLOAT_EQ(e2.b, 0.0f);
+
+  e2 -= e1;
+  EXPECT_FLOAT_EQ(e2.r, -e1.r);
+  EXPECT_FLOAT_EQ(e2.g, -e1.g);
+  EXPECT_FLOAT_EQ(e2.b, -e1.b);
+}
+
+TEST_F(GainMapMathTest, ColorSubtractFloat) {
+  Color e1 = {{{ 0.1f, 0.2f, 0.3f }}};
+
+  Color e2 = e1 - 0.1f;
+  EXPECT_FLOAT_EQ(e2.r, e1.r - 0.1f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g - 0.1f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b - 0.1f);
+
+  e2 -= 0.1f;
+  EXPECT_FLOAT_EQ(e2.r, e1.r - 0.2f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g - 0.2f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b - 0.2f);
+}
+
+TEST_F(GainMapMathTest, ColorMultiplyFloat) {
+  Color e1 = {{{ 0.1f, 0.2f, 0.3f }}};
+
+  Color e2 = e1 * 2.0f;
+  EXPECT_FLOAT_EQ(e2.r, e1.r * 2.0f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g * 2.0f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b * 2.0f);
+
+  e2 *= 2.0f;
+  EXPECT_FLOAT_EQ(e2.r, e1.r * 4.0f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g * 4.0f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b * 4.0f);
+}
+
+TEST_F(GainMapMathTest, ColorDivideFloat) {
+  Color e1 = {{{ 0.1f, 0.2f, 0.3f }}};
+
+  Color e2 = e1 / 2.0f;
+  EXPECT_FLOAT_EQ(e2.r, e1.r / 2.0f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g / 2.0f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b / 2.0f);
+
+  e2 /= 2.0f;
+  EXPECT_FLOAT_EQ(e2.r, e1.r / 4.0f);
+  EXPECT_FLOAT_EQ(e2.g, e1.g / 4.0f);
+  EXPECT_FLOAT_EQ(e2.b, e1.b / 4.0f);
+}
+
+TEST_F(GainMapMathTest, SrgbLuminance) {
+  EXPECT_FLOAT_EQ(srgbLuminance(RgbBlack()), 0.0f);
+  EXPECT_FLOAT_EQ(srgbLuminance(RgbWhite()), 1.0f);
+  EXPECT_FLOAT_EQ(srgbLuminance(RgbRed()), 0.2126f);
+  EXPECT_FLOAT_EQ(srgbLuminance(RgbGreen()), 0.7152f);
+  EXPECT_FLOAT_EQ(srgbLuminance(RgbBlue()), 0.0722f);
+}
+
+TEST_F(GainMapMathTest, SrgbYuvToRgb) {
+  Color rgb_black = srgbYuvToRgb(YuvBlack());
+  EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+  Color rgb_white = srgbYuvToRgb(YuvWhite());
+  EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+  Color rgb_r = srgbYuvToRgb(SrgbYuvRed());
+  EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+  Color rgb_g = srgbYuvToRgb(SrgbYuvGreen());
+  EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+  Color rgb_b = srgbYuvToRgb(SrgbYuvBlue());
+  EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
+
+TEST_F(GainMapMathTest, SrgbRgbToYuv) {
+  Color yuv_black = srgbRgbToYuv(RgbBlack());
+  EXPECT_YUV_NEAR(yuv_black, YuvBlack());
+
+  Color yuv_white = srgbRgbToYuv(RgbWhite());
+  EXPECT_YUV_NEAR(yuv_white, YuvWhite());
+
+  Color yuv_r = srgbRgbToYuv(RgbRed());
+  EXPECT_YUV_NEAR(yuv_r, SrgbYuvRed());
+
+  Color yuv_g = srgbRgbToYuv(RgbGreen());
+  EXPECT_YUV_NEAR(yuv_g, SrgbYuvGreen());
+
+  Color yuv_b = srgbRgbToYuv(RgbBlue());
+  EXPECT_YUV_NEAR(yuv_b, SrgbYuvBlue());
+}
+
+TEST_F(GainMapMathTest, SrgbRgbYuvRoundtrip) {
+  Color rgb_black = srgbYuvToRgb(srgbRgbToYuv(RgbBlack()));
+  EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+  Color rgb_white = srgbYuvToRgb(srgbRgbToYuv(RgbWhite()));
+  EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+  Color rgb_r = srgbYuvToRgb(srgbRgbToYuv(RgbRed()));
+  EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+  Color rgb_g = srgbYuvToRgb(srgbRgbToYuv(RgbGreen()));
+  EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+  Color rgb_b = srgbYuvToRgb(srgbRgbToYuv(RgbBlue()));
+  EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
+
+TEST_F(GainMapMathTest, SrgbTransferFunction) {
+  EXPECT_FLOAT_EQ(srgbInvOetf(0.0f), 0.0f);
+  EXPECT_NEAR(srgbInvOetf(0.02f), 0.00154f, ComparisonEpsilon());
+  EXPECT_NEAR(srgbInvOetf(0.04045f), 0.00313f, ComparisonEpsilon());
+  EXPECT_NEAR(srgbInvOetf(0.5f), 0.21404f, ComparisonEpsilon());
+  EXPECT_FLOAT_EQ(srgbInvOetf(1.0f), 1.0f);
+}
+
+TEST_F(GainMapMathTest, P3Luminance) {
+  EXPECT_FLOAT_EQ(p3Luminance(RgbBlack()), 0.0f);
+  EXPECT_FLOAT_EQ(p3Luminance(RgbWhite()), 1.0f);
+  EXPECT_FLOAT_EQ(p3Luminance(RgbRed()), 0.20949f);
+  EXPECT_FLOAT_EQ(p3Luminance(RgbGreen()), 0.72160f);
+  EXPECT_FLOAT_EQ(p3Luminance(RgbBlue()), 0.06891f);
+}
+
+TEST_F(GainMapMathTest, P3YuvToRgb) {
+  Color rgb_black = p3YuvToRgb(YuvBlack());
+  EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+  Color rgb_white = p3YuvToRgb(YuvWhite());
+  EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+  Color rgb_r = p3YuvToRgb(P3YuvRed());
+  EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+  Color rgb_g = p3YuvToRgb(P3YuvGreen());
+  EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+  Color rgb_b = p3YuvToRgb(P3YuvBlue());
+  EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
+
+TEST_F(GainMapMathTest, P3RgbToYuv) {
+  Color yuv_black = p3RgbToYuv(RgbBlack());
+  EXPECT_YUV_NEAR(yuv_black, YuvBlack());
+
+  Color yuv_white = p3RgbToYuv(RgbWhite());
+  EXPECT_YUV_NEAR(yuv_white, YuvWhite());
+
+  Color yuv_r = p3RgbToYuv(RgbRed());
+  EXPECT_YUV_NEAR(yuv_r, P3YuvRed());
+
+  Color yuv_g = p3RgbToYuv(RgbGreen());
+  EXPECT_YUV_NEAR(yuv_g, P3YuvGreen());
+
+  Color yuv_b = p3RgbToYuv(RgbBlue());
+  EXPECT_YUV_NEAR(yuv_b, P3YuvBlue());
+}
+
+TEST_F(GainMapMathTest, P3RgbYuvRoundtrip) {
+  Color rgb_black = p3YuvToRgb(p3RgbToYuv(RgbBlack()));
+  EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+  Color rgb_white = p3YuvToRgb(p3RgbToYuv(RgbWhite()));
+  EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+  Color rgb_r = p3YuvToRgb(p3RgbToYuv(RgbRed()));
+  EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+  Color rgb_g = p3YuvToRgb(p3RgbToYuv(RgbGreen()));
+  EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+  Color rgb_b = p3YuvToRgb(p3RgbToYuv(RgbBlue()));
+  EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
+
+TEST_F(GainMapMathTest, Bt2100Luminance) {
+  EXPECT_FLOAT_EQ(bt2100Luminance(RgbBlack()), 0.0f);
+  EXPECT_FLOAT_EQ(bt2100Luminance(RgbWhite()), 1.0f);
+  EXPECT_FLOAT_EQ(bt2100Luminance(RgbRed()), 0.2627f);
+  EXPECT_FLOAT_EQ(bt2100Luminance(RgbGreen()), 0.6780f);
+  EXPECT_FLOAT_EQ(bt2100Luminance(RgbBlue()), 0.0593f);
+}
+
+TEST_F(GainMapMathTest, Bt2100YuvToRgb) {
+  Color rgb_black = bt2100YuvToRgb(YuvBlack());
+  EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+  Color rgb_white = bt2100YuvToRgb(YuvWhite());
+  EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+  Color rgb_r = bt2100YuvToRgb(Bt2100YuvRed());
+  EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+  Color rgb_g = bt2100YuvToRgb(Bt2100YuvGreen());
+  EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+  Color rgb_b = bt2100YuvToRgb(Bt2100YuvBlue());
+  EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
+
+TEST_F(GainMapMathTest, Bt2100RgbToYuv) {
+  Color yuv_black = bt2100RgbToYuv(RgbBlack());
+  EXPECT_YUV_NEAR(yuv_black, YuvBlack());
+
+  Color yuv_white = bt2100RgbToYuv(RgbWhite());
+  EXPECT_YUV_NEAR(yuv_white, YuvWhite());
+
+  Color yuv_r = bt2100RgbToYuv(RgbRed());
+  EXPECT_YUV_NEAR(yuv_r, Bt2100YuvRed());
+
+  Color yuv_g = bt2100RgbToYuv(RgbGreen());
+  EXPECT_YUV_NEAR(yuv_g, Bt2100YuvGreen());
+
+  Color yuv_b = bt2100RgbToYuv(RgbBlue());
+  EXPECT_YUV_NEAR(yuv_b, Bt2100YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt2100RgbYuvRoundtrip) {
+  Color rgb_black = bt2100YuvToRgb(bt2100RgbToYuv(RgbBlack()));
+  EXPECT_RGB_NEAR(rgb_black, RgbBlack());
+
+  Color rgb_white = bt2100YuvToRgb(bt2100RgbToYuv(RgbWhite()));
+  EXPECT_RGB_NEAR(rgb_white, RgbWhite());
+
+  Color rgb_r = bt2100YuvToRgb(bt2100RgbToYuv(RgbRed()));
+  EXPECT_RGB_NEAR(rgb_r, RgbRed());
+
+  Color rgb_g = bt2100YuvToRgb(bt2100RgbToYuv(RgbGreen()));
+  EXPECT_RGB_NEAR(rgb_g, RgbGreen());
+
+  Color rgb_b = bt2100YuvToRgb(bt2100RgbToYuv(RgbBlue()));
+  EXPECT_RGB_NEAR(rgb_b, RgbBlue());
+}
+
+TEST_F(GainMapMathTest, Bt709ToBt601YuvConversion) {
+  Color yuv_black = srgbRgbToYuv(RgbBlack());
+  EXPECT_YUV_NEAR(yuv709To601(yuv_black), YuvBlack());
+
+  Color yuv_white = srgbRgbToYuv(RgbWhite());
+  EXPECT_YUV_NEAR(yuv709To601(yuv_white), YuvWhite());
+
+  Color yuv_r = srgbRgbToYuv(RgbRed());
+  EXPECT_YUV_NEAR(yuv709To601(yuv_r), P3YuvRed());
+
+  Color yuv_g = srgbRgbToYuv(RgbGreen());
+  EXPECT_YUV_NEAR(yuv709To601(yuv_g), P3YuvGreen());
+
+  Color yuv_b = srgbRgbToYuv(RgbBlue());
+  EXPECT_YUV_NEAR(yuv709To601(yuv_b), P3YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt709ToBt2100YuvConversion) {
+  Color yuv_black = srgbRgbToYuv(RgbBlack());
+  EXPECT_YUV_NEAR(yuv709To2100(yuv_black), YuvBlack());
+
+  Color yuv_white = srgbRgbToYuv(RgbWhite());
+  EXPECT_YUV_NEAR(yuv709To2100(yuv_white), YuvWhite());
+
+  Color yuv_r = srgbRgbToYuv(RgbRed());
+  EXPECT_YUV_NEAR(yuv709To2100(yuv_r), Bt2100YuvRed());
+
+  Color yuv_g = srgbRgbToYuv(RgbGreen());
+  EXPECT_YUV_NEAR(yuv709To2100(yuv_g), Bt2100YuvGreen());
+
+  Color yuv_b = srgbRgbToYuv(RgbBlue());
+  EXPECT_YUV_NEAR(yuv709To2100(yuv_b), Bt2100YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt601ToBt709YuvConversion) {
+  Color yuv_black = p3RgbToYuv(RgbBlack());
+  EXPECT_YUV_NEAR(yuv601To709(yuv_black), YuvBlack());
+
+  Color yuv_white = p3RgbToYuv(RgbWhite());
+  EXPECT_YUV_NEAR(yuv601To709(yuv_white), YuvWhite());
+
+  Color yuv_r = p3RgbToYuv(RgbRed());
+  EXPECT_YUV_NEAR(yuv601To709(yuv_r), SrgbYuvRed());
+
+  Color yuv_g = p3RgbToYuv(RgbGreen());
+  EXPECT_YUV_NEAR(yuv601To709(yuv_g), SrgbYuvGreen());
+
+  Color yuv_b = p3RgbToYuv(RgbBlue());
+  EXPECT_YUV_NEAR(yuv601To709(yuv_b), SrgbYuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt601ToBt2100YuvConversion) {
+  Color yuv_black = p3RgbToYuv(RgbBlack());
+  EXPECT_YUV_NEAR(yuv601To2100(yuv_black), YuvBlack());
+
+  Color yuv_white = p3RgbToYuv(RgbWhite());
+  EXPECT_YUV_NEAR(yuv601To2100(yuv_white), YuvWhite());
+
+  Color yuv_r = p3RgbToYuv(RgbRed());
+  EXPECT_YUV_NEAR(yuv601To2100(yuv_r), Bt2100YuvRed());
+
+  Color yuv_g = p3RgbToYuv(RgbGreen());
+  EXPECT_YUV_NEAR(yuv601To2100(yuv_g), Bt2100YuvGreen());
+
+  Color yuv_b = p3RgbToYuv(RgbBlue());
+  EXPECT_YUV_NEAR(yuv601To2100(yuv_b), Bt2100YuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt2100ToBt709YuvConversion) {
+  Color yuv_black = bt2100RgbToYuv(RgbBlack());
+  EXPECT_YUV_NEAR(yuv2100To709(yuv_black), YuvBlack());
+
+  Color yuv_white = bt2100RgbToYuv(RgbWhite());
+  EXPECT_YUV_NEAR(yuv2100To709(yuv_white), YuvWhite());
+
+  Color yuv_r = bt2100RgbToYuv(RgbRed());
+  EXPECT_YUV_NEAR(yuv2100To709(yuv_r), SrgbYuvRed());
+
+  Color yuv_g = bt2100RgbToYuv(RgbGreen());
+  EXPECT_YUV_NEAR(yuv2100To709(yuv_g), SrgbYuvGreen());
+
+  Color yuv_b = bt2100RgbToYuv(RgbBlue());
+  EXPECT_YUV_NEAR(yuv2100To709(yuv_b), SrgbYuvBlue());
+}
+
+TEST_F(GainMapMathTest, Bt2100ToBt601YuvConversion) {
+  Color yuv_black = bt2100RgbToYuv(RgbBlack());
+  EXPECT_YUV_NEAR(yuv2100To601(yuv_black), YuvBlack());
+
+  Color yuv_white = bt2100RgbToYuv(RgbWhite());
+  EXPECT_YUV_NEAR(yuv2100To601(yuv_white), YuvWhite());
+
+  Color yuv_r = bt2100RgbToYuv(RgbRed());
+  EXPECT_YUV_NEAR(yuv2100To601(yuv_r), P3YuvRed());
+
+  Color yuv_g = bt2100RgbToYuv(RgbGreen());
+  EXPECT_YUV_NEAR(yuv2100To601(yuv_g), P3YuvGreen());
+
+  Color yuv_b = bt2100RgbToYuv(RgbBlue());
+  EXPECT_YUV_NEAR(yuv2100To601(yuv_b), P3YuvBlue());
+}
+
+TEST_F(GainMapMathTest, TransformYuv420) {
+  ColorTransformFn transforms[] = { yuv709To601, yuv709To2100, yuv601To709, yuv601To2100,
+                                    yuv2100To709, yuv2100To601 };
+  for (const ColorTransformFn& transform : transforms) {
+    jpegr_uncompressed_struct input = Yuv420Image();
+
+    size_t out_buf_size = input.width * input.height * 3 / 2;
+    std::unique_ptr<uint8_t[]> out_buf = std::make_unique<uint8_t[]>(out_buf_size);
+    memcpy(out_buf.get(), input.data, out_buf_size);
+    jpegr_uncompressed_struct output = Yuv420Image();
+    output.data = out_buf.get();
+
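+    // Convert only the chroma sample at map coordinate (1, 1), which covers the
+    // bottom-right 2x2 block of luma samples.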
+    transformYuv420(&output, 1, 1, transform);
+
+    for (size_t y = 0; y < 4; ++y) {
+      for (size_t x = 0; x < 4; ++x) {
+        // Skip the bottom-right 2x2 block, whose samples transformYuv420 converted above
+        if (x >= 2 && y >= 2) {
+          continue;
+        }
+
+        // All other pixels should remain unchanged
+        EXPECT_YUV_EQ(getYuv420Pixel(&input, x, y), getYuv420Pixel(&output, x, y));
+      }
+    }
+
+    // The converted pixels should carry the transformed luma and the average of the four
+    // transformed chroma values
+    Color in1 = getYuv420Pixel(&input,   2, 2);
+    Color in2 = getYuv420Pixel(&input,   3, 2);
+    Color in3 = getYuv420Pixel(&input,   2, 3);
+    Color in4 = getYuv420Pixel(&input,   3, 3);
+    Color out1 = getYuv420Pixel(&output, 2, 2);
+    Color out2 = getYuv420Pixel(&output, 3, 2);
+    Color out3 = getYuv420Pixel(&output, 2, 3);
+    Color out4 = getYuv420Pixel(&output, 3, 3);
+
+    EXPECT_NEAR(transform(in1).y, out1.y, YuvConversionEpsilon());
+    EXPECT_NEAR(transform(in2).y, out2.y, YuvConversionEpsilon());
+    EXPECT_NEAR(transform(in3).y, out3.y, YuvConversionEpsilon());
+    EXPECT_NEAR(transform(in4).y, out4.y, YuvConversionEpsilon());
+
+    Color expect_uv = (transform(in1) + transform(in2) + transform(in3) + transform(in4)) / 4.0f;
+
+    EXPECT_NEAR(expect_uv.u, out1.u, YuvConversionEpsilon());
+    EXPECT_NEAR(expect_uv.u, out2.u, YuvConversionEpsilon());
+    EXPECT_NEAR(expect_uv.u, out3.u, YuvConversionEpsilon());
+    EXPECT_NEAR(expect_uv.u, out4.u, YuvConversionEpsilon());
+
+    EXPECT_NEAR(expect_uv.v, out1.v, YuvConversionEpsilon());
+    EXPECT_NEAR(expect_uv.v, out2.v, YuvConversionEpsilon());
+    EXPECT_NEAR(expect_uv.v, out3.v, YuvConversionEpsilon());
+    EXPECT_NEAR(expect_uv.v, out4.v, YuvConversionEpsilon());
+  }
+}
+
+TEST_F(GainMapMathTest, HlgOetf) {
+  EXPECT_FLOAT_EQ(hlgOetf(0.0f), 0.0f);
+  EXPECT_NEAR(hlgOetf(0.04167f), 0.35357f, ComparisonEpsilon());
+  EXPECT_NEAR(hlgOetf(0.08333f), 0.5f, ComparisonEpsilon());
+  EXPECT_NEAR(hlgOetf(0.5f), 0.87164f, ComparisonEpsilon());
+  EXPECT_FLOAT_EQ(hlgOetf(1.0f), 1.0f);
+
+  Color e = {{{ 0.04167f, 0.08333f, 0.5f }}};
+  Color e_gamma = {{{ 0.35357f, 0.5f, 0.87164f }}};
+  EXPECT_RGB_NEAR(hlgOetf(e), e_gamma);
+}
+
+TEST_F(GainMapMathTest, HlgInvOetf) {
+  EXPECT_FLOAT_EQ(hlgInvOetf(0.0f), 0.0f);
+  EXPECT_NEAR(hlgInvOetf(0.25f), 0.02083f, ComparisonEpsilon());
+  EXPECT_NEAR(hlgInvOetf(0.5f), 0.08333f, ComparisonEpsilon());
+  EXPECT_NEAR(hlgInvOetf(0.75f), 0.26496f, ComparisonEpsilon());
+  EXPECT_FLOAT_EQ(hlgInvOetf(1.0f), 1.0f);
+
+  Color e_gamma = {{{ 0.25f, 0.5f, 0.75f }}};
+  Color e = {{{ 0.02083f, 0.08333f, 0.26496f }}};
+  EXPECT_RGB_NEAR(hlgInvOetf(e_gamma), e);
+}
+
+TEST_F(GainMapMathTest, HlgTransferFunctionRoundtrip) {
+  EXPECT_FLOAT_EQ(hlgInvOetf(hlgOetf(0.0f)), 0.0f);
+  EXPECT_NEAR(hlgInvOetf(hlgOetf(0.04167f)), 0.04167f, ComparisonEpsilon());
+  EXPECT_NEAR(hlgInvOetf(hlgOetf(0.08333f)), 0.08333f, ComparisonEpsilon());
+  EXPECT_NEAR(hlgInvOetf(hlgOetf(0.5f)), 0.5f, ComparisonEpsilon());
+  EXPECT_FLOAT_EQ(hlgInvOetf(hlgOetf(1.0f)), 1.0f);
+}
+
+TEST_F(GainMapMathTest, PqOetf) {
+  EXPECT_FLOAT_EQ(pqOetf(0.0f), 0.0f);
+  EXPECT_NEAR(pqOetf(0.01f), 0.50808f, ComparisonEpsilon());
+  EXPECT_NEAR(pqOetf(0.5f), 0.92655f, ComparisonEpsilon());
+  EXPECT_NEAR(pqOetf(0.99f), 0.99895f, ComparisonEpsilon());
+  EXPECT_FLOAT_EQ(pqOetf(1.0f), 1.0f);
+
+  Color e = {{{ 0.01f, 0.5f, 0.99f }}};
+  Color e_gamma = {{{ 0.50808f, 0.92655f, 0.99895f }}};
+  EXPECT_RGB_NEAR(pqOetf(e), e_gamma);
+}
+
+TEST_F(GainMapMathTest, PqInvOetf) {
+  EXPECT_FLOAT_EQ(pqInvOetf(0.0f), 0.0f);
+  EXPECT_NEAR(pqInvOetf(0.01f), 2.31017e-7f, ComparisonEpsilon());
+  EXPECT_NEAR(pqInvOetf(0.5f), 0.00922f, ComparisonEpsilon());
+  EXPECT_NEAR(pqInvOetf(0.99f), 0.90903f, ComparisonEpsilon());
+  EXPECT_FLOAT_EQ(pqInvOetf(1.0f), 1.0f);
+
+  Color e_gamma = {{{ 0.01f, 0.5f, 0.99f }}};
+  Color e = {{{ 2.31017e-7f, 0.00922f, 0.90903f }}};
+  EXPECT_RGB_NEAR(pqInvOetf(e_gamma), e);
+}
+
+TEST_F(GainMapMathTest, PqInvOetfLUT) {
+  for (int idx = 0; idx < kPqInvOETFNumEntries; idx++) {
+    float value = static_cast<float>(idx) / static_cast<float>(kPqInvOETFNumEntries - 1);
+    EXPECT_FLOAT_EQ(pqInvOetf(value), pqInvOetfLUT(value));
+  }
+}
+
+TEST_F(GainMapMathTest, HlgInvOetfLUT) {
+  for (int idx = 0; idx < kHlgInvOETFNumEntries; idx++) {
+    float value = static_cast<float>(idx) / static_cast<float>(kHlgInvOETFNumEntries - 1);
+    EXPECT_FLOAT_EQ(hlgInvOetf(value), hlgInvOetfLUT(value));
+  }
+}
+
+TEST_F(GainMapMathTest, PqOetfLUT) {
+  for (int idx = 0; idx < kPqOETFNumEntries; idx++) {
+    float value = static_cast<float>(idx) / static_cast<float>(kPqOETFNumEntries - 1);
+    EXPECT_FLOAT_EQ(pqOetf(value), pqOetfLUT(value));
+  }
+}
+
+TEST_F(GainMapMathTest, HlgOetfLUT) {
+  for (int idx = 0; idx < kHlgOETFNumEntries; idx++) {
+    float value = static_cast<float>(idx) / static_cast<float>(kHlgOETFNumEntries - 1);
+    EXPECT_FLOAT_EQ(hlgOetf(value), hlgOetfLUT(value));
+  }
+}
+
+TEST_F(GainMapMathTest, SrgbInvOetfLUT) {
+  for (int idx = 0; idx < kSrgbInvOETFNumEntries; idx++) {
+    float value = static_cast<float>(idx) / static_cast<float>(kSrgbInvOETFNumEntries - 1);
+    EXPECT_FLOAT_EQ(srgbInvOetf(value), srgbInvOetfLUT(value));
+  }
+}
+
+TEST_F(GainMapMathTest, applyGainLUT) {
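+  // Symmetric range: minContentBoost is the reciprocal of maxContentBoost.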
+  for (int boost = 1; boost <= 10; boost++) {
+    ultrahdr_metadata_struct metadata = { .maxContentBoost = static_cast<float>(boost),
+                                          .minContentBoost = 1.0f / static_cast<float>(boost) };
+    GainLUT gainLUT(&metadata);
+    GainLUT gainLUTWithBoost(&metadata, metadata.maxContentBoost);
+    for (int idx = 0; idx < kGainFactorNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kGainFactorNumEntries - 1);
+      EXPECT_RGB_NEAR(applyGain(RgbBlack(), value, &metadata),
+                      applyGainLUT(RgbBlack(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbWhite(), value, &metadata),
+                      applyGainLUT(RgbWhite(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbRed(), value, &metadata),
+                      applyGainLUT(RgbRed(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbGreen(), value, &metadata),
+                      applyGainLUT(RgbGreen(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbBlue(), value, &metadata),
+                      applyGainLUT(RgbBlue(), value, gainLUT));
+      EXPECT_RGB_EQ(applyGainLUT(RgbBlack(), value, gainLUT),
+                    applyGainLUT(RgbBlack(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbWhite(), value, gainLUT),
+                    applyGainLUT(RgbWhite(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbRed(), value, gainLUT),
+                    applyGainLUT(RgbRed(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbGreen(), value, gainLUT),
+                    applyGainLUT(RgbGreen(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbBlue(), value, gainLUT),
+                    applyGainLUT(RgbBlue(), value, gainLUTWithBoost));
+    }
+  }
+
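+  // Repeat with minContentBoost pinned to 1.0f.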
+  for (int boost = 1; boost <= 10; boost++) {
+    ultrahdr_metadata_struct metadata = { .maxContentBoost = static_cast<float>(boost),
+                                          .minContentBoost = 1.0f };
+    GainLUT gainLUT(&metadata);
+    GainLUT gainLUTWithBoost(&metadata, metadata.maxContentBoost);
+    for (int idx = 0; idx < kGainFactorNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kGainFactorNumEntries - 1);
+      EXPECT_RGB_NEAR(applyGain(RgbBlack(), value, &metadata),
+                      applyGainLUT(RgbBlack(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbWhite(), value, &metadata),
+                      applyGainLUT(RgbWhite(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbRed(), value, &metadata),
+                      applyGainLUT(RgbRed(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbGreen(), value, &metadata),
+                      applyGainLUT(RgbGreen(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbBlue(), value, &metadata),
+                      applyGainLUT(RgbBlue(), value, gainLUT));
+      EXPECT_RGB_EQ(applyGainLUT(RgbBlack(), value, gainLUT),
+                    applyGainLUT(RgbBlack(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbWhite(), value, gainLUT),
+                    applyGainLUT(RgbWhite(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbRed(), value, gainLUT),
+                    applyGainLUT(RgbRed(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbGreen(), value, gainLUT),
+                    applyGainLUT(RgbGreen(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbBlue(), value, gainLUT),
+                    applyGainLUT(RgbBlue(), value, gainLUTWithBoost));
+    }
+  }
+
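+  // Repeat with an asymmetric range: minContentBoost = maxContentBoost^(-1/3).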
+  for (int boost = 1; boost <= 10; boost++) {
+    ultrahdr_metadata_struct metadata = { .maxContentBoost = static_cast<float>(boost),
+                                          .minContentBoost = 1.0f / pow(static_cast<float>(boost),
+                                                                        1.0f / 3.0f) };
+    GainLUT gainLUT(&metadata);
+    GainLUT gainLUTWithBoost(&metadata, metadata.maxContentBoost);
+    for (int idx = 0; idx < kGainFactorNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kGainFactorNumEntries - 1);
+      EXPECT_RGB_NEAR(applyGain(RgbBlack(), value, &metadata),
+                      applyGainLUT(RgbBlack(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbWhite(), value, &metadata),
+                      applyGainLUT(RgbWhite(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbRed(), value, &metadata),
+                      applyGainLUT(RgbRed(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbGreen(), value, &metadata),
+                      applyGainLUT(RgbGreen(), value, gainLUT));
+      EXPECT_RGB_NEAR(applyGain(RgbBlue(), value, &metadata),
+                      applyGainLUT(RgbBlue(), value, gainLUT));
+      EXPECT_RGB_EQ(applyGainLUT(RgbBlack(), value, gainLUT),
+                    applyGainLUT(RgbBlack(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbWhite(), value, gainLUT),
+                    applyGainLUT(RgbWhite(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbRed(), value, gainLUT),
+                    applyGainLUT(RgbRed(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbGreen(), value, gainLUT),
+                    applyGainLUT(RgbGreen(), value, gainLUTWithBoost));
+      EXPECT_RGB_EQ(applyGainLUT(RgbBlue(), value, gainLUT),
+                    applyGainLUT(RgbBlue(), value, gainLUTWithBoost));
+    }
+  }
+}
+
+TEST_F(GainMapMathTest, PqTransferFunctionRoundtrip) {
+  EXPECT_FLOAT_EQ(pqInvOetf(pqOetf(0.0f)), 0.0f);
+  EXPECT_NEAR(pqInvOetf(pqOetf(0.01f)), 0.01f, ComparisonEpsilon());
+  EXPECT_NEAR(pqInvOetf(pqOetf(0.5f)), 0.5f, ComparisonEpsilon());
+  EXPECT_NEAR(pqInvOetf(pqOetf(0.99f)), 0.99f, ComparisonEpsilon());
+  EXPECT_FLOAT_EQ(pqInvOetf(pqOetf(1.0f)), 1.0f);
+}
+
+TEST_F(GainMapMathTest, ColorConversionLookup) {
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_BT709, ULTRAHDR_COLORGAMUT_UNSPECIFIED),
+            nullptr);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_BT709, ULTRAHDR_COLORGAMUT_BT709),
+            identityConversion);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_BT709, ULTRAHDR_COLORGAMUT_P3),
+            p3ToBt709);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_BT709, ULTRAHDR_COLORGAMUT_BT2100),
+            bt2100ToBt709);
+
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_P3, ULTRAHDR_COLORGAMUT_UNSPECIFIED),
+            nullptr);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_P3, ULTRAHDR_COLORGAMUT_BT709),
+            bt709ToP3);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_P3, ULTRAHDR_COLORGAMUT_P3),
+            identityConversion);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_P3, ULTRAHDR_COLORGAMUT_BT2100),
+            bt2100ToP3);
+
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_BT2100, ULTRAHDR_COLORGAMUT_UNSPECIFIED),
+            nullptr);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_BT2100, ULTRAHDR_COLORGAMUT_BT709),
+            bt709ToBt2100);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_BT2100, ULTRAHDR_COLORGAMUT_P3),
+            p3ToBt2100);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_BT2100, ULTRAHDR_COLORGAMUT_BT2100),
+            identityConversion);
+
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_UNSPECIFIED, ULTRAHDR_COLORGAMUT_UNSPECIFIED),
+            nullptr);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_UNSPECIFIED, ULTRAHDR_COLORGAMUT_BT709),
+            nullptr);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_UNSPECIFIED, ULTRAHDR_COLORGAMUT_P3),
+            nullptr);
+  EXPECT_EQ(getHdrConversionFn(ULTRAHDR_COLORGAMUT_UNSPECIFIED, ULTRAHDR_COLORGAMUT_BT2100),
+            nullptr);
+}
+
+TEST_F(GainMapMathTest, EncodeGain) {
+  ultrahdr_metadata_struct metadata = { .maxContentBoost = 4.0f,
+                                        .minContentBoost = 1.0f / 4.0f };
+
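+  // The expectations below assume encodeGain maps log2(hdr / sdr) linearly from
+  // [log2(minContentBoost), log2(maxContentBoost)] onto [0, 255], clamping out-of-range gains;
+  // with min = 1/max, a unity gain therefore encodes to the midpoint, 127.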
+  EXPECT_EQ(encodeGain(0.0f, 0.0f, &metadata), 127);
+  EXPECT_EQ(encodeGain(0.0f, 1.0f, &metadata), 127);
+  EXPECT_EQ(encodeGain(1.0f, 0.0f, &metadata), 0);
+  EXPECT_EQ(encodeGain(0.5f, 0.0f, &metadata), 0);
+
+  EXPECT_EQ(encodeGain(1.0f, 1.0f, &metadata), 127);
+  EXPECT_EQ(encodeGain(1.0f, 4.0f, &metadata), 255);
+  EXPECT_EQ(encodeGain(1.0f, 5.0f, &metadata), 255);
+  EXPECT_EQ(encodeGain(4.0f, 1.0f, &metadata), 0);
+  EXPECT_EQ(encodeGain(4.0f, 0.5f, &metadata), 0);
+  EXPECT_EQ(encodeGain(1.0f, 2.0f, &metadata), 191);
+  EXPECT_EQ(encodeGain(2.0f, 1.0f, &metadata), 63);
+
+  metadata.maxContentBoost = 2.0f;
+  metadata.minContentBoost = 1.0f / 2.0f;
+
+  EXPECT_EQ(encodeGain(1.0f, 2.0f, &metadata), 255);
+  EXPECT_EQ(encodeGain(2.0f, 1.0f, &metadata), 0);
+  EXPECT_EQ(encodeGain(1.0f, 1.41421f, &metadata), 191);
+  EXPECT_EQ(encodeGain(1.41421f, 1.0f, &metadata), 63);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f / 8.0f;
+
+  EXPECT_EQ(encodeGain(1.0f, 8.0f, &metadata), 255);
+  EXPECT_EQ(encodeGain(8.0f, 1.0f, &metadata), 0);
+  EXPECT_EQ(encodeGain(1.0f, 2.82843f, &metadata), 191);
+  EXPECT_EQ(encodeGain(2.82843f, 1.0f, &metadata), 63);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f;
+
+  EXPECT_EQ(encodeGain(0.0f, 0.0f, &metadata), 0);
+  EXPECT_EQ(encodeGain(1.0f, 0.0f, &metadata), 0);
+
+  EXPECT_EQ(encodeGain(1.0f, 1.0f, &metadata), 0);
+  EXPECT_EQ(encodeGain(1.0f, 8.0f, &metadata), 255);
+  EXPECT_EQ(encodeGain(1.0f, 4.0f, &metadata), 170);
+  EXPECT_EQ(encodeGain(1.0f, 2.0f, &metadata), 85);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 0.5f;
+
+  EXPECT_EQ(encodeGain(0.0f, 0.0f, &metadata), 63);
+  EXPECT_EQ(encodeGain(1.0f, 0.0f, &metadata), 0);
+
+  EXPECT_EQ(encodeGain(1.0f, 1.0f, &metadata), 63);
+  EXPECT_EQ(encodeGain(1.0f, 8.0f, &metadata), 255);
+  EXPECT_EQ(encodeGain(1.0f, 4.0f, &metadata), 191);
+  EXPECT_EQ(encodeGain(1.0f, 2.0f, &metadata), 127);
+  EXPECT_EQ(encodeGain(1.0f, 0.7071f, &metadata), 31);
+  EXPECT_EQ(encodeGain(1.0f, 0.5f, &metadata), 0);
+}
+
+TEST_F(GainMapMathTest, ApplyGain) {
+  ultrahdr_metadata_struct metadata = { .maxContentBoost = 4.0f,
+                                        .minContentBoost = 1.0f / 4.0f };
+  float displayBoost = metadata.maxContentBoost;
+
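+  // A gain-map value of 0.5 should leave the pixel unchanged when min = 1/max, while 0.0 applies
+  // the full attenuation (1/maxContentBoost) and 1.0 the full boost (maxContentBoost).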
+  EXPECT_RGB_NEAR(applyGain(RgbBlack(), 0.0f, &metadata), RgbBlack());
+  EXPECT_RGB_NEAR(applyGain(RgbBlack(), 0.5f, &metadata), RgbBlack());
+  EXPECT_RGB_NEAR(applyGain(RgbBlack(), 1.0f, &metadata), RgbBlack());
+
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.0f, &metadata), RgbWhite() / 4.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.25f, &metadata), RgbWhite() / 2.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.5f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.75f, &metadata), RgbWhite() * 2.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 1.0f, &metadata), RgbWhite() * 4.0f);
+
+  metadata.maxContentBoost = 2.0f;
+  metadata.minContentBoost = 1.0f / 2.0f;
+
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.0f, &metadata), RgbWhite() / 2.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.25f, &metadata), RgbWhite() / 1.41421f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.5f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.75f, &metadata), RgbWhite() * 1.41421f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 1.0f, &metadata), RgbWhite() * 2.0f);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f / 8.0f;
+
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.0f, &metadata), RgbWhite() / 8.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.25f, &metadata), RgbWhite() / 2.82843f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.5f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.75f, &metadata), RgbWhite() * 2.82843f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 1.0f, &metadata), RgbWhite() * 8.0f);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f;
+
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.0f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 1.0f / 3.0f, &metadata), RgbWhite() * 2.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 2.0f / 3.0f, &metadata), RgbWhite() * 4.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 1.0f, &metadata), RgbWhite() * 8.0f);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 0.5f;
+
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.0f, &metadata), RgbWhite() / 2.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.25f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.5f, &metadata), RgbWhite() * 2.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 0.75f, &metadata), RgbWhite() * 4.0f);
+  EXPECT_RGB_NEAR(applyGain(RgbWhite(), 1.0f, &metadata), RgbWhite() * 8.0f);
+
+  Color e = {{{ 0.0f, 0.5f, 1.0f }}};
+  metadata.maxContentBoost = 4.0f;
+  metadata.minContentBoost = 1.0f / 4.0f;
+
+  EXPECT_RGB_NEAR(applyGain(e, 0.0f, &metadata), e / 4.0f);
+  EXPECT_RGB_NEAR(applyGain(e, 0.25f, &metadata), e / 2.0f);
+  EXPECT_RGB_NEAR(applyGain(e, 0.5f, &metadata), e);
+  EXPECT_RGB_NEAR(applyGain(e, 0.75f, &metadata), e * 2.0f);
+  EXPECT_RGB_NEAR(applyGain(e, 1.0f, &metadata), e * 4.0f);
+
+  EXPECT_RGB_EQ(applyGain(RgbBlack(), 1.0f, &metadata),
+                applyGain(RgbBlack(), 1.0f, &metadata, displayBoost));
+  EXPECT_RGB_EQ(applyGain(RgbWhite(), 1.0f, &metadata),
+                applyGain(RgbWhite(), 1.0f, &metadata, displayBoost));
+  EXPECT_RGB_EQ(applyGain(RgbRed(), 1.0f, &metadata),
+                applyGain(RgbRed(), 1.0f, &metadata, displayBoost));
+  EXPECT_RGB_EQ(applyGain(RgbGreen(), 1.0f, &metadata),
+                applyGain(RgbGreen(), 1.0f, &metadata, displayBoost));
+  EXPECT_RGB_EQ(applyGain(RgbBlue(), 1.0f, &metadata),
+                applyGain(RgbBlue(), 1.0f, &metadata, displayBoost));
+  EXPECT_RGB_EQ(applyGain(e, 1.0f, &metadata),
+                applyGain(e, 1.0f, &metadata, displayBoost));
+}
+
+TEST_F(GainMapMathTest, GetYuv420Pixel) {
+  jpegr_uncompressed_struct image = Yuv420Image();
+  Color (*colors)[4] = Yuv420Colors();
+
+  for (size_t y = 0; y < 4; ++y) {
+    for (size_t x = 0; x < 4; ++x) {
+      EXPECT_YUV_NEAR(getYuv420Pixel(&image, x, y), colors[y][x]);
+    }
+  }
+}
+
+TEST_F(GainMapMathTest, GetP010Pixel) {
+  jpegr_uncompressed_struct image = P010Image();
+  Color (*colors)[4] = P010Colors();
+
+  for (size_t y = 0; y < 4; ++y) {
+    for (size_t x = 0; x < 4; ++x) {
+      EXPECT_YUV_NEAR(getP010Pixel(&image, x, y), colors[y][x]);
+    }
+  }
+}
+
+TEST_F(GainMapMathTest, SampleYuv420) {
+  jpegr_uncompressed_struct image = Yuv420Image();
+  Color (*colors)[4] = Yuv420Colors();
+
+  static const size_t kMapScaleFactor = 2;
+  for (size_t y = 0; y < 4 / kMapScaleFactor; ++y) {
+    for (size_t x = 0; x < 4 / kMapScaleFactor; ++x) {
+      Color min = {{{ 1.0f, 1.0f, 1.0f }}};
+      Color max = {{{ -1.0f, -1.0f, -1.0f }}};
+
+      for (size_t dy = 0; dy < kMapScaleFactor; ++dy) {
+        for (size_t dx = 0; dx < kMapScaleFactor; ++dx) {
+          Color e = colors[y * kMapScaleFactor + dy][x * kMapScaleFactor + dx];
+          min = ColorMin(min, e);
+          max = ColorMax(max, e);
+        }
+      }
+
+      // Instead of reimplementing the sampling algorithm, confirm that the
+      // sample output is within the range of the min and max of the nearest
+      // points.
+      EXPECT_YUV_BETWEEN(sampleYuv420(&image, kMapScaleFactor, x, y), min, max);
+    }
+  }
+}
+
+TEST_F(GainMapMathTest, SampleP010) {
+  jpegr_uncompressed_struct image = P010Image();
+  Color (*colors)[4] = P010Colors();
+
+  static const size_t kMapScaleFactor = 2;
+  for (size_t y = 0; y < 4 / kMapScaleFactor; ++y) {
+    for (size_t x = 0; x < 4 / kMapScaleFactor; ++x) {
+      Color min = {{{ 1.0f, 1.0f, 1.0f }}};
+      Color max = {{{ -1.0f, -1.0f, -1.0f }}};
+
+      for (size_t dy = 0; dy < kMapScaleFactor; ++dy) {
+        for (size_t dx = 0; dx < kMapScaleFactor; ++dx) {
+          Color e = colors[y * kMapScaleFactor + dy][x * kMapScaleFactor + dx];
+          min = ColorMin(min, e);
+          max = ColorMax(max, e);
+        }
+      }
+
+      // Instead of reimplementing the sampling algorithm, confirm that the
+      // sample output is within the range of the min and max of the nearest
+      // points.
+      EXPECT_YUV_BETWEEN(sampleP010(&image, kMapScaleFactor, x, y), min, max);
+    }
+  }
+}
+
+TEST_F(GainMapMathTest, SampleMap) {
+  jpegr_uncompressed_struct image = MapImage();
+  float (*values)[4] = MapValues();
+
+  static const size_t kMapScaleFactor = 2;
+  ShepardsIDW idwTable(kMapScaleFactor);
+  for (size_t y = 0; y < 4 * kMapScaleFactor; ++y) {
+    for (size_t x = 0; x < 4 * kMapScaleFactor; ++x) {
+      size_t x_base = x / kMapScaleFactor;
+      size_t y_base = y / kMapScaleFactor;
+
+      float min = 1.0f;
+      float max = -1.0f;
+
+      min = fmin(min, values[y_base][x_base]);
+      max = fmax(max, values[y_base][x_base]);
+      if (y_base + 1 < 4) {
+        min = fmin(min, values[y_base + 1][x_base]);
+        max = fmax(max, values[y_base + 1][x_base]);
+      }
+      if (x_base + 1 < 4) {
+        min = fmin(min, values[y_base][x_base + 1]);
+        max = fmax(max, values[y_base][x_base + 1]);
+      }
+      if (y_base + 1 < 4 && x_base + 1 < 4) {
+        min = fmin(min, values[y_base + 1][x_base + 1]);
+        max = fmax(max, values[y_base + 1][x_base + 1]);
+      }
+
+      // Instead of reimplementing the sampling algorithm, confirm that the
+      // sample output is within the range of the min and max of the nearest
+      // points.
+      EXPECT_THAT(sampleMap(&image, kMapScaleFactor, x, y),
+                  testing::AllOf(testing::Ge(min), testing::Le(max)));
+      EXPECT_EQ(sampleMap(&image, kMapScaleFactor, x, y, idwTable),
+                sampleMap(&image, kMapScaleFactor, x, y));
+    }
+  }
+}
+
+TEST_F(GainMapMathTest, ColorToRgba1010102) {
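+  // Expected RGBA1010102 packing: R in bits 0-9, G in 10-19, B in 20-29, alpha (0x3) in 30-31.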
+  EXPECT_EQ(colorToRgba1010102(RgbBlack()), 0x3 << 30);
+  EXPECT_EQ(colorToRgba1010102(RgbWhite()), 0xFFFFFFFF);
+  EXPECT_EQ(colorToRgba1010102(RgbRed()), 0x3 << 30 | 0x3ff);
+  EXPECT_EQ(colorToRgba1010102(RgbGreen()), 0x3 << 30 | 0x3ff << 10);
+  EXPECT_EQ(colorToRgba1010102(RgbBlue()), 0x3 << 30 | 0x3ff << 20);
+
+  Color e_gamma = {{{ 0.1f, 0.2f, 0.3f }}};
+  EXPECT_EQ(colorToRgba1010102(e_gamma),
+            0x3 << 30
+          | static_cast<uint32_t>(0.1f * static_cast<float>(0x3ff))
+          | static_cast<uint32_t>(0.2f * static_cast<float>(0x3ff)) << 10
+          | static_cast<uint32_t>(0.3f * static_cast<float>(0x3ff)) << 20);
+}
+
+TEST_F(GainMapMathTest, ColorToRgbaF16) {
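+  // 0x3C00 is 1.0 in IEEE half precision; the packing places R in the low 16 bits and alpha in
+  // the high 16 bits.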
+  EXPECT_EQ(colorToRgbaF16(RgbBlack()), ((uint64_t) 0x3C00) << 48);
+  EXPECT_EQ(colorToRgbaF16(RgbWhite()), 0x3C003C003C003C00);
+  EXPECT_EQ(colorToRgbaF16(RgbRed()),   (((uint64_t) 0x3C00) << 48) | ((uint64_t) 0x3C00));
+  EXPECT_EQ(colorToRgbaF16(RgbGreen()), (((uint64_t) 0x3C00) << 48) | (((uint64_t) 0x3C00) << 16));
+  EXPECT_EQ(colorToRgbaF16(RgbBlue()),  (((uint64_t) 0x3C00) << 48) | (((uint64_t) 0x3C00) << 32));
+
+  Color e_gamma = {{{ 0.1f, 0.2f, 0.3f }}};
+  EXPECT_EQ(colorToRgbaF16(e_gamma), 0x3C0034CD32662E66);
+}
+
+TEST_F(GainMapMathTest, Float32ToFloat16) {
+  EXPECT_EQ(floatToHalf(0.1f), 0x2E66);
+  EXPECT_EQ(floatToHalf(0.0f), 0x0);
+  EXPECT_EQ(floatToHalf(1.0f), 0x3C00);
+  EXPECT_EQ(floatToHalf(-1.0f), 0xBC00);
+  EXPECT_EQ(floatToHalf(0x1.fffffep127f), 0x7FFF);   // FLT_MAX
+  EXPECT_EQ(floatToHalf(-0x1.fffffep127f), 0xFFFF);  // -FLT_MAX (lowest finite float)
+  EXPECT_EQ(floatToHalf(0x1.0p-126f), 0x0);          // FLT_MIN flushes to zero in half precision
+}
+
+TEST_F(GainMapMathTest, GenerateMapLuminanceSrgb) {
+  EXPECT_FLOAT_EQ(SrgbYuvToLuminance(YuvBlack(), srgbLuminance),
+                  0.0f);
+  EXPECT_FLOAT_EQ(SrgbYuvToLuminance(YuvWhite(), srgbLuminance),
+                  kSdrWhiteNits);
+  EXPECT_NEAR(SrgbYuvToLuminance(SrgbYuvRed(), srgbLuminance),
+              srgbLuminance(RgbRed()) * kSdrWhiteNits, LuminanceEpsilon());
+  EXPECT_NEAR(SrgbYuvToLuminance(SrgbYuvGreen(), srgbLuminance),
+              srgbLuminance(RgbGreen()) * kSdrWhiteNits, LuminanceEpsilon());
+  EXPECT_NEAR(SrgbYuvToLuminance(SrgbYuvBlue(), srgbLuminance),
+              srgbLuminance(RgbBlue()) * kSdrWhiteNits, LuminanceEpsilon());
+}
+
+TEST_F(GainMapMathTest, GenerateMapLuminanceSrgbP3) {
+  EXPECT_FLOAT_EQ(SrgbYuvToLuminance(YuvBlack(), p3Luminance),
+                  0.0f);
+  EXPECT_FLOAT_EQ(SrgbYuvToLuminance(YuvWhite(), p3Luminance),
+                  kSdrWhiteNits);
+  EXPECT_NEAR(SrgbYuvToLuminance(SrgbYuvRed(), p3Luminance),
+              p3Luminance(RgbRed()) * kSdrWhiteNits, LuminanceEpsilon());
+  EXPECT_NEAR(SrgbYuvToLuminance(SrgbYuvGreen(), p3Luminance),
+              p3Luminance(RgbGreen()) * kSdrWhiteNits, LuminanceEpsilon());
+  EXPECT_NEAR(SrgbYuvToLuminance(SrgbYuvBlue(), p3Luminance),
+              p3Luminance(RgbBlue()) * kSdrWhiteNits, LuminanceEpsilon());
+}
+
+TEST_F(GainMapMathTest, GenerateMapLuminanceSrgbBt2100) {
+  EXPECT_FLOAT_EQ(SrgbYuvToLuminance(YuvBlack(), bt2100Luminance),
+                  0.0f);
+  EXPECT_FLOAT_EQ(SrgbYuvToLuminance(YuvWhite(), bt2100Luminance),
+                  kSdrWhiteNits);
+  EXPECT_NEAR(SrgbYuvToLuminance(SrgbYuvRed(), bt2100Luminance),
+              bt2100Luminance(RgbRed()) * kSdrWhiteNits, LuminanceEpsilon());
+  EXPECT_NEAR(SrgbYuvToLuminance(SrgbYuvGreen(), bt2100Luminance),
+              bt2100Luminance(RgbGreen()) * kSdrWhiteNits, LuminanceEpsilon());
+  EXPECT_NEAR(SrgbYuvToLuminance(SrgbYuvBlue(), bt2100Luminance),
+              bt2100Luminance(RgbBlue()) * kSdrWhiteNits, LuminanceEpsilon());
+}
+
+TEST_F(GainMapMathTest, GenerateMapLuminanceHlg) {
+  EXPECT_FLOAT_EQ(Bt2100YuvToLuminance(YuvBlack(), hlgInvOetf, identityConversion,
+                                       bt2100Luminance, kHlgMaxNits),
+                  0.0f);
+  EXPECT_FLOAT_EQ(Bt2100YuvToLuminance(YuvWhite(), hlgInvOetf, identityConversion,
+                                       bt2100Luminance, kHlgMaxNits),
+                  kHlgMaxNits);
+  EXPECT_NEAR(Bt2100YuvToLuminance(Bt2100YuvRed(), hlgInvOetf, identityConversion,
+                                   bt2100Luminance, kHlgMaxNits),
+              bt2100Luminance(RgbRed()) * kHlgMaxNits, LuminanceEpsilon());
+  EXPECT_NEAR(Bt2100YuvToLuminance(Bt2100YuvGreen(), hlgInvOetf, identityConversion,
+                                   bt2100Luminance, kHlgMaxNits),
+              bt2100Luminance(RgbGreen()) * kHlgMaxNits, LuminanceEpsilon());
+  EXPECT_NEAR(Bt2100YuvToLuminance(Bt2100YuvBlue(), hlgInvOetf, identityConversion,
+                                   bt2100Luminance, kHlgMaxNits),
+              bt2100Luminance(RgbBlue()) * kHlgMaxNits, LuminanceEpsilon());
+}
+
+TEST_F(GainMapMathTest, GenerateMapLuminancePq) {
+  EXPECT_FLOAT_EQ(Bt2100YuvToLuminance(YuvBlack(), pqInvOetf, identityConversion,
+                                       bt2100Luminance, kPqMaxNits),
+                  0.0f);
+  EXPECT_FLOAT_EQ(Bt2100YuvToLuminance(YuvWhite(), pqInvOetf, identityConversion,
+                                       bt2100Luminance, kPqMaxNits),
+                  kPqMaxNits);
+  EXPECT_NEAR(Bt2100YuvToLuminance(Bt2100YuvRed(), pqInvOetf, identityConversion,
+                                   bt2100Luminance, kPqMaxNits),
+              bt2100Luminance(RgbRed()) * kPqMaxNits, LuminanceEpsilon());
+  EXPECT_NEAR(Bt2100YuvToLuminance(Bt2100YuvGreen(), pqInvOetf, identityConversion,
+                                   bt2100Luminance, kPqMaxNits),
+              bt2100Luminance(RgbGreen()) * kPqMaxNits, LuminanceEpsilon());
+  EXPECT_NEAR(Bt2100YuvToLuminance(Bt2100YuvBlue(), pqInvOetf, identityConversion,
+                                   bt2100Luminance, kPqMaxNits),
+              bt2100Luminance(RgbBlue()) * kPqMaxNits, LuminanceEpsilon());
+}
+
+TEST_F(GainMapMathTest, ApplyMap) {
+  ultrahdr_metadata_struct metadata = { .maxContentBoost = 8.0f,
+                                        .minContentBoost = 1.0f / 8.0f };
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 1.0f, &metadata),
+                RgbWhite() * 8.0f);
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 1.0f, &metadata),
+                RgbBlack());
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 1.0f, &metadata),
+                  RgbRed() * 8.0f);
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 1.0f, &metadata),
+                  RgbGreen() * 8.0f);
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 1.0f, &metadata),
+                  RgbBlue() * 8.0f);
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.75f, &metadata),
+                RgbWhite() * sqrt(8.0f));
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.75f, &metadata),
+                RgbBlack());
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.75f, &metadata),
+                  RgbRed() * sqrt(8.0f));
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.75f, &metadata),
+                  RgbGreen() * sqrt(8.0f));
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.75f, &metadata),
+                  RgbBlue() * sqrt(8.0f));
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.5f, &metadata),
+                RgbWhite());
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.5f, &metadata),
+                RgbBlack());
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.5f, &metadata),
+                  RgbRed());
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.5f, &metadata),
+                  RgbGreen());
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.5f, &metadata),
+                  RgbBlue());
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.25f, &metadata),
+                RgbWhite() / sqrt(8.0f));
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.25f, &metadata),
+                RgbBlack());
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.25f, &metadata),
+                  RgbRed() / sqrt(8.0f));
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.25f, &metadata),
+                  RgbGreen() / sqrt(8.0f));
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.25f, &metadata),
+                  RgbBlue() / sqrt(8.0f));
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.0f, &metadata),
+                RgbWhite() / 8.0f);
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.0f, &metadata),
+                RgbBlack());
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.0f, &metadata),
+                  RgbRed() / 8.0f);
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.0f, &metadata),
+                  RgbGreen() / 8.0f);
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.0f, &metadata),
+                  RgbBlue() / 8.0f);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f;
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 1.0f, &metadata),
+                RgbWhite() * 8.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 2.0f / 3.0f, &metadata),
+                RgbWhite() * 4.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 1.0f / 3.0f, &metadata),
+                RgbWhite() * 2.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.0f, &metadata),
+                RgbWhite());
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 0.5f;
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 1.0f, &metadata),
+                RgbWhite() * 8.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.75f, &metadata),
+                RgbWhite() * 4.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.5f, &metadata),
+                RgbWhite() * 2.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.25f, &metadata),
+                RgbWhite());
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.0f, &metadata),
+                RgbWhite() / 2.0f);
+}
+
+} // namespace android::ultrahdr
diff --git a/libs/ultrahdr/tests/icchelper_test.cpp b/libs/ultrahdr/tests/icchelper_test.cpp
new file mode 100644
index 0000000..ff61c08
--- /dev/null
+++ b/libs/ultrahdr/tests/icchelper_test.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <ultrahdr/icc.h>
+#include <ultrahdr/ultrahdr.h>
+#include <utils/Log.h>
+
+namespace android::ultrahdr {
+
+class IccHelperTest : public testing::Test {
+public:
+    IccHelperTest();
+    ~IccHelperTest();
+protected:
+    virtual void SetUp();
+    virtual void TearDown();
+};
+
+IccHelperTest::IccHelperTest() {}
+
+IccHelperTest::~IccHelperTest() {}
+
+void IccHelperTest::SetUp() {}
+
+void IccHelperTest::TearDown() {}
+
+TEST_F(IccHelperTest, iccWriteThenRead) {
+    sp<DataStruct> iccBt709 = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+                                                         ULTRAHDR_COLORGAMUT_BT709);
+    ASSERT_NE(iccBt709->getLength(), 0);
+    ASSERT_NE(iccBt709->getData(), nullptr);
+    EXPECT_EQ(IccHelper::readIccColorGamut(iccBt709->getData(), iccBt709->getLength()),
+              ULTRAHDR_COLORGAMUT_BT709);
+
+    sp<DataStruct> iccP3 = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, ULTRAHDR_COLORGAMUT_P3);
+    ASSERT_NE(iccP3->getLength(), 0);
+    ASSERT_NE(iccP3->getData(), nullptr);
+    EXPECT_EQ(IccHelper::readIccColorGamut(iccP3->getData(), iccP3->getLength()),
+              ULTRAHDR_COLORGAMUT_P3);
+
+    sp<DataStruct> iccBt2100 = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
+                                                          ULTRAHDR_COLORGAMUT_BT2100);
+    ASSERT_NE(iccBt2100->getLength(), 0);
+    ASSERT_NE(iccBt2100->getData(), nullptr);
+    EXPECT_EQ(IccHelper::readIccColorGamut(iccBt2100->getData(), iccBt2100->getLength()),
+              ULTRAHDR_COLORGAMUT_BT2100);
+}
+
+TEST_F(IccHelperTest, iccEndianness) {
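+    // An ICC profile stores its total size as a big-endian uint32 in its first four header bytes;
+    // reassembling those bytes by hand should match the written length minus the identifier.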
+    sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, ULTRAHDR_COLORGAMUT_BT709);
+    size_t profile_size = icc->getLength() - kICCIdentifierSize;
+
+    uint8_t* icc_bytes = reinterpret_cast<uint8_t*>(icc->getData()) + kICCIdentifierSize;
+    uint32_t encoded_size = static_cast<uint32_t>(icc_bytes[0]) << 24 |
+                            static_cast<uint32_t>(icc_bytes[1]) << 16 |
+                            static_cast<uint32_t>(icc_bytes[2]) << 8 |
+                            static_cast<uint32_t>(icc_bytes[3]);
+
+    EXPECT_EQ(static_cast<size_t>(encoded_size), profile_size);
+}
+
+}  // namespace android::ultrahdr
diff --git a/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp b/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp
new file mode 100644
index 0000000..e2da01c
--- /dev/null
+++ b/libs/ultrahdr/tests/jpegdecoderhelper_test.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ultrahdr/jpegdecoderhelper.h>
+#include <ultrahdr/icc.h>
+#include <gtest/gtest.h>
+#include <utils/Log.h>
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+namespace android::ultrahdr {
+
+// No ICC or EXIF
+#define YUV_IMAGE "/sdcard/Documents/minnie-320x240-yuv.jpg"
+#define YUV_IMAGE_SIZE 20193
+// Has ICC and EXIF
+#define YUV_ICC_IMAGE "/sdcard/Documents/minnie-320x240-yuv-icc.jpg"
+#define YUV_ICC_IMAGE_SIZE 34266
+// No ICC or EXIF
+#define GREY_IMAGE "/sdcard/Documents/minnie-320x240-y.jpg"
+#define GREY_IMAGE_SIZE 20193
+
+#define IMAGE_WIDTH 320
+#define IMAGE_HEIGHT 240
+
+class JpegDecoderHelperTest : public testing::Test {
+public:
+    struct Image {
+        std::unique_ptr<uint8_t[]> buffer;
+        size_t size;
+    };
+    JpegDecoderHelperTest();
+    ~JpegDecoderHelperTest();
+protected:
+    virtual void SetUp();
+    virtual void TearDown();
+
+    Image mYuvImage, mYuvIccImage, mGreyImage;
+};
+
+JpegDecoderHelperTest::JpegDecoderHelperTest() {}
+
+JpegDecoderHelperTest::~JpegDecoderHelperTest() {}
+
+static size_t getFileSize(int fd) {
+    struct stat st;
+    if (fstat(fd, &st) < 0) {
+        ALOGW("%s : fstat failed", __func__);
+        return 0;
+    }
+    return st.st_size; // bytes
+}
+
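+// Reads the whole file into a freshly allocated buffer owned by |result|; returns false if the
+// file cannot be opened, stat'ed, or fully read.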
+static bool loadFile(const char filename[], JpegDecoderHelperTest::Image* result) {
+    int fd = open(filename, O_RDONLY | O_CLOEXEC);
+    if (fd < 0) {
+        return false;
+    }
+    int length = getFileSize(fd);
+    if (length == 0) {
+        close(fd);
+        return false;
+    }
+    result->buffer.reset(new uint8_t[length]);
+    if (read(fd, result->buffer.get(), length) != static_cast<ssize_t>(length)) {
+        close(fd);
+        return false;
+    }
+    close(fd);
+    return true;
+}
+
+void JpegDecoderHelperTest::SetUp() {
+    if (!loadFile(YUV_IMAGE, &mYuvImage)) {
+        FAIL() << "Load file " << YUV_IMAGE << " failed";
+    }
+    mYuvImage.size = YUV_IMAGE_SIZE;
+    if (!loadFile(YUV_ICC_IMAGE, &mYuvIccImage)) {
+        FAIL() << "Load file " << YUV_ICC_IMAGE << " failed";
+    }
+    mYuvIccImage.size = YUV_ICC_IMAGE_SIZE;
+    if (!loadFile(GREY_IMAGE, &mGreyImage)) {
+        FAIL() << "Load file " << GREY_IMAGE << " failed";
+    }
+    mGreyImage.size = GREY_IMAGE_SIZE;
+}
+
+void JpegDecoderHelperTest::TearDown() {}
+
+TEST_F(JpegDecoderHelperTest, decodeYuvImage) {
+    JpegDecoderHelper decoder;
+    EXPECT_TRUE(decoder.decompressImage(mYuvImage.buffer.get(), mYuvImage.size));
+    ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
+    EXPECT_EQ(IccHelper::readIccColorGamut(decoder.getICCPtr(), decoder.getICCSize()),
+              ULTRAHDR_COLORGAMUT_UNSPECIFIED);
+}
+
+TEST_F(JpegDecoderHelperTest, decodeYuvIccImage) {
+    JpegDecoderHelper decoder;
+    EXPECT_TRUE(decoder.decompressImage(mYuvIccImage.buffer.get(), mYuvIccImage.size));
+    ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
+    EXPECT_EQ(IccHelper::readIccColorGamut(decoder.getICCPtr(), decoder.getICCSize()),
+              ULTRAHDR_COLORGAMUT_BT709);
+}
+
+TEST_F(JpegDecoderHelperTest, decodeGreyImage) {
+    JpegDecoderHelper decoder;
+    EXPECT_TRUE(decoder.decompressImage(mGreyImage.buffer.get(), mGreyImage.size));
+    ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
+}
+
+TEST_F(JpegDecoderHelperTest, getCompressedImageParameters) {
+    size_t width = 0, height = 0;
+    std::vector<uint8_t> icc, exif;
+
+    JpegDecoderHelper decoder;
+    EXPECT_TRUE(decoder.getCompressedImageParameters(mYuvImage.buffer.get(), mYuvImage.size,
+                                                     &width, &height, &icc, &exif));
+
+    EXPECT_EQ(width, IMAGE_WIDTH);
+    EXPECT_EQ(height, IMAGE_HEIGHT);
+    EXPECT_EQ(icc.size(), 0);
+    EXPECT_EQ(exif.size(), 0);
+}
+
+TEST_F(JpegDecoderHelperTest, getCompressedImageParametersIcc) {
+    size_t width = 0, height = 0;
+    std::vector<uint8_t> icc, exif;
+
+    JpegDecoderHelper decoder;
+    EXPECT_TRUE(decoder.getCompressedImageParameters(mYuvIccImage.buffer.get(), mYuvIccImage.size,
+                                                     &width, &height, &icc, &exif));
+
+    EXPECT_EQ(width, IMAGE_WIDTH);
+    EXPECT_EQ(height, IMAGE_HEIGHT);
+    EXPECT_GT(icc.size(), 0);
+    EXPECT_GT(exif.size(), 0);
+
+    EXPECT_EQ(IccHelper::readIccColorGamut(icc.data(), icc.size()),
+              ULTRAHDR_COLORGAMUT_BT709);
+}
+
+}  // namespace android::ultrahdr
diff --git a/libs/ultrahdr/tests/jpegencoderhelper_test.cpp b/libs/ultrahdr/tests/jpegencoderhelper_test.cpp
new file mode 100644
index 0000000..f0e1fa4
--- /dev/null
+++ b/libs/ultrahdr/tests/jpegencoderhelper_test.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ultrahdr/jpegencoderhelper.h>
+#include <gtest/gtest.h>
+#include <utils/Log.h>
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+namespace android::ultrahdr {
+
+#define ALIGNED_IMAGE "/sdcard/Documents/minnie-320x240.yu12"
+#define ALIGNED_IMAGE_WIDTH 320
+#define ALIGNED_IMAGE_HEIGHT 240
+#define SINGLE_CHANNEL_IMAGE "/sdcard/Documents/minnie-320x240.y"
+#define SINGLE_CHANNEL_IMAGE_WIDTH ALIGNED_IMAGE_WIDTH
+#define SINGLE_CHANNEL_IMAGE_HEIGHT ALIGNED_IMAGE_HEIGHT
+#define UNALIGNED_IMAGE "/sdcard/Documents/minnie-318x240.yu12"
+#define UNALIGNED_IMAGE_WIDTH 318
+#define UNALIGNED_IMAGE_HEIGHT 240
+#define JPEG_QUALITY 90
+
+class JpegEncoderHelperTest : public testing::Test {
+public:
+    struct Image {
+        std::unique_ptr<uint8_t[]> buffer;
+        size_t width;
+        size_t height;
+    };
+    JpegEncoderHelperTest();
+    ~JpegEncoderHelperTest();
+protected:
+    virtual void SetUp();
+    virtual void TearDown();
+
+    Image mAlignedImage, mUnalignedImage, mSingleChannelImage;
+};
+
+JpegEncoderHelperTest::JpegEncoderHelperTest() {}
+
+JpegEncoderHelperTest::~JpegEncoderHelperTest() {}
+
+static size_t getFileSize(int fd) {
+    struct stat st;
+    if (fstat(fd, &st) < 0) {
+        ALOGW("%s : fstat failed", __func__);
+        return 0;
+    }
+    return st.st_size; // bytes
+}
+
+static bool loadFile(const char filename[], JpegEncoderHelperTest::Image* result) {
+    int fd = open(filename, O_RDONLY | O_CLOEXEC);
+    if (fd < 0) {
+        return false;
+    }
+    int length = getFileSize(fd);
+    if (length == 0) {
+        close(fd);
+        return false;
+    }
+    result->buffer.reset(new uint8_t[length]);
+    if (read(fd, result->buffer.get(), length) != static_cast<ssize_t>(length)) {
+        close(fd);
+        return false;
+    }
+    close(fd);
+    return true;
+}
+
+void JpegEncoderHelperTest::SetUp() {
+    if (!loadFile(ALIGNED_IMAGE, &mAlignedImage)) {
+        FAIL() << "Load file " << ALIGNED_IMAGE << " failed";
+    }
+    mAlignedImage.width = ALIGNED_IMAGE_WIDTH;
+    mAlignedImage.height = ALIGNED_IMAGE_HEIGHT;
+    if (!loadFile(UNALIGNED_IMAGE, &mUnalignedImage)) {
+        FAIL() << "Load file " << UNALIGNED_IMAGE << " failed";
+    }
+    mUnalignedImage.width = UNALIGNED_IMAGE_WIDTH;
+    mUnalignedImage.height = UNALIGNED_IMAGE_HEIGHT;
+    if (!loadFile(SINGLE_CHANNEL_IMAGE, &mSingleChannelImage)) {
+        FAIL() << "Load file " << SINGLE_CHANNEL_IMAGE << " failed";
+    }
+    mSingleChannelImage.width = SINGLE_CHANNEL_IMAGE_WIDTH;
+    mSingleChannelImage.height = SINGLE_CHANNEL_IMAGE_HEIGHT;
+}
+
+void JpegEncoderHelperTest::TearDown() {}
+
+TEST_F(JpegEncoderHelperTest, encodeAlignedImage) {
+    JpegEncoderHelper encoder;
+    EXPECT_TRUE(encoder.compressImage(mAlignedImage.buffer.get(), mAlignedImage.width,
+                                      mAlignedImage.height, JPEG_QUALITY, NULL, 0));
+    ASSERT_GT(encoder.getCompressedImageSize(), static_cast<uint32_t>(0));
+}
+
+TEST_F(JpegEncoderHelperTest, encodeUnalignedImage) {
+    JpegEncoderHelper encoder;
+    EXPECT_TRUE(encoder.compressImage(mUnalignedImage.buffer.get(), mUnalignedImage.width,
+                                      mUnalignedImage.height, JPEG_QUALITY, NULL, 0));
+    ASSERT_GT(encoder.getCompressedImageSize(), static_cast<uint32_t>(0));
+}
+
+TEST_F(JpegEncoderHelperTest, encodeSingleChannelImage) {
+    JpegEncoderHelper encoder;
+    EXPECT_TRUE(encoder.compressImage(mSingleChannelImage.buffer.get(), mSingleChannelImage.width,
+                                      mSingleChannelImage.height, JPEG_QUALITY, NULL, 0, true));
+    ASSERT_GT(encoder.getCompressedImageSize(), static_cast<uint32_t>(0));
+}
+
+}  // namespace android::ultrahdr
+
diff --git a/libs/ultrahdr/tests/jpegr_test.cpp b/libs/ultrahdr/tests/jpegr_test.cpp
new file mode 100644
index 0000000..41d55ec
--- /dev/null
+++ b/libs/ultrahdr/tests/jpegr_test.cpp
@@ -0,0 +1,1375 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ultrahdr/jpegr.h>
+#include <ultrahdr/jpegrutils.h>
+#include <ultrahdr/gainmapmath.h>
+#include <fcntl.h>
+#include <fstream>
+#include <gtest/gtest.h>
+#include <sys/time.h>
+#include <utils/Log.h>
+
+#define RAW_P010_IMAGE "/sdcard/Documents/raw_p010_image.p010"
+#define RAW_P010_IMAGE_WITH_STRIDE "/sdcard/Documents/raw_p010_image_with_stride.p010"
+#define RAW_YUV420_IMAGE "/sdcard/Documents/raw_yuv420_image.yuv420"
+#define JPEG_IMAGE "/sdcard/Documents/jpeg_image.jpg"
+#define TEST_IMAGE_WIDTH 1280
+#define TEST_IMAGE_HEIGHT 720
+#define TEST_IMAGE_STRIDE 1288
+#define DEFAULT_JPEG_QUALITY 90
+
+#define SAVE_ENCODING_RESULT true
+#define SAVE_DECODING_RESULT true
+#define SAVE_INPUT_RGBA true
+
+namespace android::ultrahdr {
+
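+// Minimal wall-clock timer built on gettimeofday(); elapsedTime() returns the measured
+// interval in microseconds.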
+struct Timer {
+  struct timeval StartingTime;
+  struct timeval EndingTime;
+  struct timeval ElapsedMicroseconds;
+};
+
+void timerStart(Timer *t) {
+  gettimeofday(&t->StartingTime, nullptr);
+}
+
+void timerStop(Timer *t) {
+  gettimeofday(&t->EndingTime, nullptr);
+}
+
+int64_t elapsedTime(Timer *t) {
+  t->ElapsedMicroseconds.tv_sec = t->EndingTime.tv_sec - t->StartingTime.tv_sec;
+  t->ElapsedMicroseconds.tv_usec = t->EndingTime.tv_usec - t->StartingTime.tv_usec;
+  return t->ElapsedMicroseconds.tv_sec * 1000000 + t->ElapsedMicroseconds.tv_usec;
+}
+
+static size_t getFileSize(int fd) {
+  struct stat st;
+  if (fstat(fd, &st) < 0) {
+    ALOGW("%s : fstat failed", __func__);
+    return 0;
+  }
+  return st.st_size; // bytes
+}
+
+static bool loadFile(const char filename[], void*& result, int* fileLength) {
+  int fd = open(filename, O_CLOEXEC);
+  if (fd < 0) {
+    return false;
+  }
+  int length = getFileSize(fd);
+  if (length == 0) {
+    close(fd);
+    return false;
+  }
+  if (fileLength != nullptr) {
+    *fileLength = length;
+  }
+  result = malloc(length);
+  if (read(fd, result, length) != static_cast<ssize_t>(length)) {
+    close(fd);
+    return false;
+  }
+  close(fd);
+  return true;
+}
+
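+// Loads a raw P010 (10-bit YUV 4:2:0, 16 bits per sample) file. The luma plane is read row by
+// row honoring luma_stride, followed by the half-height interleaved UV plane. When
+// isUVContiguous is true chroma shares the luma allocation; otherwise it is read into its own
+// buffer using chroma_stride.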
+static bool loadP010Image(const char *filename, jr_uncompressed_ptr img,
+                          bool isUVContiguous) {
+  int fd = open(filename, O_CLOEXEC);
+  if (fd < 0) {
+    return false;
+  }
+  const int bpp = 2;
+  int lumaStride = img->luma_stride == 0 ? img->width : img->luma_stride;
+  int lumaSize = bpp * lumaStride * img->height;
+  int chromaSize = bpp * (img->height / 2) *
+                   (isUVContiguous ? lumaStride : img->chroma_stride);
+  img->data = malloc(lumaSize + (isUVContiguous ? chromaSize : 0));
+  if (img->data == nullptr) {
+    ALOGE("loadP010Image(): failed to allocate memory for luma data.");
+    return false;
+  }
+  uint8_t *mem = static_cast<uint8_t *>(img->data);
+  for (int i = 0; i < img->height; i++) {
+    if (read(fd, mem, img->width * bpp) != img->width * bpp) {
+      close(fd);
+      return false;
+    }
+    mem += lumaStride * bpp;
+  }
+  int chromaStride = lumaStride;
+  if (!isUVContiguous) {
+    img->chroma_data = malloc(chromaSize);
+    if (img->chroma_data == nullptr) {
+      ALOGE("loadP010Image(): failed to allocate memory for chroma data.");
+      return false;
+    }
+    mem = static_cast<uint8_t *>(img->chroma_data);
+    chromaStride = img->chroma_stride;
+  }
+  for (int i = 0; i < img->height / 2; i++) {
+    if (read(fd, mem, img->width * bpp) != img->width * bpp) {
+      close(fd);
+      return false;
+    }
+    mem += chromaStride * bpp;
+  }
+  close(fd);
+  return true;
+}
+
+class JpegRTest : public testing::Test {
+public:
+  JpegRTest();
+  ~JpegRTest();
+
+protected:
+  virtual void SetUp();
+  virtual void TearDown();
+
+  struct jpegr_uncompressed_struct mRawP010Image{};
+  struct jpegr_uncompressed_struct mRawP010ImageWithStride{};
+  struct jpegr_uncompressed_struct mRawP010ImageWithChromaData{};
+  struct jpegr_uncompressed_struct mRawYuv420Image{};
+  struct jpegr_compressed_struct mJpegImage{};
+};
+
+JpegRTest::JpegRTest() {}
+JpegRTest::~JpegRTest() {}
+
+void JpegRTest::SetUp() {}
+void JpegRTest::TearDown() {
+  free(mRawP010Image.data);
+  free(mRawP010Image.chroma_data);
+  free(mRawP010ImageWithStride.data);
+  free(mRawP010ImageWithStride.chroma_data);
+  free(mRawP010ImageWithChromaData.data);
+  free(mRawP010ImageWithChromaData.chroma_data);
+  free(mRawYuv420Image.data);
+  free(mJpegImage.data);
+}
+
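+// Subclasses JpegR so the gain map generation and application paths can be timed directly,
+// averaging over kProfileCount iterations.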
+class JpegRBenchmark : public JpegR {
+public:
+ void BenchmarkGenerateGainMap(jr_uncompressed_ptr yuv420Image, jr_uncompressed_ptr p010Image,
+                               ultrahdr_metadata_ptr metadata, jr_uncompressed_ptr map);
+ void BenchmarkApplyGainMap(jr_uncompressed_ptr yuv420Image, jr_uncompressed_ptr map,
+                            ultrahdr_metadata_ptr metadata, jr_uncompressed_ptr dest);
+private:
+ const int kProfileCount = 10;
+};
+
+void JpegRBenchmark::BenchmarkGenerateGainMap(jr_uncompressed_ptr yuv420Image,
+                                              jr_uncompressed_ptr p010Image,
+                                              ultrahdr_metadata_ptr metadata,
+                                              jr_uncompressed_ptr map) {
+  ASSERT_EQ(yuv420Image->width, p010Image->width);
+  ASSERT_EQ(yuv420Image->height, p010Image->height);
+
+  Timer genRecMapTime;
+
+  timerStart(&genRecMapTime);
+  for (auto i = 0; i < kProfileCount; i++) {
+      ASSERT_EQ(OK, generateGainMap(
+          yuv420Image, p010Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, metadata, map));
+      if (i != kProfileCount - 1) delete[] static_cast<uint8_t *>(map->data);
+  }
+  timerStop(&genRecMapTime);
+
+  ALOGE("Generate Gain Map:- Res = %i x %i, time = %f ms",
+        yuv420Image->width, yuv420Image->height,
+        elapsedTime(&genRecMapTime) / (kProfileCount * 1000.f));
+
+}
+
+void JpegRBenchmark::BenchmarkApplyGainMap(jr_uncompressed_ptr yuv420Image,
+                                           jr_uncompressed_ptr map,
+                                           ultrahdr_metadata_ptr metadata,
+                                           jr_uncompressed_ptr dest) {
+  Timer applyRecMapTime;
+
+  timerStart(&applyRecMapTime);
+  for (auto i = 0; i < kProfileCount; i++) {
+      ASSERT_EQ(OK, applyGainMap(yuv420Image, map, metadata, ULTRAHDR_OUTPUT_HDR_HLG,
+                                 metadata->maxContentBoost /* displayBoost */, dest));
+  }
+  timerStop(&applyRecMapTime);
+
+  ALOGE("Apply Gain Map:- Res = %i x %i, time = %f ms",
+        yuv420Image->width, yuv420Image->height,
+        elapsedTime(&applyRecMapTime) / (kProfileCount * 1000.f));
+}
+
+TEST_F(JpegRTest, build) {
+  // Force all of the gain map lib to be linked by calling all public functions.
+  JpegR jpegRCodec;
+  jpegRCodec.encodeJPEGR(nullptr, static_cast<ultrahdr_transfer_function>(0), nullptr, 0, nullptr);
+  jpegRCodec.encodeJPEGR(nullptr, nullptr, static_cast<ultrahdr_transfer_function>(0),
+                         nullptr, 0, nullptr);
+  jpegRCodec.encodeJPEGR(nullptr, nullptr, nullptr, static_cast<ultrahdr_transfer_function>(0),
+                         nullptr);
+  jpegRCodec.encodeJPEGR(nullptr, nullptr, static_cast<ultrahdr_transfer_function>(0), nullptr);
+  jpegRCodec.decodeJPEGR(nullptr, nullptr);
+}
+
+/* Test Encode API-0 invalid arguments */
+TEST_F(JpegRTest, encodeAPI0ForInvalidArgs) {
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = 16 * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+
+  JpegR jpegRCodec;
+
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  mRawP010ImageWithStride.data = malloc(16);
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
+  // test quality factor
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      -1, nullptr)) << "fail, API allows bad jpeg quality factor";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      101, nullptr)) << "fail, API allows bad jpeg quality factor";
+
+  // test hdr transfer function
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_UNSPECIFIED, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad hdr transfer function";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride,
+      static_cast<ultrahdr_transfer_function>(ultrahdr_transfer_function::ULTRAHDR_TF_MAX + 1),
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad hdr transfer function";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride,
+      static_cast<ultrahdr_transfer_function>(-10),
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad hdr transfer function";
+
+  // test dest
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, nullptr,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows nullptr dest";
+
+  // test p010 input
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      nullptr, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows nullptr p010 image";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad p010 color gamut";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = static_cast<ultrahdr_color_gamut>(
+      ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_MAX + 1);
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad p010 color gamut";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH - 1;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad image width";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT - 1;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad image height";
+
+  mRawP010ImageWithStride.width = 0;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad image width";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = 0;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad image height";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_WIDTH - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad luma stride";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.chroma_data = mRawP010ImageWithStride.data;
+  mRawP010ImageWithStride.chroma_stride = TEST_IMAGE_WIDTH - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad chroma stride";
+
+  mRawP010ImageWithStride.chroma_data = nullptr;
+
+  free(jpegR.data);
+}
+
+/* Test Encode API-1 invalid arguments */
+TEST_F(JpegRTest, encodeAPI1ForInvalidArgs) {
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = 16 * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+
+  JpegR jpegRCodec;
+
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  mRawP010ImageWithStride.data = malloc(16);
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  mRawYuv420Image.data = malloc(16);
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709;
+
+  // test quality factor
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, -1, nullptr)) << "fail, API allows bad jpeg quality factor";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, 101, nullptr)) << "fail, API allows bad jpeg quality factor";
+
+  // test hdr transfer function
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image,
+      ultrahdr_transfer_function::ULTRAHDR_TF_UNSPECIFIED, &jpegR, DEFAULT_JPEG_QUALITY,
+      nullptr)) << "fail, API allows bad hdr transfer function";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image,
+      static_cast<ultrahdr_transfer_function>(ultrahdr_transfer_function::ULTRAHDR_TF_MAX + 1),
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad hdr transfer function";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image,
+      static_cast<ultrahdr_transfer_function>(-10),
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad hdr transfer function";
+
+  // test dest
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      nullptr, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows nullptr dest";
+
+  // test p010 input
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      nullptr, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows nullptr p010 image";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad p010 color gamut";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = static_cast<ultrahdr_color_gamut>(
+      ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_MAX + 1);
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad p010 color gamut";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH - 1;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad image width";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT - 1;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad image height";
+
+  mRawP010ImageWithStride.width = 0;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad image width";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = 0;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad image height";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_WIDTH - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad luma stride";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.chroma_data = mRawP010ImageWithStride.data;
+  mRawP010ImageWithStride.chroma_stride = TEST_IMAGE_WIDTH - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad chroma stride";
+
+  // test 420 input
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.chroma_data = nullptr;
+  mRawP010ImageWithStride.chroma_stride = 0;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, nullptr, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows nullptr for 420 image";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad 420 image width";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH - 2;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad 420 image height";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.luma_stride = TEST_IMAGE_STRIDE;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad luma stride for 420";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.luma_stride = 0;
+  mRawYuv420Image.chroma_data = mRawYuv420Image.data;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows chroma pointer for 420";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.luma_stride = 0;
+  mRawYuv420Image.chroma_data = nullptr;
+  mRawYuv420Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad 420 color gamut";
+
+  mRawYuv420Image.colorGamut = static_cast<ultrahdr_color_gamut>(
+      ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_MAX + 1);
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR, DEFAULT_JPEG_QUALITY, nullptr)) << "fail, API allows bad 420 color gamut";
+
+  free(jpegR.data);
+}
+
+/* Test Encode API-2 invalid arguments */
+TEST_F(JpegRTest, encodeAPI2ForInvalidArgs) {
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = 16 * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+
+  JpegR jpegRCodec;
+
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  mRawP010ImageWithStride.data = malloc(16);
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  mRawYuv420Image.data = malloc(16);
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709;
+
+  // test hdr transfer function
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_UNSPECIFIED,
+      &jpegR)) << "fail, API allows bad hdr transfer function";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      static_cast<ultrahdr_transfer_function>(ultrahdr_transfer_function::ULTRAHDR_TF_MAX + 1),
+      &jpegR)) << "fail, API allows bad hdr transfer function";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      static_cast<ultrahdr_transfer_function>(-10),
+      &jpegR)) << "fail, API allows bad hdr transfer function";
+
+  // test dest
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG, nullptr)) << "fail, API allows nullptr dest";
+
+  // test p010 input
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      nullptr, &mRawYuv420Image, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows nullptr p010 image";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad p010 color gamut";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = static_cast<ultrahdr_color_gamut>(
+      ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_MAX + 1);
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad p010 color gamut";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH - 1;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR)) << "fail, API allows bad image width";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT - 1;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR)) << "fail, API allows bad image height";
+
+  mRawP010ImageWithStride.width = 0;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR)) << "fail, API allows bad image width";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = 0;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR)) << "fail, API allows bad image height";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_WIDTH - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR)) << "fail, API allows bad luma stride";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.chroma_data = mRawP010ImageWithStride.data;
+  mRawP010ImageWithStride.chroma_stride = TEST_IMAGE_WIDTH - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad chroma stride";
+
+  // test 420 input
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.chroma_data = nullptr;
+  mRawP010ImageWithStride.chroma_stride = 0;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, nullptr, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows nullptr for 420 image";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad 420 image width";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH - 2;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad 420 image height";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.luma_stride = TEST_IMAGE_STRIDE;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad luma stride for 420";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.luma_stride = 0;
+  mRawYuv420Image.chroma_data = mRawYuv420Image.data;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows chroma pointer for 420";
+
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.luma_stride = 0;
+  mRawYuv420Image.chroma_data = nullptr;
+  mRawYuv420Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad 420 color gamut";
+
+  mRawYuv420Image.colorGamut = static_cast<ultrahdr_color_gamut>(
+      ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_MAX + 1);
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, &jpegR,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad 420 color gamut";
+
+  // bad compressed image
+  mRawYuv420Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &mRawYuv420Image, nullptr,
+      ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad 420 color gamut";
+
+  free(jpegR.data);
+}
+
+/* Test Encode API-3 invalid arguments */
+TEST_F(JpegRTest, encodeAPI3ForInvalidArgs) {
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = 16 * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+
+  JpegR jpegRCodec;
+
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  mRawP010ImageWithStride.data = malloc(16);
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
+  // test hdr transfer function
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_UNSPECIFIED,
+      &jpegR)) << "fail, API allows bad hdr transfer function";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR,
+      static_cast<ultrahdr_transfer_function>(ultrahdr_transfer_function::ULTRAHDR_TF_MAX + 1),
+      &jpegR)) << "fail, API allows bad hdr transfer function";
+
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, static_cast<ultrahdr_transfer_function>(-10),
+      &jpegR)) << "fail, API allows bad hdr transfer function";
+
+  // test dest
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      nullptr)) << "fail, API allows nullptr dest";
+
+  // test p010 input
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      nullptr, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows nullptr p010 image";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad p010 color gamut";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = static_cast<ultrahdr_color_gamut>(
+      ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_MAX + 1);
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad p010 color gamut";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH - 1;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad image width";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT - 1;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad image height";
+
+  mRawP010ImageWithStride.width = 0;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad image width";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = 0;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad image height";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_WIDTH - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad luma stride";
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.chroma_data = mRawP010ImageWithStride.data;
+  mRawP010ImageWithStride.chroma_stride = TEST_IMAGE_WIDTH - 2;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, &jpegR, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad chroma stride";
+  mRawP010ImageWithStride.chroma_data = nullptr;
+
+  // bad compressed image
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, nullptr, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR)) << "fail, API allows bad 420 color gamut";
+
+  free(jpegR.data);
+}
+
+/* Test Encode API-4 invalid arguments */
+TEST_F(JpegRTest, encodeAPI4ForInvalidArgs) {
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = 16 * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+
+  JpegR jpegRCodec;
+
+  // test dest
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &jpegR, &jpegR, nullptr, nullptr)) << "fail, API allows nullptr dest";
+
+  // test primary image
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      nullptr, &jpegR, nullptr, &jpegR)) << "fail, API allows nullptr primary image";
+
+  // test gain map
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &jpegR, nullptr, nullptr, &jpegR)) << "fail, API allows nullptr gainmap image";
+
+  // test metadata
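+  // Start from a known-good metadata baseline, then invalidate one field at a time below.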
+  ultrahdr_metadata_struct good_metadata;
+  good_metadata.version = "1.0";
+  good_metadata.minContentBoost = 1.0f;
+  good_metadata.maxContentBoost = 2.0f;
+  good_metadata.gamma = 1.0f;
+  good_metadata.offsetSdr = 0.0f;
+  good_metadata.offsetHdr = 0.0f;
+  good_metadata.hdrCapacityMin = 1.0f;
+  good_metadata.hdrCapacityMax = 2.0f;
+
+  ultrahdr_metadata_struct metadata = good_metadata;
+  metadata.version = "1.1";
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &jpegR, nullptr, &metadata, &jpegR)) << "fail, API allows bad metadata version";
+
+  metadata = good_metadata;
+  metadata.minContentBoost = 3.0f;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &jpegR, nullptr, &metadata, &jpegR)) << "fail, API allows bad metadata content boost";
+
+  metadata = good_metadata;
+  metadata.gamma = -0.1f;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &jpegR, nullptr, &metadata, &jpegR)) << "fail, API allows bad metadata gamma";
+
+  metadata = good_metadata;
+  metadata.offsetSdr = -0.1f;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &jpegR, nullptr, &metadata, &jpegR)) << "fail, API allows bad metadata offset sdr";
+
+  metadata = good_metadata;
+  metadata.offsetHdr = -0.1f;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &jpegR, nullptr, &metadata, &jpegR)) << "fail, API allows bad metadata offset hdr";
+
+  metadata = good_metadata;
+  metadata.hdrCapacityMax = 0.5f;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &jpegR, nullptr, &metadata, &jpegR)) << "fail, API allows bad metadata hdr capacity max";
+
+  metadata = good_metadata;
+  metadata.hdrCapacityMin = 0.5f;
+  EXPECT_NE(OK, jpegRCodec.encodeJPEGR(
+      &jpegR, nullptr, &metadata, &jpegR)) << "fail, API allows bad metadata hdr capacity min";
+
+  free(jpegR.data);
+}
+
+/* Test Decode API invalid arguments */
+TEST_F(JpegRTest, decodeAPIForInvalidArgs) {
+  // we are not really compressing anything, so let's keep allocs to a minimum
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = 16 * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+
+  // we are not really decoding anything, so let's keep allocs to a minimum
+  mRawP010Image.data = malloc(16);
+
+  JpegR jpegRCodec;
+
+  // test jpegr image
+  EXPECT_NE(OK, jpegRCodec.decodeJPEGR(
+        nullptr, &mRawP010Image)) << "fail, API allows nullptr for jpegr img";
+
+  // test dest image
+  EXPECT_NE(OK, jpegRCodec.decodeJPEGR(
+        &jpegR, nullptr)) << "fail, API allows nullptr for dest";
+
+  // test max display boost
+  EXPECT_NE(OK, jpegRCodec.decodeJPEGR(
+        &jpegR, &mRawP010Image, 0.5)) << "fail, API allows invalid max display boost";
+
+  // test output format
+  EXPECT_NE(OK, jpegRCodec.decodeJPEGR(
+        &jpegR, &mRawP010Image, 0.5, nullptr,
+        static_cast<ultrahdr_output_format>(-1))) << "fail, API allows invalid output format";
+
+  EXPECT_NE(OK, jpegRCodec.decodeJPEGR(
+        &jpegR, &mRawP010Image, 0.5, nullptr,
+        static_cast<ultrahdr_output_format>(ULTRAHDR_OUTPUT_MAX + 1)))
+        << "fail, API allows invalid output format";
+
+  free(jpegR.data);
+}
+
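+// Round-trips gain map metadata through the XMP writer and parser and checks that every field
+// survives unchanged.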
+TEST_F(JpegRTest, writeXmpThenRead) {
+  ultrahdr_metadata_struct metadata_expected;
+  metadata_expected.version = "1.0";
+  metadata_expected.maxContentBoost = 1.25f;
+  metadata_expected.minContentBoost = 0.75f;
+  metadata_expected.gamma = 1.0f;
+  metadata_expected.offsetSdr = 0.0f;
+  metadata_expected.offsetHdr = 0.0f;
+  metadata_expected.hdrCapacityMin = 1.0f;
+  metadata_expected.hdrCapacityMax = metadata_expected.maxContentBoost;
+  const std::string nameSpace = "http://ns.adobe.com/xap/1.0/\0";
+  const int nameSpaceLength = nameSpace.size() + 1;  // need to count the null terminator
+
+  std::string xmp = generateXmpForSecondaryImage(metadata_expected);
+
+  std::vector<uint8_t> xmpData;
+  xmpData.reserve(nameSpaceLength + xmp.size());
+  xmpData.insert(xmpData.end(), reinterpret_cast<const uint8_t*>(nameSpace.c_str()),
+                  reinterpret_cast<const uint8_t*>(nameSpace.c_str()) + nameSpaceLength);
+  xmpData.insert(xmpData.end(), reinterpret_cast<const uint8_t*>(xmp.c_str()),
+                  reinterpret_cast<const uint8_t*>(xmp.c_str()) + xmp.size());
+
+  ultrahdr_metadata_struct metadata_read;
+  EXPECT_TRUE(getMetadataFromXMP(xmpData.data(), xmpData.size(), &metadata_read));
+  EXPECT_FLOAT_EQ(metadata_expected.maxContentBoost, metadata_read.maxContentBoost);
+  EXPECT_FLOAT_EQ(metadata_expected.minContentBoost, metadata_read.minContentBoost);
+  EXPECT_FLOAT_EQ(metadata_expected.gamma, metadata_read.gamma);
+  EXPECT_FLOAT_EQ(metadata_expected.offsetSdr, metadata_read.offsetSdr);
+  EXPECT_FLOAT_EQ(metadata_expected.offsetHdr, metadata_read.offsetHdr);
+  EXPECT_FLOAT_EQ(metadata_expected.hdrCapacityMin, metadata_read.hdrCapacityMin);
+  EXPECT_FLOAT_EQ(metadata_expected.hdrCapacityMax, metadata_read.hdrCapacityMax);
+}
+
+/* Test Encode API-0 */
+TEST_F(JpegRTest, encodeFromP010) {
+  int ret;
+
+  mRawP010Image.width = TEST_IMAGE_WIDTH;
+  mRawP010Image.height = TEST_IMAGE_HEIGHT;
+  mRawP010Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+  // Load input files.
+  if (!loadP010Image(RAW_P010_IMAGE, &mRawP010Image, true)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+
+  JpegR jpegRCodec;
+
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+  ret = jpegRCodec.encodeJPEGR(
+      &mRawP010Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR, DEFAULT_JPEG_QUALITY,
+      nullptr);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_WIDTH + 128;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+  // Load input files.
+  if (!loadP010Image(RAW_P010_IMAGE, &mRawP010ImageWithStride, true)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+
+  jpegr_compressed_struct jpegRWithStride;
+  jpegRWithStride.maxLength = jpegR.length;
+  jpegRWithStride.data = malloc(jpegRWithStride.maxLength);
+  ret = jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegRWithStride,
+      DEFAULT_JPEG_QUALITY, nullptr);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  ASSERT_EQ(jpegR.length, jpegRWithStride.length)
+      << "Same input is yielding different output";
+  ASSERT_EQ(0, memcmp(jpegR.data, jpegRWithStride.data, jpegR.length))
+      << "Same input is yielding different output";
+
+  mRawP010ImageWithChromaData.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithChromaData.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithChromaData.luma_stride = TEST_IMAGE_WIDTH + 64;
+  mRawP010ImageWithChromaData.chroma_stride = TEST_IMAGE_WIDTH + 256;
+  mRawP010ImageWithChromaData.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+  // Load input files.
+  if (!loadP010Image(RAW_P010_IMAGE, &mRawP010ImageWithChromaData, false)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  jpegr_compressed_struct jpegRWithChromaData;
+  jpegRWithChromaData.maxLength = jpegR.length;
+  jpegRWithChromaData.data = malloc(jpegRWithChromaData.maxLength);
+  ret = jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithChromaData, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegRWithChromaData, DEFAULT_JPEG_QUALITY, nullptr);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  ASSERT_EQ(jpegR.length, jpegRWithChromaData.length)
+      << "Same input is yielding different output";
+  ASSERT_EQ(0, memcmp(jpegR.data, jpegRWithChromaData.data, jpegR.length))
+      << "Same input is yielding different output";
+
+  free(jpegR.data);
+  free(jpegRWithStride.data);
+  free(jpegRWithChromaData.data);
+}
+
+/* Test Encode API-0 and decode */
+TEST_F(JpegRTest, encodeFromP010ThenDecode) {
+  int ret;
+
+  // Load input files.
+  if (!loadFile(RAW_P010_IMAGE, mRawP010Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawP010Image.width = TEST_IMAGE_WIDTH;
+  mRawP010Image.height = TEST_IMAGE_HEIGHT;
+  mRawP010Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
+  JpegR jpegRCodec;
+
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+  ret = jpegRCodec.encodeJPEGR(
+      &mRawP010Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR, DEFAULT_JPEG_QUALITY,
+      nullptr);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_ENCODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/encoded_from_p010_input.jpgr";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)jpegR.data, jpegR.length);
+  }
+
+  jpegr_uncompressed_struct decodedJpegR;
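+  // Sized at 8 bytes per pixel for the default (linear RGBA F16) decode output.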
+  int decodedJpegRSize = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * 8;
+  decodedJpegR.data = malloc(decodedJpegRSize);
+  ret = jpegRCodec.decodeJPEGR(&jpegR, &decodedJpegR);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_DECODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/decoded_from_p010_input.rgb";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)decodedJpegR.data, decodedJpegRSize);
+  }
+
+  free(jpegR.data);
+  free(decodedJpegR.data);
+}
+
+/* Test Encode API-0 (with stride) and decode */
+TEST_F(JpegRTest, encodeFromP010WithStrideThenDecode) {
+  int ret;
+
+  // Load input files.
+  if (!loadFile(RAW_P010_IMAGE_WITH_STRIDE, mRawP010ImageWithStride.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE_WITH_STRIDE << " failed";
+  }
+  mRawP010ImageWithStride.width = TEST_IMAGE_WIDTH;
+  mRawP010ImageWithStride.height = TEST_IMAGE_HEIGHT;
+  mRawP010ImageWithStride.luma_stride = TEST_IMAGE_STRIDE;
+  mRawP010ImageWithStride.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
+  JpegR jpegRCodec;
+
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+  ret = jpegRCodec.encodeJPEGR(
+      &mRawP010ImageWithStride, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_ENCODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/encoded_from_p010_input.jpgr";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)jpegR.data, jpegR.length);
+  }
+
+  jpegr_uncompressed_struct decodedJpegR;
+  int decodedJpegRSize = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * 8;
+  decodedJpegR.data = malloc(decodedJpegRSize);
+  ret = jpegRCodec.decodeJPEGR(&jpegR, &decodedJpegR);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_DECODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/decoded_from_p010_input.rgb";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)decodedJpegR.data, decodedJpegRSize);
+  }
+
+  free(jpegR.data);
+  free(decodedJpegR.data);
+}
+
+/* Test Encode API-1 and decode */
+TEST_F(JpegRTest, encodeFromRawHdrAndSdrThenDecode) {
+  int ret;
+
+  // Load input files.
+  if (!loadFile(RAW_P010_IMAGE, mRawP010Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawP010Image.width = TEST_IMAGE_WIDTH;
+  mRawP010Image.height = TEST_IMAGE_HEIGHT;
+  mRawP010Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
+  if (!loadFile(RAW_YUV420_IMAGE, mRawYuv420Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709;
+
+  JpegR jpegRCodec;
+
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+  ret = jpegRCodec.encodeJPEGR(
+      &mRawP010Image, &mRawYuv420Image, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR,
+      DEFAULT_JPEG_QUALITY, nullptr);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_ENCODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/encoded_from_p010_yuv420p_input.jpgr";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)jpegR.data, jpegR.length);
+  }
+
+  jpegr_uncompressed_struct decodedJpegR;
+  int decodedJpegRSize = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * 8;
+  decodedJpegR.data = malloc(decodedJpegRSize);
+  ret = jpegRCodec.decodeJPEGR(&jpegR, &decodedJpegR);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_DECODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/decoded_from_p010_yuv420p_input.rgb";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)decodedJpegR.data, decodedJpegRSize);
+  }
+
+  free(jpegR.data);
+  free(decodedJpegR.data);
+}
+
+/* Test Encode API-2 and decode */
+TEST_F(JpegRTest, encodeFromRawHdrAndSdrAndJpegThenDecode) {
+  int ret;
+
+  // Load input files.
+  if (!loadFile(RAW_P010_IMAGE, mRawP010Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawP010Image.width = TEST_IMAGE_WIDTH;
+  mRawP010Image.height = TEST_IMAGE_HEIGHT;
+  mRawP010Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
+  if (!loadFile(RAW_YUV420_IMAGE, mRawYuv420Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawYuv420Image.width = TEST_IMAGE_WIDTH;
+  mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
+  mRawYuv420Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709;
+
+  if (!loadFile(JPEG_IMAGE, mJpegImage.data, &mJpegImage.length)) {
+    FAIL() << "Load file " << JPEG_IMAGE << " failed";
+  }
+  mJpegImage.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709;
+
+  JpegR jpegRCodec;
+
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+  ret = jpegRCodec.encodeJPEGR(
+      &mRawP010Image, &mRawYuv420Image, &mJpegImage, ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+      &jpegR);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_ENCODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/encoded_from_p010_yuv420p_jpeg_input.jpgr";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)jpegR.data, jpegR.length);
+  }
+
+  jpegr_uncompressed_struct decodedJpegR;
+  int decodedJpegRSize = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * 8;
+  decodedJpegR.data = malloc(decodedJpegRSize);
+  ret = jpegRCodec.decodeJPEGR(&jpegR, &decodedJpegR);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_DECODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/decoded_from_p010_yuv420p_jpeg_input.rgb";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)decodedJpegR.data, decodedJpegRSize);
+  }
+
+  free(jpegR.data);
+  free(decodedJpegR.data);
+}
+
+/* Test Encode API-3 and decode */
+TEST_F(JpegRTest, encodeFromJpegThenDecode) {
+  int ret;
+
+  // Load input files.
+  if (!loadFile(RAW_P010_IMAGE, mRawP010Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawP010Image.width = TEST_IMAGE_WIDTH;
+  mRawP010Image.height = TEST_IMAGE_HEIGHT;
+  mRawP010Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
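+  // Optionally dump the P010 input as packed RGBA1010102 so it can be inspected with a raw
+  // image viewer.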
+  if (SAVE_INPUT_RGBA) {
+    size_t rgbaSize = mRawP010Image.width * mRawP010Image.height * sizeof(uint32_t);
+    uint32_t *data = (uint32_t *)malloc(rgbaSize);
+
+    for (size_t y = 0; y < mRawP010Image.height; ++y) {
+      for (size_t x = 0; x < mRawP010Image.width; ++x) {
+        Color hdr_yuv_gamma = getP010Pixel(&mRawP010Image, x, y);
+        Color hdr_rgb_gamma = bt2100YuvToRgb(hdr_yuv_gamma);
+        uint32_t rgba1010102 = colorToRgba1010102(hdr_rgb_gamma);
+        size_t pixel_idx = x + y * mRawP010Image.width;
+        data[pixel_idx] = rgba1010102;
+      }
+    }
+
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/input_from_p010.rgb10";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)data, rgbaSize);
+    free(data);
+  }
+  if (!loadFile(JPEG_IMAGE, mJpegImage.data, &mJpegImage.length)) {
+    FAIL() << "Load file " << JPEG_IMAGE << " failed";
+  }
+  mJpegImage.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709;
+
+  JpegR jpegRCodec;
+
+  jpegr_compressed_struct jpegR;
+  jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
+  jpegR.data = malloc(jpegR.maxLength);
+  ret = jpegRCodec.encodeJPEGR(
+      &mRawP010Image, &mJpegImage, ultrahdr_transfer_function::ULTRAHDR_TF_HLG, &jpegR);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_ENCODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/encoded_from_p010_jpeg_input.jpgr";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)jpegR.data, jpegR.length);
+  }
+
+  jpegr_uncompressed_struct decodedJpegR;
+  int decodedJpegRSize = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * 8;
+  decodedJpegR.data = malloc(decodedJpegRSize);
+  ret = jpegRCodec.decodeJPEGR(&jpegR, &decodedJpegR);
+  if (ret != OK) {
+    FAIL() << "Error code is " << ret;
+  }
+  if (SAVE_DECODING_RESULT) {
+    // Output image data to file
+    std::string filePath = "/sdcard/Documents/decoded_from_p010_jpeg_input.rgb";
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+      ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+    }
+    imageFile.write((const char*)decodedJpegR.data, decodedJpegRSize);
+  }
+
+  free(jpegR.data);
+  free(decodedJpegR.data);
+}
+
+TEST_F(JpegRTest, ProfileGainMapFuncs) {
+  const size_t kWidth = TEST_IMAGE_WIDTH;
+  const size_t kHeight = TEST_IMAGE_HEIGHT;
+
+  // Load input files.
+  if (!loadFile(RAW_P010_IMAGE, mRawP010Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawP010Image.width = kWidth;
+  mRawP010Image.height = kHeight;
+  mRawP010Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100;
+
+  if (!loadFile(RAW_YUV420_IMAGE, mRawYuv420Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawYuv420Image.width = kWidth;
+  mRawYuv420Image.height = kHeight;
+  mRawYuv420Image.colorGamut = ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709;
+
+  JpegRBenchmark benchmark;
+
+  ultrahdr_metadata_struct metadata = { .version = "1.0" };
+
+  jpegr_uncompressed_struct map = { .data = NULL,
+                                    .width = 0,
+                                    .height = 0,
+                                    .colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED };
+
+  benchmark.BenchmarkGenerateGainMap(&mRawYuv420Image, &mRawP010Image, &metadata, &map);
+
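+  // Destination for the applied gain map result, 4 bytes per pixel.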
+  const int dstSize = mRawYuv420Image.width * mRawYuv420Image.height * 4;
+  auto bufferDst = std::make_unique<uint8_t[]>(dstSize);
+  jpegr_uncompressed_struct dest = { .data = bufferDst.get(),
+                                     .width = 0,
+                                     .height = 0,
+                                     .colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED };
+
+  benchmark.BenchmarkApplyGainMap(&mRawYuv420Image, &map, &metadata, &dest);
+}
+
+} // namespace android::ultrahdr
diff --git a/libs/vibrator/ExternalVibration.cpp b/libs/vibrator/ExternalVibration.cpp
index ec90645..80e911c 100644
--- a/libs/vibrator/ExternalVibration.cpp
+++ b/libs/vibrator/ExternalVibration.cpp
@@ -22,15 +22,6 @@
 #include <log/log.h>
 #include <utils/Errors.h>
 
-
-// To guarantee if HapticScale enum has the same value as IExternalVibratorService
-static_assert(static_cast<int>(android::os::HapticScale::MUTE) == static_cast<int>(android::os::IExternalVibratorService::SCALE_MUTE));
-static_assert(static_cast<int>(android::os::HapticScale::VERY_LOW) == static_cast<int>(android::os::IExternalVibratorService::SCALE_VERY_LOW));
-static_assert(static_cast<int>(android::os::HapticScale::LOW) == static_cast<int>(android::os::IExternalVibratorService::SCALE_LOW));
-static_assert(static_cast<int>(android::os::HapticScale::NONE) == static_cast<int>(android::os::IExternalVibratorService::SCALE_NONE));
-static_assert(static_cast<int>(android::os::HapticScale::HIGH) == static_cast<int>(android::os::IExternalVibratorService::SCALE_HIGH));
-static_assert(static_cast<int>(android::os::HapticScale::VERY_HIGH) == static_cast<int>(android::os::IExternalVibratorService::SCALE_VERY_HIGH));
-
 void writeAudioAttributes(const audio_attributes_t& attrs, android::Parcel* out) {
     out->writeInt32(attrs.usage);
     out->writeInt32(attrs.content_type);
@@ -74,5 +65,25 @@
     return mToken == rhs.mToken;
 }
 
+os::HapticScale ExternalVibration::externalVibrationScaleToHapticScale(int externalVibrationScale) {
+    switch (externalVibrationScale) {
+        case IExternalVibratorService::SCALE_MUTE:
+            return os::HapticScale::MUTE;
+        case IExternalVibratorService::SCALE_VERY_LOW:
+            return os::HapticScale::VERY_LOW;
+        case IExternalVibratorService::SCALE_LOW:
+            return os::HapticScale::LOW;
+        case IExternalVibratorService::SCALE_NONE:
+            return os::HapticScale::NONE;
+        case IExternalVibratorService::SCALE_HIGH:
+            return os::HapticScale::HIGH;
+        case IExternalVibratorService::SCALE_VERY_HIGH:
+            return os::HapticScale::VERY_HIGH;
+        default:
+            ALOGE("Unknown ExternalVibrationScale %d, not applying scaling", externalVibrationScale);
+            return os::HapticScale::NONE;
+    }
+}
+
 } // namespace os
 } // namespace android
diff --git a/libs/vibrator/include/vibrator/ExternalVibration.h b/libs/vibrator/include/vibrator/ExternalVibration.h
index 760dbce..00cd3cd 100644
--- a/libs/vibrator/include/vibrator/ExternalVibration.h
+++ b/libs/vibrator/include/vibrator/ExternalVibration.h
@@ -23,6 +23,7 @@
 #include <binder/Parcelable.h>
 #include <system/audio.h>
 #include <utils/RefBase.h>
+#include <vibrator/ExternalVibrationUtils.h>
 
 namespace android {
 namespace os {
@@ -44,6 +45,10 @@
     audio_attributes_t getAudioAttributes() const { return mAttrs; }
     sp<IExternalVibrationController> getController() { return mController; }
 
+    /* Converts a scale level from the non-public IExternalVibratorService into the
+     * HapticScale used by the vibrator utils.
+     */
+    static os::HapticScale externalVibrationScaleToHapticScale(int externalVibrationScale);
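+
+    /* Illustrative use only (rawServiceScale is a hypothetical value received from the
+     * service):
+     *   os::HapticScale scale =
+     *           ExternalVibration::externalVibrationScaleToHapticScale(rawServiceScale);
+     */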
 
 private:
     int32_t mUid;
@@ -53,7 +58,7 @@
     sp<IBinder> mToken = new BBinder();
 };
 
-} // namespace android
 } // namespace os
+} // namespace android
 
 #endif // ANDROID_EXTERNAL_VIBRATION_H
diff --git a/libs/vibrator/include/vibrator/ExternalVibrationUtils.h b/libs/vibrator/include/vibrator/ExternalVibrationUtils.h
index c588bfd..ca219d3 100644
--- a/libs/vibrator/include/vibrator/ExternalVibrationUtils.h
+++ b/libs/vibrator/include/vibrator/ExternalVibrationUtils.h
@@ -19,8 +19,6 @@
 
 namespace android::os {
 
-// Copied from frameworks/base/core/java/android/os/IExternalVibratorService.aidl
-// The values are checked in ExternalVibration.cpp
 enum class HapticScale {
     MUTE = -100,
     VERY_LOW = -2,