Merge "Add ASurfaceTransaction_setExtendedRangeBrightness"
diff --git a/TEST_MAPPING b/TEST_MAPPING
index f54f132..cd8f3cd 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -4,58 +4,7 @@
       "name": "SurfaceFlinger_test",
       "options": [
         {
-          "include-filter": "*CredentialsTest.*"
-        },
-        {
-          "include-filter": "*SurfaceFlingerStress.*"
-        },
-        {
-          "include-filter": "*SurfaceInterceptorTest.*"
-        },
-        {
-          "include-filter": "*LayerTransactionTest.*"
-        },
-        {
-          "include-filter": "*LayerTypeTransactionTest.*"
-        },
-        {
-          "include-filter": "*LayerUpdateTest.*"
-        },
-        {
-          "include-filter": "*GeometryLatchingTest.*"
-        },
-        {
-          "include-filter": "*CropLatchingTest.*"
-        },
-        {
-          "include-filter": "*ChildLayerTest.*"
-        },
-        {
-          "include-filter": "*ScreenCaptureTest.*"
-        },
-        {
-          "include-filter": "*ScreenCaptureChildOnlyTest.*"
-        },
-        {
-          "include-filter": "*DereferenceSurfaceControlTest.*"
-        },
-        {
-          "include-filter": "*BoundlessLayerTest.*"
-        },
-        {
-          "include-filter": "*MultiDisplayLayerBoundsTest.*"
-        },
-        {
-          "include-filter": "*InvalidHandleTest.*"
-        },
-        {
-          "include-filter": "*VirtualDisplayTest.*"
-        },
-        {
-          "include-filter": "*RelativeZTest.*"
-        },
-        {
-          "include-filter": "*RefreshRateOverlayTest.*"
+          "include-filter": "*"
         },
         {
           "exclude-filter": "*ChildLayerTest#ChildrenSurviveParentDestruction"
@@ -76,58 +25,7 @@
       "name": "SurfaceFlinger_test",
       "options": [
         {
-          "include-filter": "*CredentialsTest.*"
-        },
-        {
-          "include-filter": "*SurfaceFlingerStress.*"
-        },
-        {
-          "include-filter": "*SurfaceInterceptorTest.*"
-        },
-        {
-          "include-filter": "*LayerTransactionTest.*"
-        },
-        {
-          "include-filter": "*LayerTypeTransactionTest.*"
-        },
-        {
-          "include-filter": "*LayerUpdateTest.*"
-        },
-        {
-          "include-filter": "*GeometryLatchingTest.*"
-        },
-        {
-          "include-filter": "*CropLatchingTest.*"
-        },
-        {
-          "include-filter": "*ChildLayerTest.*"
-        },
-        {
-          "include-filter": "*ScreenCaptureTest.*"
-        },
-        {
-          "include-filter": "*ScreenCaptureChildOnlyTest.*"
-        },
-        {
-          "include-filter": "*DereferenceSurfaceControlTest.*"
-        },
-        {
-          "include-filter": "*BoundlessLayerTest.*"
-        },
-        {
-          "include-filter": "*MultiDisplayLayerBoundsTest.*"
-        },
-        {
-          "include-filter": "*InvalidHandleTest.*"
-        },
-        {
-          "include-filter": "*VirtualDisplayTest.*"
-        },
-        {
-          "include-filter": "*RelativeZTest.*"
-        },
-        {
-          "include-filter": "*RefreshRateOverlayTest.*"
+          "include-filter": "*"
         }
       ]
     }
diff --git a/cmds/dumpstate/dumpstate.cpp b/cmds/dumpstate/dumpstate.cpp
index ecafcfc..a48313a 100644
--- a/cmds/dumpstate/dumpstate.cpp
+++ b/cmds/dumpstate/dumpstate.cpp
@@ -186,6 +186,7 @@
 #define SYSTEM_TRACE_SNAPSHOT "/data/misc/perfetto-traces/bugreport/systrace.pftrace"
 #define CGROUPFS_DIR "/sys/fs/cgroup"
 #define SDK_EXT_INFO "/apex/com.android.sdkext/bin/derive_sdk"
+#define DROPBOX_DIR "/data/system/dropbox"
 
 // TODO(narayan): Since this information has to be kept in sync
 // with tombstoned, we should just put it in a common header.
@@ -526,6 +527,15 @@
     return strcmp(path + len - sizeof(stat) + 1, stat); /* .../stat? */
 }
 
+static bool skip_wtf_strictmode(const char *path) {
+    if (strstr(path, "_wtf")) {
+        return true;
+    } else if (strstr(path, "_strictmode")) {
+        return true;
+    }
+    return false;
+}
+
 static bool skip_none(const char* path __attribute__((unused))) {
     return false;
 }
@@ -1895,6 +1905,11 @@
     DumpIpTablesAsRoot();
     DumpDynamicPartitionInfo();
     ds.AddDir(OTA_METADATA_DIR, true);
+    if (!PropertiesHelper::IsUserBuild()) {
+        // Include dropbox entry files inside ZIP, but exclude
+        // noisy WTF and StrictMode entries
+        dump_files("", DROPBOX_DIR, skip_wtf_strictmode, _add_file_from_fd);
+    }
 
     // Capture any IPSec policies in play. No keys are exposed here.
     RunCommand("IP XFRM POLICY", {"ip", "xfrm", "policy"}, CommandOptions::WithTimeout(10).Build());
diff --git a/cmds/dumpstate/dumpstate.rc b/cmds/dumpstate/dumpstate.rc
index 12a7cff..a80da4e 100644
--- a/cmds/dumpstate/dumpstate.rc
+++ b/cmds/dumpstate/dumpstate.rc
@@ -8,7 +8,6 @@
     socket dumpstate stream 0660 shell log
     disabled
     oneshot
-    capabilities CHOWN DAC_OVERRIDE DAC_READ_SEARCH FOWNER FSETID KILL NET_ADMIN NET_RAW SETGID SETUID SYS_PTRACE SYS_RESOURCE BLOCK_SUSPEND SYSLOG
 
 # dumpstatez generates a zipped bugreport but also uses a socket to print the file location once
 # it is finished.
@@ -17,11 +16,9 @@
     class main
     disabled
     oneshot
-    capabilities CHOWN DAC_OVERRIDE DAC_READ_SEARCH FOWNER FSETID KILL NET_ADMIN NET_RAW SETGID SETUID SYS_PTRACE SYS_RESOURCE BLOCK_SUSPEND SYSLOG
 
 # bugreportd starts dumpstate binder service and makes it wait for a listener to connect.
 service bugreportd /system/bin/dumpstate -w
     class main
     disabled
     oneshot
-    capabilities CHOWN DAC_OVERRIDE DAC_READ_SEARCH FOWNER FSETID KILL NET_ADMIN NET_RAW SETGID SETUID SYS_PTRACE SYS_RESOURCE BLOCK_SUSPEND SYSLOG
diff --git a/cmds/dumpstate/tests/dumpstate_test.cpp b/cmds/dumpstate/tests/dumpstate_test.cpp
index 87f9254..aa5219b 100644
--- a/cmds/dumpstate/tests/dumpstate_test.cpp
+++ b/cmds/dumpstate/tests/dumpstate_test.cpp
@@ -1051,7 +1051,8 @@
 };
 
 // Generate a quick LimitedOnly report redirected to a file, open it, and verify entries exist.
-TEST_F(ZippedBugReportStreamTest, StreamLimitedOnlyReport) {
+// TODO: broken test tracked in b/249983726
+TEST_F(ZippedBugReportStreamTest, DISABLED_StreamLimitedOnlyReport) {
     std::string out_path = kTestDataPath + "StreamLimitedOnlyReportOut.zip";
     android::base::unique_fd out_fd;
     CreateFd(out_path, &out_fd);
diff --git a/cmds/servicemanager/ServiceManager.cpp b/cmds/servicemanager/ServiceManager.cpp
index 91bcb8d..07809e2 100644
--- a/cmds/servicemanager/ServiceManager.cpp
+++ b/cmds/servicemanager/ServiceManager.cpp
@@ -227,6 +227,13 @@
 }
 #endif  // !VENDORSERVICEMANAGER
 
+ServiceManager::Service::~Service() {
+    if (hasClients) {
+        // only expected to happen on process death
+        LOG(WARNING) << "a service was removed when there are clients";
+    }
+}
+
 ServiceManager::ServiceManager(std::unique_ptr<Access>&& access) : mAccess(std::move(access)) {
 // TODO(b/151696835): reenable performance hack when we solve bug, since with
 //     this hack and other fixes, it is unlikely we will see even an ephemeral
@@ -293,8 +300,13 @@
     }
 
     if (out) {
-        // Setting this guarantee each time we hand out a binder ensures that the client-checking
-        // loop knows about the event even if the client immediately drops the service
+        // Force onClients to get sent, and then make sure the timerfd won't clear it
+        // by setting guaranteeClient again. This logic could be simplified by using
+        // a time-based guarantee. However, forcing onClients(true) to get sent
+        // right here is always going to be important for processes serving multiple
+        // lazy interfaces.
+        service->guaranteeClient = true;
+        CHECK(handleServiceClientCallback(2 /* sm + transaction */, name, false));
         service->guaranteeClient = true;
     }
 
@@ -384,8 +396,13 @@
     };
 
     if (auto it = mNameToRegistrationCallback.find(name); it != mNameToRegistrationCallback.end()) {
+        // See also getService - handles case where client never gets the service,
+        // we want the service to quit.
+        mNameToService[name].guaranteeClient = true;
+        CHECK(handleServiceClientCallback(2 /* sm + transaction */, name, false));
+        mNameToService[name].guaranteeClient = true;
+
         for (const sp<IServiceCallback>& cb : it->second) {
-            mNameToService[name].guaranteeClient = true;
             // permission checked in registerForNotifications
             cb->onRegistration(name, binder);
         }
@@ -706,28 +723,28 @@
 
 void ServiceManager::handleClientCallbacks() {
     for (const auto& [name, service] : mNameToService) {
-        handleServiceClientCallback(name, true);
+        handleServiceClientCallback(1 /* sm has one refcount */, name, true);
     }
 }
 
-ssize_t ServiceManager::handleServiceClientCallback(const std::string& serviceName,
-                                                    bool isCalledOnInterval) {
+bool ServiceManager::handleServiceClientCallback(size_t knownClients,
+                                                 const std::string& serviceName,
+                                                 bool isCalledOnInterval) {
     auto serviceIt = mNameToService.find(serviceName);
     if (serviceIt == mNameToService.end() || mNameToClientCallback.count(serviceName) < 1) {
-        return -1;
+        return true; // report that we have clients, i.e. don't do anything
     }
 
     Service& service = serviceIt->second;
     ssize_t count = service.getNodeStrongRefCount();
 
-    // binder driver doesn't support this feature
-    if (count == -1) return count;
+    // binder driver doesn't support this feature, consider we have clients
+    if (count == -1) return true;
 
-    bool hasClients = count > 1; // this process holds a strong count
+    bool hasKernelReportedClients = static_cast<size_t>(count) > knownClients;
 
     if (service.guaranteeClient) {
-        // we have no record of this client
-        if (!service.hasClients && !hasClients) {
+        if (!service.hasClients && !hasKernelReportedClients) {
             sendClientCallbackNotifications(serviceName, true,
                                             "service is guaranteed to be in use");
         }
@@ -736,21 +753,23 @@
         service.guaranteeClient = false;
     }
 
-    // only send notifications if this was called via the interval checking workflow
-    if (isCalledOnInterval) {
-        if (hasClients && !service.hasClients) {
-            // client was retrieved in some other way
-            sendClientCallbackNotifications(serviceName, true, "we now have a record of a client");
-        }
+    // Regardless of this situation, we want to give this notification as soon as possible.
+    // This way, we have a chance of preventing further thrashing.
+    if (hasKernelReportedClients && !service.hasClients) {
+        sendClientCallbackNotifications(serviceName, true, "we now have a record of a client");
+    }
 
-        // there are no more clients, but the callback has not been called yet
-        if (!hasClients && service.hasClients) {
+    // But limit rate of shutting down service.
+    if (isCalledOnInterval) {
+        if (!hasKernelReportedClients && service.hasClients) {
             sendClientCallbackNotifications(serviceName, false,
                                             "we now have no record of a client");
         }
     }
 
-    return count;
+    // May be different than 'hasKernelReportedClients'. We intentionally delay
+    // information about clients going away to reduce thrashing.
+    return service.hasClients;
 }
 
 void ServiceManager::sendClientCallbackNotifications(const std::string& serviceName,
@@ -763,13 +782,10 @@
     }
     Service& service = serviceIt->second;
 
-    CHECK(hasClients != service.hasClients)
-            << "Record shows: " << service.hasClients
-            << " so we can't tell clients again that we have client: " << hasClients
-            << " when: " << context;
+    CHECK_NE(hasClients, service.hasClients) << context;
 
-    ALOGI("Notifying %s they %s have clients when %s", serviceName.c_str(),
-          hasClients ? "do" : "don't", context);
+    ALOGI("Notifying %s they %s (previously: %s) have clients when %s", serviceName.c_str(),
+          hasClients ? "do" : "don't", service.hasClients ? "do" : "don't", context);
 
     auto ccIt = mNameToClientCallback.find(serviceName);
     CHECK(ccIt != mNameToClientCallback.end())
@@ -813,26 +829,29 @@
         return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
     }
 
+    // Important: since we don't have timer-based guarantees, we don't want to clear
+    // this flag here.
     if (serviceIt->second.guaranteeClient) {
         ALOGI("Tried to unregister %s, but there is about to be a client.", name.c_str());
         return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
     }
 
-    int clients = handleServiceClientCallback(name, false);
-
-    // clients < 0: feature not implemented or other error. Assume clients.
-    // Otherwise:
     // - kernel driver will hold onto one refcount (during this transaction)
     // - servicemanager has a refcount (guaranteed by this transaction)
-    // So, if clients > 2, then at least one other service on the system must hold a refcount.
-    if (clients < 0 || clients > 2) {
-        // client callbacks are either disabled or there are other clients
-        ALOGI("Tried to unregister %s, but there are clients: %d", name.c_str(), clients);
-        // Set this flag to ensure the clients are acknowledged in the next callback
+    constexpr size_t kKnownClients = 2;
+
+    if (handleServiceClientCallback(kKnownClients, name, false)) {
+        ALOGI("Tried to unregister %s, but there are clients.", name.c_str());
+
+        // Since we had a failed unregistration attempt, and the HIDL implementation of
+        // delaying service shutdown for multiple periods wasn't ported here... this may
+        // help reduce thrashing, but we should be able to remove it.
         serviceIt->second.guaranteeClient = true;
+
         return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
     }
 
+    ALOGI("Unregistering %s", name.c_str());
     mNameToService.erase(name);
 
     return Status::ok();
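handleServiceClientCallback() now takes the number of clients the caller can already account for and compares it against the strong refcount reported by the binder driver. A hedged sketch of just that comparison (helper name and numbers are illustrative):

    #include <cstddef>
    #include <sys/types.h>

    // knownClients is what the caller can explain away: 1 when called from the
    // periodic check (servicemanager's own ref), 2 when called during a
    // transaction (servicemanager + the in-flight transaction).
    static bool hasKernelReportedClients(ssize_t driverCount, size_t knownClients) {
        if (driverCount == -1) return true;  // driver can't report; assume clients exist
        return static_cast<size_t>(driverCount) > knownClients;
    }

    // e.g. in tryUnregister: driverCount == 2 with knownClients == 2 means only
    // servicemanager and the current transaction hold references, so the lazy
    // service is allowed to shut down.
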
diff --git a/cmds/servicemanager/ServiceManager.h b/cmds/servicemanager/ServiceManager.h
index f9d4f8f..3aa6731 100644
--- a/cmds/servicemanager/ServiceManager.h
+++ b/cmds/servicemanager/ServiceManager.h
@@ -80,6 +80,8 @@
 
         // the number of clients of the service, including servicemanager itself
         ssize_t getNodeStrongRefCount();
+
+        ~Service();
     };
 
     using ServiceCallbackMap = std::map<std::string, std::vector<sp<IServiceCallback>>>;
@@ -91,7 +93,9 @@
     void removeRegistrationCallback(const wp<IBinder>& who,
                         ServiceCallbackMap::iterator* it,
                         bool* found);
-    ssize_t handleServiceClientCallback(const std::string& serviceName, bool isCalledOnInterval);
+    // returns whether there are known clients in addition to the count provided
+    bool handleServiceClientCallback(size_t knownClients, const std::string& serviceName,
+                                     bool isCalledOnInterval);
     // Also updates mHasClients (of what the last callback was)
     void sendClientCallbackNotifications(const std::string& serviceName, bool hasClients,
                                          const char* context);
diff --git a/cmds/servicemanager/servicemanager.rc b/cmds/servicemanager/servicemanager.rc
index 3bd6db5..4f92b3a 100644
--- a/cmds/servicemanager/servicemanager.rc
+++ b/cmds/servicemanager/servicemanager.rc
@@ -5,7 +5,7 @@
     critical
     file /dev/kmsg w
     onrestart setprop servicemanager.ready false
-    onrestart restart apexd
+    onrestart restart --only-if-running apexd
     onrestart restart audioserver
     onrestart restart gatekeeperd
     onrestart class_restart --only-enabled main
diff --git a/include/input/MotionPredictor.h b/include/input/MotionPredictor.h
index 3fae4e6..68ebf75 100644
--- a/include/input/MotionPredictor.h
+++ b/include/input/MotionPredictor.h
@@ -19,6 +19,7 @@
 #include <cstdint>
 #include <memory>
 #include <mutex>
+#include <string>
 #include <unordered_map>
 
 #include <android-base/thread_annotations.h>
@@ -73,6 +74,7 @@
 
 private:
     const nsecs_t mPredictionTimestampOffsetNanos;
+    const std::string mModelPath;
     const std::function<bool()> mCheckMotionPredictionEnabled;
 
     std::unique_ptr<TfLiteMotionPredictorModel> mModel;
diff --git a/include/input/RingBuffer.h b/include/input/RingBuffer.h
new file mode 100644
index 0000000..67984b7
--- /dev/null
+++ b/include/input/RingBuffer.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <compare>
+#include <cstddef>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include <android-base/logging.h>
+#include <android-base/stringprintf.h>
+
+namespace android {
+
+// A fixed-size ring buffer of elements.
+//
+// Elements can only be removed from the front/back or added to the front/back, but with O(1)
+// performance. Elements from the opposing side are evicted when new elements are pushed onto a full
+// buffer.
+template <typename T>
+class RingBuffer {
+public:
+    using value_type = T;
+    using size_type = size_t;
+    using difference_type = ptrdiff_t;
+    using reference = value_type&;
+    using const_reference = const value_type&;
+    using pointer = value_type*;
+    using const_pointer = const value_type*;
+
+    template <typename U>
+    class Iterator;
+    using iterator = Iterator<T>;
+    using const_iterator = Iterator<const T>;
+
+    // Creates an empty ring buffer that can hold up to `capacity` elements.
+    explicit RingBuffer(size_type capacity)
+          : mBuffer(std::allocator<value_type>().allocate(capacity)), mCapacity(capacity) {}
+
+    // Creates a full ring buffer holding a fixed number of elements initialised to some value.
+    explicit RingBuffer(size_type count, const_reference value) : RingBuffer(count) {
+        while (count) {
+            pushBack(value);
+            --count;
+        }
+    }
+
+    RingBuffer(const RingBuffer& other) : RingBuffer(other.capacity()) {
+        for (const auto& element : other) {
+            pushBack(element);
+        }
+    }
+
+    RingBuffer(RingBuffer&& other) noexcept { *this = std::move(other); }
+
+    ~RingBuffer() {
+        if (mBuffer) {
+            clear();
+            std::allocator<value_type>().deallocate(mBuffer, mCapacity);
+        }
+    }
+
+    RingBuffer& operator=(const RingBuffer& other) { return *this = RingBuffer(other); }
+
+    RingBuffer& operator=(RingBuffer&& other) noexcept {
+        if (this == &other) {
+            return *this;
+        }
+        if (mBuffer) {
+            clear();
+            std::allocator<value_type>().deallocate(mBuffer, mCapacity);
+        }
+        mBuffer = std::move(other.mBuffer);
+        mCapacity = other.mCapacity;
+        mBegin = other.mBegin;
+        mSize = other.mSize;
+        other.mBuffer = nullptr;
+        other.mCapacity = 0;
+        other.mBegin = 0;
+        other.mSize = 0;
+        return *this;
+    }
+
+    iterator begin() { return {*this, 0}; }
+    const_iterator begin() const { return {*this, 0}; }
+    iterator end() { return {*this, mSize}; }
+    const_iterator end() const { return {*this, mSize}; }
+
+    reference operator[](size_type i) { return mBuffer[bufferIndex(i)]; }
+    const_reference operator[](size_type i) const { return mBuffer[bufferIndex(i)]; }
+
+    // Removes all elements from the buffer.
+    void clear() {
+        std::destroy(begin(), end());
+        mSize = 0;
+    }
+
+    // Removes and returns the first element from the buffer.
+    value_type popFront() {
+        value_type element = mBuffer[mBegin];
+        std::destroy_at(std::addressof(mBuffer[mBegin]));
+        mBegin = next(mBegin);
+        --mSize;
+        return element;
+    }
+
+    // Removes and returns the last element from the buffer.
+    value_type popBack() {
+        size_type backIndex = bufferIndex(mSize - 1);
+        value_type element = mBuffer[backIndex];
+        std::destroy_at(std::addressof(mBuffer[backIndex]));
+        --mSize;
+        return element;
+    }
+
+    // Adds an element to the front of the buffer.
+    void pushFront(const value_type& element) { pushFront(value_type(element)); }
+    void pushFront(value_type&& element) {
+        mBegin = previous(mBegin);
+        if (size() == capacity()) {
+            mBuffer[mBegin] = std::forward<value_type>(element);
+        } else {
+            // The space at mBuffer[mBegin] is uninitialised.
+            // TODO: Use std::construct_at when it becomes available.
+            new (std::addressof(mBuffer[mBegin])) value_type(std::forward<value_type>(element));
+            ++mSize;
+        }
+    }
+
+    // Adds an element to the back of the buffer.
+    void pushBack(const value_type& element) { pushBack(value_type(element)); }
+    void pushBack(value_type&& element) {
+        if (size() == capacity()) {
+            mBuffer[mBegin] = std::forward<value_type>(element);
+            mBegin = next(mBegin);
+        } else {
+            // The space at mBuffer[...] is uninitialised.
+            // TODO: Use std::construct_at when it becomes available.
+            new (std::addressof(mBuffer[bufferIndex(mSize)]))
+                    value_type(std::forward<value_type>(element));
+            ++mSize;
+        }
+    }
+
+    bool empty() const { return mSize == 0; }
+    size_type capacity() const { return mCapacity; }
+    size_type size() const { return mSize; }
+
+    void swap(RingBuffer& other) noexcept {
+        using std::swap;
+        swap(mBuffer, other.mBuffer);
+        swap(mCapacity, other.mCapacity);
+        swap(mBegin, other.mBegin);
+        swap(mSize, other.mSize);
+    }
+
+    friend void swap(RingBuffer& lhs, RingBuffer& rhs) noexcept { lhs.swap(rhs); }
+
+    template <typename U>
+    class Iterator {
+    private:
+        using ContainerType = std::conditional_t<std::is_const_v<U>, const RingBuffer, RingBuffer>;
+
+    public:
+        using iterator_category = std::random_access_iterator_tag;
+        using size_type = ContainerType::size_type;
+        using difference_type = ContainerType::difference_type;
+        using value_type = std::remove_cv_t<U>;
+        using pointer = U*;
+        using reference = U&;
+
+        Iterator(ContainerType& container, size_type index)
+              : mContainer(container), mIndex(index) {}
+
+        Iterator(const Iterator&) = default;
+        Iterator& operator=(const Iterator&) = default;
+
+        Iterator& operator++() {
+            ++mIndex;
+            return *this;
+        }
+
+        Iterator operator++(int) {
+            Iterator iterator(*this);
+            ++(*this);
+            return iterator;
+        }
+
+        Iterator& operator--() {
+            --mIndex;
+            return *this;
+        }
+
+        Iterator operator--(int) {
+            Iterator iterator(*this);
+            --(*this);
+            return iterator;
+        }
+
+        Iterator& operator+=(difference_type n) {
+            mIndex += n;
+            return *this;
+        }
+
+        Iterator operator+(difference_type n) {
+            Iterator iterator(*this);
+            return iterator += n;
+        }
+
+        Iterator& operator-=(difference_type n) { return *this += -n; }
+
+        Iterator operator-(difference_type n) {
+            Iterator iterator(*this);
+            return iterator -= n;
+        }
+
+        difference_type operator-(const Iterator& other) { return mIndex - other.mIndex; }
+
+        bool operator==(const Iterator& rhs) const { return mIndex == rhs.mIndex; }
+
+        bool operator!=(const Iterator& rhs) const { return !(*this == rhs); }
+
+        friend auto operator<=>(const Iterator& lhs, const Iterator& rhs) {
+            return lhs.mIndex <=> rhs.mIndex;
+        }
+
+        reference operator[](difference_type n) { return *(*this + n); }
+
+        reference operator*() const { return mContainer[mIndex]; }
+        pointer operator->() const { return std::addressof(mContainer[mIndex]); }
+
+    private:
+        ContainerType& mContainer;
+        size_type mIndex = 0;
+    };
+
+private:
+    // Returns the index of the next element in mBuffer.
+    size_type next(size_type index) const {
+        if (index == capacity() - 1) {
+            return 0;
+        } else {
+            return index + 1;
+        }
+    }
+
+    // Returns the index of the previous element in mBuffer.
+    size_type previous(size_type index) const {
+        if (index == 0) {
+            return capacity() - 1;
+        } else {
+            return index - 1;
+        }
+    }
+
+    // Converts the index of an element in [0, size()] to its corresponding index in mBuffer.
+    size_type bufferIndex(size_type elementIndex) const {
+        CHECK_LE(elementIndex, size());
+        size_type index = mBegin + elementIndex;
+        if (index >= capacity()) {
+            index -= capacity();
+        }
+        CHECK_LT(index, capacity())
+                << android::base::StringPrintf("Invalid index calculated for element (%zu) "
+                                               "in buffer of size %zu",
+                                               elementIndex, size());
+        return index;
+    }
+
+    pointer mBuffer = nullptr;
+    size_type mCapacity = 0; // Total capacity of mBuffer.
+    size_type mBegin = 0;    // Index of the first initialised element in mBuffer.
+    size_type mSize = 0;     // Total number of initialised elements.
+};
+
+} // namespace android
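As a usage sketch, pushing onto a full RingBuffer evicts the element at the opposite end, which is presumably why the TfLite motion predictor input buffers below switch from std::vector to RingBuffer to keep only the most recent samples:

    #include <input/RingBuffer.h>

    void ringBufferExample() {
        android::RingBuffer<float> samples(/*capacity=*/3);
        samples.pushBack(1.0f);
        samples.pushBack(2.0f);
        samples.pushBack(3.0f);              // full: {1, 2, 3}
        samples.pushBack(4.0f);              // evicts the front: {2, 3, 4}
        float oldest = samples.popFront();   // oldest == 2, leaving {3, 4}
        (void)oldest;
    }
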
diff --git a/include/input/TfLiteMotionPredictor.h b/include/input/TfLiteMotionPredictor.h
index ff0f51c..54e2851 100644
--- a/include/input/TfLiteMotionPredictor.h
+++ b/include/input/TfLiteMotionPredictor.h
@@ -22,8 +22,9 @@
 #include <memory>
 #include <optional>
 #include <span>
-#include <string>
-#include <vector>
+
+#include <android-base/mapped_file.h>
+#include <input/RingBuffer.h>
 
 #include <tensorflow/lite/core/api/error_reporter.h>
 #include <tensorflow/lite/interpreter.h>
@@ -83,11 +84,11 @@
 private:
     int64_t mTimestamp = 0;
 
-    std::vector<float> mInputR;
-    std::vector<float> mInputPhi;
-    std::vector<float> mInputPressure;
-    std::vector<float> mInputTilt;
-    std::vector<float> mInputOrientation;
+    RingBuffer<float> mInputR;
+    RingBuffer<float> mInputPhi;
+    RingBuffer<float> mInputPressure;
+    RingBuffer<float> mInputTilt;
+    RingBuffer<float> mInputOrientation;
 
     // The samples defining the current polar axis.
     std::optional<TfLiteMotionPredictorSample> mAxisFrom;
@@ -100,6 +101,8 @@
     // Creates a model from an encoded Flatbuffer model.
     static std::unique_ptr<TfLiteMotionPredictorModel> create(const char* modelPath);
 
+    ~TfLiteMotionPredictorModel();
+
     // Returns the length of the model's input buffers.
     size_t inputLength() const;
 
@@ -121,7 +124,7 @@
     std::span<const float> outputPressure() const;
 
 private:
-    explicit TfLiteMotionPredictorModel(std::string model);
+    explicit TfLiteMotionPredictorModel(std::unique_ptr<android::base::MappedFile> model);
 
     void allocateTensors();
     void attachInputTensors();
@@ -137,7 +140,7 @@
     const TfLiteTensor* mOutputPhi = nullptr;
     const TfLiteTensor* mOutputPressure = nullptr;
 
-    std::string mFlatBuffer;
+    std::unique_ptr<android::base::MappedFile> mFlatBuffer;
     std::unique_ptr<tflite::ErrorReporter> mErrorReporter;
     std::unique_ptr<tflite::FlatBufferModel> mModel;
     std::unique_ptr<tflite::Interpreter> mInterpreter;
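The model flatbuffer is now held as a memory-mapped file rather than an owned std::string. A hedged sketch of mapping a model read-only with android::base::MappedFile (path handling and error reporting are illustrative, not the actual create() implementation):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>

    #include <memory>

    #include <android-base/mapped_file.h>
    #include <android-base/unique_fd.h>

    // Illustrative only: map a TFLite model file read-only so the flatbuffer
    // is backed by the page cache instead of a heap copy.
    std::unique_ptr<android::base::MappedFile> mapModel(const char* modelPath) {
        android::base::unique_fd fd(open(modelPath, O_RDONLY | O_CLOEXEC));
        if (fd.get() < 0) return nullptr;
        struct stat st = {};
        if (fstat(fd.get(), &st) < 0) return nullptr;
        return android::base::MappedFile::FromFd(fd.get(), /*offset=*/0,
                                                 static_cast<size_t>(st.st_size), PROT_READ);
    }
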
diff --git a/include/private/performance_hint_private.h b/include/private/performance_hint_private.h
index d50c5f8..eaf3b5e 100644
--- a/include/private/performance_hint_private.h
+++ b/include/private/performance_hint_private.h
@@ -17,8 +17,6 @@
 #ifndef ANDROID_PRIVATE_NATIVE_PERFORMANCE_HINT_PRIVATE_H
 #define ANDROID_PRIVATE_NATIVE_PERFORMANCE_HINT_PRIVATE_H
 
-#include <stdint.h>
-
 __BEGIN_DECLS
 
 /**
@@ -29,7 +27,7 @@
 /**
  * Hints for the session used to signal upcoming changes in the mode or workload.
  */
-enum SessionHint: int32_t {
+enum SessionHint {
     /**
      * This hint indicates a sudden increase in CPU workload intensity. It means
      * that this hint session needs extra CPU resources immediately to meet the
@@ -63,7 +61,7 @@
  * @return 0 on success
  *         EPIPE if communication with the system service has failed.
  */
-int APerformanceHint_sendHint(void* session, SessionHint hint);
+int APerformanceHint_sendHint(void* session, int hint);
 
 /**
  * Return the list of thread ids, this API should only be used for testing only.
diff --git a/include/private/surface_control_private.h b/include/private/surface_control_private.h
index 7e6c515..138926e 100644
--- a/include/private/surface_control_private.h
+++ b/include/private/surface_control_private.h
@@ -19,6 +19,8 @@
 
 #include <stdint.h>
 
+#include <android/choreographer.h>
+
 __BEGIN_DECLS
 
 struct ASurfaceControl;
@@ -56,6 +58,13 @@
                                        ASurfaceControl_SurfaceStatsListener func);
 
 /**
+ * Gets the attached AChoreographer instance from the given \c surfaceControl. If there is no
+ * choreographer associated with the surface control, then a new instance of choreographer is
+ * created. The new choreographer is associated with the current thread's Looper.
+ */
+AChoreographer* ASurfaceControl_getChoreographer(ASurfaceControl* surfaceControl);
+
+/**
  * Returns the timestamp of when the buffer was acquired for a specific frame with frame number
  * obtained from ASurfaceControlStats_getFrameNumber.
  */
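A hedged usage sketch for the new getter, assuming a valid ASurfaceControl on a thread that already has a Looper (the callback body is illustrative):

    #include <android/choreographer.h>
    #include <private/surface_control_private.h>

    // Illustrative only: fetch (or lazily create) the surface control's
    // choreographer and ask for the next frame callback on this thread.
    static void onVsync(int64_t frameTimeNanos, void* /*data*/) {
        (void)frameTimeNanos;  // schedule rendering work for this frame time
    }

    void requestNextFrame(ASurfaceControl* surfaceControl) {
        AChoreographer* choreographer = ASurfaceControl_getChoreographer(surfaceControl);
        AChoreographer_postFrameCallback64(choreographer, onVsync, /*data=*/nullptr);
    }
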
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index 661f70c..c4c8ffb 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -215,6 +215,8 @@
 
 cc_defaults {
     name: "trusty_mock_defaults",
+    vendor_available: true,
+    host_supported: true,
 
     header_libs: [
         "trusty_mock_headers",
@@ -516,6 +518,10 @@
             enabled: false,
         },
     },
+    visibility: [
+        ":__subpackages__",
+        "//system/tools/aidl:__subpackages__",
+    ],
 }
 
 // TODO(b/184872979): remove once the Rust API is created.
diff --git a/libs/binder/BpBinder.cpp b/libs/binder/BpBinder.cpp
index d03326e..53852d8 100644
--- a/libs/binder/BpBinder.cpp
+++ b/libs/binder/BpBinder.cpp
@@ -388,7 +388,8 @@
 {
     if (isRpcBinder()) {
         if (rpcSession()->getMaxIncomingThreads() < 1) {
-            ALOGE("Cannot register a DeathRecipient without any incoming connections.");
+            ALOGE("Cannot register a DeathRecipient without any incoming threads. Need to set max "
+                  "incoming threads to a value greater than 0 before calling linkToDeath.");
             return INVALID_OPERATION;
         }
     } else if constexpr (!kEnableKernelIpc) {
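The clearer message reflects a real requirement: for RPC binders, death recipients need the session to accept incoming threads so the remote end can notify this process. A minimal hedged sketch of configuring a session accordingly (connection setup elided):

    #include <binder/RpcSession.h>

    // Illustrative only: linkToDeath on an RPC binder requires max incoming
    // threads > 0, configured before the session connects.
    android::sp<android::RpcSession> makeSessionSupportingDeathRecipients() {
        auto session = android::RpcSession::make();
        session->setMaxIncomingThreads(1);
        // ... then setupUnixDomainClient()/setupPreconnectedClient(), etc. ...
        return session;
    }
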
diff --git a/libs/binder/RpcState.cpp b/libs/binder/RpcState.cpp
index 1ea13f9..b27f102 100644
--- a/libs/binder/RpcState.cpp
+++ b/libs/binder/RpcState.cpp
@@ -1036,8 +1036,8 @@
                 return DEAD_OBJECT;
             }
 
-            if (it->second.asyncTodo.size() != 0 &&
-                it->second.asyncTodo.top().asyncNumber == it->second.asyncNumber) {
+            if (it->second.asyncTodo.size() == 0) return OK;
+            if (it->second.asyncTodo.top().asyncNumber == it->second.asyncNumber) {
                 LOG_RPC_DETAIL("Found next async transaction %" PRIu64 " on %" PRIu64,
                                it->second.asyncNumber, addr);
 
diff --git a/libs/binder/TEST_MAPPING b/libs/binder/TEST_MAPPING
index 2615876..1488400 100644
--- a/libs/binder/TEST_MAPPING
+++ b/libs/binder/TEST_MAPPING
@@ -89,6 +89,9 @@
       "name": "CtsRootRollbackManagerHostTestCases"
     },
     {
+      "name": "StagedRollbackTest"
+    },
+    {
       "name": "binderRpcTestNoKernel"
     },
     {
diff --git a/libs/binder/ndk/include_platform/android/binder_manager.h b/libs/binder/ndk/include_platform/android/binder_manager.h
index ad4188f..86d5ed2 100644
--- a/libs/binder/ndk/include_platform/android/binder_manager.h
+++ b/libs/binder/ndk/include_platform/android/binder_manager.h
@@ -38,6 +38,22 @@
         AIBinder* binder, const char* instance) __INTRODUCED_IN(29);
 
 /**
+ * This registers the service with the default service manager under this instance name. This does
+ * not take ownership of binder.
+ *
+ * WARNING: when using this API across an APEX boundary, do not use with unstable
+ * AIDL services. TODO(b/139325195)
+ *
+ * \param binder object to register globally with the service manager.
+ * \param instance identifier of the service. This will be used to lookup the service.
+ * \param allowIsolated whether the service may be accessed by isolated processes.
+ *
+ * \return EX_NONE on success.
+ */
+__attribute__((warn_unused_result)) binder_exception_t AServiceManager_addServiceWithAllowIsolated(
+        AIBinder* binder, const char* instance, bool allowIsolated) __INTRODUCED_IN(34);
+
+/**
  * Gets a binder object with this specific instance name. Will return nullptr immediately if the
 * service is not available. This also implicitly calls AIBinder_incStrong (so the caller of this
  * function is responsible for calling AIBinder_decStrong).
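A hedged usage sketch for the new registration entry point, assuming an AIBinder that was already created for the service (the instance name is illustrative):

    #include <android/binder_manager.h>

    // Illustrative only: register a service so isolated processes may also reach it.
    binder_exception_t registerWithIsolatedAccess(AIBinder* binder) {
        return AServiceManager_addServiceWithAllowIsolated(
                binder, "android.example.IDemo/default", /*allowIsolated=*/true);
    }
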
diff --git a/libs/binder/ndk/libbinder_ndk.map.txt b/libs/binder/ndk/libbinder_ndk.map.txt
index 54e4628..5f2f617 100644
--- a/libs/binder/ndk/libbinder_ndk.map.txt
+++ b/libs/binder/ndk/libbinder_ndk.map.txt
@@ -163,6 +163,7 @@
 LIBBINDER_NDK_PLATFORM {
   global:
     AParcel_getAllowFds;
+    AServiceManager_addServiceWithAllowIsolated;
     extern "C++" {
         AIBinder_fromPlatformBinder*;
         AIBinder_toPlatformBinder*;
diff --git a/libs/binder/ndk/service_manager.cpp b/libs/binder/ndk/service_manager.cpp
index e107c83..2763ddb 100644
--- a/libs/binder/ndk/service_manager.cpp
+++ b/libs/binder/ndk/service_manager.cpp
@@ -41,6 +41,19 @@
     status_t exception = sm->addService(String16(instance), binder->getBinder());
     return PruneException(exception);
 }
+
+binder_exception_t AServiceManager_addServiceWithAllowIsolated(AIBinder* binder,
+                                                               const char* instance,
+                                                               bool allowIsolated) {
+    if (binder == nullptr || instance == nullptr) {
+        return EX_ILLEGAL_ARGUMENT;
+    }
+
+    sp<IServiceManager> sm = defaultServiceManager();
+    status_t exception = sm->addService(String16(instance), binder->getBinder(), allowIsolated);
+    return PruneException(exception);
+}
+
 AIBinder* AServiceManager_checkService(const char* instance) {
     if (instance == nullptr) {
         return nullptr;
diff --git a/libs/binder/rust/rpcbinder/Android.bp b/libs/binder/rust/rpcbinder/Android.bp
index afb73e9..38dd4fe 100644
--- a/libs/binder/rust/rpcbinder/Android.bp
+++ b/libs/binder/rust/rpcbinder/Android.bp
@@ -23,7 +23,13 @@
         "liblibc",
         "liblog_rust",
     ],
+    visibility: [
+        "//device/google/cuttlefish/shared/minidroid/sample",
+        "//packages/modules/Uwb",
+        "//packages/modules/Virtualization:__subpackages__",
+    ],
     apex_available: [
+        "//apex_available:platform",
         "com.android.compos",
         "com.android.uwb",
         "com.android.virt",
@@ -51,6 +57,7 @@
         "libutils",
     ],
     apex_available: [
+        "//apex_available:platform",
         "com.android.compos",
         "com.android.uwb",
         "com.android.virt",
@@ -84,6 +91,7 @@
         "libutils",
     ],
     apex_available: [
+        "//apex_available:platform",
         "com.android.compos",
         "com.android.uwb",
         "com.android.virt",
diff --git a/libs/binder/tests/Android.bp b/libs/binder/tests/Android.bp
index 7006f87..e609987 100644
--- a/libs/binder/tests/Android.bp
+++ b/libs/binder/tests/Android.bp
@@ -370,6 +370,31 @@
     ],
 }
 
+cc_binary {
+    name: "binderRpcTest_on_trusty_mock",
+    defaults: [
+        "trusty_mock_defaults",
+    ],
+
+    srcs: [
+        "binderRpcUniversalTests.cpp",
+        "binderRpcTestCommon.cpp",
+        "binderRpcTestTrusty.cpp",
+    ],
+
+    shared_libs: [
+        "libbinder_on_trusty_mock",
+        "libbase",
+        "libutils",
+        "libcutils",
+    ],
+
+    static_libs: [
+        "binderRpcTestIface-cpp",
+        "libgtest",
+    ],
+}
+
 cc_test {
     name: "binderRpcTest",
     defaults: [
@@ -382,6 +407,7 @@
     required: [
         "libbinder_on_trusty_mock",
         "binderRpcTestService_on_trusty_mock",
+        "binderRpcTest_on_trusty_mock",
     ],
 }
 
diff --git a/libs/binder/tests/binderRpcTestTrusty.cpp b/libs/binder/tests/binderRpcTestTrusty.cpp
new file mode 100644
index 0000000..b3bb5eb
--- /dev/null
+++ b/libs/binder/tests/binderRpcTestTrusty.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "binderRpcTest"
+
+#include <android-base/stringprintf.h>
+#include <binder/RpcTransportTipcTrusty.h>
+#include <trusty-gtest.h>
+#include <trusty_ipc.h>
+
+#include "binderRpcTestFixture.h"
+
+namespace android {
+
+// Destructors need to be defined, even if pure virtual
+ProcessSession::~ProcessSession() {}
+
+class TrustyProcessSession : public ProcessSession {
+public:
+    ~TrustyProcessSession() override {}
+
+    void setCustomExitStatusCheck(std::function<void(int wstatus)> /*f*/) override {
+        LOG_ALWAYS_FATAL("setCustomExitStatusCheck() not supported");
+    }
+
+    void terminate() override { LOG_ALWAYS_FATAL("terminate() not supported"); }
+};
+
+std::string BinderRpc::PrintParamInfo(const testing::TestParamInfo<ParamType>& info) {
+    auto [type, security, clientVersion, serverVersion, singleThreaded, noKernel] = info.param;
+    auto ret = PrintToString(type) + "_clientV" + std::to_string(clientVersion) + "_serverV" +
+            std::to_string(serverVersion);
+    if (singleThreaded) {
+        ret += "_single_threaded";
+    }
+    if (noKernel) {
+        ret += "_no_kernel";
+    }
+    return ret;
+}
+
+// This creates a new process serving an interface on a certain number of
+// threads.
+std::unique_ptr<ProcessSession> BinderRpc::createRpcTestSocketServerProcessEtc(
+        const BinderRpcOptions& options) {
+    LOG_ALWAYS_FATAL_IF(options.numIncomingConnections != 0,
+                        "Non-zero incoming connections %zu on Trusty",
+                        options.numIncomingConnections);
+
+    uint32_t clientVersion = std::get<2>(GetParam());
+    uint32_t serverVersion = std::get<3>(GetParam());
+
+    auto ret = std::make_unique<TrustyProcessSession>();
+
+    status_t status;
+    for (size_t i = 0; i < options.numSessions; i++) {
+        auto factory = android::RpcTransportCtxFactoryTipcTrusty::make();
+        auto session = android::RpcSession::make(std::move(factory));
+
+        EXPECT_TRUE(session->setProtocolVersion(clientVersion));
+        session->setMaxOutgoingThreads(options.numOutgoingConnections);
+        session->setFileDescriptorTransportMode(options.clientFileDescriptorTransportMode);
+
+        status = session->setupPreconnectedClient({}, [&]() {
+            auto port = trustyIpcPort(serverVersion);
+            int rc = connect(port.c_str(), IPC_CONNECT_WAIT_FOR_PORT);
+            LOG_ALWAYS_FATAL_IF(rc < 0, "Failed to connect to service: %d", rc);
+            return base::unique_fd(rc);
+        });
+        if (options.allowConnectFailure && status != OK) {
+            ret->sessions.clear();
+            break;
+        }
+        LOG_ALWAYS_FATAL_IF(status != OK, "Failed to connect to service: %s",
+                            statusToString(status).c_str());
+        ret->sessions.push_back({session, session->getRootObject()});
+    }
+
+    return ret;
+}
+
+INSTANTIATE_TEST_CASE_P(Trusty, BinderRpc,
+                        ::testing::Combine(::testing::Values(SocketType::TIPC),
+                                           ::testing::Values(RpcSecurity::RAW),
+                                           ::testing::ValuesIn(testVersions()),
+                                           ::testing::ValuesIn(testVersions()),
+                                           ::testing::Values(false), ::testing::Values(true)),
+                        BinderRpc::PrintParamInfo);
+
+} // namespace android
+
+PORT_GTEST(BinderRpcTest, "com.android.trusty.binderRpcTest");
diff --git a/libs/binder/tests/binderRpcUniversalTests.cpp b/libs/binder/tests/binderRpcUniversalTests.cpp
index 2249e5c..11a22b0 100644
--- a/libs/binder/tests/binderRpcUniversalTests.cpp
+++ b/libs/binder/tests/binderRpcUniversalTests.cpp
@@ -386,11 +386,11 @@
     EXPECT_EQ(b, weak.promote());
 }
 
-#define expectSessions(expected, iface)                   \
+#define EXPECT_SESSIONS(expected, iface)                  \
     do {                                                  \
         int session;                                      \
         EXPECT_OK((iface)->getNumOpenSessions(&session)); \
-        EXPECT_EQ(expected, session);                     \
+        EXPECT_EQ(static_cast<int>(expected), session);   \
     } while (false)
 
 TEST_P(BinderRpc, SingleSession) {
@@ -402,9 +402,9 @@
     EXPECT_OK(session->getName(&out));
     EXPECT_EQ("aoeu", out);
 
-    expectSessions(1, proc.rootIface);
+    EXPECT_SESSIONS(1, proc.rootIface);
     session = nullptr;
-    expectSessions(0, proc.rootIface);
+    EXPECT_SESSIONS(0, proc.rootIface);
 }
 
 TEST_P(BinderRpc, ManySessions) {
@@ -413,24 +413,24 @@
     std::vector<sp<IBinderRpcSession>> sessions;
 
     for (size_t i = 0; i < 15; i++) {
-        expectSessions(i, proc.rootIface);
+        EXPECT_SESSIONS(i, proc.rootIface);
         sp<IBinderRpcSession> session;
         EXPECT_OK(proc.rootIface->openSession(std::to_string(i), &session));
         sessions.push_back(session);
     }
-    expectSessions(sessions.size(), proc.rootIface);
+    EXPECT_SESSIONS(sessions.size(), proc.rootIface);
     for (size_t i = 0; i < sessions.size(); i++) {
         std::string out;
         EXPECT_OK(sessions.at(i)->getName(&out));
         EXPECT_EQ(std::to_string(i), out);
     }
-    expectSessions(sessions.size(), proc.rootIface);
+    EXPECT_SESSIONS(sessions.size(), proc.rootIface);
 
     while (!sessions.empty()) {
         sessions.pop_back();
-        expectSessions(sessions.size(), proc.rootIface);
+        EXPECT_SESSIONS(sessions.size(), proc.rootIface);
     }
-    expectSessions(0, proc.rootIface);
+    EXPECT_SESSIONS(0, proc.rootIface);
 }
 
 TEST_P(BinderRpc, OnewayCallDoesNotWait) {
@@ -483,7 +483,7 @@
                     cb->mCv.wait_for(_l, 1s, [&] { return !cb->mValues.empty(); });
                 }
 
-                EXPECT_EQ(cb->mValues.size(), 1)
+                EXPECT_EQ(cb->mValues.size(), 1UL)
                         << "callIsOneway: " << callIsOneway
                         << " callbackIsOneway: " << callbackIsOneway << " delayed: " << delayed;
                 if (cb->mValues.empty()) continue;
diff --git a/libs/binder/trusty/binderRpcTest/manifest.json b/libs/binder/trusty/binderRpcTest/manifest.json
new file mode 100644
index 0000000..d8b080f
--- /dev/null
+++ b/libs/binder/trusty/binderRpcTest/manifest.json
@@ -0,0 +1,6 @@
+{
+    "uuid": "9dbe9fb8-60fd-4bdd-af86-03e95d7ad78b",
+    "app_name": "binderRpcTest",
+    "min_heap": 163840,
+    "min_stack": 16384
+}
diff --git a/libs/binder/trusty/binderRpcTest/rules.mk b/libs/binder/trusty/binderRpcTest/rules.mk
new file mode 100644
index 0000000..ae39492
--- /dev/null
+++ b/libs/binder/trusty/binderRpcTest/rules.mk
@@ -0,0 +1,35 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_DIR := $(GET_LOCAL_DIR)
+LIBBINDER_TESTS_DIR := frameworks/native/libs/binder/tests
+
+MODULE := $(LOCAL_DIR)
+
+MANIFEST := $(LOCAL_DIR)/manifest.json
+
+MODULE_SRCS += \
+	$(LIBBINDER_TESTS_DIR)/binderRpcUniversalTests.cpp \
+	$(LIBBINDER_TESTS_DIR)/binderRpcTestCommon.cpp \
+	$(LIBBINDER_TESTS_DIR)/binderRpcTestTrusty.cpp \
+
+MODULE_LIBRARY_DEPS += \
+	$(LOCAL_DIR)/aidl \
+	frameworks/native/libs/binder/trusty \
+	frameworks/native/libs/binder/trusty/ndk \
+	trusty/user/base/lib/googletest \
+	trusty/user/base/lib/libstdc++-trusty \
+
+include make/trusted_app.mk
diff --git a/libs/binder/trusty/build-config-usertests b/libs/binder/trusty/build-config-usertests
new file mode 100644
index 0000000..d0a1fbc
--- /dev/null
+++ b/libs/binder/trusty/build-config-usertests
@@ -0,0 +1,19 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file lists userspace tests
+
+[
+    porttest("com.android.trusty.binderRpcTest"),
+]
diff --git a/libs/binder/trusty/include_mock/trusty-gtest.h b/libs/binder/trusty/include_mock/trusty-gtest.h
new file mode 100644
index 0000000..046b403
--- /dev/null
+++ b/libs/binder/trusty/include_mock/trusty-gtest.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#define PORT_GTEST(suite, port) \
+    int main(void) {            \
+        return 0;               \
+    }
diff --git a/libs/binder/trusty/include_mock/trusty_ipc.h b/libs/binder/trusty/include_mock/trusty_ipc.h
index 43ab84a..db044c2 100644
--- a/libs/binder/trusty/include_mock/trusty_ipc.h
+++ b/libs/binder/trusty/include_mock/trusty_ipc.h
@@ -27,6 +27,8 @@
 #define IPC_PORT_ALLOW_TA_CONNECT 0x1
 #define IPC_PORT_ALLOW_NS_CONNECT 0x2
 
+#define IPC_CONNECT_WAIT_FOR_PORT 0x1
+
 #define IPC_HANDLE_POLL_HUP 0x1
 #define IPC_HANDLE_POLL_MSG 0x2
 #define IPC_HANDLE_POLL_SEND_UNBLOCKED 0x4
diff --git a/libs/binder/trusty/usertests-inc.mk b/libs/binder/trusty/usertests-inc.mk
index 2f5a7f4..1300121 100644
--- a/libs/binder/trusty/usertests-inc.mk
+++ b/libs/binder/trusty/usertests-inc.mk
@@ -14,4 +14,6 @@
 #
 
 TRUSTY_USER_TESTS += \
+	frameworks/native/libs/binder/trusty/binderRpcTest \
 	frameworks/native/libs/binder/trusty/binderRpcTest/service \
+
diff --git a/libs/gui/Android.bp b/libs/gui/Android.bp
index 6c9c28a..21900a0 100644
--- a/libs/gui/Android.bp
+++ b/libs/gui/Android.bp
@@ -254,6 +254,10 @@
     lto: {
         thin: true,
     },
+
+    cflags: [
+        "-Wthread-safety",
+    ],
 }
 
 // Used by media codec services exclusively as a static lib for
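-Wthread-safety turns on clang's static thread-safety analysis for libgui, which is what checks the GUARDED_BY annotations and the UNIQUE_LOCK_WITH_ASSERTION pattern introduced in BLASTBufferQueue below. A minimal hedged sketch of the pattern the flag verifies:

    #include <condition_variable>
    #include <mutex>

    #include <android-base/thread_annotations.h>

    class Example {
    public:
        void waitUntilReady() {
            std::unique_lock lock{mMutex};
            // A unique_lock is needed for the condition variable, but the analysis
            // doesn't understand it; the assertion tells clang the mutex is held.
            android::base::ScopedLockAssertion assumeLocked(mMutex);
            while (!mReady) {
                mCv.wait(lock);
            }
        }

    private:
        std::mutex mMutex;
        std::condition_variable mCv;
        bool mReady GUARDED_BY(mMutex) = false;
    };
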
diff --git a/libs/gui/BLASTBufferQueue.cpp b/libs/gui/BLASTBufferQueue.cpp
index 5d12463..9d82c14 100644
--- a/libs/gui/BLASTBufferQueue.cpp
+++ b/libs/gui/BLASTBufferQueue.cpp
@@ -20,6 +20,7 @@
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 //#define LOG_NDEBUG 0
 
+#include <cutils/atomic.h>
 #include <gui/BLASTBufferQueue.h>
 #include <gui/BufferItemConsumer.h>
 #include <gui/BufferQueueConsumer.h>
@@ -35,6 +36,7 @@
 #include <private/gui/ComposerService.h>
 #include <private/gui/ComposerServiceAIDL.h>
 
+#include <android-base/thread_annotations.h>
 #include <chrono>
 
 using namespace std::chrono_literals;
@@ -63,6 +65,10 @@
     ATRACE_FORMAT("%s - %s(f:%u,a:%u)" x, __FUNCTION__, mName.c_str(), mNumFrameAvailable, \
                   mNumAcquired, ##__VA_ARGS__)
 
+#define UNIQUE_LOCK_WITH_ASSERTION(mutex) \
+    std::unique_lock _lock{mutex};        \
+    base::ScopedLockAssertion assumeLocked(mutex);
+
 void BLASTBufferItemConsumer::onDisconnect() {
     Mutex::Autolock lock(mMutex);
     mPreviouslyConnected = mCurrentlyConnected;
@@ -152,11 +158,11 @@
                                                       GraphicBuffer::USAGE_HW_COMPOSER |
                                                               GraphicBuffer::USAGE_HW_TEXTURE,
                                                       1, false, this);
-    static int32_t id = 0;
-    mName = name + "#" + std::to_string(id);
-    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(id);
-    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(id);
-    id++;
+    static std::atomic<uint32_t> nextId = 0;
+    mProducerId = nextId++;
+    mName = name + "#" + std::to_string(mProducerId);
+    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(mProducerId);
+    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(mProducerId);
     mBufferItemConsumer->setName(String8(consumerName.c_str()));
     mBufferItemConsumer->setFrameAvailableListener(this);
 
@@ -207,7 +213,7 @@
                               int32_t format) {
     LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");
 
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     if (mFormat != format) {
         mFormat = format;
         mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
@@ -277,7 +283,7 @@
                                                     const sp<Fence>& /*presentFence*/,
                                                     const std::vector<SurfaceControlStats>& stats) {
     {
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         BBQ_TRACE();
         BQA_LOGV("transactionCommittedCallback");
         if (!mSurfaceControlsWithPendingCallback.empty()) {
@@ -325,7 +331,7 @@
 void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                            const std::vector<SurfaceControlStats>& stats) {
     {
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         BBQ_TRACE();
         BQA_LOGV("transactionCallback");
 
@@ -406,9 +412,8 @@
 void BLASTBufferQueue::releaseBufferCallback(
         const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
         std::optional<uint32_t> currentMaxAcquiredBufferCount) {
+    std::lock_guard _lock{mMutex};
     BBQ_TRACE();
-
-    std::unique_lock _lock{mMutex};
     releaseBufferCallbackLocked(id, releaseFence, currentMaxAcquiredBufferCount,
                                 false /* fakeRelease */);
 }
@@ -423,10 +428,8 @@
     // to the buffer queue. This will prevent higher latency when we are running
     // on a lower refresh rate than the max supported. We only do that for EGL
     // clients as others don't care about latency
-    const bool isEGL = [&] {
-        const auto it = mSubmitted.find(id);
-        return it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
-    }();
+    const auto it = mSubmitted.find(id);
+    const bool isEGL = it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
 
     if (currentMaxAcquiredBufferCount) {
         mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
@@ -485,11 +488,19 @@
 
 status_t BLASTBufferQueue::acquireNextBufferLocked(
         const std::optional<SurfaceComposerClient::Transaction*> transaction) {
-    // If the next transaction is set, we want to guarantee the our acquire will not fail, so don't
-    // include the extra buffer when checking if we can acquire the next buffer.
+    // Check if we have frames available and we have not acquired the maximum number of buffers.
+    // Even with this check, the consumer can fail to acquire an additional buffer if the consumer
+    // has already acquired (mMaxAcquiredBuffers + 1) and the new buffer is not droppable. In this
+    // case mBufferItemConsumer->acquireBuffer will return with NO_BUFFER_AVAILABLE.
     if (mNumFrameAvailable == 0) {
-        BQA_LOGV("Can't process next buffer. No available frames");
-        return NOT_ENOUGH_DATA;
+        BQA_LOGV("Can't acquire next buffer. No available frames");
+        return BufferQueue::NO_BUFFER_AVAILABLE;
+    }
+
+    if (mNumAcquired >= (mMaxAcquiredBuffers + 2)) {
+        BQA_LOGV("Can't acquire next buffer. Already acquired max frames %d max:%d + 2",
+                 mNumAcquired, mMaxAcquiredBuffers);
+        return BufferQueue::NO_BUFFER_AVAILABLE;
     }
 
     if (mSurfaceControl == nullptr) {
@@ -562,7 +573,8 @@
             std::bind(releaseBufferCallbackThunk, wp<BLASTBufferQueue>(this) /* callbackContext */,
                       std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
     sp<Fence> fence = bufferItem.mFence ? new Fence(bufferItem.mFence->dup()) : Fence::NO_FENCE;
-    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, releaseBufferCallback);
+    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, mProducerId,
+                 releaseBufferCallback);
     t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
     t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
     t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
@@ -607,7 +619,7 @@
     }
 
     {
-        std::unique_lock _lock{mTimestampMutex};
+        std::lock_guard _lock{mTimestampMutex};
         auto dequeueTime = mDequeueTimestamps.find(buffer->getId());
         if (dequeueTime != mDequeueTimestamps.end()) {
             Parcel p;
@@ -662,11 +674,11 @@
 void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
     std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
     SurfaceComposerClient::Transaction* prevTransaction = nullptr;
-    bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();
 
     {
-        std::unique_lock _lock{mMutex};
+        UNIQUE_LOCK_WITH_ASSERTION(mMutex);
         BBQ_TRACE();
+        bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();
 
         const bool syncTransactionSet = mTransactionReadyCallback != nullptr;
         BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));
@@ -696,6 +708,15 @@
                     // flush out the shadow queue
                     acquireAndReleaseBuffer();
                 }
+            } else {
+                // Make sure the frame available count is 0 before proceeding with a sync to ensure
+                // the correct frame is used for the sync. The only way mNumFrameAvailable would be
+                // greater than 0 is if we already ran out of buffers previously. This means we
+                // need to flush the buffers before proceeding with the sync.
+                while (mNumFrameAvailable > 0) {
+                    BQA_LOGD("waiting until no queued buffers");
+                    mCallbackCV.wait(_lock);
+                }
             }
         }
 
@@ -711,6 +732,11 @@
                  item.mFrameNumber, boolToString(syncTransactionSet));
 
         if (syncTransactionSet) {
+            // Add to mSyncedFrameNumbers before waiting in case any buffers are released
+            // while waiting for a free buffer. The release and commit callback will try to
+            // acquire buffers if there are any available, but we don't want it to acquire
+            // in the case where a sync transaction wants the buffer.
+            mSyncedFrameNumbers.emplace(item.mFrameNumber);
             // If there's no available buffer and we're in a sync transaction, we need to wait
             // instead of returning since we guarantee a buffer will be acquired for the sync.
             while (acquireNextBufferLocked(mSyncTransaction) == BufferQueue::NO_BUFFER_AVAILABLE) {
@@ -723,7 +749,6 @@
             incStrong((void*)transactionCommittedCallbackThunk);
             mSyncTransaction->addTransactionCommittedCallback(transactionCommittedCallbackThunk,
                                                               static_cast<void*>(this));
-            mSyncedFrameNumbers.emplace(item.mFrameNumber);
             if (mAcquireSingleBuffer) {
                 prevCallback = mTransactionReadyCallback;
                 prevTransaction = mSyncTransaction;
@@ -745,25 +770,24 @@
 }
 
 void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
-    std::unique_lock _lock{mTimestampMutex};
+    std::lock_guard _lock{mTimestampMutex};
     mDequeueTimestamps[bufferId] = systemTime();
 };
 
 void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
-    std::unique_lock _lock{mTimestampMutex};
+    std::lock_guard _lock{mTimestampMutex};
     mDequeueTimestamps.erase(bufferId);
 };
 
 void BLASTBufferQueue::syncNextTransaction(
         std::function<void(SurfaceComposerClient::Transaction*)> callback,
         bool acquireSingleBuffer) {
-    BBQ_TRACE();
-
     std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
     SurfaceComposerClient::Transaction* prevTransaction = nullptr;
 
     {
         std::lock_guard _lock{mMutex};
+        BBQ_TRACE();
         // We're about to overwrite the previous call so we should invoke that callback
         // immediately.
         if (mTransactionReadyCallback) {
@@ -829,8 +853,8 @@
 class BBQSurface : public Surface {
 private:
     std::mutex mMutex;
-    sp<BLASTBufferQueue> mBbq;
-    bool mDestroyed = false;
+    sp<BLASTBufferQueue> mBbq GUARDED_BY(mMutex);
+    bool mDestroyed GUARDED_BY(mMutex) = false;
 
 public:
     BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
@@ -851,7 +875,7 @@
 
     status_t setFrameRate(float frameRate, int8_t compatibility,
                           int8_t changeFrameRateStrategy) override {
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         if (mDestroyed) {
             return DEAD_OBJECT;
         }
@@ -864,7 +888,7 @@
 
     status_t setFrameTimelineInfo(uint64_t frameNumber,
                                   const FrameTimelineInfo& frameTimelineInfo) override {
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         if (mDestroyed) {
             return DEAD_OBJECT;
         }
@@ -874,7 +898,7 @@
     void destroy() override {
         Surface::destroy();
 
-        std::unique_lock _lock{mMutex};
+        std::lock_guard _lock{mMutex};
         mDestroyed = true;
         mBbq = nullptr;
     }
@@ -884,7 +908,7 @@
 // no timing issues.
 status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
                                         bool shouldBeSeamless) {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     SurfaceComposerClient::Transaction t;
 
     return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
@@ -894,20 +918,20 @@
                                                 const FrameTimelineInfo& frameTimelineInfo) {
     ATRACE_FORMAT("%s(%s) frameNumber: %" PRIu64 " vsyncId: %" PRId64, __func__, mName.c_str(),
                   frameNumber, frameTimelineInfo.vsyncId);
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     mPendingFrameTimelines.push({frameNumber, frameTimelineInfo});
     return OK;
 }
 
 void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     SurfaceComposerClient::Transaction t;
 
     t.setSidebandStream(mSurfaceControl, stream).apply();
 }
 
 sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     sp<IBinder> scHandle = nullptr;
     if (includeSurfaceControlHandle && mSurfaceControl) {
         scHandle = mSurfaceControl->getHandle();
@@ -1098,6 +1122,7 @@
 }
 
 uint32_t BLASTBufferQueue::getLastTransformHint() const {
+    std::lock_guard _lock{mMutex};
     if (mSurfaceControl != nullptr) {
         return mSurfaceControl->getTransformHint();
     } else {
@@ -1106,18 +1131,18 @@
 }
 
 uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     return mLastAcquiredFrameNumber;
 }
 
 bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
 }
 
 void BLASTBufferQueue::setTransactionHangCallback(
         std::function<void(const std::string&)> callback) {
-    std::unique_lock _lock{mMutex};
+    std::lock_guard _lock{mMutex};
     mTransactionHangCallback = callback;
 }
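
The unique_lock to lock_guard conversions in this file follow one rule: std::lock_guard is enough for plain scoped locking, while std::unique_lock is kept only where a condition variable has to release the mutex during a wait, as in the onFrameAvailable() loop above. A minimal standalone sketch of that split (the class and member names are placeholders, not BLASTBufferQueue itself):

    #include <condition_variable>
    #include <mutex>

    class Example {
        std::mutex mMutex;
        std::condition_variable mCallbackCV;
        int mNumFrameAvailable = 0;

    public:
        int available() {
            std::lock_guard _lock{mMutex};   // simple scoped locking, never released early
            return mNumFrameAvailable;
        }

        void waitUntilDrained() {
            std::unique_lock _lock{mMutex};  // condition_variable::wait() needs a unique_lock
            while (mNumFrameAvailable > 0) {
                mCallbackCV.wait(_lock);     // releases mMutex while blocked, reacquires after
            }
        }
    };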
 
diff --git a/libs/gui/ITransactionCompletedListener.cpp b/libs/gui/ITransactionCompletedListener.cpp
index 985c549..ffe79a3 100644
--- a/libs/gui/ITransactionCompletedListener.cpp
+++ b/libs/gui/ITransactionCompletedListener.cpp
@@ -39,6 +39,12 @@
 
 } // Anonymous namespace
 
+namespace { // Anonymous
+
+constexpr int32_t kSerializedCallbackTypeOnCompleteWithJankData = 2;
+
+} // Anonymous namespace
+
 status_t FrameEventHistoryStats::writeToParcel(Parcel* output) const {
     status_t err = output->writeUint64(frameNumber);
     if (err != NO_ERROR) return err;
@@ -349,7 +355,11 @@
 
 status_t CallbackId::writeToParcel(Parcel* output) const {
     SAFE_PARCEL(output->writeInt64, id);
-    SAFE_PARCEL(output->writeInt32, static_cast<int32_t>(type));
+    if (type == Type::ON_COMPLETE && includeJankData) {
+        SAFE_PARCEL(output->writeInt32, kSerializedCallbackTypeOnCompleteWithJankData);
+    } else {
+        SAFE_PARCEL(output->writeInt32, static_cast<int32_t>(type));
+    }
     return NO_ERROR;
 }
 
@@ -357,7 +367,13 @@
     SAFE_PARCEL(input->readInt64, &id);
     int32_t typeAsInt;
     SAFE_PARCEL(input->readInt32, &typeAsInt);
-    type = static_cast<CallbackId::Type>(typeAsInt);
+    if (typeAsInt == kSerializedCallbackTypeOnCompleteWithJankData) {
+        type = Type::ON_COMPLETE;
+        includeJankData = true;
+    } else {
+        type = static_cast<CallbackId::Type>(typeAsInt);
+        includeJankData = false;
+    }
     return NO_ERROR;
 }
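
The parceling change above packs the includeJankData flag into the existing type field using a reserved value instead of adding a new parcel field. A minimal sketch of the resulting encoding, using the values from the header change further down (ON_COMPLETE = 0, ON_COMMIT = 1, 2 reserved for ON_COMPLETE with jank data); this is illustrative only, not the Parcel plumbing itself:

    #include <cassert>
    #include <cstdint>

    enum class Type : int32_t { ON_COMPLETE = 0, ON_COMMIT = 1 };

    int32_t encodeType(Type type, bool includeJankData) {
        return (type == Type::ON_COMPLETE && includeJankData) ? 2 : static_cast<int32_t>(type);
    }

    void decodeType(int32_t wire, Type& type, bool& includeJankData) {
        includeJankData = (wire == 2);
        type = includeJankData ? Type::ON_COMPLETE : static_cast<Type>(wire);
    }

    int main() {
        Type t;
        bool jank;
        decodeType(encodeType(Type::ON_COMPLETE, /*includeJankData=*/true), t, jank);
        assert(t == Type::ON_COMPLETE && jank);  // round-trips through the reserved value 2
    }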
 
diff --git a/libs/gui/LayerState.cpp b/libs/gui/LayerState.cpp
index 8372363..a6276e5 100644
--- a/libs/gui/LayerState.cpp
+++ b/libs/gui/LayerState.cpp
@@ -684,6 +684,9 @@
         what |= eDimmingEnabledChanged;
         dimmingEnabled = other.dimmingEnabled;
     }
+    if (other.what & eFlushJankData) {
+        what |= eFlushJankData;
+    }
     if ((other.what & what) != other.what) {
         ALOGE("Unmerged SurfaceComposer Transaction properties. LayerState::merge needs updating? "
               "other.what=0x%" PRIX64 " what=0x%" PRIX64 " unmerged flags=0x%" PRIX64,
@@ -981,6 +984,7 @@
     SAFE_PARCEL(output->writeUint64, cachedBuffer.id);
     SAFE_PARCEL(output->writeBool, hasBarrier);
     SAFE_PARCEL(output->writeUint64, barrierFrameNumber);
+    SAFE_PARCEL(output->writeUint32, producerId);
 
     return NO_ERROR;
 }
@@ -1019,6 +1023,7 @@
 
     SAFE_PARCEL(input->readBool, &hasBarrier);
     SAFE_PARCEL(input->readUint64, &barrierFrameNumber);
+    SAFE_PARCEL(input->readUint32, &producerId);
 
     return NO_ERROR;
 }
diff --git a/libs/gui/SurfaceComposerClient.cpp b/libs/gui/SurfaceComposerClient.cpp
index 9092f5f..4596c8a 100644
--- a/libs/gui/SurfaceComposerClient.cpp
+++ b/libs/gui/SurfaceComposerClient.cpp
@@ -53,6 +53,7 @@
 #include <ui/DisplayState.h>
 #include <ui/DynamicDisplayInfo.h>
 
+#include <android-base/thread_annotations.h>
 #include <private/gui/ComposerService.h>
 #include <private/gui/ComposerServiceAIDL.h>
 
@@ -81,6 +82,8 @@
 int64_t generateId() {
     return (((int64_t)getpid()) << 32) | ++idCounter;
 }
+
+void emptyCallback(nsecs_t, const sp<Fence>&, const std::vector<SurfaceControlStats>&) {}
 } // namespace
 
 ComposerService::ComposerService()
@@ -248,6 +251,14 @@
                 surfaceControls,
         CallbackId::Type callbackType) {
     std::lock_guard<std::mutex> lock(mMutex);
+    return addCallbackFunctionLocked(callbackFunction, surfaceControls, callbackType);
+}
+
+CallbackId TransactionCompletedListener::addCallbackFunctionLocked(
+        const TransactionCompletedCallback& callbackFunction,
+        const std::unordered_set<sp<SurfaceControl>, SurfaceComposerClient::SCHash>&
+                surfaceControls,
+        CallbackId::Type callbackType) {
     startListeningLocked();
 
     CallbackId callbackId(getNextIdLocked(), callbackType);
@@ -256,6 +267,11 @@
 
     for (const auto& surfaceControl : surfaceControls) {
         callbackSurfaceControls[surfaceControl->getHandle()] = surfaceControl;
+
+        if (callbackType == CallbackId::Type::ON_COMPLETE &&
+            mJankListeners.count(surfaceControl->getLayerId()) != 0) {
+            callbackId.includeJankData = true;
+        }
     }
 
     return callbackId;
@@ -304,15 +320,26 @@
 }
 
 void TransactionCompletedListener::addSurfaceControlToCallbacks(
-        const sp<SurfaceControl>& surfaceControl,
-        const std::unordered_set<CallbackId, CallbackIdHash>& callbackIds) {
+        SurfaceComposerClient::CallbackInfo& callbackInfo,
+        const sp<SurfaceControl>& surfaceControl) {
     std::lock_guard<std::mutex> lock(mMutex);
 
-    for (auto callbackId : callbackIds) {
+    bool includingJankData = false;
+    for (auto callbackId : callbackInfo.callbackIds) {
         mCallbacks[callbackId].surfaceControls.emplace(std::piecewise_construct,
                                                        std::forward_as_tuple(
                                                                surfaceControl->getHandle()),
                                                        std::forward_as_tuple(surfaceControl));
+        includingJankData = includingJankData || callbackId.includeJankData;
+    }
+
+    // If no registered callback is requesting jank data, but there is a jank listener registered
+    // on the new surface control, add a synthetic callback that requests the jank data.
+    if (!includingJankData && mJankListeners.count(surfaceControl->getLayerId()) != 0) {
+        CallbackId callbackId =
+                addCallbackFunctionLocked(&emptyCallback, callbackInfo.surfaceControls,
+                                          CallbackId::Type::ON_COMPLETE);
+        callbackInfo.callbackIds.emplace(callbackId);
     }
 }
 
@@ -929,8 +956,7 @@
         // register all surface controls for all callbackIds for this listener that is merging
         for (const auto& surfaceControl : currentProcessCallbackInfo.surfaceControls) {
             TransactionCompletedListener::getInstance()
-                    ->addSurfaceControlToCallbacks(surfaceControl,
-                                                   currentProcessCallbackInfo.callbackIds);
+                    ->addSurfaceControlToCallbacks(currentProcessCallbackInfo, surfaceControl);
         }
     }
 
@@ -1180,6 +1206,19 @@
 void SurfaceComposerClient::Transaction::setDefaultApplyToken(sp<IBinder> applyToken) {
     sApplyToken = applyToken;
 }
+
+status_t SurfaceComposerClient::Transaction::sendSurfaceFlushJankDataTransaction(
+        const sp<SurfaceControl>& sc) {
+    Transaction t;
+    layer_state_t* s = t.getLayerState(sc);
+    if (!s) {
+        return BAD_INDEX;
+    }
+
+    s->what |= layer_state_t::eFlushJankData;
+    t.registerSurfaceControlForCallback(sc);
+    return t.apply(/*sync=*/false, /* oneWay=*/true);
+}
 // ---------------------------------------------------------------------------
 
 sp<IBinder> SurfaceComposerClient::createDisplay(const String8& displayName, bool secure,
@@ -1253,8 +1292,7 @@
     auto& callbackInfo = mListenerCallbacks[TransactionCompletedListener::getIInstance()];
     callbackInfo.surfaceControls.insert(sc);
 
-    TransactionCompletedListener::getInstance()
-            ->addSurfaceControlToCallbacks(sc, callbackInfo.callbackIds);
+    TransactionCompletedListener::getInstance()->addSurfaceControlToCallbacks(callbackInfo, sc);
 }
 
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setPosition(
@@ -1594,7 +1632,7 @@
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setBuffer(
         const sp<SurfaceControl>& sc, const sp<GraphicBuffer>& buffer,
         const std::optional<sp<Fence>>& fence, const std::optional<uint64_t>& optFrameNumber,
-        ReleaseBufferCallback callback) {
+        uint32_t producerId, ReleaseBufferCallback callback) {
     layer_state_t* s = getLayerState(sc);
     if (!s) {
         mStatus = BAD_INDEX;
@@ -1613,6 +1651,7 @@
     bufferData->buffer = buffer;
     uint64_t frameNumber = sc->resolveFrameNumber(optFrameNumber);
     bufferData->frameNumber = frameNumber;
+    bufferData->producerId = producerId;
     bufferData->flags |= BufferData::BufferDataChange::frameNumberChanged;
     if (fence) {
         bufferData->acquireFence = *fence;
@@ -2624,6 +2663,12 @@
     return statusTFromBinderStatus(status);
 }
 
+status_t SurfaceComposerClient::getHdrOutputConversionSupport(bool* isSupported) {
+    binder::Status status =
+            ComposerServiceAIDL::getComposerService()->getHdrOutputConversionSupport(isSupported);
+    return statusTFromBinderStatus(status);
+}
+
 status_t SurfaceComposerClient::setOverrideFrameRate(uid_t uid, float frameRate) {
     binder::Status status =
             ComposerServiceAIDL::getComposerService()->setOverrideFrameRate(uid, frameRate);
@@ -2988,6 +3033,7 @@
     while (true) {
         {
             std::unique_lock<std::mutex> lock(mMutex);
+            base::ScopedLockAssertion assumeLocked(mMutex);
             callbackInfos = std::move(mCallbackInfos);
             mCallbackInfos = {};
         }
@@ -3000,6 +3046,7 @@
 
         {
             std::unique_lock<std::mutex> lock(mMutex);
+            base::ScopedLockAssertion assumeLocked(mMutex);
             if (mCallbackInfos.size() == 0) {
                 mReleaseCallbackPending.wait(lock);
             }
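
For reference, a caller-side sketch of the APIs this file gains or changes: setBuffer() now takes the producer id ahead of the release callback, sendSurfaceFlushJankDataTransaction() sends a one-way transaction that asks SurfaceFlinger to flush pending jank data for a layer, and getHdrOutputConversionSupport() queries device support. exampleUsage and its arguments are placeholders, not code from the patch:

    #include <optional>

    #include <gui/SurfaceComposerClient.h>

    using namespace android;

    void exampleUsage(const sp<SurfaceControl>& sc, const sp<GraphicBuffer>& buffer,
                      ReleaseBufferCallback releaseCallback) {
        SurfaceComposerClient::Transaction t;
        // producerId now precedes the release callback in the argument list.
        t.setBuffer(sc, buffer, /*fence=*/std::nullopt, /*frameNumber=*/std::nullopt,
                    /*producerId=*/0, releaseCallback)
                .apply();

        // One-way transaction flushing any pending jank data for this layer.
        SurfaceComposerClient::Transaction::sendSurfaceFlushJankDataTransaction(sc);

        bool hdrConversionSupported = false;
        if (SurfaceComposerClient::getHdrOutputConversionSupport(&hdrConversionSupported) == OK) {
            // hdrConversionSupported now reflects device support.
        }
    }
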
diff --git a/libs/gui/include/gui/BLASTBufferQueue.h b/libs/gui/include/gui/BLASTBufferQueue.h
index c93ab86..b9e0647 100644
--- a/libs/gui/include/gui/BLASTBufferQueue.h
+++ b/libs/gui/include/gui/BLASTBufferQueue.h
@@ -44,23 +44,23 @@
             mCurrentlyConnected(false),
             mPreviouslyConnected(false) {}
 
-    void onDisconnect() override;
+    void onDisconnect() override EXCLUDES(mMutex);
     void addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
-                                  FrameEventHistoryDelta* outDelta) override REQUIRES(mMutex);
+                                  FrameEventHistoryDelta* outDelta) override EXCLUDES(mMutex);
     void updateFrameTimestamps(uint64_t frameNumber, nsecs_t refreshStartTime,
                                const sp<Fence>& gpuCompositionDoneFence,
                                const sp<Fence>& presentFence, const sp<Fence>& prevReleaseFence,
                                CompositorTiming compositorTiming, nsecs_t latchTime,
-                               nsecs_t dequeueReadyTime) REQUIRES(mMutex);
-    void getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect);
+                               nsecs_t dequeueReadyTime) EXCLUDES(mMutex);
+    void getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) EXCLUDES(mMutex);
 
 protected:
-    void onSidebandStreamChanged() override REQUIRES(mMutex);
+    void onSidebandStreamChanged() override EXCLUDES(mMutex);
 
 private:
     const wp<BLASTBufferQueue> mBLASTBufferQueue;
 
-    uint64_t mCurrentFrameNumber = 0;
+    uint64_t mCurrentFrameNumber GUARDED_BY(mMutex) = 0;
 
     Mutex mMutex;
     ConsumerFrameEventHistory mFrameEventHistory GUARDED_BY(mMutex);
@@ -94,7 +94,7 @@
                                std::optional<uint32_t> currentMaxAcquiredBufferCount);
     void releaseBufferCallbackLocked(const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
                                      std::optional<uint32_t> currentMaxAcquiredBufferCount,
-                                     bool fakeRelease);
+                                     bool fakeRelease) REQUIRES(mMutex);
     void syncNextTransaction(std::function<void(SurfaceComposerClient::Transaction*)> callback,
                              bool acquireSingleBuffer = true);
     void stopContinuousSyncTransaction();
@@ -150,7 +150,7 @@
     // mNumAcquired (buffers that queued to SF)  mPendingRelease.size() (buffers that are held by
     // blast). This counter is read by android studio profiler.
     std::string mQueuedBufferTrace;
-    sp<SurfaceControl> mSurfaceControl;
+    sp<SurfaceControl> mSurfaceControl GUARDED_BY(mMutex);
 
     mutable std::mutex mMutex;
     std::condition_variable mCallbackCV;
@@ -162,6 +162,11 @@
     int32_t mNumFrameAvailable GUARDED_BY(mMutex) = 0;
     int32_t mNumAcquired GUARDED_BY(mMutex) = 0;
 
+    // A value used to identify if a producer has been changed for the same SurfaceControl.
+    // This is needed to know when the frame number has been reset to make sure we don't
+    // latch stale buffers and that we don't wait on barriers from an old producer.
+    uint32_t mProducerId = 0;
+
     // Keep a reference to the submitted buffers so we can release when surfaceflinger drops the
     // buffer or the buffer has been presented and a new buffer is ready to be presented.
     std::unordered_map<ReleaseCallbackId, BufferItem, ReleaseBufferCallbackIdHash> mSubmitted
@@ -252,7 +257,7 @@
     // callback for them.
     std::queue<sp<SurfaceControl>> mSurfaceControlsWithPendingCallback GUARDED_BY(mMutex);
 
-    uint32_t mCurrentMaxAcquiredBufferCount;
+    uint32_t mCurrentMaxAcquiredBufferCount GUARDED_BY(mMutex);
 
     // Flag to determine if syncTransaction should only acquire a single buffer and then clear or
     // continue to acquire buffers until explicitly cleared
@@ -276,8 +281,8 @@
     // need to set this flag, notably only in the case where we are transitioning from a previous
     // transaction applied by us (one way, may not yet have reached server) and an upcoming
     // transaction that will be applied by some sync consumer.
-    bool mAppliedLastTransaction = false;
-    uint64_t mLastAppliedFrameNumber = 0;
+    bool mAppliedLastTransaction GUARDED_BY(mMutex) = false;
+    uint64_t mLastAppliedFrameNumber GUARDED_BY(mMutex) = 0;
 
     std::function<void(const std::string&)> mTransactionHangCallback;
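
The annotation changes in this header use the standard clang thread-safety vocabulary: GUARDED_BY(m) ties a field to its lock, REQUIRES(m) means the caller must already hold m, and EXCLUDES(m) means the caller must not hold it because the function acquires it itself. A minimal sketch of how the three fit together (Annotated is a placeholder class, not part of the patch):

    #include <mutex>

    #include <android-base/thread_annotations.h>

    class Annotated {
        std::mutex mMutex;
        int mValue GUARDED_BY(mMutex) = 0;

        void setLocked(int v) REQUIRES(mMutex) { mValue = v; }  // caller already holds mMutex

    public:
        void set(int v) EXCLUDES(mMutex) {  // caller must not hold mMutex
            std::lock_guard _lock{mMutex};
            setLocked(v);
        }
    };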
 
diff --git a/libs/gui/include/gui/ITransactionCompletedListener.h b/libs/gui/include/gui/ITransactionCompletedListener.h
index d593f56..39bcb4a 100644
--- a/libs/gui/include/gui/ITransactionCompletedListener.h
+++ b/libs/gui/include/gui/ITransactionCompletedListener.h
@@ -40,10 +40,15 @@
 class CallbackId : public Parcelable {
 public:
     int64_t id;
-    enum class Type : int32_t { ON_COMPLETE, ON_COMMIT } type;
+    enum class Type : int32_t {
+        ON_COMPLETE = 0,
+        ON_COMMIT = 1,
+        /*reserved for serialization = 2*/
+    } type;
+    bool includeJankData; // Only respected for ON_COMPLETE callbacks.
 
     CallbackId() {}
-    CallbackId(int64_t id, Type type) : id(id), type(type) {}
+    CallbackId(int64_t id, Type type) : id(id), type(type), includeJankData(false) {}
     status_t writeToParcel(Parcel* output) const override;
     status_t readFromParcel(const Parcel* input) override;
 
diff --git a/libs/gui/include/gui/LayerState.h b/libs/gui/include/gui/LayerState.h
index b8bee72..ddaf473 100644
--- a/libs/gui/include/gui/LayerState.h
+++ b/libs/gui/include/gui/LayerState.h
@@ -111,6 +111,7 @@
     uint64_t frameNumber = 0;
     bool hasBarrier = false;
     uint64_t barrierFrameNumber = 0;
+    uint32_t producerId = 0;
 
     // Listens to when the buffer is safe to be released. This is used for blast
     // layers only. The callback includes a release fence as well as the graphic
@@ -170,7 +171,7 @@
         eTransparentRegionChanged = 0x00000020,
         eFlagsChanged = 0x00000040,
         eLayerStackChanged = 0x00000080,
-        /* unused = 0x00000100, */
+        eFlushJankData = 0x00000100,
         /* unused = 0x00000200, */
         eDimmingEnabledChanged = 0x00000400,
         eShadowRadiusChanged = 0x00000800,
diff --git a/libs/gui/include/gui/SurfaceComposerClient.h b/libs/gui/include/gui/SurfaceComposerClient.h
index 45f4dbe..fcf8d64 100644
--- a/libs/gui/include/gui/SurfaceComposerClient.h
+++ b/libs/gui/include/gui/SurfaceComposerClient.h
@@ -194,6 +194,8 @@
     static status_t getHdrConversionCapabilities(std::vector<gui::HdrConversionCapability>*);
     // Sets the HDR conversion strategy for the device
     static status_t setHdrConversionStrategy(gui::HdrConversionStrategy hdrConversionStrategy);
+    // Returns whether HDR conversion is supported by the device.
+    static status_t getHdrOutputConversionSupport(bool* isSupported);
 
     // Sets the frame rate of a particular app (uid). This is currently called
     // by GameManager.
@@ -536,7 +538,7 @@
         Transaction& setBuffer(const sp<SurfaceControl>& sc, const sp<GraphicBuffer>& buffer,
                                const std::optional<sp<Fence>>& fence = std::nullopt,
                                const std::optional<uint64_t>& frameNumber = std::nullopt,
-                               ReleaseBufferCallback callback = nullptr);
+                               uint32_t producerId = 0, ReleaseBufferCallback callback = nullptr);
         std::shared_ptr<BufferData> getAndClearBuffer(const sp<SurfaceControl>& sc);
 
         /**
@@ -742,6 +744,8 @@
 
         static sp<IBinder> getDefaultApplyToken();
         static void setDefaultApplyToken(sp<IBinder> applyToken);
+
+        static status_t sendSurfaceFlushJankDataTransaction(const sp<SurfaceControl>& sc);
     };
 
     status_t clearLayerFrameStats(const sp<IBinder>& token) const;
@@ -876,10 +880,14 @@
             const std::unordered_set<sp<SurfaceControl>, SurfaceComposerClient::SCHash>&
                     surfaceControls,
             CallbackId::Type callbackType);
+    CallbackId addCallbackFunctionLocked(
+            const TransactionCompletedCallback& callbackFunction,
+            const std::unordered_set<sp<SurfaceControl>, SurfaceComposerClient::SCHash>&
+                    surfaceControls,
+            CallbackId::Type callbackType) REQUIRES(mMutex);
 
-    void addSurfaceControlToCallbacks(
-            const sp<SurfaceControl>& surfaceControl,
-            const std::unordered_set<CallbackId, CallbackIdHash>& callbackIds);
+    void addSurfaceControlToCallbacks(SurfaceComposerClient::CallbackInfo& callbackInfo,
+                                      const sp<SurfaceControl>& surfaceControl);
 
     void addQueueStallListener(std::function<void(const std::string&)> stallListener, void* id);
     void removeQueueStallListener(void *id);
@@ -921,7 +929,7 @@
     void onTrustedPresentationChanged(int id, bool presentedWithinThresholds) override;
 
 private:
-    ReleaseBufferCallback popReleaseBufferCallbackLocked(const ReleaseCallbackId&);
+    ReleaseBufferCallback popReleaseBufferCallbackLocked(const ReleaseCallbackId&) REQUIRES(mMutex);
     static sp<TransactionCompletedListener> sInstance;
 };
 
diff --git a/libs/input/Android.bp b/libs/input/Android.bp
index 83392ec..fd4fc16 100644
--- a/libs/input/Android.bp
+++ b/libs/input/Android.bp
@@ -73,11 +73,15 @@
         "liblog",
         "libPlatformProperties",
         "libvintf",
-        "libtflite",
+    ],
+
+    ldflags: [
+        "-Wl,--exclude-libs=libtflite_static.a",
     ],
 
     static_libs: [
         "libui-types",
+        "libtflite_static",
     ],
 
     export_static_lib_headers: [
diff --git a/libs/input/MotionPredictor.cpp b/libs/input/MotionPredictor.cpp
index 0f889e8..7d11ef2 100644
--- a/libs/input/MotionPredictor.cpp
+++ b/libs/input/MotionPredictor.cpp
@@ -65,9 +65,8 @@
 MotionPredictor::MotionPredictor(nsecs_t predictionTimestampOffsetNanos, const char* modelPath,
                                  std::function<bool()> checkMotionPredictionEnabled)
       : mPredictionTimestampOffsetNanos(predictionTimestampOffsetNanos),
-        mCheckMotionPredictionEnabled(std::move(checkMotionPredictionEnabled)),
-        mModel(TfLiteMotionPredictorModel::create(modelPath == nullptr ? DEFAULT_MODEL_PATH
-                                                                       : modelPath)) {}
+        mModelPath(modelPath == nullptr ? DEFAULT_MODEL_PATH : modelPath),
+        mCheckMotionPredictionEnabled(std::move(checkMotionPredictionEnabled)) {}
 
 void MotionPredictor::record(const MotionEvent& event) {
     if (!isPredictionAvailable(event.getDeviceId(), event.getSource())) {
@@ -76,6 +75,11 @@
         return;
     }
 
+    // Initialise the model now that it's likely to be used.
+    if (!mModel) {
+        mModel = TfLiteMotionPredictorModel::create(mModelPath.c_str());
+    }
+
     TfLiteMotionPredictorBuffers& buffers =
             mDeviceBuffers.try_emplace(event.getDeviceId(), mModel->inputLength()).first->second;
 
@@ -130,6 +134,7 @@
             continue;
         }
 
+        LOG_ALWAYS_FATAL_IF(!mModel);
         buffer.copyTo(*mModel);
         LOG_ALWAYS_FATAL_IF(!mModel->invoke());
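
The constructor change above defers TfLiteMotionPredictorModel::create() until the first record() call, so the model is only loaded once prediction is actually exercised. A minimal sketch of the lazy-initialisation pattern (Model and Predictor are placeholder names, not the real classes):

    #include <memory>
    #include <string>
    #include <utility>

    struct Model {
        explicit Model(const std::string& /*path*/) { /* expensive load happens here */ }
    };

    class Predictor {
    public:
        explicit Predictor(std::string path) : mModelPath(std::move(path)) {}

        void record() {
            if (!mModel) {  // first call pays the load cost
                mModel = std::make_unique<Model>(mModelPath);
            }
            // ... feed the sample into *mModel ...
        }

    private:
        std::string mModelPath;
        std::unique_ptr<Model> mModel;  // stays empty until first use
    };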
 
diff --git a/libs/input/TfLiteMotionPredictor.cpp b/libs/input/TfLiteMotionPredictor.cpp
index 1a337ad..10510d6 100644
--- a/libs/input/TfLiteMotionPredictor.cpp
+++ b/libs/input/TfLiteMotionPredictor.cpp
@@ -17,27 +17,31 @@
 #define LOG_TAG "TfLiteMotionPredictor"
 #include <input/TfLiteMotionPredictor.h>
 
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
 #include <algorithm>
 #include <cmath>
 #include <cstddef>
 #include <cstdint>
-#include <fstream>
-#include <ios>
-#include <iterator>
 #include <memory>
 #include <span>
-#include <string>
 #include <type_traits>
 #include <utility>
 
+#include <android-base/logging.h>
+#include <android-base/mapped_file.h>
 #define ATRACE_TAG ATRACE_TAG_INPUT
 #include <cutils/trace.h>
 #include <log/log.h>
 
 #include "tensorflow/lite/core/api/error_reporter.h"
+#include "tensorflow/lite/core/api/op_resolver.h"
 #include "tensorflow/lite/interpreter.h"
-#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/kernels/builtin_op_kernels.h"
 #include "tensorflow/lite/model.h"
+#include "tensorflow/lite/mutable_op_resolver.h"
 
 namespace android {
 namespace {
@@ -102,15 +106,24 @@
     LOG_ALWAYS_FATAL_IF(buffer.empty(), "No buffer for tensor '%s'", tensor->name);
 }
 
+std::unique_ptr<tflite::OpResolver> createOpResolver() {
+    auto resolver = std::make_unique<tflite::MutableOpResolver>();
+    resolver->AddBuiltin(::tflite::BuiltinOperator_CONCATENATION,
+                         ::tflite::ops::builtin::Register_CONCATENATION());
+    resolver->AddBuiltin(::tflite::BuiltinOperator_FULLY_CONNECTED,
+                         ::tflite::ops::builtin::Register_FULLY_CONNECTED());
+    return resolver;
+}
+
 } // namespace
 
-TfLiteMotionPredictorBuffers::TfLiteMotionPredictorBuffers(size_t inputLength) {
+TfLiteMotionPredictorBuffers::TfLiteMotionPredictorBuffers(size_t inputLength)
+      : mInputR(inputLength, 0),
+        mInputPhi(inputLength, 0),
+        mInputPressure(inputLength, 0),
+        mInputTilt(inputLength, 0),
+        mInputOrientation(inputLength, 0) {
     LOG_ALWAYS_FATAL_IF(inputLength == 0, "Buffer input size must be greater than 0");
-    mInputR.resize(inputLength);
-    mInputPhi.resize(inputLength);
-    mInputPressure.resize(inputLength);
-    mInputTilt.resize(inputLength);
-    mInputOrientation.resize(inputLength);
 }
 
 void TfLiteMotionPredictorBuffers::reset() {
@@ -186,42 +199,51 @@
     mAxisTo = sample;
 
     // Push the current sample onto the end of the input buffers.
-    mInputR.erase(mInputR.begin());
-    mInputPhi.erase(mInputPhi.begin());
-    mInputPressure.erase(mInputPressure.begin());
-    mInputTilt.erase(mInputTilt.begin());
-    mInputOrientation.erase(mInputOrientation.begin());
-
-    mInputR.push_back(r);
-    mInputPhi.push_back(phi);
-    mInputPressure.push_back(sample.pressure);
-    mInputTilt.push_back(sample.tilt);
-    mInputOrientation.push_back(orientation);
+    mInputR.pushBack(r);
+    mInputPhi.pushBack(phi);
+    mInputPressure.pushBack(sample.pressure);
+    mInputTilt.pushBack(sample.tilt);
+    mInputOrientation.pushBack(orientation);
 }
 
 std::unique_ptr<TfLiteMotionPredictorModel> TfLiteMotionPredictorModel::create(
         const char* modelPath) {
-    std::ifstream f(modelPath, std::ios::binary);
-    LOG_ALWAYS_FATAL_IF(!f, "Could not read model from %s", modelPath);
+    const int fd = open(modelPath, O_RDONLY);
+    if (fd == -1) {
+        PLOG(FATAL) << "Could not read model from " << modelPath;
+    }
 
-    std::string data;
-    data.assign(std::istreambuf_iterator<char>(f), std::istreambuf_iterator<char>());
+    const off_t fdSize = lseek(fd, 0, SEEK_END);
+    if (fdSize == -1) {
+        PLOG(FATAL) << "Failed to determine file size";
+    }
+
+    std::unique_ptr<android::base::MappedFile> modelBuffer =
+            android::base::MappedFile::FromFd(fd, /*offset=*/0, fdSize, PROT_READ);
+    if (!modelBuffer) {
+        PLOG(FATAL) << "Failed to mmap model";
+    }
+    if (close(fd) == -1) {
+        PLOG(FATAL) << "Failed to close model fd";
+    }
 
     return std::unique_ptr<TfLiteMotionPredictorModel>(
-            new TfLiteMotionPredictorModel(std::move(data)));
+            new TfLiteMotionPredictorModel(std::move(modelBuffer)));
 }
 
-TfLiteMotionPredictorModel::TfLiteMotionPredictorModel(std::string model)
+TfLiteMotionPredictorModel::TfLiteMotionPredictorModel(
+        std::unique_ptr<android::base::MappedFile> model)
       : mFlatBuffer(std::move(model)) {
+    CHECK(mFlatBuffer);
     mErrorReporter = std::make_unique<LoggingErrorReporter>();
-    mModel = tflite::FlatBufferModel::VerifyAndBuildFromBuffer(mFlatBuffer.data(),
-                                                               mFlatBuffer.length(),
+    mModel = tflite::FlatBufferModel::VerifyAndBuildFromBuffer(mFlatBuffer->data(),
+                                                               mFlatBuffer->size(),
                                                                /*extra_verifier=*/nullptr,
                                                                mErrorReporter.get());
     LOG_ALWAYS_FATAL_IF(!mModel);
 
-    tflite::ops::builtin::BuiltinOpResolver resolver;
-    tflite::InterpreterBuilder builder(*mModel, resolver);
+    auto resolver = createOpResolver();
+    tflite::InterpreterBuilder builder(*mModel, *resolver);
 
     if (builder(&mInterpreter) != kTfLiteOk || !mInterpreter) {
         LOG_ALWAYS_FATAL("Failed to build interpreter");
@@ -233,6 +255,8 @@
     allocateTensors();
 }
 
+TfLiteMotionPredictorModel::~TfLiteMotionPredictorModel() {}
+
 void TfLiteMotionPredictorModel::allocateTensors() {
     if (mRunner->AllocateTensors() != kTfLiteOk) {
         LOG_ALWAYS_FATAL("Failed to allocate tensors");
diff --git a/libs/input/tests/Android.bp b/libs/input/tests/Android.bp
index 916a8f2..37faf91 100644
--- a/libs/input/tests/Android.bp
+++ b/libs/input/tests/Android.bp
@@ -19,6 +19,7 @@
         "InputEvent_test.cpp",
         "InputPublisherAndConsumer_test.cpp",
         "MotionPredictor_test.cpp",
+        "RingBuffer_test.cpp",
         "TfLiteMotionPredictor_test.cpp",
         "TouchResampling_test.cpp",
         "TouchVideoFrame_test.cpp",
@@ -33,6 +34,7 @@
         "libgmock",
         "libgui_window_info_static",
         "libinput",
+        "libtflite_static",
         "libui-types",
     ],
     cflags: [
@@ -47,7 +49,6 @@
         "libcutils",
         "liblog",
         "libPlatformProperties",
-        "libtflite",
         "libutils",
         "libvintf",
     ],
diff --git a/libs/input/tests/RingBuffer_test.cpp b/libs/input/tests/RingBuffer_test.cpp
new file mode 100644
index 0000000..8a6ef4c
--- /dev/null
+++ b/libs/input/tests/RingBuffer_test.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <iterator>
+#include <memory>
+#include <vector>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <input/RingBuffer.h>
+
+namespace android {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::Not;
+using ::testing::SizeIs;
+
+TEST(RingBufferTest, PushPop) {
+    RingBuffer<int> buffer(/*capacity=*/3);
+
+    buffer.pushBack(1);
+    buffer.pushBack(2);
+    buffer.pushBack(3);
+    EXPECT_THAT(buffer, ElementsAre(1, 2, 3));
+
+    buffer.pushBack(4);
+    EXPECT_THAT(buffer, ElementsAre(2, 3, 4));
+
+    buffer.pushFront(1);
+    EXPECT_THAT(buffer, ElementsAre(1, 2, 3));
+
+    EXPECT_EQ(1, buffer.popFront());
+    EXPECT_THAT(buffer, ElementsAre(2, 3));
+
+    buffer.pushBack(4);
+    EXPECT_THAT(buffer, ElementsAre(2, 3, 4));
+
+    buffer.pushBack(5);
+    EXPECT_THAT(buffer, ElementsAre(3, 4, 5));
+
+    EXPECT_EQ(5, buffer.popBack());
+    EXPECT_THAT(buffer, ElementsAre(3, 4));
+
+    EXPECT_EQ(4, buffer.popBack());
+    EXPECT_THAT(buffer, ElementsAre(3));
+
+    EXPECT_EQ(3, buffer.popBack());
+    EXPECT_THAT(buffer, ElementsAre());
+
+    buffer.pushBack(1);
+    EXPECT_THAT(buffer, ElementsAre(1));
+
+    EXPECT_EQ(1, buffer.popFront());
+    EXPECT_THAT(buffer, ElementsAre());
+}
+
+TEST(RingBufferTest, ObjectType) {
+    RingBuffer<std::unique_ptr<int>> buffer(/*capacity=*/2);
+    buffer.pushBack(std::make_unique<int>(1));
+    buffer.pushBack(std::make_unique<int>(2));
+    buffer.pushBack(std::make_unique<int>(3));
+
+    EXPECT_EQ(2, *buffer[0]);
+    EXPECT_EQ(3, *buffer[1]);
+}
+
+TEST(RingBufferTest, ConstructConstantValue) {
+    RingBuffer<int> buffer(/*count=*/3, /*value=*/10);
+    EXPECT_THAT(buffer, ElementsAre(10, 10, 10));
+    EXPECT_EQ(3u, buffer.capacity());
+}
+
+TEST(RingBufferTest, Assignment) {
+    RingBuffer<int> a(/*capacity=*/2);
+    a.pushBack(1);
+    a.pushBack(2);
+
+    RingBuffer<int> b(/*capacity=*/3);
+    b.pushBack(10);
+    b.pushBack(20);
+    b.pushBack(30);
+
+    std::swap(a, b);
+    EXPECT_THAT(a, ElementsAre(10, 20, 30));
+    EXPECT_THAT(b, ElementsAre(1, 2));
+
+    a = b;
+    EXPECT_THAT(a, ElementsAreArray(b));
+
+    RingBuffer<int> c(b);
+    EXPECT_THAT(c, ElementsAreArray(b));
+
+    RingBuffer<int> d(std::move(b));
+    EXPECT_EQ(0u, b.capacity());
+    EXPECT_THAT(b, ElementsAre());
+    EXPECT_THAT(d, ElementsAre(1, 2));
+
+    b = std::move(d);
+    EXPECT_THAT(b, ElementsAre(1, 2));
+    EXPECT_THAT(d, ElementsAre());
+    EXPECT_EQ(0u, d.capacity());
+}
+
+TEST(RingBufferTest, Subscripting) {
+    RingBuffer<int> buffer(/*capacity=*/2);
+    buffer.pushBack(1);
+    EXPECT_EQ(1, buffer[0]);
+
+    buffer.pushFront(0);
+    EXPECT_EQ(0, buffer[0]);
+    EXPECT_EQ(1, buffer[1]);
+
+    buffer.pushFront(-1);
+    EXPECT_EQ(-1, buffer[0]);
+    EXPECT_EQ(0, buffer[1]);
+}
+
+TEST(RingBufferTest, Iterator) {
+    RingBuffer<int> buffer(/*capacity=*/3);
+    buffer.pushFront(2);
+    buffer.pushBack(3);
+
+    auto begin = buffer.begin();
+    auto end = buffer.end();
+
+    EXPECT_NE(begin, end);
+    EXPECT_LE(begin, end);
+    EXPECT_GT(end, begin);
+    EXPECT_EQ(end, begin + 2);
+    EXPECT_EQ(begin, end - 2);
+
+    EXPECT_EQ(2, end - begin);
+    EXPECT_EQ(1, end - (begin + 1));
+
+    EXPECT_EQ(2, *begin);
+    ++begin;
+    EXPECT_EQ(3, *begin);
+    --begin;
+    EXPECT_EQ(2, *begin);
+    begin += 1;
+    EXPECT_EQ(3, *begin);
+    begin += -1;
+    EXPECT_EQ(2, *begin);
+    begin -= -1;
+    EXPECT_EQ(3, *begin);
+}
+
+TEST(RingBufferTest, Clear) {
+    RingBuffer<int> buffer(/*capacity=*/2);
+    EXPECT_THAT(buffer, ElementsAre());
+
+    buffer.pushBack(1);
+    EXPECT_THAT(buffer, ElementsAre(1));
+
+    buffer.clear();
+    EXPECT_THAT(buffer, ElementsAre());
+    EXPECT_THAT(buffer, SizeIs(0));
+    EXPECT_THAT(buffer, IsEmpty());
+
+    buffer.pushFront(1);
+    EXPECT_THAT(buffer, ElementsAre(1));
+}
+
+TEST(RingBufferTest, SizeAndIsEmpty) {
+    RingBuffer<int> buffer(/*capacity=*/2);
+    EXPECT_THAT(buffer, SizeIs(0));
+    EXPECT_THAT(buffer, IsEmpty());
+
+    buffer.pushBack(1);
+    EXPECT_THAT(buffer, SizeIs(1));
+    EXPECT_THAT(buffer, Not(IsEmpty()));
+
+    buffer.pushBack(2);
+    EXPECT_THAT(buffer, SizeIs(2));
+    EXPECT_THAT(buffer, Not(IsEmpty()));
+
+    buffer.pushBack(3);
+    EXPECT_THAT(buffer, SizeIs(2));
+    EXPECT_THAT(buffer, Not(IsEmpty()));
+
+    buffer.popFront();
+    EXPECT_THAT(buffer, SizeIs(1));
+    EXPECT_THAT(buffer, Not(IsEmpty()));
+
+    buffer.popBack();
+    EXPECT_THAT(buffer, SizeIs(0));
+    EXPECT_THAT(buffer, IsEmpty());
+}
+
+} // namespace
+} // namespace android
diff --git a/libs/jpegrecoverymap/Android.bp b/libs/jpegrecoverymap/Android.bp
index 2c4b3bf..78d1912 100644
--- a/libs/jpegrecoverymap/Android.bp
+++ b/libs/jpegrecoverymap/Android.bp
@@ -29,9 +29,9 @@
     local_include_dirs: ["include"],
 
     srcs: [
-        "recoverymap.cpp",
+        "jpegr.cpp",
         "recoverymapmath.cpp",
-        "recoverymaputils.cpp",
+        "jpegrutils.cpp",
     ],
 
     shared_libs: [
@@ -57,7 +57,7 @@
     export_include_dirs: ["include"],
 
     srcs: [
-        "jpegencoder.cpp",
+        "jpegencoderhelper.cpp",
     ],
 }
 
@@ -73,6 +73,6 @@
     export_include_dirs: ["include"],
 
     srcs: [
-        "jpegdecoder.cpp",
+        "jpegdecoderhelper.cpp",
     ],
 }
diff --git a/libs/jpegrecoverymap/include/jpegrecoverymap/jpegdecoder.h b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegdecoderhelper.h
similarity index 92%
rename from libs/jpegrecoverymap/include/jpegrecoverymap/jpegdecoder.h
rename to libs/jpegrecoverymap/include/jpegrecoverymap/jpegdecoderhelper.h
index 419b63d..8748237 100644
--- a/libs/jpegrecoverymap/include/jpegrecoverymap/jpegdecoder.h
+++ b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegdecoderhelper.h
@@ -1,4 +1,3 @@
-
 /*
  * Copyright 2022 The Android Open Source Project
  *
@@ -15,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_JPEGRECOVERYMAP_JPEGDECODER_H
-#define ANDROID_JPEGRECOVERYMAP_JPEGDECODER_H
+#ifndef ANDROID_JPEGRECOVERYMAP_JPEGDECODERHELPER_H
+#define ANDROID_JPEGRECOVERYMAP_JPEGDECODERHELPER_H
 
 // We must include cstdio before jpeglib.h. It is a requirement of libjpeg.
 #include <cstdio>
@@ -26,15 +25,15 @@
 }
 #include <utils/Errors.h>
 #include <vector>
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 /*
  * Encapsulates a converter from JPEG to raw image (YUV420planer or grey-scale) format.
  * This class is not thread-safe.
  */
-class JpegDecoder {
+class JpegDecoderHelper {
 public:
-    JpegDecoder();
-    ~JpegDecoder();
+    JpegDecoderHelper();
+    ~JpegDecoderHelper();
     /*
      * Decompresses JPEG image to raw image (YUV420planer, grey-scale or RGBA) format. After
      * calling this method, call getDecompressedImage() to get the image.
@@ -116,6 +115,6 @@
     // Position of EXIF package, default value is -1 which means no EXIF package appears.
     size_t mExifPos;
 };
-} /* namespace android  */
+} /* namespace android::jpegrecoverymap  */
 
-#endif // ANDROID_JPEGRECOVERYMAP_JPEGDECODER_H
+#endif // ANDROID_JPEGRECOVERYMAP_JPEGDECODERHELPER_H
diff --git a/libs/jpegrecoverymap/include/jpegrecoverymap/jpegencoder.h b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegencoderhelper.h
similarity index 91%
rename from libs/jpegrecoverymap/include/jpegrecoverymap/jpegencoder.h
rename to libs/jpegrecoverymap/include/jpegrecoverymap/jpegencoderhelper.h
index 61aeb8a..8b82b2b 100644
--- a/libs/jpegrecoverymap/include/jpegrecoverymap/jpegencoder.h
+++ b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegencoderhelper.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_JPEGRECOVERYMAP_JPEGENCODER_H
-#define ANDROID_JPEGRECOVERYMAP_JPEGENCODER_H
+#ifndef ANDROID_JPEGRECOVERYMAP_JPEGENCODERHELPER_H
+#define ANDROID_JPEGRECOVERYMAP_JPEGENCODERHELPER_H
 
 // We must include cstdio before jpeglib.h. It is a requirement of libjpeg.
 #include <cstdio>
@@ -28,16 +28,16 @@
 #include <utils/Errors.h>
 #include <vector>
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 /*
  * Encapsulates a converter from raw image (YUV420planer or grey-scale) to JPEG format.
  * This class is not thread-safe.
  */
-class JpegEncoder {
+class JpegEncoderHelper {
 public:
-    JpegEncoder();
-    ~JpegEncoder();
+    JpegEncoderHelper();
+    ~JpegEncoderHelper();
 
     /*
      * Compresses YUV420Planer image to JPEG format. After calling this method, call
@@ -90,6 +90,6 @@
     std::vector<JOCTET> mResultBuffer;
 };
 
-} /* namespace android  */
+} /* namespace android::jpegrecoverymap  */
 
-#endif // ANDROID_JPEGRECOVERYMAP_JPEGENCODER_H
+#endif // ANDROID_JPEGRECOVERYMAP_JPEGENCODERHELPER_H
diff --git a/libs/jpegrecoverymap/include/jpegrecoverymap/recoverymap.h b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegr.h
similarity index 97%
rename from libs/jpegrecoverymap/include/jpegrecoverymap/recoverymap.h
rename to libs/jpegrecoverymap/include/jpegrecoverymap/jpegr.h
index aee6602..5455ba6 100644
--- a/libs/jpegrecoverymap/include/jpegrecoverymap/recoverymap.h
+++ b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegr.h
@@ -14,12 +14,12 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_JPEGRECOVERYMAP_RECOVERYMAP_H
-#define ANDROID_JPEGRECOVERYMAP_RECOVERYMAP_H
+#ifndef ANDROID_JPEGRECOVERYMAP_JPEGR_H
+#define ANDROID_JPEGRECOVERYMAP_JPEGR_H
 
 #include "jpegrerrorcode.h"
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 // Color gamuts for image data
 typedef enum {
@@ -88,6 +88,8 @@
   uint32_t version;
   // Max Content Boost for the map
   float maxContentBoost;
+  // Min Content Boost for the map
+  float minContentBoost;
 };
 
 typedef struct jpegr_uncompressed_struct* jr_uncompressed_ptr;
@@ -96,7 +98,7 @@
 typedef struct jpegr_metadata* jr_metadata_ptr;
 typedef struct jpegr_info_struct* jr_info_ptr;
 
-class RecoveryMap {
+class JpegR {
 public:
     /*
      * Encode API-0
@@ -219,17 +221,7 @@
     */
     status_t getJPEGRInfo(jr_compressed_ptr compressed_jpegr_image,
                           jr_info_ptr jpegr_info);
-private:
-    /*
-     * This method is called in the encoding pipeline. It will encode the recovery map.
-     *
-     * @param uncompressed_recovery_map uncompressed recovery map
-     * @param dest encoded recover map
-     * @return NO_ERROR if encoding succeeds, error code if error occurs.
-     */
-    status_t compressRecoveryMap(jr_uncompressed_ptr uncompressed_recovery_map,
-                               jr_compressed_ptr dest);
-
+protected:
     /*
      * This method is called in the encoding pipeline. It will take the uncompressed 8-bit and
      * 10-bit yuv images as input, and calculate the uncompressed recovery map. The input images
@@ -265,6 +257,17 @@
                               jr_metadata_ptr metadata,
                               jr_uncompressed_ptr dest);
 
+private:
+    /*
+     * This method is called in the encoding pipeline. It will encode the recovery map.
+     *
+     * @param uncompressed_recovery_map uncompressed recovery map
+     * @param dest encoded recover map
+     * @return NO_ERROR if encoding succeeds, error code if error occurs.
+     */
+    status_t compressRecoveryMap(jr_uncompressed_ptr uncompressed_recovery_map,
+                               jr_compressed_ptr dest);
+
     /*
      * This methoud is called to separate primary image and recovery map image from JPEGR
      *
@@ -320,6 +323,6 @@
                      jr_uncompressed_ptr dest);
 };
 
-} // namespace android::recoverymap
+} // namespace android::jpegrecoverymap
 
-#endif // ANDROID_JPEGRECOVERYMAP_RECOVERYMAP_H
+#endif // ANDROID_JPEGRECOVERYMAP_JPEGR_H
diff --git a/libs/jpegrecoverymap/include/jpegrecoverymap/jpegrerrorcode.h b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegrerrorcode.h
index 699c0d3..f730343 100644
--- a/libs/jpegrecoverymap/include/jpegrecoverymap/jpegrerrorcode.h
+++ b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegrerrorcode.h
@@ -16,7 +16,7 @@
 
 #include <utils/Errors.h>
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 enum {
     // status_t map for errors in the media framework
@@ -48,4 +48,4 @@
     ERROR_JPEGR_TONEMAP_ERROR           = JPEGR_RUNTIME_ERROR_BASE - 5,
 };
 
-}  // namespace android::recoverymap
+}  // namespace android::jpegrecoverymap
diff --git a/libs/jpegrecoverymap/include/jpegrecoverymap/recoverymaputils.h b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegrutils.h
similarity index 91%
rename from libs/jpegrecoverymap/include/jpegrecoverymap/recoverymaputils.h
rename to libs/jpegrecoverymap/include/jpegrecoverymap/jpegrutils.h
index de29a33..3a0f67d 100644
--- a/libs/jpegrecoverymap/include/jpegrecoverymap/recoverymaputils.h
+++ b/libs/jpegrecoverymap/include/jpegrecoverymap/jpegrutils.h
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_JPEGRECOVERYMAP_RECOVERYMAPUTILS_H
-#define ANDROID_JPEGRECOVERYMAP_RECOVERYMAPUTILS_H
+#ifndef ANDROID_JPEGRECOVERYMAP_JPEGRUTILS_H
+#define ANDROID_JPEGRECOVERYMAP_JPEGRUTILS_H
 
-#include <jpegrecoverymap/recoverymap.h>
+#include <jpegrecoverymap/jpegr.h>
 
 #include <sstream>
 #include <stdint.h>
 #include <string>
 #include <cstdio>
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 struct jpegr_metadata;
 
@@ -92,6 +92,6 @@
  * @return XMP metadata in type of string
  */
 std::string generateXmp(int secondary_image_length, jpegr_metadata& metadata);
-}
+}  // namespace android::jpegrecoverymap
 
-#endif //ANDROID_JPEGRECOVERYMAP_RECOVERYMAPUTILS_H
+#endif //ANDROID_JPEGRECOVERYMAP_JPEGRUTILS_H
diff --git a/libs/jpegrecoverymap/include/jpegrecoverymap/recoverymapmath.h b/libs/jpegrecoverymap/include/jpegrecoverymap/recoverymapmath.h
index 0695bb7..c12cee9 100644
--- a/libs/jpegrecoverymap/include/jpegrecoverymap/recoverymapmath.h
+++ b/libs/jpegrecoverymap/include/jpegrecoverymap/recoverymapmath.h
@@ -20,9 +20,9 @@
 #include <cmath>
 #include <stdint.h>
 
-#include <jpegrecoverymap/recoverymap.h>
+#include <jpegrecoverymap/jpegr.h>
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 #define CLIP3(x, min, max) ((x) < (min)) ? (min) : ((x) > (max)) ? (max) : (x)
 
@@ -118,11 +118,12 @@
 constexpr size_t kRecoveryFactorPrecision = 10;
 constexpr size_t kRecoveryFactorNumEntries = 1 << kRecoveryFactorPrecision;
 struct RecoveryLUT {
-  RecoveryLUT(float hdrRatio) {
-    float increment = 2.0 / kRecoveryFactorNumEntries;
-    float value = -1.0f;
-    for (int idx = 0; idx < kRecoveryFactorNumEntries; idx++, value += increment) {
-      mRecoveryTable[idx] = pow(hdrRatio, value);
+  RecoveryLUT(jr_metadata_ptr metadata) {
+    for (int idx = 0; idx < kRecoveryFactorNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kRecoveryFactorNumEntries - 1);
+      float logBoost = log2(metadata->minContentBoost) * (1.0f - value)
+                     + log2(metadata->maxContentBoost) * value;
+      mRecoveryTable[idx] = exp2(logBoost);
     }
   }
 
@@ -130,10 +131,10 @@
   }
 
   float getRecoveryFactor(float recovery) {
-    uint32_t value = static_cast<uint32_t>(((recovery + 1.0f) / 2.0f) * kRecoveryFactorNumEntries);
+    uint32_t idx = static_cast<uint32_t>(recovery * (kRecoveryFactorNumEntries - 1));
     //TODO() : Remove once conversion modules have appropriate clamping in place
-    value = CLIP3(value, 0, kRecoveryFactorNumEntries - 1);
-    return mRecoveryTable[value];
+    idx = CLIP3(idx, 0, kRecoveryFactorNumEntries - 1);
+    return mRecoveryTable[idx];
   }
 
 private:
@@ -219,6 +220,9 @@
 float srgbInvOetfLUT(float e_gamma);
 Color srgbInvOetfLUT(Color e_gamma);
 
+constexpr size_t kSrgbInvOETFPrecision = 10;
+constexpr size_t kSrgbInvOETFNumEntries = 1 << kSrgbInvOETFPrecision;
+
 ////////////////////////////////////////////////////////////////////////////////
 // Display-P3 transformations
 
@@ -260,6 +264,9 @@
 float hlgOetfLUT(float e);
 Color hlgOetfLUT(Color e);
 
+constexpr size_t kHlgOETFPrecision = 10;
+constexpr size_t kHlgOETFNumEntries = 1 << kHlgOETFPrecision;
+
 /*
  * Convert from HLG to scene luminance.
  *
@@ -270,6 +277,9 @@
 float hlgInvOetfLUT(float e_gamma);
 Color hlgInvOetfLUT(Color e_gamma);
 
+constexpr size_t kHlgInvOETFPrecision = 10;
+constexpr size_t kHlgInvOETFNumEntries = 1 << kHlgInvOETFPrecision;
+
 /*
  * Convert from scene luminance to PQ.
  *
@@ -280,6 +290,9 @@
 float pqOetfLUT(float e);
 Color pqOetfLUT(Color e);
 
+constexpr size_t kPqOETFPrecision = 10;
+constexpr size_t kPqOETFNumEntries = 1 << kPqOETFPrecision;
+
 /*
  * Convert from PQ to scene luminance in nits.
  *
@@ -290,6 +303,9 @@
 float pqInvOetfLUT(float e_gamma);
 Color pqInvOetfLUT(Color e_gamma);
 
+constexpr size_t kPqInvOETFPrecision = 10;
+constexpr size_t kPqInvOETFNumEntries = 1 << kPqInvOETFPrecision;
+
 
 ////////////////////////////////////////////////////////////////////////////////
 // Color space conversions
@@ -326,13 +342,13 @@
  * Calculate the 8-bit unsigned integer recovery value for the given SDR and HDR
  * luminances in linear space, and the hdr ratio to encode against.
  */
-uint8_t encodeRecovery(float y_sdr, float y_hdr, float hdr_ratio);
+uint8_t encodeRecovery(float y_sdr, float y_hdr, jr_metadata_ptr metadata);
 
 /*
  * Calculates the linear luminance in nits after applying the given recovery
  * value, with the given hdr ratio, to the given sdr input in the range [0, 1].
  */
-Color applyRecovery(Color e, float recovery, float hdr_ratio);
+Color applyRecovery(Color e, float recovery, jr_metadata_ptr metadata);
 Color applyRecoveryLUT(Color e, float recovery, RecoveryLUT& recoveryLUT);
 
 /*
@@ -376,6 +392,6 @@
  */
 uint32_t colorToRgba1010102(Color e_gamma);
 
-} // namespace android::recoverymap
+} // namespace android::jpegrecoverymap
 
 #endif // ANDROID_JPEGRECOVERYMAP_RECOVERYMAPMATH_H
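
The RecoveryLUT change above replaces the single hdrRatio exponent with a log-space interpolation between the metadata's minContentBoost and maxContentBoost. For a recovery value v in [0, 1] each table entry is effectively

    recoveryFactor(v) = exp2((1 - v) * log2(minContentBoost) + v * log2(maxContentBoost))

indexed by idx = v * (kRecoveryFactorNumEntries - 1). For illustration, with hypothetical metadata values minContentBoost = 0.5 and maxContentBoost = 8.0: v = 0 gives 0.5, v = 0.5 gives 2.0, and v = 1 gives 8.0, so the boost sweeps geometrically from the minimum to the maximum.
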
diff --git a/libs/jpegrecoverymap/jpegdecoder.cpp b/libs/jpegrecoverymap/jpegdecoderhelper.cpp
similarity index 91%
rename from libs/jpegrecoverymap/jpegdecoder.cpp
rename to libs/jpegrecoverymap/jpegdecoderhelper.cpp
index 1bf609a..d36bbf8 100644
--- a/libs/jpegrecoverymap/jpegdecoder.cpp
+++ b/libs/jpegrecoverymap/jpegdecoderhelper.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include <jpegrecoverymap/jpegdecoder.h>
+#include <jpegrecoverymap/jpegdecoderhelper.h>
 
 #include <utils/Log.h>
 
@@ -24,7 +24,7 @@
 
 using namespace std;
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 const uint32_t kAPP0Marker = JPEG_APP0;      // JFIF
 const uint32_t kAPP1Marker = JPEG_APP0 + 1;  // EXIF, XMP
@@ -90,14 +90,14 @@
     longjmp(err->setjmp_buffer, 1);
 }
 
-JpegDecoder::JpegDecoder() {
+JpegDecoderHelper::JpegDecoderHelper() {
   mExifPos = 0;
 }
 
-JpegDecoder::~JpegDecoder() {
+JpegDecoderHelper::~JpegDecoderHelper() {
 }
 
-bool JpegDecoder::decompressImage(const void* image, int length, bool decodeToRGBA) {
+bool JpegDecoderHelper::decompressImage(const void* image, int length, bool decodeToRGBA) {
     if (image == nullptr || length <= 0) {
         ALOGE("Image size can not be handled: %d", length);
         return false;
@@ -112,39 +112,39 @@
     return true;
 }
 
-void* JpegDecoder::getDecompressedImagePtr() {
+void* JpegDecoderHelper::getDecompressedImagePtr() {
     return mResultBuffer.data();
 }
 
-size_t JpegDecoder::getDecompressedImageSize() {
+size_t JpegDecoderHelper::getDecompressedImageSize() {
     return mResultBuffer.size();
 }
 
-void* JpegDecoder::getXMPPtr() {
+void* JpegDecoderHelper::getXMPPtr() {
     return mXMPBuffer.data();
 }
 
-size_t JpegDecoder::getXMPSize() {
+size_t JpegDecoderHelper::getXMPSize() {
     return mXMPBuffer.size();
 }
 
-void* JpegDecoder::getEXIFPtr() {
+void* JpegDecoderHelper::getEXIFPtr() {
     return mEXIFBuffer.data();
 }
 
-size_t JpegDecoder::getEXIFSize() {
+size_t JpegDecoderHelper::getEXIFSize() {
     return mEXIFBuffer.size();
 }
 
-size_t JpegDecoder::getDecompressedImageWidth() {
+size_t JpegDecoderHelper::getDecompressedImageWidth() {
     return mWidth;
 }
 
-size_t JpegDecoder::getDecompressedImageHeight() {
+size_t JpegDecoderHelper::getDecompressedImageHeight() {
     return mHeight;
 }
 
-bool JpegDecoder::decode(const void* image, int length, bool decodeToRGBA) {
+bool JpegDecoderHelper::decode(const void* image, int length, bool decodeToRGBA) {
     jpeg_decompress_struct cinfo;
     jpegr_source_mgr mgr(static_cast<const uint8_t*>(image), length);
     jpegrerror_mgr myerr;
@@ -248,7 +248,7 @@
     return true;
 }
 
-bool JpegDecoder::decompress(jpeg_decompress_struct* cinfo, const uint8_t* dest,
+bool JpegDecoderHelper::decompress(jpeg_decompress_struct* cinfo, const uint8_t* dest,
         bool isSingleChannel) {
     if (isSingleChannel) {
         return decompressSingleChannel(cinfo, dest);
@@ -259,7 +259,7 @@
         return decompressYUV(cinfo, dest);
 }
 
-bool JpegDecoder::getCompressedImageParameters(const void* image, int length,
+bool JpegDecoderHelper::getCompressedImageParameters(const void* image, int length,
                               size_t *pWidth, size_t *pHeight,
                               std::vector<uint8_t> *iccData , std::vector<uint8_t> *exifData) {
     jpeg_decompress_struct cinfo;
@@ -326,7 +326,7 @@
     return true;
 }
 
-bool JpegDecoder::decompressRGBA(jpeg_decompress_struct* cinfo, const uint8_t* dest) {
+bool JpegDecoderHelper::decompressRGBA(jpeg_decompress_struct* cinfo, const uint8_t* dest) {
     JSAMPLE* decodeDst = (JSAMPLE*) dest;
     uint32_t lines = 0;
     // TODO: use batches for more effectiveness
@@ -341,7 +341,7 @@
     return lines == cinfo->image_height;
 }
 
-bool JpegDecoder::decompressYUV(jpeg_decompress_struct* cinfo, const uint8_t* dest) {
+bool JpegDecoderHelper::decompressYUV(jpeg_decompress_struct* cinfo, const uint8_t* dest) {
 
     JSAMPROW y[kCompressBatchSize];
     JSAMPROW cb[kCompressBatchSize / 2];
@@ -386,7 +386,7 @@
     return true;
 }
 
-bool JpegDecoder::decompressSingleChannel(jpeg_decompress_struct* cinfo, const uint8_t* dest) {
+bool JpegDecoderHelper::decompressSingleChannel(jpeg_decompress_struct* cinfo, const uint8_t* dest) {
     JSAMPROW y[kCompressBatchSize];
     JSAMPARRAY planes[1] {y};
 
@@ -413,4 +413,4 @@
     return true;
 }
 
-} // namespace android
+} // namespace jpegrecoverymap
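
For context on the renamed decoder class, a minimal usage sketch of JpegDecoderHelper as exposed by this patch; the decodeToYuv wrapper and its arguments are hypothetical and not part of the change:

    #include <jpegrecoverymap/jpegdecoderhelper.h>

    // Hypothetical caller (sketch only): decode an in-memory JPEG to YUV420
    // and sanity-check the output, using only API shown in this patch.
    bool decodeToYuv(const void* jpegData, int jpegLength) {
        android::jpegrecoverymap::JpegDecoderHelper decoder;
        // decodeToRGBA = false keeps the decoder on its YUV output path.
        if (!decoder.decompressImage(jpegData, jpegLength, false)) {
            return false;
        }
        return decoder.getDecompressedImagePtr() != nullptr &&
               decoder.getDecompressedImageSize() > 0;
    }
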
diff --git a/libs/jpegrecoverymap/jpegencoder.cpp b/libs/jpegrecoverymap/jpegencoderhelper.cpp
similarity index 86%
rename from libs/jpegrecoverymap/jpegencoder.cpp
rename to libs/jpegrecoverymap/jpegencoderhelper.cpp
index 627dcdf..586cd34 100644
--- a/libs/jpegrecoverymap/jpegencoder.cpp
+++ b/libs/jpegrecoverymap/jpegencoderhelper.cpp
@@ -14,28 +14,28 @@
  * limitations under the License.
  */
 
-#include <jpegrecoverymap/jpegencoder.h>
+#include <jpegrecoverymap/jpegencoderhelper.h>
 
 #include <utils/Log.h>
 
 #include <errno.h>
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
-// The destination manager that can access |mResultBuffer| in JpegEncoder.
+// The destination manager that can access |mResultBuffer| in JpegEncoderHelper.
 struct destination_mgr {
 public:
     struct jpeg_destination_mgr mgr;
-    JpegEncoder* encoder;
+    JpegEncoderHelper* encoder;
 };
 
-JpegEncoder::JpegEncoder() {
+JpegEncoderHelper::JpegEncoderHelper() {
 }
 
-JpegEncoder::~JpegEncoder() {
+JpegEncoderHelper::~JpegEncoderHelper() {
 }
 
-bool JpegEncoder::compressImage(const void* image, int width, int height, int quality,
+bool JpegEncoderHelper::compressImage(const void* image, int width, int height, int quality,
                                    const void* iccBuffer, unsigned int iccSize,
                                    bool isSingleChannel) {
     if (width % 8 != 0 || height % 2 != 0) {
@@ -52,15 +52,15 @@
     return true;
 }
 
-void* JpegEncoder::getCompressedImagePtr() {
+void* JpegEncoderHelper::getCompressedImagePtr() {
     return mResultBuffer.data();
 }
 
-size_t JpegEncoder::getCompressedImageSize() {
+size_t JpegEncoderHelper::getCompressedImageSize() {
     return mResultBuffer.size();
 }
 
-void JpegEncoder::initDestination(j_compress_ptr cinfo) {
+void JpegEncoderHelper::initDestination(j_compress_ptr cinfo) {
     destination_mgr* dest = reinterpret_cast<destination_mgr*>(cinfo->dest);
     std::vector<JOCTET>& buffer = dest->encoder->mResultBuffer;
     buffer.resize(kBlockSize);
@@ -68,7 +68,7 @@
     dest->mgr.free_in_buffer = buffer.size();
 }
 
-boolean JpegEncoder::emptyOutputBuffer(j_compress_ptr cinfo) {
+boolean JpegEncoderHelper::emptyOutputBuffer(j_compress_ptr cinfo) {
     destination_mgr* dest = reinterpret_cast<destination_mgr*>(cinfo->dest);
     std::vector<JOCTET>& buffer = dest->encoder->mResultBuffer;
     size_t oldsize = buffer.size();
@@ -78,13 +78,13 @@
     return true;
 }
 
-void JpegEncoder::terminateDestination(j_compress_ptr cinfo) {
+void JpegEncoderHelper::terminateDestination(j_compress_ptr cinfo) {
     destination_mgr* dest = reinterpret_cast<destination_mgr*>(cinfo->dest);
     std::vector<JOCTET>& buffer = dest->encoder->mResultBuffer;
     buffer.resize(buffer.size() - dest->mgr.free_in_buffer);
 }
 
-void JpegEncoder::outputErrorMessage(j_common_ptr cinfo) {
+void JpegEncoderHelper::outputErrorMessage(j_common_ptr cinfo) {
     char buffer[JMSG_LENGTH_MAX];
 
     /* Create the message */
@@ -92,7 +92,7 @@
     ALOGE("%s\n", buffer);
 }
 
-bool JpegEncoder::encode(const void* image, int width, int height, int jpegQuality,
+bool JpegEncoderHelper::encode(const void* image, int width, int height, int jpegQuality,
                          const void* iccBuffer, unsigned int iccSize, bool isSingleChannel) {
     jpeg_compress_struct cinfo;
     jpeg_error_mgr jerr;
@@ -118,7 +118,7 @@
     return true;
 }
 
-void JpegEncoder::setJpegDestination(jpeg_compress_struct* cinfo) {
+void JpegEncoderHelper::setJpegDestination(jpeg_compress_struct* cinfo) {
     destination_mgr* dest = static_cast<struct destination_mgr *>((*cinfo->mem->alloc_small) (
             (j_common_ptr) cinfo, JPOOL_PERMANENT, sizeof(destination_mgr)));
     dest->encoder = this;
@@ -128,7 +128,7 @@
     cinfo->dest = reinterpret_cast<struct jpeg_destination_mgr*>(dest);
 }
 
-void JpegEncoder::setJpegCompressStruct(int width, int height, int quality,
+void JpegEncoderHelper::setJpegCompressStruct(int width, int height, int quality,
                                         jpeg_compress_struct* cinfo, bool isSingleChannel) {
     cinfo->image_width = width;
     cinfo->image_height = height;
@@ -158,7 +158,7 @@
     }
 }
 
-bool JpegEncoder::compress(
+bool JpegEncoderHelper::compress(
         jpeg_compress_struct* cinfo, const uint8_t* image, bool isSingleChannel) {
     if (isSingleChannel) {
         return compressSingleChannel(cinfo, image);
@@ -166,7 +166,7 @@
     return compressYuv(cinfo, image);
 }
 
-bool JpegEncoder::compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yuv) {
+bool JpegEncoderHelper::compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yuv) {
     JSAMPROW y[kCompressBatchSize];
     JSAMPROW cb[kCompressBatchSize / 2];
     JSAMPROW cr[kCompressBatchSize / 2];
@@ -210,7 +210,7 @@
     return true;
 }
 
-bool JpegEncoder::compressSingleChannel(jpeg_compress_struct* cinfo, const uint8_t* image) {
+bool JpegEncoderHelper::compressSingleChannel(jpeg_compress_struct* cinfo, const uint8_t* image) {
     JSAMPROW y[kCompressBatchSize];
     JSAMPARRAY planes[1] {y};
 
@@ -236,4 +236,4 @@
     return true;
 }
 
-} // namespace android
+} // namespace jpegrecoverymap
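
JpegEncoderHelper feeds libjpeg through a custom destination manager that appends into mResultBuffer, as the initDestination / emptyOutputBuffer / terminateDestination methods above show. A standalone sketch of that callback trio, with a file-scope vector and an assumed kBlockSize standing in for the class members:

    #include <cstdio>
    #include <cstddef>
    #include <vector>
    extern "C" {
    #include <jpeglib.h>
    }

    static constexpr size_t kBlockSize = 16384;  // assumed chunk size
    static std::vector<JOCTET> gResultBuffer;    // stands in for mResultBuffer

    static void initDestination(j_compress_ptr cinfo) {
        gResultBuffer.resize(kBlockSize);
        cinfo->dest->next_output_byte = gResultBuffer.data();
        cinfo->dest->free_in_buffer = gResultBuffer.size();
    }

    static boolean emptyOutputBuffer(j_compress_ptr cinfo) {
        // libjpeg filled the buffer: grow it and continue at the old end.
        size_t oldSize = gResultBuffer.size();
        gResultBuffer.resize(oldSize + kBlockSize);
        cinfo->dest->next_output_byte = gResultBuffer.data() + oldSize;
        cinfo->dest->free_in_buffer = kBlockSize;
        return TRUE;
    }

    static void terminateDestination(j_compress_ptr cinfo) {
        // Trim the unused tail so size() equals the compressed byte count.
        gResultBuffer.resize(gResultBuffer.size() - cinfo->dest->free_in_buffer);
    }
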
diff --git a/libs/jpegrecoverymap/recoverymap.cpp b/libs/jpegrecoverymap/jpegr.cpp
similarity index 91%
rename from libs/jpegrecoverymap/recoverymap.cpp
rename to libs/jpegrecoverymap/jpegr.cpp
index 8b8c2e7..bd8874e 100644
--- a/libs/jpegrecoverymap/recoverymap.cpp
+++ b/libs/jpegrecoverymap/jpegr.cpp
@@ -14,11 +14,11 @@
  * limitations under the License.
  */
 
-#include <jpegrecoverymap/recoverymap.h>
-#include <jpegrecoverymap/jpegencoder.h>
-#include <jpegrecoverymap/jpegdecoder.h>
+#include <jpegrecoverymap/jpegr.h>
+#include <jpegrecoverymap/jpegencoderhelper.h>
+#include <jpegrecoverymap/jpegdecoderhelper.h>
 #include <jpegrecoverymap/recoverymapmath.h>
-#include <jpegrecoverymap/recoverymaputils.h>
+#include <jpegrecoverymap/jpegrutils.h>
 
 #include <image_io/jpeg/jpeg_marker.h>
 #include <image_io/jpeg/jpeg_info.h>
@@ -43,7 +43,7 @@
 using namespace std;
 using namespace photos_editing_formats::image_io;
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 #define USE_SRGB_INVOETF_LUT 1
 #define USE_HLG_OETF_LUT 1
@@ -86,14 +86,15 @@
   return cpuCoreCount;
 }
 
-static const map<recoverymap::jpegr_color_gamut, skcms_Matrix3x3> jrGamut_to_skGamut {
+static const map<jpegrecoverymap::jpegr_color_gamut, skcms_Matrix3x3> jrGamut_to_skGamut {
     {JPEGR_COLORGAMUT_BT709,     SkNamedGamut::kSRGB},
     {JPEGR_COLORGAMUT_P3,        SkNamedGamut::kDisplayP3},
     {JPEGR_COLORGAMUT_BT2100,    SkNamedGamut::kRec2020},
 };
 
 static const map<
-        recoverymap::jpegr_transfer_function, skcms_TransferFunction> jrTransFunc_to_skTransFunc {
+        jpegrecoverymap::jpegr_transfer_function,
+        skcms_TransferFunction> jrTransFunc_to_skTransFunc {
     {JPEGR_TF_SRGB,        SkNamedTransferFn::kSRGB},
     {JPEGR_TF_LINEAR,      SkNamedTransferFn::kLinear},
     {JPEGR_TF_HLG,         SkNamedTransferFn::kHLG},
@@ -101,7 +102,7 @@
 };
 
 /* Encode API-0 */
-status_t RecoveryMap::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
                                   jpegr_transfer_function hdr_tf,
                                   jr_compressed_ptr dest,
                                   int quality,
@@ -146,7 +147,7 @@
           jrTransFunc_to_skTransFunc.at(JPEGR_TF_SRGB),
           jrGamut_to_skGamut.at(uncompressed_yuv_420_image.colorGamut));
 
-  JpegEncoder jpeg_encoder;
+  JpegEncoderHelper jpeg_encoder;
   if (!jpeg_encoder.compressImage(uncompressed_yuv_420_image.data,
                                   uncompressed_yuv_420_image.width,
                                   uncompressed_yuv_420_image.height, quality,
@@ -163,7 +164,7 @@
 }
 
 /* Encode API-1 */
-status_t RecoveryMap::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
                                   jr_uncompressed_ptr uncompressed_yuv_420_image,
                                   jpegr_transfer_function hdr_tf,
                                   jr_compressed_ptr dest,
@@ -210,7 +211,7 @@
           jrTransFunc_to_skTransFunc.at(JPEGR_TF_SRGB),
           jrGamut_to_skGamut.at(uncompressed_yuv_420_image->colorGamut));
 
-  JpegEncoder jpeg_encoder;
+  JpegEncoderHelper jpeg_encoder;
   if (!jpeg_encoder.compressImage(uncompressed_yuv_420_image->data,
                                   uncompressed_yuv_420_image->width,
                                   uncompressed_yuv_420_image->height, quality,
@@ -227,7 +228,7 @@
 }
 
 /* Encode API-2 */
-status_t RecoveryMap::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
                                   jr_uncompressed_ptr uncompressed_yuv_420_image,
                                   jr_compressed_ptr compressed_jpeg_image,
                                   jpegr_transfer_function hdr_tf,
@@ -272,7 +273,7 @@
 }
 
 /* Encode API-3 */
-status_t RecoveryMap::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
                                   jr_compressed_ptr compressed_jpeg_image,
                                   jpegr_transfer_function hdr_tf,
                                   jr_compressed_ptr dest) {
@@ -289,7 +290,7 @@
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
 
-  JpegDecoder jpeg_decoder;
+  JpegDecoderHelper jpeg_decoder;
   if (!jpeg_decoder.decompressImage(compressed_jpeg_image->data, compressed_jpeg_image->length)) {
     return ERROR_JPEGR_DECODE_ERROR;
   }
@@ -324,7 +325,7 @@
   return NO_ERROR;
 }
 
-status_t RecoveryMap::getJPEGRInfo(jr_compressed_ptr compressed_jpegr_image,
+status_t JpegR::getJPEGRInfo(jr_compressed_ptr compressed_jpegr_image,
                                    jr_info_ptr jpegr_info) {
   if (compressed_jpegr_image == nullptr || jpegr_info == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
@@ -334,7 +335,7 @@
   JPEGR_CHECK(extractPrimaryImageAndRecoveryMap(compressed_jpegr_image,
                                                 &primary_image, &recovery_map));
 
-  JpegDecoder jpeg_decoder;
+  JpegDecoderHelper jpeg_decoder;
   if (!jpeg_decoder.getCompressedImageParameters(primary_image.data, primary_image.length,
                                                  &jpegr_info->width, &jpegr_info->height,
                                                  jpegr_info->iccData, jpegr_info->exifData)) {
@@ -345,7 +346,7 @@
 }
 
 /* Decode API */
-status_t RecoveryMap::decodeJPEGR(jr_compressed_ptr compressed_jpegr_image,
+status_t JpegR::decodeJPEGR(jr_compressed_ptr compressed_jpegr_image,
                                   jr_uncompressed_ptr dest,
                                   jr_exif_ptr exif,
                                   bool request_sdr) {
@@ -356,7 +357,7 @@
   (void) exif;
 
   if (request_sdr) {
-    JpegDecoder jpeg_decoder;
+    JpegDecoderHelper jpeg_decoder;
     if (!jpeg_decoder.decompressImage(compressed_jpegr_image->data, compressed_jpegr_image->length,
                                       true)) {
         return ERROR_JPEGR_DECODE_ERROR;
@@ -376,12 +377,12 @@
   jpegr_metadata metadata;
   JPEGR_CHECK(extractRecoveryMap(compressed_jpegr_image, &compressed_map));
 
-  JpegDecoder jpeg_decoder;
+  JpegDecoderHelper jpeg_decoder;
   if (!jpeg_decoder.decompressImage(compressed_jpegr_image->data, compressed_jpegr_image->length)) {
     return ERROR_JPEGR_DECODE_ERROR;
   }
 
-  JpegDecoder recovery_map_decoder;
+  JpegDecoderHelper recovery_map_decoder;
   if (!recovery_map_decoder.decompressImage(compressed_map.data, compressed_map.length)) {
     return ERROR_JPEGR_DECODE_ERROR;
   }
@@ -405,13 +406,13 @@
   return NO_ERROR;
 }
 
-status_t RecoveryMap::compressRecoveryMap(jr_uncompressed_ptr uncompressed_recovery_map,
+status_t JpegR::compressRecoveryMap(jr_uncompressed_ptr uncompressed_recovery_map,
                                           jr_compressed_ptr dest) {
   if (uncompressed_recovery_map == nullptr || dest == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
 
-  JpegEncoder jpeg_encoder;
+  JpegEncoderHelper jpeg_encoder;
   if (!jpeg_encoder.compressImage(uncompressed_recovery_map->data,
                                   uncompressed_recovery_map->width,
                                   uncompressed_recovery_map->height,
@@ -489,7 +490,7 @@
   mQueuedAllJobs = false;
 }
 
-status_t RecoveryMap::generateRecoveryMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
+status_t JpegR::generateRecoveryMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
                                           jr_uncompressed_ptr uncompressed_p010_image,
                                           jpegr_transfer_function hdr_tf,
                                           jr_metadata_ptr metadata,
@@ -573,19 +574,20 @@
   }
 
   std::mutex mutex;
-  float hdr_y_nits_max = 0.0f;
-  double hdr_y_nits_avg = 0.0f;
+  float max_gain = 0.0f;
+  float min_gain = 1.0f;
   const int threads = std::clamp(GetCPUCoreCount(), 1, 4);
   size_t rowStep = threads == 1 ? image_height : kJobSzInRows;
   JobQueue jobQueue;
 
-  std::function<void()> computeMetadata = [uncompressed_p010_image, hdrInvOetf,
-                                           hdrGamutConversionFn, luminanceFn, hdr_white_nits,
-                                           threads, &mutex, &hdr_y_nits_avg,
-                                           &hdr_y_nits_max, &jobQueue]() -> void {
+  std::function<void()> computeMetadata = [uncompressed_p010_image, uncompressed_yuv_420_image,
+                                           hdrInvOetf, hdrGamutConversionFn, luminanceFn,
+                                           hdr_white_nits, threads, &mutex, &max_gain, &min_gain,
+                                           &jobQueue]() -> void {
     size_t rowStart, rowEnd;
-    float hdr_y_nits_max_th = 0.0f;
-    double hdr_y_nits_avg_th = 0.0f;
+    float max_gain_th = 0.0f;
+    float min_gain_th = 1.0f;
+
     while (jobQueue.dequeueJob(rowStart, rowEnd)) {
       for (size_t y = rowStart; y < rowEnd; ++y) {
         for (size_t x = 0; x < uncompressed_p010_image->width; ++x) {
@@ -595,16 +597,25 @@
           hdr_rgb = hdrGamutConversionFn(hdr_rgb);
           float hdr_y_nits = luminanceFn(hdr_rgb) * hdr_white_nits;
 
-          hdr_y_nits_avg_th += hdr_y_nits;
-          if (hdr_y_nits > hdr_y_nits_max_th) {
-            hdr_y_nits_max_th = hdr_y_nits;
-          }
+          Color sdr_yuv_gamma =
+              getYuv420Pixel(uncompressed_yuv_420_image, x, y);
+          Color sdr_rgb_gamma = srgbYuvToRgb(sdr_yuv_gamma);
+#if USE_SRGB_INVOETF_LUT
+          Color sdr_rgb = srgbInvOetfLUT(sdr_rgb_gamma);
+#else
+          Color sdr_rgb = srgbInvOetf(sdr_rgb_gamma);
+#endif
+          float sdr_y_nits = luminanceFn(sdr_rgb) * kSdrWhiteNits;
+
+          float gain = hdr_y_nits / sdr_y_nits;
+          max_gain_th = std::max(max_gain_th, gain);
+          min_gain_th = std::min(min_gain_th, gain);
         }
       }
     }
     std::unique_lock<std::mutex> lock{mutex};
-    hdr_y_nits_avg += hdr_y_nits_avg_th;
-    hdr_y_nits_max = std::max(hdr_y_nits_max, hdr_y_nits_max_th);
+    max_gain = std::max(max_gain, max_gain_th);
+    min_gain = std::min(min_gain, min_gain_th);
   };
 
   std::function<void()> generateMap = [uncompressed_yuv_420_image, uncompressed_p010_image,
@@ -634,7 +645,7 @@
 
           size_t pixel_idx = x + y * dest_map_stride;
           reinterpret_cast<uint8_t*>(dest->data)[pixel_idx] =
-              encodeRecovery(sdr_y_nits, hdr_y_nits, metadata->maxContentBoost);
+              encodeRecovery(sdr_y_nits, hdr_y_nits, metadata);
         }
       }
     }
@@ -655,9 +666,9 @@
   computeMetadata();
   std::for_each(workers.begin(), workers.end(), [](std::thread& t) { t.join(); });
   workers.clear();
-  hdr_y_nits_avg /= image_width * image_height;
 
-  metadata->maxContentBoost = hdr_y_nits_max / kSdrWhiteNits;
+  metadata->maxContentBoost = max_gain;
+  metadata->minContentBoost = min_gain;
 
   // generate map
   jobQueue.reset();
@@ -679,7 +690,7 @@
   return NO_ERROR;
 }
 
-status_t RecoveryMap::applyRecoveryMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
+status_t JpegR::applyRecoveryMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
                                        jr_uncompressed_ptr uncompressed_recovery_map,
                                        jr_metadata_ptr metadata,
                                        jr_uncompressed_ptr dest) {
@@ -693,7 +704,7 @@
   dest->width = uncompressed_yuv_420_image->width;
   dest->height = uncompressed_yuv_420_image->height;
   ShepardsIDW idwTable(kMapDimensionScaleFactor);
-  RecoveryLUT recoveryLUT(metadata->maxContentBoost);
+  RecoveryLUT recoveryLUT(metadata);
 
   JobQueue jobQueue;
   std::function<void()> applyRecMap = [uncompressed_yuv_420_image, uncompressed_recovery_map,
@@ -729,13 +740,12 @@
           if (map_scale_factor != floorf(map_scale_factor)) {
             recovery = sampleMap(uncompressed_recovery_map, map_scale_factor, x, y);
           } else {
-            recovery = sampleMap(uncompressed_recovery_map, map_scale_factor, x, y,
-                                idwTable);
+            recovery = sampleMap(uncompressed_recovery_map, map_scale_factor, x, y, idwTable);
           }
 #if USE_APPLY_RECOVERY_LUT
           Color rgb_hdr = applyRecoveryLUT(rgb_sdr, recovery, recoveryLUT);
 #else
-          Color rgb_hdr = applyRecovery(rgb_sdr, recovery, hdr_ratio);
+          Color rgb_hdr = applyRecovery(rgb_sdr, recovery, metadata);
 #endif
           Color rgb_gamma_hdr = hdrOetf(rgb_hdr / metadata->maxContentBoost);
           uint32_t rgba1010102 = colorToRgba1010102(rgb_gamma_hdr);
@@ -764,7 +774,7 @@
   return NO_ERROR;
 }
 
-status_t RecoveryMap::extractPrimaryImageAndRecoveryMap(jr_compressed_ptr compressed_jpegr_image,
+status_t JpegR::extractPrimaryImageAndRecoveryMap(jr_compressed_ptr compressed_jpegr_image,
                                                         jr_compressed_ptr primary_image,
                                                         jr_compressed_ptr recovery_map) {
   if (compressed_jpegr_image == nullptr) {
@@ -814,7 +824,7 @@
 }
 
 
-status_t RecoveryMap::extractRecoveryMap(jr_compressed_ptr compressed_jpegr_image,
+status_t JpegR::extractRecoveryMap(jr_compressed_ptr compressed_jpegr_image,
                                          jr_compressed_ptr dest) {
   if (compressed_jpegr_image == nullptr || dest == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
@@ -845,7 +855,7 @@
 // Exif 2.2 spec for EXIF marker
 // Adobe XMP spec part 3 for XMP marker
 // ICC v4.3 spec for ICC
-status_t RecoveryMap::appendRecoveryMap(jr_compressed_ptr compressed_jpeg_image,
+status_t JpegR::appendRecoveryMap(jr_compressed_ptr compressed_jpeg_image,
                                         jr_compressed_ptr compressed_recovery_map,
                                         jr_exif_ptr exif,
                                         jr_metadata_ptr metadata,
@@ -908,7 +918,7 @@
   return NO_ERROR;
 }
 
-status_t RecoveryMap::toneMap(jr_uncompressed_ptr src,
+status_t JpegR::toneMap(jr_uncompressed_ptr src,
                               jr_uncompressed_ptr dest) {
   if (src == nullptr || dest == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
@@ -945,4 +955,4 @@
   return NO_ERROR;
 }
 
-} // namespace android::recoverymap
+} // namespace android::jpegrecoverymap
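
The substantive change in generateRecoveryMap (beyond the rename to JpegR) is that the workers no longer accumulate peak and average HDR luminance; each pixel's gain is computed directly and the metadata records its observed range. A worked example of that arithmetic, assuming an SDR white level of 100 nits for kSdrWhiteNits and 1000 nits for the HLG hdr_white_nits (both constants are defined elsewhere in the library):

    gain(x, y) = hdr_y_nits / sdr_y_nits
               = (luminanceFn(hdr_rgb) * hdr_white_nits)
                 / (luminanceFn(sdr_rgb) * kSdrWhiteNits)

    e.g. luminanceFn(hdr_rgb) = 0.5 and luminanceFn(sdr_rgb) = 0.8 give
         gain = (0.5 * 1000) / (0.8 * 100) = 6.25

maxContentBoost ends up as the largest gain seen (at least the 0.0 initial value) and minContentBoost as the smallest (at most the 1.0 initial value), replacing the old maxContentBoost = hdr_y_nits_max / kSdrWhiteNits.
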
diff --git a/libs/jpegrecoverymap/recoverymaputils.cpp b/libs/jpegrecoverymap/jpegrutils.cpp
similarity index 98%
rename from libs/jpegrecoverymap/recoverymaputils.cpp
rename to libs/jpegrecoverymap/jpegrutils.cpp
index 40956bd..bcca91a 100644
--- a/libs/jpegrecoverymap/recoverymaputils.cpp
+++ b/libs/jpegrecoverymap/jpegrutils.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include <jpegrecoverymap/recoverymaputils.h>
+#include <jpegrecoverymap/jpegrutils.h>
 #include <image_io/xml/xml_reader.h>
 #include <image_io/xml/xml_writer.h>
 #include <image_io/base/message_handler.h>
@@ -25,7 +25,7 @@
 using namespace photos_editing_formats::image_io;
 using namespace std;
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 /*
  * Helper function used for generating XMP metadata.
@@ -247,4 +247,4 @@
   return ss.str();
 }
 
-} // namespace android::recoverymap
+} // namespace android::jpegrecoverymap
diff --git a/libs/jpegrecoverymap/recoverymapmath.cpp b/libs/jpegrecoverymap/recoverymapmath.cpp
index 4f21ac6..7812e18 100644
--- a/libs/jpegrecoverymap/recoverymapmath.cpp
+++ b/libs/jpegrecoverymap/recoverymapmath.cpp
@@ -18,67 +18,48 @@
 #include <vector>
 #include <jpegrecoverymap/recoverymapmath.h>
 
-namespace android::recoverymap {
-
-constexpr size_t kPqOETFPrecision = 10;
-constexpr size_t kPqOETFNumEntries = 1 << kPqOETFPrecision;
+namespace android::jpegrecoverymap {
 
 static const std::vector<float> kPqOETF = [] {
     std::vector<float> result;
-    float increment = 1.0 / kPqOETFNumEntries;
-    float value = 0.0f;
-    for (int idx = 0; idx < kPqOETFNumEntries; idx++, value += increment) {
+    for (int idx = 0; idx < kPqOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kPqOETFNumEntries - 1);
       result.push_back(pqOetf(value));
     }
     return result;
 }();
 
-constexpr size_t kPqInvOETFPrecision = 10;
-constexpr size_t kPqInvOETFNumEntries = 1 << kPqInvOETFPrecision;
-
 static const std::vector<float> kPqInvOETF = [] {
     std::vector<float> result;
-    float increment = 1.0 / kPqInvOETFNumEntries;
-    float value = 0.0f;
-    for (int idx = 0; idx < kPqInvOETFNumEntries; idx++, value += increment) {
+    for (int idx = 0; idx < kPqInvOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kPqInvOETFNumEntries - 1);
       result.push_back(pqInvOetf(value));
     }
     return result;
 }();
 
-constexpr size_t kHlgOETFPrecision = 10;
-constexpr size_t kHlgOETFNumEntries = 1 << kHlgOETFPrecision;
-
 static const std::vector<float> kHlgOETF = [] {
     std::vector<float> result;
-    float increment = 1.0 / kHlgOETFNumEntries;
-    float value = 0.0f;
-    for (int idx = 0; idx < kHlgOETFNumEntries; idx++, value += increment) {
+    for (int idx = 0; idx < kHlgOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kHlgOETFNumEntries - 1);
       result.push_back(hlgOetf(value));
     }
     return result;
 }();
 
-constexpr size_t kHlgInvOETFPrecision = 10;
-constexpr size_t kHlgInvOETFNumEntries = 1 << kHlgInvOETFPrecision;
-
 static const std::vector<float> kHlgInvOETF = [] {
     std::vector<float> result;
-    float increment = 1.0 / kHlgInvOETFNumEntries;
-    float value = 0.0f;
-    for (int idx = 0; idx < kHlgInvOETFNumEntries; idx++, value += increment) {
+    for (int idx = 0; idx < kHlgInvOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kHlgInvOETFNumEntries - 1);
       result.push_back(hlgInvOetf(value));
     }
     return result;
 }();
 
-constexpr size_t kSRGBInvOETFPrecision = 10;
-constexpr size_t kSRGBInvOETFNumEntries = 1 << kSRGBInvOETFPrecision;
-static const std::vector<float> kSRGBInvOETF = [] {
+static const std::vector<float> kSrgbInvOETF = [] {
     std::vector<float> result;
-    float increment = 1.0 / kSRGBInvOETFNumEntries;
-    float value = 0.0f;
-    for (int idx = 0; idx < kSRGBInvOETFNumEntries; idx++, value += increment) {
+    for (int idx = 0; idx < kSrgbInvOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kSrgbInvOETFNumEntries - 1);
       result.push_back(srgbInvOetf(value));
     }
     return result;
@@ -182,10 +163,10 @@
 
 // See IEC 61966-2-1, Equations F.5 and F.6.
 float srgbInvOetfLUT(float e_gamma) {
-  uint32_t value = static_cast<uint32_t>(e_gamma * kSRGBInvOETFNumEntries);
+  uint32_t value = static_cast<uint32_t>(e_gamma * kSrgbInvOETFNumEntries);
   //TODO() : Remove once conversion modules have appropriate clamping in place
-  value = CLIP3(value, 0, kSRGBInvOETFNumEntries - 1);
-  return kSRGBInvOETF[value];
+  value = CLIP3(value, 0, kSrgbInvOETFNumEntries - 1);
+  return kSrgbInvOETF[value];
 }
 
 Color srgbInvOetfLUT(Color e_gamma) {
@@ -461,21 +442,24 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 // Recovery map calculations
-
-uint8_t encodeRecovery(float y_sdr, float y_hdr, float hdr_ratio) {
+uint8_t encodeRecovery(float y_sdr, float y_hdr, jr_metadata_ptr metadata) {
   float gain = 1.0f;
   if (y_sdr > 0.0f) {
     gain = y_hdr / y_sdr;
   }
 
-  if (gain < (1.0f / hdr_ratio)) gain = 1.0f / hdr_ratio;
-  if (gain > hdr_ratio) gain = hdr_ratio;
+  if (gain < metadata->minContentBoost) gain = metadata->minContentBoost;
+  if (gain > metadata->maxContentBoost) gain = metadata->maxContentBoost;
 
-  return static_cast<uint8_t>(log2(gain) / log2(hdr_ratio) * 127.5f  + 127.5f);
+  return static_cast<uint8_t>((log2(gain) - log2(metadata->minContentBoost))
+                            / (log2(metadata->maxContentBoost) - log2(metadata->minContentBoost))
+                            * 255.0f);
 }
 
-Color applyRecovery(Color e, float recovery, float hdr_ratio) {
-  float recoveryFactor = pow(hdr_ratio, recovery);
+Color applyRecovery(Color e, float recovery, jr_metadata_ptr metadata) {
+  float logBoost = log2(metadata->minContentBoost) * (1.0f - recovery)
+                 + log2(metadata->maxContentBoost) * recovery;
+  float recoveryFactor = exp2(logBoost);
   return e * recoveryFactor;
 }
 
@@ -550,7 +534,7 @@
 }
 
 static float mapUintToFloat(uint8_t map_uint) {
-  return (static_cast<float>(map_uint) - 127.5f) / 127.5f;
+  return static_cast<float>(map_uint) / 255.0f;
 }
 
 static float pythDistance(float x_diff, float y_diff) {
@@ -558,9 +542,9 @@
 }
 
 // TODO: If map_scale_factor is guaranteed to be an integer, then remove the following.
-float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y) {
-  float x_map = static_cast<float>(x) / static_cast<float>(map_scale_factor);
-  float y_map = static_cast<float>(y) / static_cast<float>(map_scale_factor);
+float sampleMap(jr_uncompressed_ptr map, float map_scale_factor, size_t x, size_t y) {
+  float x_map = static_cast<float>(x) / map_scale_factor;
+  float y_map = static_cast<float>(y) / map_scale_factor;
 
   size_t x_lower = static_cast<size_t>(floor(x_map));
   size_t x_upper = x_lower + 1;
@@ -647,4 +631,4 @@
        | (0x3 << 30);  // Set alpha to 1.0
 }
 
-} // namespace android::recoverymap
+} // namespace android::jpegrecoverymap
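
The math change above replaces the old symmetric encoding (byte 127.5 meaning a gain of 1.0, scaled by a single hdr_ratio) with a log2 interpolation between minContentBoost and maxContentBoost over the full 0..255 range. In equation form, with a worked check against the values the updated tests expect for maxContentBoost = 4 and minContentBoost = 1/4:

    encode:  r = 255 * (log2(gain) - log2(minContentBoost))
                     / (log2(maxContentBoost) - log2(minContentBoost))
    apply:   boost = exp2((1 - recovery) * log2(minContentBoost)
                        + recovery * log2(maxContentBoost))

    gain = 2:         r = 255 * (1 - (-2)) / (2 - (-2)) = 191.25 -> 191
    recovery = 0.75:  boost = exp2(0.25 * (-2) + 0.75 * 2) = exp2(1) = 2.0

mapUintToFloat() correspondingly maps the stored byte back to [0, 1] (map_uint / 255) instead of the old [-1, 1] range.
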
diff --git a/libs/jpegrecoverymap/tests/Android.bp b/libs/jpegrecoverymap/tests/Android.bp
index e381caf..5a4edb2 100644
--- a/libs/jpegrecoverymap/tests/Android.bp
+++ b/libs/jpegrecoverymap/tests/Android.bp
@@ -25,7 +25,7 @@
     name: "libjpegrecoverymap_test",
     test_suites: ["device-tests"],
     srcs: [
-        "recoverymap_test.cpp",
+        "jpegr_test.cpp",
         "recoverymapmath_test.cpp",
     ],
     shared_libs: [
@@ -44,10 +44,10 @@
 }
 
 cc_test {
-    name: "libjpegencoder_test",
+    name: "libjpegencoderhelper_test",
     test_suites: ["device-tests"],
     srcs: [
-        "jpegencoder_test.cpp",
+        "jpegencoderhelper_test.cpp",
     ],
     shared_libs: [
         "libjpeg",
@@ -60,10 +60,10 @@
 }
 
 cc_test {
-    name: "libjpegdecoder_test",
+    name: "libjpegdecoderhelper_test",
     test_suites: ["device-tests"],
     srcs: [
-        "jpegdecoder_test.cpp",
+        "jpegdecoderhelper_test.cpp",
     ],
     shared_libs: [
         "libjpeg",
diff --git a/libs/jpegrecoverymap/tests/jpegdecoder_test.cpp b/libs/jpegrecoverymap/tests/jpegdecoderhelper_test.cpp
similarity index 77%
rename from libs/jpegrecoverymap/tests/jpegdecoder_test.cpp
rename to libs/jpegrecoverymap/tests/jpegdecoderhelper_test.cpp
index 8e01351..2f32a56 100644
--- a/libs/jpegrecoverymap/tests/jpegdecoder_test.cpp
+++ b/libs/jpegrecoverymap/tests/jpegdecoderhelper_test.cpp
@@ -14,27 +14,27 @@
  * limitations under the License.
  */
 
-#include <jpegrecoverymap/jpegdecoder.h>
+#include <jpegrecoverymap/jpegdecoderhelper.h>
 #include <gtest/gtest.h>
 #include <utils/Log.h>
 
 #include <fcntl.h>
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 #define YUV_IMAGE "/sdcard/Documents/minnie-320x240-yuv.jpg"
 #define YUV_IMAGE_SIZE 20193
 #define GREY_IMAGE "/sdcard/Documents/minnie-320x240-y.jpg"
 #define GREY_IMAGE_SIZE 20193
 
-class JpegDecoderTest : public testing::Test {
+class JpegDecoderHelperTest : public testing::Test {
 public:
     struct Image {
         std::unique_ptr<uint8_t[]> buffer;
         size_t size;
     };
-    JpegDecoderTest();
-    ~JpegDecoderTest();
+    JpegDecoderHelperTest();
+    ~JpegDecoderHelperTest();
 protected:
     virtual void SetUp();
     virtual void TearDown();
@@ -42,9 +42,9 @@
     Image mYuvImage, mGreyImage;
 };
 
-JpegDecoderTest::JpegDecoderTest() {}
+JpegDecoderHelperTest::JpegDecoderHelperTest() {}
 
-JpegDecoderTest::~JpegDecoderTest() {}
+JpegDecoderHelperTest::~JpegDecoderHelperTest() {}
 
 static size_t getFileSize(int fd) {
     struct stat st;
@@ -55,7 +55,7 @@
     return st.st_size; // bytes
 }
 
-static bool loadFile(const char filename[], JpegDecoderTest::Image* result) {
+static bool loadFile(const char filename[], JpegDecoderHelperTest::Image* result) {
     int fd = open(filename, O_CLOEXEC);
     if (fd < 0) {
         return false;
@@ -74,7 +74,7 @@
     return true;
 }
 
-void JpegDecoderTest::SetUp() {
+void JpegDecoderHelperTest::SetUp() {
     if (!loadFile(YUV_IMAGE, &mYuvImage)) {
         FAIL() << "Load file " << YUV_IMAGE << " failed";
     }
@@ -85,18 +85,18 @@
     mGreyImage.size = GREY_IMAGE_SIZE;
 }
 
-void JpegDecoderTest::TearDown() {}
+void JpegDecoderHelperTest::TearDown() {}
 
-TEST_F(JpegDecoderTest, decodeYuvImage) {
-    JpegDecoder decoder;
+TEST_F(JpegDecoderHelperTest, decodeYuvImage) {
+    JpegDecoderHelper decoder;
     EXPECT_TRUE(decoder.decompressImage(mYuvImage.buffer.get(), mYuvImage.size));
     ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
 }
 
-TEST_F(JpegDecoderTest, decodeGreyImage) {
-    JpegDecoder decoder;
+TEST_F(JpegDecoderHelperTest, decodeGreyImage) {
+    JpegDecoderHelper decoder;
     EXPECT_TRUE(decoder.decompressImage(mGreyImage.buffer.get(), mGreyImage.size));
     ASSERT_GT(decoder.getDecompressedImageSize(), static_cast<uint32_t>(0));
 }
 
-}
\ No newline at end of file
+}  // namespace android::jpegrecoverymap
\ No newline at end of file
diff --git a/libs/jpegrecoverymap/tests/jpegencoder_test.cpp b/libs/jpegrecoverymap/tests/jpegencoderhelper_test.cpp
similarity index 82%
rename from libs/jpegrecoverymap/tests/jpegencoder_test.cpp
rename to libs/jpegrecoverymap/tests/jpegencoderhelper_test.cpp
index 4cd2a5e..095ac2f 100644
--- a/libs/jpegrecoverymap/tests/jpegencoder_test.cpp
+++ b/libs/jpegrecoverymap/tests/jpegencoderhelper_test.cpp
@@ -14,13 +14,13 @@
  * limitations under the License.
  */
 
-#include <jpegrecoverymap/jpegencoder.h>
+#include <jpegrecoverymap/jpegencoderhelper.h>
 #include <gtest/gtest.h>
 #include <utils/Log.h>
 
 #include <fcntl.h>
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 #define VALID_IMAGE "/sdcard/Documents/minnie-320x240.yu12"
 #define VALID_IMAGE_WIDTH 320
@@ -33,15 +33,15 @@
 #define INVALID_SIZE_IMAGE_HEIGHT 240
 #define JPEG_QUALITY 90
 
-class JpegEncoderTest : public testing::Test {
+class JpegEncoderHelperTest : public testing::Test {
 public:
     struct Image {
         std::unique_ptr<uint8_t[]> buffer;
         size_t width;
         size_t height;
     };
-    JpegEncoderTest();
-    ~JpegEncoderTest();
+    JpegEncoderHelperTest();
+    ~JpegEncoderHelperTest();
 protected:
     virtual void SetUp();
     virtual void TearDown();
@@ -49,9 +49,9 @@
     Image mValidImage, mInvalidSizeImage, mSingleChannelImage;
 };
 
-JpegEncoderTest::JpegEncoderTest() {}
+JpegEncoderHelperTest::JpegEncoderHelperTest() {}
 
-JpegEncoderTest::~JpegEncoderTest() {}
+JpegEncoderHelperTest::~JpegEncoderHelperTest() {}
 
 static size_t getFileSize(int fd) {
     struct stat st;
@@ -62,7 +62,7 @@
     return st.st_size; // bytes
 }
 
-static bool loadFile(const char filename[], JpegEncoderTest::Image* result) {
+static bool loadFile(const char filename[], JpegEncoderHelperTest::Image* result) {
     int fd = open(filename, O_CLOEXEC);
     if (fd < 0) {
         return false;
@@ -81,7 +81,7 @@
     return true;
 }
 
-void JpegEncoderTest::SetUp() {
+void JpegEncoderHelperTest::SetUp() {
     if (!loadFile(VALID_IMAGE, &mValidImage)) {
         FAIL() << "Load file " << VALID_IMAGE << " failed";
     }
@@ -99,27 +99,27 @@
     mSingleChannelImage.height = SINGLE_CHANNEL_IMAGE_HEIGHT;
 }
 
-void JpegEncoderTest::TearDown() {}
+void JpegEncoderHelperTest::TearDown() {}
 
-TEST_F(JpegEncoderTest, validImage) {
-    JpegEncoder encoder;
+TEST_F(JpegEncoderHelperTest, validImage) {
+    JpegEncoderHelper encoder;
     EXPECT_TRUE(encoder.compressImage(mValidImage.buffer.get(), mValidImage.width,
                                          mValidImage.height, JPEG_QUALITY, NULL, 0));
     ASSERT_GT(encoder.getCompressedImageSize(), static_cast<uint32_t>(0));
 }
 
-TEST_F(JpegEncoderTest, invalidSizeImage) {
-    JpegEncoder encoder;
+TEST_F(JpegEncoderHelperTest, invalidSizeImage) {
+    JpegEncoderHelper encoder;
     EXPECT_FALSE(encoder.compressImage(mInvalidSizeImage.buffer.get(), mInvalidSizeImage.width,
                                           mInvalidSizeImage.height, JPEG_QUALITY, NULL, 0));
 }
 
-TEST_F(JpegEncoderTest, singleChannelImage) {
-    JpegEncoder encoder;
+TEST_F(JpegEncoderHelperTest, singleChannelImage) {
+    JpegEncoderHelper encoder;
     EXPECT_TRUE(encoder.compressImage(mSingleChannelImage.buffer.get(), mSingleChannelImage.width,
                                          mSingleChannelImage.height, JPEG_QUALITY, NULL, 0, true));
     ASSERT_GT(encoder.getCompressedImageSize(), static_cast<uint32_t>(0));
 }
 
-}
+}  // namespace android::jpegrecoverymap
 
diff --git a/libs/jpegrecoverymap/tests/recoverymap_test.cpp b/libs/jpegrecoverymap/tests/jpegr_test.cpp
similarity index 67%
rename from libs/jpegrecoverymap/tests/recoverymap_test.cpp
rename to libs/jpegrecoverymap/tests/jpegr_test.cpp
index 3e9a76d..c0347e3 100644
--- a/libs/jpegrecoverymap/tests/recoverymap_test.cpp
+++ b/libs/jpegrecoverymap/tests/jpegr_test.cpp
@@ -14,12 +14,13 @@
  * limitations under the License.
  */
 
-#include <jpegrecoverymap/recoverymap.h>
+#include <jpegrecoverymap/jpegr.h>
+#include <jpegrecoverymap/jpegrutils.h>
 #include <jpegrecoverymap/recoverymapmath.h>
-#include <jpegrecoverymap/recoverymaputils.h>
 #include <fcntl.h>
 #include <fstream>
 #include <gtest/gtest.h>
+#include <sys/time.h>
 #include <utils/Log.h>
 
 #define RAW_P010_IMAGE "/sdcard/Documents/raw_p010_image.p010"
@@ -33,29 +34,26 @@
 #define SAVE_DECODING_RESULT true
 #define SAVE_INPUT_RGBA true
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
-class RecoveryMapTest : public testing::Test {
-public:
-  RecoveryMapTest();
-  ~RecoveryMapTest();
-protected:
-  virtual void SetUp();
-  virtual void TearDown();
-
-  struct jpegr_uncompressed_struct mRawP010Image;
-  struct jpegr_uncompressed_struct mRawYuv420Image;
-  struct jpegr_compressed_struct mJpegImage;
+struct Timer {
+  struct timeval StartingTime;
+  struct timeval EndingTime;
+  struct timeval ElapsedMicroseconds;
 };
 
-RecoveryMapTest::RecoveryMapTest() {}
-RecoveryMapTest::~RecoveryMapTest() {}
+void timerStart(Timer *t) {
+  gettimeofday(&t->StartingTime, nullptr);
+}
 
-void RecoveryMapTest::SetUp() {}
-void RecoveryMapTest::TearDown() {
-  free(mRawP010Image.data);
-  free(mRawYuv420Image.data);
-  free(mJpegImage.data);
+void timerStop(Timer *t) {
+  gettimeofday(&t->EndingTime, nullptr);
+}
+
+int64_t elapsedTime(Timer *t) {
+  t->ElapsedMicroseconds.tv_sec = t->EndingTime.tv_sec - t->StartingTime.tv_sec;
+  t->ElapsedMicroseconds.tv_usec = t->EndingTime.tv_usec - t->StartingTime.tv_usec;
+  return t->ElapsedMicroseconds.tv_sec * 1000000 + t->ElapsedMicroseconds.tv_usec;
 }
 
 static size_t getFileSize(int fd) {
@@ -89,19 +87,93 @@
   return true;
 }
 
-TEST_F(RecoveryMapTest, build) {
-  // Force all of the recovery map lib to be linked by calling all public functions.
-  RecoveryMap recovery_map;
-  recovery_map.encodeJPEGR(nullptr, static_cast<jpegr_transfer_function>(0), nullptr, 0, nullptr);
-  recovery_map.encodeJPEGR(nullptr, nullptr, static_cast<jpegr_transfer_function>(0),
-                           nullptr, 0, nullptr);
-  recovery_map.encodeJPEGR(nullptr, nullptr, nullptr, static_cast<jpegr_transfer_function>(0),
-                           nullptr);
-  recovery_map.encodeJPEGR(nullptr, nullptr, static_cast<jpegr_transfer_function>(0), nullptr);
-  recovery_map.decodeJPEGR(nullptr, nullptr, nullptr, false);
+class JpegRTest : public testing::Test {
+public:
+  JpegRTest();
+  ~JpegRTest();
+
+protected:
+  virtual void SetUp();
+  virtual void TearDown();
+
+  struct jpegr_uncompressed_struct mRawP010Image;
+  struct jpegr_uncompressed_struct mRawYuv420Image;
+  struct jpegr_compressed_struct mJpegImage;
+};
+
+JpegRTest::JpegRTest() {}
+JpegRTest::~JpegRTest() {}
+
+void JpegRTest::SetUp() {}
+void JpegRTest::TearDown() {
+  free(mRawP010Image.data);
+  free(mRawYuv420Image.data);
+  free(mJpegImage.data);
 }
 
-TEST_F(RecoveryMapTest, writeXmpThenRead) {
+class JpegRBenchmark : public JpegR {
+public:
+ void BenchmarkGenerateRecoveryMap(jr_uncompressed_ptr yuv420Image, jr_uncompressed_ptr p010Image,
+                                   jr_metadata_ptr metadata, jr_uncompressed_ptr map);
+ void BenchmarkApplyRecoveryMap(jr_uncompressed_ptr yuv420Image, jr_uncompressed_ptr map,
+                                jr_metadata_ptr metadata, jr_uncompressed_ptr dest);
+private:
+ const int kProfileCount = 10;
+};
+
+void JpegRBenchmark::BenchmarkGenerateRecoveryMap(jr_uncompressed_ptr yuv420Image,
+                                                        jr_uncompressed_ptr p010Image,
+                                                        jr_metadata_ptr metadata,
+                                                        jr_uncompressed_ptr map) {
+  ASSERT_EQ(yuv420Image->width, p010Image->width);
+  ASSERT_EQ(yuv420Image->height, p010Image->height);
+
+  Timer genRecMapTime;
+
+  timerStart(&genRecMapTime);
+  for (auto i = 0; i < kProfileCount; i++) {
+      ASSERT_EQ(OK, generateRecoveryMap(
+          yuv420Image, p010Image, jpegr_transfer_function::JPEGR_TF_HLG, metadata, map));
+      if (i != kProfileCount - 1) delete[] static_cast<uint8_t *>(map->data);
+  }
+  timerStop(&genRecMapTime);
+
+  ALOGE("Generate Recovery Map:- Res = %i x %i, time = %f ms",
+        yuv420Image->width, yuv420Image->height,
+        elapsedTime(&genRecMapTime) / (kProfileCount * 1000.f));
+
+}
+
+void JpegRBenchmark::BenchmarkApplyRecoveryMap(jr_uncompressed_ptr yuv420Image,
+                                                     jr_uncompressed_ptr map,
+                                                     jr_metadata_ptr metadata,
+                                                     jr_uncompressed_ptr dest) {
+  Timer applyRecMapTime;
+
+  timerStart(&applyRecMapTime);
+  for (auto i = 0; i < kProfileCount; i++) {
+      ASSERT_EQ(OK, applyRecoveryMap(yuv420Image, map, metadata, dest));
+  }
+  timerStop(&applyRecMapTime);
+
+  ALOGE("Apply Recovery Map:- Res = %i x %i, time = %f ms",
+        yuv420Image->width, yuv420Image->height,
+        elapsedTime(&applyRecMapTime) / (kProfileCount * 1000.f));
+}
+
+TEST_F(JpegRTest, build) {
+  // Force all of the recovery map lib to be linked by calling all public functions.
+  JpegR jpegRCodec;
+  jpegRCodec.encodeJPEGR(nullptr, static_cast<jpegr_transfer_function>(0), nullptr, 0, nullptr);
+  jpegRCodec.encodeJPEGR(nullptr, nullptr, static_cast<jpegr_transfer_function>(0),
+                         nullptr, 0, nullptr);
+  jpegRCodec.encodeJPEGR(nullptr, nullptr, nullptr, static_cast<jpegr_transfer_function>(0),
+                         nullptr);
+  jpegRCodec.encodeJPEGR(nullptr, nullptr, static_cast<jpegr_transfer_function>(0), nullptr);
+  jpegRCodec.decodeJPEGR(nullptr, nullptr, nullptr, false);
+}
+
+TEST_F(JpegRTest, writeXmpThenRead) {
   jpegr_metadata metadata_expected;
   metadata_expected.maxContentBoost = 1.25;
   int length_expected = 1000;
@@ -123,7 +195,7 @@
 }
 
 /* Test Encode API-0 and decode */
-TEST_F(RecoveryMapTest, encodeFromP010ThenDecode) {
+TEST_F(JpegRTest, encodeFromP010ThenDecode) {
   int ret;
 
   // Load input files.
@@ -134,12 +206,12 @@
   mRawP010Image.height = TEST_IMAGE_HEIGHT;
   mRawP010Image.colorGamut = jpegr_color_gamut::JPEGR_COLORGAMUT_BT2100;
 
-  RecoveryMap recoveryMap;
+  JpegR jpegRCodec;
 
   jpegr_compressed_struct jpegR;
   jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
   jpegR.data = malloc(jpegR.maxLength);
-  ret = recoveryMap.encodeJPEGR(
+  ret = jpegRCodec.encodeJPEGR(
       &mRawP010Image, jpegr_transfer_function::JPEGR_TF_HLG, &jpegR, DEFAULT_JPEG_QUALITY, nullptr);
   if (ret != OK) {
     FAIL() << "Error code is " << ret;
@@ -157,7 +229,7 @@
   jpegr_uncompressed_struct decodedJpegR;
   int decodedJpegRSize = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * 4;
   decodedJpegR.data = malloc(decodedJpegRSize);
-  ret = recoveryMap.decodeJPEGR(&jpegR, &decodedJpegR);
+  ret = jpegRCodec.decodeJPEGR(&jpegR, &decodedJpegR);
   if (ret != OK) {
     FAIL() << "Error code is " << ret;
   }
@@ -176,7 +248,7 @@
 }
 
 /* Test Encode API-1 and decode */
-TEST_F(RecoveryMapTest, encodeFromRawHdrAndSdrThenDecode) {
+TEST_F(JpegRTest, encodeFromRawHdrAndSdrThenDecode) {
   int ret;
 
   // Load input files.
@@ -194,12 +266,12 @@
   mRawYuv420Image.height = TEST_IMAGE_HEIGHT;
   mRawYuv420Image.colorGamut = jpegr_color_gamut::JPEGR_COLORGAMUT_BT709;
 
-  RecoveryMap recoveryMap;
+  JpegR jpegRCodec;
 
   jpegr_compressed_struct jpegR;
   jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
   jpegR.data = malloc(jpegR.maxLength);
-  ret = recoveryMap.encodeJPEGR(
+  ret = jpegRCodec.encodeJPEGR(
       &mRawP010Image, &mRawYuv420Image, jpegr_transfer_function::JPEGR_TF_HLG, &jpegR,
       DEFAULT_JPEG_QUALITY, nullptr);
   if (ret != OK) {
@@ -218,7 +290,7 @@
   jpegr_uncompressed_struct decodedJpegR;
   int decodedJpegRSize = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * 4;
   decodedJpegR.data = malloc(decodedJpegRSize);
-  ret = recoveryMap.decodeJPEGR(&jpegR, &decodedJpegR);
+  ret = jpegRCodec.decodeJPEGR(&jpegR, &decodedJpegR);
   if (ret != OK) {
     FAIL() << "Error code is " << ret;
   }
@@ -237,7 +309,7 @@
 }
 
 /* Test Encode API-2 and decode */
-TEST_F(RecoveryMapTest, encodeFromRawHdrAndSdrAndJpegThenDecode) {
+TEST_F(JpegRTest, encodeFromRawHdrAndSdrAndJpegThenDecode) {
   int ret;
 
   // Load input files.
@@ -260,12 +332,12 @@
   }
   mJpegImage.colorGamut = jpegr_color_gamut::JPEGR_COLORGAMUT_BT709;
 
-  RecoveryMap recoveryMap;
+  JpegR jpegRCodec;
 
   jpegr_compressed_struct jpegR;
   jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
   jpegR.data = malloc(jpegR.maxLength);
-  ret = recoveryMap.encodeJPEGR(
+  ret = jpegRCodec.encodeJPEGR(
       &mRawP010Image, &mRawYuv420Image, &mJpegImage, jpegr_transfer_function::JPEGR_TF_HLG, &jpegR);
   if (ret != OK) {
     FAIL() << "Error code is " << ret;
@@ -283,7 +355,7 @@
   jpegr_uncompressed_struct decodedJpegR;
   int decodedJpegRSize = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * 4;
   decodedJpegR.data = malloc(decodedJpegRSize);
-  ret = recoveryMap.decodeJPEGR(&jpegR, &decodedJpegR);
+  ret = jpegRCodec.decodeJPEGR(&jpegR, &decodedJpegR);
   if (ret != OK) {
     FAIL() << "Error code is " << ret;
   }
@@ -302,7 +374,7 @@
 }
 
 /* Test Encode API-3 and decode */
-TEST_F(RecoveryMapTest, encodeFromJpegThenDecode) {
+TEST_F(JpegRTest, encodeFromJpegThenDecode) {
   int ret;
 
   // Load input files.
@@ -341,12 +413,12 @@
   }
   mJpegImage.colorGamut = jpegr_color_gamut::JPEGR_COLORGAMUT_BT709;
 
-  RecoveryMap recoveryMap;
+  JpegR jpegRCodec;
 
   jpegr_compressed_struct jpegR;
   jpegR.maxLength = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * sizeof(uint8_t);
   jpegR.data = malloc(jpegR.maxLength);
-  ret = recoveryMap.encodeJPEGR(
+  ret = jpegRCodec.encodeJPEGR(
       &mRawP010Image, &mJpegImage, jpegr_transfer_function::JPEGR_TF_HLG, &jpegR);
   if (ret != OK) {
     FAIL() << "Error code is " << ret;
@@ -364,7 +436,7 @@
   jpegr_uncompressed_struct decodedJpegR;
   int decodedJpegRSize = TEST_IMAGE_WIDTH * TEST_IMAGE_HEIGHT * 4;
   decodedJpegR.data = malloc(decodedJpegRSize);
-  ret = recoveryMap.decodeJPEGR(&jpegR, &decodedJpegR);
+  ret = jpegRCodec.decodeJPEGR(&jpegR, &decodedJpegR);
   if (ret != OK) {
     FAIL() << "Error code is " << ret;
   }
@@ -382,4 +454,46 @@
   free(decodedJpegR.data);
 }
 
-} // namespace android::recoverymap
+TEST_F(JpegRTest, ProfileRecoveryMapFuncs) {
+  const size_t kWidth = TEST_IMAGE_WIDTH;
+  const size_t kHeight = TEST_IMAGE_HEIGHT;
+
+  // Load input files.
+  if (!loadFile(RAW_P010_IMAGE, mRawP010Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawP010Image.width = kWidth;
+  mRawP010Image.height = kHeight;
+  mRawP010Image.colorGamut = jpegr_color_gamut::JPEGR_COLORGAMUT_BT2100;
+
+  if (!loadFile(RAW_YUV420_IMAGE, mRawYuv420Image.data, nullptr)) {
+    FAIL() << "Load file " << RAW_P010_IMAGE << " failed";
+  }
+  mRawYuv420Image.width = kWidth;
+  mRawYuv420Image.height = kHeight;
+  mRawYuv420Image.colorGamut = jpegr_color_gamut::JPEGR_COLORGAMUT_BT709;
+
+  JpegRBenchmark benchmark;
+
+  jpegr_metadata metadata = { .version = 1,
+                              .maxContentBoost = 8.0f,
+                              .minContentBoost = 1.0f / 8.0f };
+
+  jpegr_uncompressed_struct map = { .data = NULL,
+                                    .width = 0,
+                                    .height = 0,
+                                    .colorGamut = JPEGR_COLORGAMUT_UNSPECIFIED };
+
+  benchmark.BenchmarkGenerateRecoveryMap(&mRawYuv420Image, &mRawP010Image, &metadata, &map);
+
+  const int dstSize = mRawYuv420Image.width * mRawYuv420Image.height * 4;
+  auto bufferDst = std::make_unique<uint8_t[]>(dstSize);
+  jpegr_uncompressed_struct dest = { .data = bufferDst.get(),
+                                     .width = 0,
+                                     .height = 0,
+                                     .colorGamut = JPEGR_COLORGAMUT_UNSPECIFIED };
+
+  benchmark.BenchmarkApplyRecoveryMap(&mRawYuv420Image, &map, &metadata, &dest);
+}
+
+} // namespace android::jpegrecoverymap
\ No newline at end of file
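
The new ProfileRecoveryMapFuncs test above times generateRecoveryMap and applyRecoveryMap with a plain gettimeofday pair. elapsedTime() returns microseconds across all kProfileCount iterations, so dividing by kProfileCount * 1000 in the ALOGE line yields milliseconds per call; for example, 250000 us over 10 runs would be reported as 25 ms.
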
diff --git a/libs/jpegrecoverymap/tests/recoverymapmath_test.cpp b/libs/jpegrecoverymap/tests/recoverymapmath_test.cpp
index 2eec95f..6c61ff1 100644
--- a/libs/jpegrecoverymap/tests/recoverymapmath_test.cpp
+++ b/libs/jpegrecoverymap/tests/recoverymapmath_test.cpp
@@ -19,7 +19,7 @@
 #include <gmock/gmock.h>
 #include <jpegrecoverymap/recoverymapmath.h>
 
-namespace android::recoverymap {
+namespace android::jpegrecoverymap {
 
 class RecoveryMapMathTest : public testing::Test {
 public:
@@ -42,7 +42,7 @@
   }
 
   float Map(uint8_t e) {
-    return (static_cast<float>(e) - 127.5f) / 127.5f;
+    return static_cast<float>(e) / 255.0f;
   }
 
   Color ColorMin(Color e1, Color e2) {
@@ -88,10 +88,10 @@
     return luminance_scaled * scale_factor;
   }
 
-  Color Recover(Color yuv_gamma, float recovery, float max_content_boost) {
+  Color Recover(Color yuv_gamma, float recovery, jr_metadata_ptr metadata) {
     Color rgb_gamma = srgbYuvToRgb(yuv_gamma);
     Color rgb = srgbInvOetf(rgb_gamma);
-    return applyRecovery(rgb, recovery, max_content_boost);
+    return applyRecovery(rgb, recovery, metadata);
   }
 
   jpegr_uncompressed_struct Yuv420Image() {
@@ -518,59 +518,95 @@
 }
 
 TEST_F(RecoveryMapMathTest, PqInvOetfLUT) {
-    float increment = 1.0 / 1024.0;
-    float value = 0.0f;
-    for (int idx = 0; idx < 1024; idx++, value += increment) {
+    for (int idx = 0; idx < kPqInvOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kPqInvOETFNumEntries - 1);
       EXPECT_FLOAT_EQ(pqInvOetf(value), pqInvOetfLUT(value));
     }
 }
 
 TEST_F(RecoveryMapMathTest, HlgInvOetfLUT) {
-    float increment = 1.0 / 1024.0;
-    float value = 0.0f;
-    for (int idx = 0; idx < 1024; idx++, value += increment) {
+    for (int idx = 0; idx < kHlgInvOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kHlgInvOETFNumEntries - 1);
       EXPECT_FLOAT_EQ(hlgInvOetf(value), hlgInvOetfLUT(value));
     }
 }
 
 TEST_F(RecoveryMapMathTest, pqOetfLUT) {
-    float increment = 1.0 / 1024.0;
-    float value = 0.0f;
-    for (int idx = 0; idx < 1024; idx++, value += increment) {
+    for (int idx = 0; idx < kPqOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kPqOETFNumEntries - 1);
       EXPECT_FLOAT_EQ(pqOetf(value), pqOetfLUT(value));
     }
 }
 
 TEST_F(RecoveryMapMathTest, hlgOetfLUT) {
-    float increment = 1.0 / 1024.0;
-    float value = 0.0f;
-    for (int idx = 0; idx < 1024; idx++, value += increment) {
+    for (int idx = 0; idx < kHlgOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kHlgOETFNumEntries - 1);
       EXPECT_FLOAT_EQ(hlgOetf(value), hlgOetfLUT(value));
     }
 }
 
 TEST_F(RecoveryMapMathTest, srgbInvOetfLUT) {
-    float increment = 1.0 / 1024.0;
-    float value = 0.0f;
-    for (int idx = 0; idx < 1024; idx++, value += increment) {
+    for (int idx = 0; idx < kSrgbInvOETFNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kSrgbInvOETFNumEntries - 1);
       EXPECT_FLOAT_EQ(srgbInvOetf(value), srgbInvOetfLUT(value));
     }
 }
 
 TEST_F(RecoveryMapMathTest, applyRecoveryLUT) {
-  float increment = 2.0 / kRecoveryFactorNumEntries;
-  for (float hdrRatio = 1.0f; hdrRatio <= 10.0f; hdrRatio += 1.0f)  {
-    RecoveryLUT recoveryLUT(hdrRatio);
-    for (float value = -1.0f; value <= -1.0f; value += increment) {
-      EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), value, hdrRatio),
+  for (int boost = 1; boost <= 10; boost++) {
+    jpegr_metadata metadata = { .maxContentBoost = static_cast<float>(boost),
+                                .minContentBoost = 1.0f / static_cast<float>(boost) };
+    RecoveryLUT recoveryLUT(&metadata);
+    for (int idx = 0; idx < kRecoveryFactorNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kRecoveryFactorNumEntries - 1);
+      EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), value, &metadata),
                       applyRecoveryLUT(RgbBlack(), value, recoveryLUT));
-      EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), value, hdrRatio),
+      EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), value, &metadata),
                       applyRecoveryLUT(RgbWhite(), value, recoveryLUT));
-      EXPECT_RGB_NEAR(applyRecovery(RgbRed(), value, hdrRatio),
+      EXPECT_RGB_NEAR(applyRecovery(RgbRed(), value, &metadata),
                       applyRecoveryLUT(RgbRed(), value, recoveryLUT));
-      EXPECT_RGB_NEAR(applyRecovery(RgbGreen(), value, hdrRatio),
+      EXPECT_RGB_NEAR(applyRecovery(RgbGreen(), value, &metadata),
                       applyRecoveryLUT(RgbGreen(), value, recoveryLUT));
-      EXPECT_RGB_NEAR(applyRecovery(RgbBlue(), value, hdrRatio),
+      EXPECT_RGB_NEAR(applyRecovery(RgbBlue(), value, &metadata),
+                      applyRecoveryLUT(RgbBlue(), value, recoveryLUT));
+    }
+  }
+
+  for (int boost = 1; boost <= 10; boost++) {
+    jpegr_metadata metadata = { .maxContentBoost = static_cast<float>(boost),
+                                .minContentBoost = 1.0f };
+    RecoveryLUT recoveryLUT(&metadata);
+    for (int idx = 0; idx < kRecoveryFactorNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kRecoveryFactorNumEntries - 1);
+      EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), value, &metadata),
+                      applyRecoveryLUT(RgbBlack(), value, recoveryLUT));
+      EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), value, &metadata),
+                      applyRecoveryLUT(RgbWhite(), value, recoveryLUT));
+      EXPECT_RGB_NEAR(applyRecovery(RgbRed(), value, &metadata),
+                      applyRecoveryLUT(RgbRed(), value, recoveryLUT));
+      EXPECT_RGB_NEAR(applyRecovery(RgbGreen(), value, &metadata),
+                      applyRecoveryLUT(RgbGreen(), value, recoveryLUT));
+      EXPECT_RGB_NEAR(applyRecovery(RgbBlue(), value, &metadata),
+                      applyRecoveryLUT(RgbBlue(), value, recoveryLUT));
+    }
+  }
+
+  for (int boost = 1; boost <= 10; boost++) {
+    jpegr_metadata metadata = { .maxContentBoost = static_cast<float>(boost),
+                                .minContentBoost = 1.0f / pow(static_cast<float>(boost),
+                                                              1.0f / 3.0f) };
+    RecoveryLUT recoveryLUT(&metadata);
+    for (int idx = 0; idx < kRecoveryFactorNumEntries; idx++) {
+      float value = static_cast<float>(idx) / static_cast<float>(kRecoveryFactorNumEntries - 1);
+      EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), value, &metadata),
+                      applyRecoveryLUT(RgbBlack(), value, recoveryLUT));
+      EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), value, &metadata),
+                      applyRecoveryLUT(RgbWhite(), value, recoveryLUT));
+      EXPECT_RGB_NEAR(applyRecovery(RgbRed(), value, &metadata),
+                      applyRecoveryLUT(RgbRed(), value, recoveryLUT));
+      EXPECT_RGB_NEAR(applyRecovery(RgbGreen(), value, &metadata),
+                      applyRecoveryLUT(RgbGreen(), value, recoveryLUT));
+      EXPECT_RGB_NEAR(applyRecovery(RgbBlue(), value, &metadata),
                       applyRecoveryLUT(RgbBlue(), value, recoveryLUT));
     }
   }
@@ -623,60 +659,121 @@
 }
 
 TEST_F(RecoveryMapMathTest, EncodeRecovery) {
-  EXPECT_EQ(encodeRecovery(0.0f, 0.0f, 4.0f), 127);
-  EXPECT_EQ(encodeRecovery(0.0f, 1.0f, 4.0f), 127);
-  EXPECT_EQ(encodeRecovery(1.0f, 0.0f, 4.0f), 0);
-  EXPECT_EQ(encodeRecovery(0.5f, 0.0f, 4.0f), 0);
+  jpegr_metadata metadata = { .maxContentBoost = 4.0f,
+                              .minContentBoost = 1.0f / 4.0f };
 
-  EXPECT_EQ(encodeRecovery(1.0f, 1.0f, 4.0f), 127);
-  EXPECT_EQ(encodeRecovery(1.0f, 4.0f, 4.0f), 255);
-  EXPECT_EQ(encodeRecovery(1.0f, 5.0f, 4.0f), 255);
-  EXPECT_EQ(encodeRecovery(4.0f, 1.0f, 4.0f), 0);
-  EXPECT_EQ(encodeRecovery(4.0f, 0.5f, 4.0f), 0);
-  EXPECT_EQ(encodeRecovery(1.0f, 2.0f, 4.0f), 191);
-  EXPECT_EQ(encodeRecovery(2.0f, 1.0f, 4.0f), 63);
+  EXPECT_EQ(encodeRecovery(0.0f, 0.0f, &metadata), 127);
+  EXPECT_EQ(encodeRecovery(0.0f, 1.0f, &metadata), 127);
+  EXPECT_EQ(encodeRecovery(1.0f, 0.0f, &metadata), 0);
+  EXPECT_EQ(encodeRecovery(0.5f, 0.0f, &metadata), 0);
 
-  EXPECT_EQ(encodeRecovery(1.0f, 2.0f, 2.0f), 255);
-  EXPECT_EQ(encodeRecovery(2.0f, 1.0f, 2.0f), 0);
-  EXPECT_EQ(encodeRecovery(1.0f, 1.41421f, 2.0f), 191);
-  EXPECT_EQ(encodeRecovery(1.41421f, 1.0f, 2.0f), 63);
+  EXPECT_EQ(encodeRecovery(1.0f, 1.0f, &metadata), 127);
+  EXPECT_EQ(encodeRecovery(1.0f, 4.0f, &metadata), 255);
+  EXPECT_EQ(encodeRecovery(1.0f, 5.0f, &metadata), 255);
+  EXPECT_EQ(encodeRecovery(4.0f, 1.0f, &metadata), 0);
+  EXPECT_EQ(encodeRecovery(4.0f, 0.5f, &metadata), 0);
+  EXPECT_EQ(encodeRecovery(1.0f, 2.0f, &metadata), 191);
+  EXPECT_EQ(encodeRecovery(2.0f, 1.0f, &metadata), 63);
 
-  EXPECT_EQ(encodeRecovery(1.0f, 8.0f, 8.0f), 255);
-  EXPECT_EQ(encodeRecovery(8.0f, 1.0f, 8.0f), 0);
-  EXPECT_EQ(encodeRecovery(1.0f, 2.82843f, 8.0f), 191);
-  EXPECT_EQ(encodeRecovery(2.82843f, 1.0f, 8.0f), 63);
+  metadata.maxContentBoost = 2.0f;
+  metadata.minContentBoost = 1.0f / 2.0f;
+
+  EXPECT_EQ(encodeRecovery(1.0f, 2.0f, &metadata), 255);
+  EXPECT_EQ(encodeRecovery(2.0f, 1.0f, &metadata), 0);
+  EXPECT_EQ(encodeRecovery(1.0f, 1.41421f, &metadata), 191);
+  EXPECT_EQ(encodeRecovery(1.41421f, 1.0f, &metadata), 63);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f / 8.0f;
+
+  EXPECT_EQ(encodeRecovery(1.0f, 8.0f, &metadata), 255);
+  EXPECT_EQ(encodeRecovery(8.0f, 1.0f, &metadata), 0);
+  EXPECT_EQ(encodeRecovery(1.0f, 2.82843f, &metadata), 191);
+  EXPECT_EQ(encodeRecovery(2.82843f, 1.0f, &metadata), 63);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f;
+
+  EXPECT_EQ(encodeRecovery(0.0f, 0.0f, &metadata), 0);
+  EXPECT_EQ(encodeRecovery(1.0f, 0.0f, &metadata), 0);
+
+  EXPECT_EQ(encodeRecovery(1.0f, 1.0f, &metadata), 0);
+  EXPECT_EQ(encodeRecovery(1.0f, 8.0f, &metadata), 255);
+  EXPECT_EQ(encodeRecovery(1.0f, 4.0f, &metadata), 170);
+  EXPECT_EQ(encodeRecovery(1.0f, 2.0f, &metadata), 85);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 0.5f;
+
+  EXPECT_EQ(encodeRecovery(0.0f, 0.0f, &metadata), 63);
+  EXPECT_EQ(encodeRecovery(1.0f, 0.0f, &metadata), 0);
+
+  EXPECT_EQ(encodeRecovery(1.0f, 1.0f, &metadata), 63);
+  EXPECT_EQ(encodeRecovery(1.0f, 8.0f, &metadata), 255);
+  EXPECT_EQ(encodeRecovery(1.0f, 4.0f, &metadata), 191);
+  EXPECT_EQ(encodeRecovery(1.0f, 2.0f, &metadata), 127);
+  EXPECT_EQ(encodeRecovery(1.0f, 0.7071f, &metadata), 31);
+  EXPECT_EQ(encodeRecovery(1.0f, 0.5f, &metadata), 0);
 }
 
 TEST_F(RecoveryMapMathTest, ApplyRecovery) {
-  EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), -1.0f, 4.0f), RgbBlack());
-  EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), 0.0f, 4.0f), RgbBlack());
-  EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), 1.0f, 4.0f), RgbBlack());
+  jpegr_metadata metadata = { .maxContentBoost = 4.0f,
+                              .minContentBoost = 1.0f / 4.0f };
 
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), -1.0f, 4.0f), RgbWhite() / 4.0f);
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), -0.5f, 4.0f), RgbWhite() / 2.0f);
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.0f, 4.0f), RgbWhite());
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.5f, 4.0f), RgbWhite() * 2.0f);
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 1.0f, 4.0f), RgbWhite() * 4.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), 0.0f, &metadata), RgbBlack());
+  EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), 0.5f, &metadata), RgbBlack());
+  EXPECT_RGB_NEAR(applyRecovery(RgbBlack(), 1.0f, &metadata), RgbBlack());
 
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), -1.0f, 2.0f), RgbWhite() / 2.0f);
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), -0.5f, 2.0f), RgbWhite() / 1.41421f);
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.0f, 2.0f), RgbWhite());
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.5f, 2.0f), RgbWhite() * 1.41421f);
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 1.0f, 2.0f), RgbWhite() * 2.0f);
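+  // The applied gain interpolates log-linearly between minContentBoost and maxContentBoost as the
+  // recovery value goes from 0 to 1, so with the 4x/0.25x metadata above a recovery of 0.25 should
+  // halve the pixel and 0.75 should double it.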
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.0f, &metadata), RgbWhite() / 4.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.25f, &metadata), RgbWhite() / 2.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.5f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.75f, &metadata), RgbWhite() * 2.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 1.0f, &metadata), RgbWhite() * 4.0f);
 
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), -1.0f, 8.0f), RgbWhite() / 8.0f);
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), -0.5f, 8.0f), RgbWhite() / 2.82843f);
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.0f, 8.0f), RgbWhite());
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.5f, 8.0f), RgbWhite() * 2.82843f);
-  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 1.0f, 8.0f), RgbWhite() * 8.0f);
+  metadata.maxContentBoost = 2.0f;
+  metadata.minContentBoost = 1.0f / 2.0f;
+
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.0f, &metadata), RgbWhite() / 2.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.25f, &metadata), RgbWhite() / 1.41421f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.5f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.75f, &metadata), RgbWhite() * 1.41421f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 1.0f, &metadata), RgbWhite() * 2.0f);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f / 8.0f;
+
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.0f, &metadata), RgbWhite() / 8.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.25f, &metadata), RgbWhite() / 2.82843f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.5f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.75f, &metadata), RgbWhite() * 2.82843f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 1.0f, &metadata), RgbWhite() * 8.0f);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f;
+
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.0f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 1.0f / 3.0f, &metadata), RgbWhite() * 2.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 2.0f / 3.0f, &metadata), RgbWhite() * 4.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 1.0f, &metadata), RgbWhite() * 8.0f);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 0.5f;
+
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.0f, &metadata), RgbWhite() / 2.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.25f, &metadata), RgbWhite());
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.5f, &metadata), RgbWhite() * 2.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 0.75f, &metadata), RgbWhite() * 4.0f);
+  EXPECT_RGB_NEAR(applyRecovery(RgbWhite(), 1.0f, &metadata), RgbWhite() * 8.0f);
 
   Color e = {{{ 0.0f, 0.5f, 1.0f }}};
+  metadata.maxContentBoost = 4.0f;
+  metadata.minContentBoost = 1.0f / 4.0f;
 
-  EXPECT_RGB_NEAR(applyRecovery(e, -1.0f, 4.0f), e / 4.0f);
-  EXPECT_RGB_NEAR(applyRecovery(e, -0.5f, 4.0f), e / 2.0f);
-  EXPECT_RGB_NEAR(applyRecovery(e, 0.0f, 4.0f), e);
-  EXPECT_RGB_NEAR(applyRecovery(e, 0.5f, 4.0f), e * 2.0f);
-  EXPECT_RGB_NEAR(applyRecovery(e, 1.0f, 4.0f), e * 4.0f);
+  EXPECT_RGB_NEAR(applyRecovery(e, 0.0f, &metadata), e / 4.0f);
+  EXPECT_RGB_NEAR(applyRecovery(e, 0.25f, &metadata), e / 2.0f);
+  EXPECT_RGB_NEAR(applyRecovery(e, 0.5f, &metadata), e);
+  EXPECT_RGB_NEAR(applyRecovery(e, 0.75f, &metadata), e * 2.0f);
+  EXPECT_RGB_NEAR(applyRecovery(e, 1.0f, &metadata), e * 4.0f);
 }
 
 TEST_F(RecoveryMapMathTest, GetYuv420Pixel) {
@@ -785,8 +882,10 @@
       // Instead of reimplementing the sampling algorithm, confirm that the
       // sample output is within the range of the min and max of the nearest
       // points.
-      EXPECT_THAT(sampleMap(&image, kMapScaleFactor, x, y, idwTable),
+      EXPECT_THAT(sampleMap(&image, kMapScaleFactor, x, y),
                   testing::AllOf(testing::Ge(min), testing::Le(max)));
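+      // The precomputed idwTable variant should produce exactly the same sample as the direct
+      // computation.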
+      EXPECT_EQ(sampleMap(&image, kMapScaleFactor, x, y, idwTable),
+                sampleMap(&image, kMapScaleFactor, x, y));
     }
   }
 }
@@ -882,60 +981,89 @@
 }
 
 TEST_F(RecoveryMapMathTest, ApplyMap) {
-  EXPECT_RGB_EQ(Recover(YuvWhite(), 1.0f, 8.0f),
+  jpegr_metadata metadata = { .maxContentBoost = 8.0f,
+                              .minContentBoost = 1.0f / 8.0f };
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 1.0f, &metadata),
                 RgbWhite() * 8.0f);
-  EXPECT_RGB_EQ(Recover(YuvBlack(), 1.0f, 8.0f),
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 1.0f, &metadata),
                 RgbBlack());
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 1.0f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 1.0f, &metadata),
                   RgbRed() * 8.0f);
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 1.0f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 1.0f, &metadata),
                   RgbGreen() * 8.0f);
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 1.0f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 1.0f, &metadata),
                   RgbBlue() * 8.0f);
 
-  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.5f, 8.0f),
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.75f, &metadata),
                 RgbWhite() * sqrt(8.0f));
-  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.5f, 8.0f),
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.75f, &metadata),
                 RgbBlack());
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.5f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.75f, &metadata),
                   RgbRed() * sqrt(8.0f));
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.5f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.75f, &metadata),
                   RgbGreen() * sqrt(8.0f));
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.5f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.75f, &metadata),
                   RgbBlue() * sqrt(8.0f));
 
-  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.0f, 8.0f),
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.5f, &metadata),
                 RgbWhite());
-  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.0f, 8.0f),
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.5f, &metadata),
                 RgbBlack());
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.0f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.5f, &metadata),
                   RgbRed());
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.0f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.5f, &metadata),
                   RgbGreen());
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.0f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.5f, &metadata),
                   RgbBlue());
 
-  EXPECT_RGB_EQ(Recover(YuvWhite(), -0.5f, 8.0f),
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.25f, &metadata),
                 RgbWhite() / sqrt(8.0f));
-  EXPECT_RGB_EQ(Recover(YuvBlack(), -0.5f, 8.0f),
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.25f, &metadata),
                 RgbBlack());
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), -0.5f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.25f, &metadata),
                   RgbRed() / sqrt(8.0f));
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), -0.5f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.25f, &metadata),
                   RgbGreen() / sqrt(8.0f));
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), -0.5f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.25f, &metadata),
                   RgbBlue() / sqrt(8.0f));
 
-  EXPECT_RGB_EQ(Recover(YuvWhite(), -1.0f, 8.0f),
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.0f, &metadata),
                 RgbWhite() / 8.0f);
-  EXPECT_RGB_EQ(Recover(YuvBlack(), -1.0f, 8.0f),
+  EXPECT_RGB_EQ(Recover(YuvBlack(), 0.0f, &metadata),
                 RgbBlack());
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), -1.0f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvRed(), 0.0f, &metadata),
                   RgbRed() / 8.0f);
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), -1.0f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvGreen(), 0.0f, &metadata),
                   RgbGreen() / 8.0f);
-  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), -1.0f, 8.0f),
+  EXPECT_RGB_CLOSE(Recover(SrgbYuvBlue(), 0.0f, &metadata),
                   RgbBlue() / 8.0f);
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 1.0f;
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 1.0f, &metadata),
+                RgbWhite() * 8.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 2.0f / 3.0f, &metadata),
+                RgbWhite() * 4.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 1.0f / 3.0f, &metadata),
+                RgbWhite() * 2.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.0f, &metadata),
+                RgbWhite());
+
+  metadata.maxContentBoost = 8.0f;
+  metadata.minContentBoost = 0.5f;
+
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 1.0f, &metadata),
+                RgbWhite() * 8.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.75f, &metadata),
+                RgbWhite() * 4.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.5f, &metadata),
+                RgbWhite() * 2.0f);
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.25f, &metadata),
+                RgbWhite());
+  EXPECT_RGB_EQ(Recover(YuvWhite(), 0.0f, &metadata),
+                RgbWhite() / 2.0f);
 }
 
-} // namespace android::recoverymap
+} // namespace android::jpegrecoverymap
diff --git a/opengl/libs/Android.bp b/opengl/libs/Android.bp
index 750338b..49e1cba 100644
--- a/opengl/libs/Android.bp
+++ b/opengl/libs/Android.bp
@@ -144,6 +144,7 @@
     srcs: [
         "EGL/BlobCache.cpp",
         "EGL/FileBlobCache.cpp",
+        "EGL/MultifileBlobCache.cpp",
     ],
     export_include_dirs: ["EGL"],
 }
@@ -160,7 +161,6 @@
     srcs: [
         "EGL/egl_tls.cpp",
         "EGL/egl_cache.cpp",
-        "EGL/egl_cache_multifile.cpp",
         "EGL/egl_display.cpp",
         "EGL/egl_object.cpp",
         "EGL/egl_layers.cpp",
@@ -205,6 +205,11 @@
     srcs: [
         "EGL/BlobCache.cpp",
         "EGL/BlobCache_test.cpp",
+        "EGL/MultifileBlobCache.cpp",
+        "EGL/MultifileBlobCache_test.cpp",
+    ],
+    shared_libs: [
+        "libutils",
     ],
 }
 
diff --git a/opengl/libs/EGL/MultifileBlobCache.cpp b/opengl/libs/EGL/MultifileBlobCache.cpp
new file mode 100644
index 0000000..99af299
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache.cpp
@@ -0,0 +1,689 @@
+/*
+ ** Copyright 2022, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ **     http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+
+#include "MultifileBlobCache.h"
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <log/log.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+#include <utime.h>
+
+#include <algorithm>
+#include <chrono>
+#include <limits>
+#include <locale>
+
+#include <utils/JenkinsHash.h>
+
+using namespace std::literals;
+
+namespace {
+
+// Open the file and determine the size of the value it contains
+size_t getValueSizeFromFile(int fd, const std::string& entryPath) {
+    // Read the beginning of the file to get header
+    android::MultifileHeader header;
+    size_t result = read(fd, static_cast<void*>(&header), sizeof(android::MultifileHeader));
+    if (result != sizeof(android::MultifileHeader)) {
+        ALOGE("Error reading MultifileHeader from cache entry (%s): %s", entryPath.c_str(),
+              std::strerror(errno));
+        return 0;
+    }
+
+    return header.valueSize;
+}
+
+// Helper function to close entries or free them
+void freeHotCacheEntry(android::MultifileHotCache& entry) {
+    if (entry.entryFd != -1) {
+        // If we have an fd, then this entry was added to hot cache via INIT or GET
+        // We need to unmap and close the entry
+        munmap(entry.entryBuffer, entry.entrySize);
+        close(entry.entryFd);
+    } else {
+        // Otherwise, this was added to hot cache during SET, so it was never mapped
+        // and fd was only on the deferred thread.
+        delete[] entry.entryBuffer;
+    }
+}
+
+} // namespace
+
+namespace android {
+
+MultifileBlobCache::MultifileBlobCache(size_t maxTotalSize, size_t maxHotCacheSize,
+                                       const std::string& baseDir)
+      : mInitialized(false),
+        mMaxTotalSize(maxTotalSize),
+        mTotalCacheSize(0),
+        mHotCacheLimit(maxHotCacheSize),
+        mHotCacheSize(0),
+        mWorkerThreadIdle(true) {
+    if (baseDir.empty()) {
+        ALOGV("INIT: no baseDir provided in MultifileBlobCache constructor, returning early.");
+        return;
+    }
+
+    // Establish the name of our multifile directory
+    mMultifileDirName = baseDir + ".multifile";
+
+    // Set a limit for max key and value, ensuring at least one entry can always fit in hot cache
+    mMaxKeySize = mHotCacheLimit / 4;
+    mMaxValueSize = mHotCacheLimit / 2;
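+    // For example, with the 8 MB hot cache limit passed in by egl_cache, keys may be up to 2 MB
+    // and values up to 4 MB.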
+
+    ALOGV("INIT: Initializing multifile blobcache with maxKeySize=%zu and maxValueSize=%zu",
+          mMaxKeySize, mMaxValueSize);
+
+    // Initialize our cache with the contents of the directory
+    mTotalCacheSize = 0;
+
+    // Create the worker thread
+    mTaskThread = std::thread(&MultifileBlobCache::processTasks, this);
+
+    // See if the dir exists, and initialize using its contents
+    struct stat st;
+    if (stat(mMultifileDirName.c_str(), &st) == 0) {
+        // Read all the files and gather details, then preload their contents
+        DIR* dir;
+        struct dirent* entry;
+        if ((dir = opendir(mMultifileDirName.c_str())) != nullptr) {
+            while ((entry = readdir(dir)) != nullptr) {
+                if (entry->d_name == "."s || entry->d_name == ".."s) {
+                    continue;
+                }
+
+                std::string entryName = entry->d_name;
+                std::string fullPath = mMultifileDirName + "/" + entryName;
+
+                // The filename is the same as the entryHash
+                uint32_t entryHash = static_cast<uint32_t>(strtoul(entry->d_name, nullptr, 10));
+
+                ALOGV("INIT: Checking entry %u", entryHash);
+
+                // Look up the details of the file
+                struct stat st;
+                if (stat(fullPath.c_str(), &st) != 0) {
+                    ALOGE("Failed to stat %s", fullPath.c_str());
+                    return;
+                }
+
+                // Open the file so we can read its header
+                int fd = open(fullPath.c_str(), O_RDONLY);
+                if (fd == -1) {
+                    ALOGE("Cache error - failed to open fullPath: %s, error: %s", fullPath.c_str(),
+                          std::strerror(errno));
+                    return;
+                }
+
+                // Look up the details we track about each file
+                size_t valueSize = getValueSizeFromFile(fd, fullPath);
+
+                // If the cache entry is damaged or no good, remove it
+                // TODO: Perform any other checks
+                if (valueSize <= 0 || st.st_size <= 0 || st.st_atime <= 0) {
+                    ALOGV("INIT: Entry %u has a problem! Removing.", entryHash);
+                    if (remove(fullPath.c_str()) != 0) {
+                        ALOGE("Error removing %s: %s", fullPath.c_str(), std::strerror(errno));
+                    }
+                    continue;
+                }
+
+                ALOGV("INIT: Entry %u is good, tracking it now.", entryHash);
+
+                // Note: Converting from off_t (signed) to size_t (unsigned)
+                size_t fileSize = static_cast<size_t>(st.st_size);
+                time_t accessTime = st.st_atime;
+
+                // Track details for rapid lookup later
+                trackEntry(entryHash, valueSize, fileSize, accessTime);
+
+                // Track the total size
+                increaseTotalCacheSize(fileSize);
+
+                // Preload the entry for fast retrieval
+                if ((mHotCacheSize + fileSize) < mHotCacheLimit) {
+                    // Memory map the file
+                    uint8_t* mappedEntry = reinterpret_cast<uint8_t*>(
+                            mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0));
+                    if (mappedEntry == MAP_FAILED) {
+                        ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
+                        close(fd);
+                        continue;
+                    }
+
+                    ALOGV("INIT: Populating hot cache with fd = %i, cacheEntry = %p for "
+                          "entryHash %u",
+                          fd, mappedEntry, entryHash);
+
+                    // Track the details of the preload so they can be retrieved later
+                    if (!addToHotCache(entryHash, fd, mappedEntry, fileSize)) {
+                        ALOGE("INIT Failed to add %u to hot cache", entryHash);
+                        munmap(mappedEntry, fileSize);
+                        close(fd);
+                        return;
+                    }
+                } else {
+                    close(fd);
+                }
+            }
+            closedir(dir);
+        } else {
+            ALOGE("Unable to open filename: %s", mMultifileDirName.c_str());
+        }
+    } else {
+        // If the multifile directory does not exist, create it and start from scratch
+        if (mkdir(mMultifileDirName.c_str(), 0755) != 0 && (errno != EEXIST)) {
+            ALOGE("Unable to create directory (%s), errno (%i)", mMultifileDirName.c_str(), errno);
+        }
+    }
+
+    mInitialized = true;
+}
+
+MultifileBlobCache::~MultifileBlobCache() {
+    if (!mInitialized) {
+        return;
+    }
+
+    // Inform the worker thread we're done
+    ALOGV("DESCTRUCTOR: Shutting down worker thread");
+    DeferredTask task(TaskCommand::Exit);
+    queueTask(std::move(task));
+
+    // Wait for it to complete
+    ALOGV("DESCTRUCTOR: Waiting for worker thread to complete");
+    waitForWorkComplete();
+    if (mTaskThread.joinable()) {
+        mTaskThread.join();
+    }
+}
+
+// Set will add the entry to hot cache and start a deferred process to write it to disk
+void MultifileBlobCache::set(const void* key, EGLsizeiANDROID keySize, const void* value,
+                             EGLsizeiANDROID valueSize) {
+    if (!mInitialized) {
+        return;
+    }
+
+    // Ensure key and value are under their limits
+    if (keySize > mMaxKeySize || valueSize > mMaxValueSize) {
+        ALOGV("SET: keySize (%lu vs %zu) or valueSize (%lu vs %zu) too large", keySize, mMaxKeySize,
+              valueSize, mMaxValueSize);
+        return;
+    }
+
+    // Generate a hash of the key and use it to track this entry
+    uint32_t entryHash = android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
+
+    size_t fileSize = sizeof(MultifileHeader) + keySize + valueSize;
+
+    // If we're going to be over the cache limit, kick off a trim to clear space
+    if (getTotalSize() + fileSize > mMaxTotalSize) {
+        ALOGV("SET: Cache is full, calling trimCache to clear space");
+        trimCache(mMaxTotalSize);
+    }
+
+    ALOGV("SET: Add %u to cache", entryHash);
+
+    uint8_t* buffer = new uint8_t[fileSize];
+
+    // Write the key and value after the header
+    android::MultifileHeader header = {keySize, valueSize};
+    memcpy(static_cast<void*>(buffer), static_cast<const void*>(&header),
+           sizeof(android::MultifileHeader));
+    memcpy(static_cast<void*>(buffer + sizeof(MultifileHeader)), static_cast<const void*>(key),
+           keySize);
+    memcpy(static_cast<void*>(buffer + sizeof(MultifileHeader) + keySize),
+           static_cast<const void*>(value), valueSize);
+
+    std::string fullPath = mMultifileDirName + "/" + std::to_string(entryHash);
+
+    // Track the size and access time for quick recall
+    trackEntry(entryHash, valueSize, fileSize, time(0));
+
+    // Update the overall cache size
+    increaseTotalCacheSize(fileSize);
+
+    // Keep the entry in hot cache for quick retrieval
+    ALOGV("SET: Adding %u to hot cache.", entryHash);
+
+    // Sending -1 as the fd indicates we don't have an fd for this
+    if (!addToHotCache(entryHash, -1, buffer, fileSize)) {
+        ALOGE("GET: Failed to add %u to hot cache", entryHash);
+        return;
+    }
+
+    // Track that we're creating a pending write for this entry
+    // Include the buffer to handle the case when multiple writes are pending for an entry
+    mDeferredWrites.insert(std::make_pair(entryHash, buffer));
+
+    // Create deferred task to write to storage
+    ALOGV("SET: Adding task to queue.");
+    DeferredTask task(TaskCommand::WriteToDisk);
+    task.initWriteToDisk(entryHash, fullPath, buffer, fileSize);
+    queueTask(std::move(task));
+}
+
+// Get will check the hot cache, then load it from disk if needed
+EGLsizeiANDROID MultifileBlobCache::get(const void* key, EGLsizeiANDROID keySize, void* value,
+                                        EGLsizeiANDROID valueSize) {
+    if (!mInitialized) {
+        return 0;
+    }
+
+    // Ensure key and value are under their limits
+    if (keySize > mMaxKeySize || valueSize > mMaxValueSize) {
+        ALOGV("GET: keySize (%lu vs %zu) or valueSize (%lu vs %zu) too large", keySize, mMaxKeySize,
+              valueSize, mMaxValueSize);
+        return 0;
+    }
+
+    // Generate a hash of the key and use it to track this entry
+    uint32_t entryHash = android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
+
+    // See if we have this file
+    if (!contains(entryHash)) {
+        ALOGV("GET: Cache MISS - cache does not contain entry: %u", entryHash);
+        return 0;
+    }
+
+    // Look up the data for this entry
+    MultifileEntryStats entryStats = getEntryStats(entryHash);
+
+    size_t cachedValueSize = entryStats.valueSize;
+    if (cachedValueSize > valueSize) {
+        ALOGV("GET: Cache MISS - valueSize not large enough (%lu) for entry %u, returning required"
+              "size (%zu)",
+              valueSize, entryHash, cachedValueSize);
+        return cachedValueSize;
+    }
+
+    // We have the file and have enough room to write it out, return the entry
+    ALOGV("GET: Cache HIT - cache contains entry: %u", entryHash);
+
+    // Look up the size of the file
+    size_t fileSize = entryStats.fileSize;
+    if (keySize > fileSize) {
+        ALOGW("keySize (%lu) is larger than entrySize (%zu). This is a hash collision or modified "
+              "file",
+              keySize, fileSize);
+        return 0;
+    }
+
+    std::string fullPath = mMultifileDirName + "/" + std::to_string(entryHash);
+
+    // Open the hashed filename path
+    uint8_t* cacheEntry = 0;
+
+    // Check hot cache
+    if (mHotCache.find(entryHash) != mHotCache.end()) {
+        ALOGV("GET: HotCache HIT for entry %u", entryHash);
+        cacheEntry = mHotCache[entryHash].entryBuffer;
+    } else {
+        ALOGV("GET: HotCache MISS for entry: %u", entryHash);
+
+        if (mDeferredWrites.find(entryHash) != mDeferredWrites.end()) {
+            // Wait for writes to complete if there is an outstanding write for this entry
+            ALOGV("GET: Waiting for write to complete for %u", entryHash);
+            waitForWorkComplete();
+        }
+
+        // Open the entry file
+        int fd = open(fullPath.c_str(), O_RDONLY);
+        if (fd == -1) {
+            ALOGE("Cache error - failed to open fullPath: %s, error: %s", fullPath.c_str(),
+                  std::strerror(errno));
+            return 0;
+        }
+
+        // Memory map the file
+        cacheEntry =
+                reinterpret_cast<uint8_t*>(mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0));
+        if (cacheEntry == MAP_FAILED) {
+            ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
+            close(fd);
+            return 0;
+        }
+
+        ALOGV("GET: Adding %u to hot cache", entryHash);
+        if (!addToHotCache(entryHash, fd, cacheEntry, fileSize)) {
+            ALOGE("GET: Failed to add %u to hot cache", entryHash);
+            return 0;
+        }
+
+        cacheEntry = mHotCache[entryHash].entryBuffer;
+    }
+
+    // Ensure the header matches
+    MultifileHeader* header = reinterpret_cast<MultifileHeader*>(cacheEntry);
+    if (header->keySize != keySize || header->valueSize != valueSize) {
+        ALOGW("Mismatch on keySize(%ld vs. cached %ld) or valueSize(%ld vs. cached %ld) compared "
+              "to cache header values for fullPath: %s",
+              keySize, header->keySize, valueSize, header->valueSize, fullPath.c_str());
+        removeFromHotCache(entryHash);
+        return 0;
+    }
+
+    // Compare the incoming key with our stored version (the beginning of the entry)
+    uint8_t* cachedKey = cacheEntry + sizeof(MultifileHeader);
+    int compare = memcmp(cachedKey, key, keySize);
+    if (compare != 0) {
+        ALOGW("Cached key and new key do not match! This is a hash collision or modified file");
+        removeFromHotCache(entryHash);
+        return 0;
+    }
+
+    // Remaining entry following the key is the value
+    uint8_t* cachedValue = cacheEntry + (keySize + sizeof(MultifileHeader));
+    memcpy(value, cachedValue, cachedValueSize);
+
+    return cachedValueSize;
+}
+
+void MultifileBlobCache::finish() {
+    if (!mInitialized) {
+        return;
+    }
+
+    // Wait for all deferred writes to complete
+    ALOGV("FINISH: Waiting for work to complete.");
+    waitForWorkComplete();
+
+    // Close all entries in the hot cache
+    for (auto hotCacheIter = mHotCache.begin(); hotCacheIter != mHotCache.end();) {
+        uint32_t entryHash = hotCacheIter->first;
+        MultifileHotCache entry = hotCacheIter->second;
+
+        ALOGV("FINISH: Closing hot cache entry for %u", entryHash);
+        freeHotCacheEntry(entry);
+
+        mHotCache.erase(hotCacheIter++);
+    }
+}
+
+void MultifileBlobCache::trackEntry(uint32_t entryHash, EGLsizeiANDROID valueSize, size_t fileSize,
+                                    time_t accessTime) {
+    mEntries.insert(entryHash);
+    mEntryStats[entryHash] = {valueSize, fileSize, accessTime};
+}
+
+bool MultifileBlobCache::contains(uint32_t hashEntry) const {
+    return mEntries.find(hashEntry) != mEntries.end();
+}
+
+MultifileEntryStats MultifileBlobCache::getEntryStats(uint32_t entryHash) {
+    return mEntryStats[entryHash];
+}
+
+void MultifileBlobCache::increaseTotalCacheSize(size_t fileSize) {
+    mTotalCacheSize += fileSize;
+}
+
+void MultifileBlobCache::decreaseTotalCacheSize(size_t fileSize) {
+    mTotalCacheSize -= fileSize;
+}
+
+bool MultifileBlobCache::addToHotCache(uint32_t newEntryHash, int newFd, uint8_t* newEntryBuffer,
+                                       size_t newEntrySize) {
+    ALOGV("HOTCACHE(ADD): Adding %u to hot cache", newEntryHash);
+
+    // Clear space if we need to
+    if ((mHotCacheSize + newEntrySize) > mHotCacheLimit) {
+        ALOGV("HOTCACHE(ADD): mHotCacheSize (%zu) + newEntrySize (%zu) is to big for "
+              "mHotCacheLimit "
+              "(%zu), freeing up space for %u",
+              mHotCacheSize, newEntrySize, mHotCacheLimit, newEntryHash);
+
+        // Wait for all the files to complete writing so our hot cache is accurate
+        waitForWorkComplete();
+
+        // Free up old entries until under the limit
+        for (auto hotCacheIter = mHotCache.begin(); hotCacheIter != mHotCache.end();) {
+            uint32_t oldEntryHash = hotCacheIter->first;
+            MultifileHotCache oldEntry = hotCacheIter->second;
+
+            // Move our iterator before deleting the entry
+            hotCacheIter++;
+            if (!removeFromHotCache(oldEntryHash)) {
+                ALOGE("HOTCACHE(ADD): Unable to remove entry %u", oldEntryHash);
+                return false;
+            }
+
+            // Clear at least half the hot cache
+            if ((mHotCacheSize + newEntrySize) <= mHotCacheLimit / 2) {
+                ALOGV("HOTCACHE(ADD): Freed enough space for %zu", mHotCacheSize);
+                break;
+            }
+        }
+    }
+
+    // Track it
+    mHotCache[newEntryHash] = {newFd, newEntryBuffer, newEntrySize};
+    mHotCacheSize += newEntrySize;
+
+    ALOGV("HOTCACHE(ADD): New hot cache size: %zu", mHotCacheSize);
+
+    return true;
+}
+
+bool MultifileBlobCache::removeFromHotCache(uint32_t entryHash) {
+    if (mHotCache.find(entryHash) != mHotCache.end()) {
+        ALOGV("HOTCACHE(REMOVE): Removing %u from hot cache", entryHash);
+
+        // Wait for all the files to complete writing so our hot cache is accurate
+        waitForWorkComplete();
+
+        ALOGV("HOTCACHE(REMOVE): Closing hot cache entry for %u", entryHash);
+        MultifileHotCache entry = mHotCache[entryHash];
+        freeHotCacheEntry(entry);
+
+        // Delete the entry from our tracking
+        mHotCacheSize -= entry.entrySize;
+        mHotCache.erase(entryHash);
+
+        return true;
+    }
+
+    return false;
+}
+
+bool MultifileBlobCache::applyLRU(size_t cacheLimit) {
+    // Walk through our map of sorted last access times and remove files until under the limit
+    for (auto cacheEntryIter = mEntryStats.begin(); cacheEntryIter != mEntryStats.end();) {
+        uint32_t entryHash = cacheEntryIter->first;
+
+        ALOGV("LRU: Removing entryHash %u", entryHash);
+
+        // Track the overall size
+        MultifileEntryStats entryStats = getEntryStats(entryHash);
+        decreaseTotalCacheSize(entryStats.fileSize);
+
+        // Remove it from hot cache if present
+        removeFromHotCache(entryHash);
+
+        // Remove it from the system
+        std::string entryPath = mMultifileDirName + "/" + std::to_string(entryHash);
+        if (remove(entryPath.c_str()) != 0) {
+            ALOGE("LRU: Error removing %s: %s", entryPath.c_str(), std::strerror(errno));
+            return false;
+        }
+
+        // Increment the iterator before clearing the entry
+        cacheEntryIter++;
+
+        // Delete the entry from our tracking
+        size_t count = mEntryStats.erase(entryHash);
+        if (count != 1) {
+            ALOGE("LRU: Failed to remove entryHash (%u) from mEntryStats", entryHash);
+            return false;
+        }
+
+        // See if it has been reduced enough
+        size_t totalCacheSize = getTotalSize();
+        if (totalCacheSize <= cacheLimit) {
+            // Success
+            ALOGV("LRU: Reduced cache to %zu", totalCacheSize);
+            return true;
+        }
+    }
+
+    ALOGV("LRU: Cache is emptry");
+    return false;
+}
+
+// When removing files, the divisor controls what fraction of the overall limit the cache is
+// trimmed down to: a divisor of two reduces the cache to 50%, four to 25%, and so on.
+constexpr uint32_t kCacheLimitDivisor = 2;
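+// For example, with the default 32 MB multifile limit a trim pass removes entries until the cache
+// is at or below roughly 16 MB.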
+
+// Calculate the cache size and remove old entries until under the limit
+void MultifileBlobCache::trimCache(size_t cacheByteLimit) {
+    // Start with the value provided by egl_cache
+    size_t limit = cacheByteLimit;
+
+    // Wait for all deferred writes to complete
+    waitForWorkComplete();
+
+    size_t size = getTotalSize();
+
+    // If size is larger than the threshold, remove files using LRU
+    if (size > limit) {
+        ALOGV("TRIM: Multifile cache size is larger than %zu, removing old entries",
+              cacheByteLimit);
+        if (!applyLRU(limit / kCacheLimitDivisor)) {
+            ALOGE("Error when clearing multifile shader cache");
+            return;
+        }
+    }
+}
+
+// This function performs a task.  It only knows how to write files to disk,
+// but it could be expanded if needed.
+void MultifileBlobCache::processTask(DeferredTask& task) {
+    switch (task.getTaskCommand()) {
+        case TaskCommand::Exit: {
+            ALOGV("DEFERRED: Shutting down");
+            return;
+        }
+        case TaskCommand::WriteToDisk: {
+            uint32_t entryHash = task.getEntryHash();
+            std::string& fullPath = task.getFullPath();
+            uint8_t* buffer = task.getBuffer();
+            size_t bufferSize = task.getBufferSize();
+
+            // Create the file or reset it if already present, read+write for user only
+            int fd = open(fullPath.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
+            if (fd == -1) {
+                ALOGE("Cache error in SET - failed to open fullPath: %s, error: %s",
+                      fullPath.c_str(), std::strerror(errno));
+                return;
+            }
+
+            ALOGV("DEFERRED: Opened fd %i from %s", fd, fullPath.c_str());
+
+            ssize_t result = write(fd, buffer, bufferSize);
+            if (result != bufferSize) {
+                ALOGE("Error writing fileSize to cache entry (%s): %s", fullPath.c_str(),
+                      std::strerror(errno));
+                return;
+            }
+
+            ALOGV("DEFERRED: Completed write for: %s", fullPath.c_str());
+            close(fd);
+
+            // Erase the entry from mDeferredWrites
+            // Since there could be multiple outstanding writes for an entry, find the matching one
+            typedef std::multimap<uint32_t, uint8_t*>::iterator entryIter;
+            std::pair<entryIter, entryIter> iterPair = mDeferredWrites.equal_range(entryHash);
+            for (entryIter it = iterPair.first; it != iterPair.second; ++it) {
+                if (it->second == buffer) {
+                    ALOGV("DEFERRED: Marking write complete for %u at %p", it->first, it->second);
+                    mDeferredWrites.erase(it);
+                    break;
+                }
+            }
+
+            return;
+        }
+        default: {
+            ALOGE("DEFERRED: Unhandled task type");
+            return;
+        }
+    }
+}
+
+// This function will wait until tasks arrive, then execute them
+// If the exit command is submitted, the loop will terminate
+void MultifileBlobCache::processTasksImpl(bool* exitThread) {
+    while (true) {
+        std::unique_lock<std::mutex> lock(mWorkerMutex);
+        if (mTasks.empty()) {
+            ALOGV("WORKER: No tasks available, waiting");
+            mWorkerThreadIdle = true;
+            mWorkerIdleCondition.notify_all();
+            // Only wake if notified and command queue is not empty
+            mWorkAvailableCondition.wait(lock, [this] { return !mTasks.empty(); });
+        }
+
+        ALOGV("WORKER: Task available, waking up.");
+        mWorkerThreadIdle = false;
+        DeferredTask task = std::move(mTasks.front());
+        mTasks.pop();
+
+        if (task.getTaskCommand() == TaskCommand::Exit) {
+            ALOGV("WORKER: Exiting work loop.");
+            *exitThread = true;
+            mWorkerThreadIdle = true;
+            mWorkerIdleCondition.notify_one();
+            return;
+        }
+
+        lock.unlock();
+        processTask(task);
+    }
+}
+
+// Process tasks until the exit task is submitted
+void MultifileBlobCache::processTasks() {
+    while (true) {
+        bool exitThread = false;
+        processTasksImpl(&exitThread);
+        if (exitThread) {
+            break;
+        }
+    }
+}
+
+// Add a task to the queue to be processed by the worker thread
+void MultifileBlobCache::queueTask(DeferredTask&& task) {
+    std::lock_guard<std::mutex> queueLock(mWorkerMutex);
+    mTasks.emplace(std::move(task));
+    mWorkAvailableCondition.notify_one();
+}
+
+// Wait until all tasks have been completed
+void MultifileBlobCache::waitForWorkComplete() {
+    std::unique_lock<std::mutex> lock(mWorkerMutex);
+    mWorkerIdleCondition.wait(lock, [this] { return (mTasks.empty() && mWorkerThreadIdle); });
+}
+
+}; // namespace android
\ No newline at end of file
diff --git a/opengl/libs/EGL/MultifileBlobCache.h b/opengl/libs/EGL/MultifileBlobCache.h
new file mode 100644
index 0000000..c0cc9dc
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache.h
@@ -0,0 +1,167 @@
+/*
+ ** Copyright 2022, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ **     http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#ifndef ANDROID_MULTIFILE_BLOB_CACHE_H
+#define ANDROID_MULTIFILE_BLOB_CACHE_H
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+
+#include <future>
+#include <map>
+#include <queue>
+#include <string>
+#include <thread>
+#include <unordered_map>
+#include <unordered_set>
+
+namespace android {
+
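+// Each cache entry is stored in its own file, named by the entry hash, whose contents are laid
+// out as [MultifileHeader][key][value].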
+struct MultifileHeader {
+    EGLsizeiANDROID keySize;
+    EGLsizeiANDROID valueSize;
+};
+
+struct MultifileEntryStats {
+    EGLsizeiANDROID valueSize;
+    size_t fileSize;
+    time_t accessTime;
+};
+
+struct MultifileHotCache {
+    int entryFd;
+    uint8_t* entryBuffer;
+    size_t entrySize;
+};
+
+enum class TaskCommand {
+    Invalid = 0,
+    WriteToDisk,
+    Exit,
+};
+
+class DeferredTask {
+public:
+    DeferredTask(TaskCommand command)
+          : mCommand(command), mEntryHash(0), mBuffer(nullptr), mBufferSize(0) {}
+
+    TaskCommand getTaskCommand() { return mCommand; }
+
+    void initWriteToDisk(uint32_t entryHash, std::string fullPath, uint8_t* buffer,
+                         size_t bufferSize) {
+        mCommand = TaskCommand::WriteToDisk;
+        mEntryHash = entryHash;
+        mFullPath = std::move(fullPath);
+        mBuffer = buffer;
+        mBufferSize = bufferSize;
+    }
+
+    uint32_t getEntryHash() { return mEntryHash; }
+    std::string& getFullPath() { return mFullPath; }
+    uint8_t* getBuffer() { return mBuffer; }
+    size_t getBufferSize() { return mBufferSize; };
+
+private:
+    TaskCommand mCommand;
+
+    // Parameters for WriteToDisk
+    uint32_t mEntryHash;
+    std::string mFullPath;
+    uint8_t* mBuffer;
+    size_t mBufferSize;
+};
+
+class MultifileBlobCache {
+public:
+    MultifileBlobCache(size_t maxTotalSize, size_t maxHotCacheSize, const std::string& baseDir);
+    ~MultifileBlobCache();
+
+    void set(const void* key, EGLsizeiANDROID keySize, const void* value,
+             EGLsizeiANDROID valueSize);
+    EGLsizeiANDROID get(const void* key, EGLsizeiANDROID keySize, void* value,
+                        EGLsizeiANDROID valueSize);
+
+    void finish();
+
+    size_t getTotalSize() const { return mTotalCacheSize; }
+
+private:
+    void trackEntry(uint32_t entryHash, EGLsizeiANDROID valueSize, size_t fileSize,
+                    time_t accessTime);
+    bool contains(uint32_t entryHash) const;
+    bool removeEntry(uint32_t entryHash);
+    MultifileEntryStats getEntryStats(uint32_t entryHash);
+
+    size_t getFileSize(uint32_t entryHash);
+    size_t getValueSize(uint32_t entryHash);
+
+    void increaseTotalCacheSize(size_t fileSize);
+    void decreaseTotalCacheSize(size_t fileSize);
+
+    bool addToHotCache(uint32_t entryHash, int fd, uint8_t* entryBuffer, size_t entrySize);
+    bool removeFromHotCache(uint32_t entryHash);
+
+    void trimCache(size_t cacheByteLimit);
+    bool applyLRU(size_t cacheLimit);
+
+    bool mInitialized;
+    std::string mMultifileDirName;
+
+    std::unordered_set<uint32_t> mEntries;
+    std::unordered_map<uint32_t, MultifileEntryStats> mEntryStats;
+    std::unordered_map<uint32_t, MultifileHotCache> mHotCache;
+
+    size_t mMaxKeySize;
+    size_t mMaxValueSize;
+    size_t mMaxTotalSize;
+    size_t mTotalCacheSize;
+    size_t mHotCacheLimit;
+    size_t mHotCacheEntryLimit;
+    size_t mHotCacheSize;
+
+    // Below are the components used for deferred writes
+
+    // Track whether we have pending writes for an entry
+    std::multimap<uint32_t, uint8_t*> mDeferredWrites;
+
+    // Functions to work through tasks in the queue
+    void processTasks();
+    void processTasksImpl(bool* exitThread);
+    void processTask(DeferredTask& task);
+
+    // Used by main thread to create work for the worker thread
+    void queueTask(DeferredTask&& task);
+
+    // Used by main thread to wait for worker thread to complete all outstanding work.
+    void waitForWorkComplete();
+
+    std::thread mTaskThread;
+    std::queue<DeferredTask> mTasks;
+    std::mutex mWorkerMutex;
+
+    // This condition will block the worker thread until a task is queued
+    std::condition_variable mWorkAvailableCondition;
+
+    // This condition will block the main thread while the worker thread still has tasks
+    std::condition_variable mWorkerIdleCondition;
+
+    // Tracks whether the worker thread has finished all queued tasks and is idle
+    bool mWorkerThreadIdle;
+};
+
+}; // namespace android
+
+#endif // ANDROID_MULTIFILE_BLOB_CACHE_H
diff --git a/opengl/libs/EGL/MultifileBlobCache_test.cpp b/opengl/libs/EGL/MultifileBlobCache_test.cpp
new file mode 100644
index 0000000..1a55a4f
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache_test.cpp
@@ -0,0 +1,200 @@
+/*
+ ** Copyright 2023, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ **     http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#include "MultifileBlobCache.h"
+
+#include <android-base/test_utils.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <stdio.h>
+
+#include <memory>
+
+namespace android {
+
+template <typename T>
+using sp = std::shared_ptr<T>;
+
+constexpr size_t kMaxTotalSize = 32 * 1024;
+constexpr size_t kMaxPreloadSize = 8 * 1024;
+
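+// These mirror the per-entry limits the cache derives from its hot cache size (limit / 4 for
+// keys, limit / 2 for values), so the tests below can probe those boundaries exactly.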
+constexpr size_t kMaxKeySize = kMaxPreloadSize / 4;
+constexpr size_t kMaxValueSize = kMaxPreloadSize / 2;
+
+class MultifileBlobCacheTest : public ::testing::Test {
+protected:
+    virtual void SetUp() {
+        mTempFile.reset(new TemporaryFile());
+        mMBC.reset(new MultifileBlobCache(kMaxTotalSize, kMaxPreloadSize, &mTempFile->path[0]));
+    }
+
+    virtual void TearDown() { mMBC.reset(); }
+
+    std::unique_ptr<TemporaryFile> mTempFile;
+    std::unique_ptr<MultifileBlobCache> mMBC;
+};
+
+TEST_F(MultifileBlobCacheTest, CacheSingleValueSucceeds) {
+    unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+    ASSERT_EQ('e', buf[0]);
+    ASSERT_EQ('f', buf[1]);
+    ASSERT_EQ('g', buf[2]);
+    ASSERT_EQ('h', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, CacheTwoValuesSucceeds) {
+    unsigned char buf[2] = {0xee, 0xee};
+    mMBC->set("ab", 2, "cd", 2);
+    mMBC->set("ef", 2, "gh", 2);
+    ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+    ASSERT_EQ('c', buf[0]);
+    ASSERT_EQ('d', buf[1]);
+    ASSERT_EQ(size_t(2), mMBC->get("ef", 2, buf, 2));
+    ASSERT_EQ('g', buf[0]);
+    ASSERT_EQ('h', buf[1]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetSetTwiceSucceeds) {
+    unsigned char buf[2] = {0xee, 0xee};
+    mMBC->set("ab", 2, "cd", 2);
+    ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+    ASSERT_EQ('c', buf[0]);
+    ASSERT_EQ('d', buf[1]);
+    // Use the same key, but different value
+    mMBC->set("ab", 2, "ef", 2);
+    ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+    ASSERT_EQ('e', buf[0]);
+    ASSERT_EQ('f', buf[1]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetOnlyWritesInsideBounds) {
+    unsigned char buf[6] = {0xee, 0xee, 0xee, 0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf + 1, 4));
+    ASSERT_EQ(0xee, buf[0]);
+    ASSERT_EQ('e', buf[1]);
+    ASSERT_EQ('f', buf[2]);
+    ASSERT_EQ('g', buf[3]);
+    ASSERT_EQ('h', buf[4]);
+    ASSERT_EQ(0xee, buf[5]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetOnlyWritesIfBufferIsLargeEnough) {
+    unsigned char buf[3] = {0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 3));
+    ASSERT_EQ(0xee, buf[0]);
+    ASSERT_EQ(0xee, buf[1]);
+    ASSERT_EQ(0xee, buf[2]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetDoesntAccessNullBuffer) {
+    mMBC->set("abcd", 4, "efgh", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, nullptr, 0));
+}
+
+TEST_F(MultifileBlobCacheTest, MultipleSetsCacheLatestValue) {
+    unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    mMBC->set("abcd", 4, "ijkl", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+    ASSERT_EQ('i', buf[0]);
+    ASSERT_EQ('j', buf[1]);
+    ASSERT_EQ('k', buf[2]);
+    ASSERT_EQ('l', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, SecondSetKeepsFirstValueIfTooLarge) {
+    unsigned char buf[kMaxValueSize + 1] = {0xee, 0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    mMBC->set("abcd", 4, buf, kMaxValueSize + 1);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+    ASSERT_EQ('e', buf[0]);
+    ASSERT_EQ('f', buf[1]);
+    ASSERT_EQ('g', buf[2]);
+    ASSERT_EQ('h', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, DoesntCacheIfKeyIsTooBig) {
+    char key[kMaxKeySize + 1];
+    unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+    for (int i = 0; i < kMaxKeySize + 1; i++) {
+        key[i] = 'a';
+    }
+    mMBC->set(key, kMaxKeySize + 1, "bbbb", 4);
+    ASSERT_EQ(size_t(0), mMBC->get(key, kMaxKeySize + 1, buf, 4));
+    ASSERT_EQ(0xee, buf[0]);
+    ASSERT_EQ(0xee, buf[1]);
+    ASSERT_EQ(0xee, buf[2]);
+    ASSERT_EQ(0xee, buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, DoesntCacheIfValueIsTooBig) {
+    char buf[kMaxValueSize + 1];
+    for (int i = 0; i < kMaxValueSize + 1; i++) {
+        buf[i] = 'b';
+    }
+    mMBC->set("abcd", 4, buf, kMaxValueSize + 1);
+    for (int i = 0; i < kMaxValueSize + 1; i++) {
+        buf[i] = 0xee;
+    }
+    ASSERT_EQ(size_t(0), mMBC->get("abcd", 4, buf, kMaxValueSize + 1));
+    for (int i = 0; i < kMaxValueSize + 1; i++) {
+        SCOPED_TRACE(i);
+        ASSERT_EQ(0xee, buf[i]);
+    }
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMaxKeySizeSucceeds) {
+    char key[kMaxKeySize];
+    unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+    for (int i = 0; i < kMaxKeySize; i++) {
+        key[i] = 'a';
+    }
+    mMBC->set(key, kMaxKeySize, "wxyz", 4);
+    ASSERT_EQ(size_t(4), mMBC->get(key, kMaxKeySize, buf, 4));
+    ASSERT_EQ('w', buf[0]);
+    ASSERT_EQ('x', buf[1]);
+    ASSERT_EQ('y', buf[2]);
+    ASSERT_EQ('z', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMaxValueSizeSucceeds) {
+    char buf[kMaxValueSize];
+    for (int i = 0; i < kMaxValueSize; i++) {
+        buf[i] = 'b';
+    }
+    mMBC->set("abcd", 4, buf, kMaxValueSize);
+    for (int i = 0; i < kMaxValueSize; i++) {
+        buf[i] = 0xee;
+    }
+    mMBC->get("abcd", 4, buf, kMaxValueSize);
+    for (int i = 0; i < kMaxValueSize; i++) {
+        SCOPED_TRACE(i);
+        ASSERT_EQ('b', buf[i]);
+    }
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMinKeyAndValueSizeSucceeds) {
+    unsigned char buf[1] = {0xee};
+    mMBC->set("x", 1, "y", 1);
+    ASSERT_EQ(size_t(1), mMBC->get("x", 1, buf, 1));
+    ASSERT_EQ('y', buf[0]);
+}
+
+} // namespace android
diff --git a/opengl/libs/EGL/egl_cache.cpp b/opengl/libs/EGL/egl_cache.cpp
index 1e8a348..b00ee33 100644
--- a/opengl/libs/EGL/egl_cache.cpp
+++ b/opengl/libs/EGL/egl_cache.cpp
@@ -14,6 +14,8 @@
  ** limitations under the License.
  */
 
+// #define LOG_NDEBUG 0
+
 #include "egl_cache.h"
 
 #include <android-base/properties.h>
@@ -25,22 +27,19 @@
 #include <thread>
 
 #include "../egl_impl.h"
-#include "egl_cache_multifile.h"
 #include "egl_display.h"
 
 // Monolithic cache size limits.
-static const size_t maxKeySize = 12 * 1024;
-static const size_t maxValueSize = 64 * 1024;
-static const size_t maxTotalSize = 32 * 1024 * 1024;
+static const size_t kMaxMonolithicKeySize = 12 * 1024;
+static const size_t kMaxMonolithicValueSize = 64 * 1024;
+static const size_t kMaxMonolithicTotalSize = 2 * 1024 * 1024;
 
 // The time in seconds to wait before saving newly inserted monolithic cache entries.
-static const unsigned int deferredSaveDelay = 4;
+static const unsigned int kDeferredMonolithicSaveDelay = 4;
 
-// Multifile cache size limit
-constexpr size_t kMultifileCacheByteLimit = 64 * 1024 * 1024;
-
-// Delay before cleaning up multifile cache entries
-static const unsigned int deferredMultifileCleanupDelaySeconds = 1;
+// Multifile cache size limits
+constexpr uint32_t kMultifileHotCacheLimit = 8 * 1024 * 1024;
+constexpr uint32_t kMultifileCacheByteLimit = 32 * 1024 * 1024;
 
 namespace android {
 
@@ -68,10 +67,7 @@
 // egl_cache_t definition
 //
 egl_cache_t::egl_cache_t()
-      : mInitialized(false),
-        mMultifileMode(false),
-        mCacheByteLimit(maxTotalSize),
-        mMultifileCleanupPending(false) {}
+      : mInitialized(false), mMultifileMode(false), mCacheByteLimit(kMaxMonolithicTotalSize) {}
 
 egl_cache_t::~egl_cache_t() {}
 
@@ -85,7 +81,7 @@
     std::lock_guard<std::mutex> lock(mMutex);
 
     egl_connection_t* const cnx = &gEGLImpl;
-    if (cnx->dso && cnx->major >= 0 && cnx->minor >= 0) {
+    if (display && cnx->dso && cnx->major >= 0 && cnx->minor >= 0) {
         const char* exts = display->disp.queryString.extensions;
         size_t bcExtLen = strlen(BC_EXT_STR);
         size_t extsLen = strlen(exts);
@@ -114,14 +110,36 @@
         }
     }
 
-    // Allow forcing monolithic cache for debug purposes
-    if (base::GetProperty("debug.egl.blobcache.multifilemode", "") == "false") {
-        ALOGD("Forcing monolithic cache due to debug.egl.blobcache.multifilemode == \"false\"");
+    // Check the device config to decide whether multifile should be used
+    if (base::GetBoolProperty("ro.egl.blobcache.multifile", false)) {
+        mMultifileMode = true;
+        ALOGV("Using multifile EGL blobcache");
+    }
+
+    // Allow forcing the mode for debug purposes
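+    // (e.g. adb shell setprop debug.egl.blobcache.multifile true)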
+    std::string mode = base::GetProperty("debug.egl.blobcache.multifile", "");
+    if (mode == "true") {
+        ALOGV("Forcing multifile cache due to debug.egl.blobcache.multifile == %s", mode.c_str());
+        mMultifileMode = true;
+    } else if (mode == "false") {
+        ALOGV("Forcing monolithic cache due to debug.egl.blobcache.multifile == %s", mode.c_str());
         mMultifileMode = false;
     }
 
     if (mMultifileMode) {
-        mCacheByteLimit = kMultifileCacheByteLimit;
+        mCacheByteLimit = static_cast<size_t>(
+                base::GetUintProperty<uint32_t>("ro.egl.blobcache.multifile_limit",
+                                                kMultifileCacheByteLimit));
+
+        // Check for a debug value
+        int debugCacheSize = base::GetIntProperty("debug.egl.blobcache.multifile_limit", -1);
+        if (debugCacheSize >= 0) {
+            ALOGV("Overriding cache limit %zu with %i from debug.egl.blobcache.multifile_limit",
+                  mCacheByteLimit, debugCacheSize);
+            mCacheByteLimit = debugCacheSize;
+        }
+
+        ALOGV("Using multifile EGL blobcache limit of %zu bytes", mCacheByteLimit);
     }
 
     mInitialized = true;
@@ -133,10 +151,10 @@
         mBlobCache->writeToFile();
     }
     mBlobCache = nullptr;
-    if (mMultifileMode) {
-        checkMultifileCacheSize(mCacheByteLimit);
+    if (mMultifileBlobCache) {
+        mMultifileBlobCache->finish();
     }
-    mMultifileMode = false;
+    mMultifileBlobCache = nullptr;
     mInitialized = false;
 }
 
@@ -151,20 +169,8 @@
 
     if (mInitialized) {
         if (mMultifileMode) {
-            setBlobMultifile(key, keySize, value, valueSize, mFilename);
-
-            if (!mMultifileCleanupPending) {
-                mMultifileCleanupPending = true;
-                // Kick off a thread to cull cache files below limit
-                std::thread deferredMultifileCleanupThread([this]() {
-                    sleep(deferredMultifileCleanupDelaySeconds);
-                    std::lock_guard<std::mutex> lock(mMutex);
-                    // Check the size of cache and remove entries to stay under limit
-                    checkMultifileCacheSize(mCacheByteLimit);
-                    mMultifileCleanupPending = false;
-                });
-                deferredMultifileCleanupThread.detach();
-            }
+            MultifileBlobCache* mbc = getMultifileBlobCacheLocked();
+            mbc->set(key, keySize, value, valueSize);
         } else {
             BlobCache* bc = getBlobCacheLocked();
             bc->set(key, keySize, value, valueSize);
@@ -172,7 +178,7 @@
             if (!mSavePending) {
                 mSavePending = true;
                 std::thread deferredSaveThread([this]() {
-                    sleep(deferredSaveDelay);
+                    sleep(kDeferredMonolithicSaveDelay);
                     std::lock_guard<std::mutex> lock(mMutex);
                     if (mInitialized && mBlobCache) {
                         mBlobCache->writeToFile();
@@ -196,15 +202,21 @@
 
     if (mInitialized) {
         if (mMultifileMode) {
-            return getBlobMultifile(key, keySize, value, valueSize, mFilename);
+            MultifileBlobCache* mbc = getMultifileBlobCacheLocked();
+            return mbc->get(key, keySize, value, valueSize);
         } else {
             BlobCache* bc = getBlobCacheLocked();
             return bc->get(key, keySize, value, valueSize);
         }
     }
+
     return 0;
 }
 
+void egl_cache_t::setCacheMode(EGLCacheMode cacheMode) {
+    mMultifileMode = (cacheMode == EGLCacheMode::Multifile);
+}
+
 void egl_cache_t::setCacheFilename(const char* filename) {
     std::lock_guard<std::mutex> lock(mMutex);
     mFilename = filename;
@@ -216,7 +228,7 @@
     if (!mMultifileMode) {
         // If we're not in multifile mode, ensure the cache limit is only being lowered,
         // not increasing above the hard coded platform limit
-        if (cacheByteLimit > maxTotalSize) {
+        if (cacheByteLimit > kMaxMonolithicTotalSize) {
             return;
         }
     }
@@ -226,8 +238,8 @@
 
 size_t egl_cache_t::getCacheSize() {
     std::lock_guard<std::mutex> lock(mMutex);
-    if (mMultifileMode) {
-        return getMultifileCacheSize();
+    if (mMultifileBlobCache) {
+        return mMultifileBlobCache->getTotalSize();
     }
     if (mBlobCache) {
         return mBlobCache->getSize();
@@ -237,9 +249,18 @@
 
 BlobCache* egl_cache_t::getBlobCacheLocked() {
     if (mBlobCache == nullptr) {
-        mBlobCache.reset(new FileBlobCache(maxKeySize, maxValueSize, mCacheByteLimit, mFilename));
+        mBlobCache.reset(new FileBlobCache(kMaxMonolithicKeySize, kMaxMonolithicValueSize,
+                                           mCacheByteLimit, mFilename));
     }
     return mBlobCache.get();
 }
 
+MultifileBlobCache* egl_cache_t::getMultifileBlobCacheLocked() {
+    if (mMultifileBlobCache == nullptr) {
+        mMultifileBlobCache.reset(
+                new MultifileBlobCache(mCacheByteLimit, kMultifileHotCacheLimit, mFilename));
+    }
+    return mMultifileBlobCache.get();
+}
+
 }; // namespace android
diff --git a/opengl/libs/EGL/egl_cache.h b/opengl/libs/EGL/egl_cache.h
index 2dcd803..1399368 100644
--- a/opengl/libs/EGL/egl_cache.h
+++ b/opengl/libs/EGL/egl_cache.h
@@ -25,6 +25,7 @@
 #include <string>
 
 #include "FileBlobCache.h"
+#include "MultifileBlobCache.h"
 
 namespace android {
 
@@ -32,6 +33,11 @@
 
 class EGLAPI egl_cache_t {
 public:
+    enum class EGLCacheMode {
+        Monolithic,
+        Multifile,
+    };
+
     // get returns a pointer to the singleton egl_cache_t object.  This
     // singleton object will never be destroyed.
     static egl_cache_t* get();
@@ -64,6 +70,9 @@
     // cache contents from one program invocation to another.
     void setCacheFilename(const char* filename);
 
+    // Allow setting monolithic or multifile modes
+    void setCacheMode(EGLCacheMode cacheMode);
+
     // Allow the fixed cache limit to be overridden
     void setCacheLimit(int64_t cacheByteLimit);
 
@@ -85,6 +94,9 @@
     // possible.
     BlobCache* getBlobCacheLocked();
 
+    // Get or create the multifile blobcache
+    MultifileBlobCache* getMultifileBlobCacheLocked();
+
     // mInitialized indicates whether the egl_cache_t is in the initialized
     // state.  It is initialized to false at construction time, and gets set to
     // true when initialize is called.  It is set back to false when terminate
@@ -98,6 +110,9 @@
     // first time it's needed.
     std::unique_ptr<FileBlobCache> mBlobCache;
 
+    // The multifile version of blobcache allowing larger contents to be stored
+    std::unique_ptr<MultifileBlobCache> mMultifileBlobCache;
+
     // mFilename is the name of the file for storing cache contents in between
     // program invocations.  It is initialized to an empty string at
     // construction time, and can be set with the setCacheFilename method.  An
@@ -123,11 +138,7 @@
     bool mMultifileMode;
 
     // Cache limit
-    int64_t mCacheByteLimit;
-
-    // Whether we've kicked off a side thread that will check the multifile
-    // cache size and remove entries if needed.
-    bool mMultifileCleanupPending;
+    size_t mCacheByteLimit;
 };
 
 }; // namespace android
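For reference, the new mode API added above is exercised the same way the updated egl_cache_test.cpp later in this change does it. The sketch below is illustrative only (the helper name and the 1024-byte limit are placeholders, not part of the patch) and assumes the egl_cache_t methods exactly as declared above.

#include "egl_cache.h"

// Hypothetical helper showing the intended call sequence; values are placeholders.
void configureShaderCacheForTesting(const char* cachePath) {
    android::egl_cache_t* cache = android::egl_cache_t::get();
    cache->setCacheFilename(cachePath);   // base path for cache files
    cache->setCacheLimit(1024);           // small limit, as in the tests
    cache->setCacheMode(android::egl_cache_t::EGLCacheMode::Multifile);
    // initialize(), setBlob() and getBlob() then behave as before, but backed by
    // MultifileBlobCache instead of the monolithic FileBlobCache.
}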
diff --git a/opengl/libs/EGL/egl_cache_multifile.cpp b/opengl/libs/EGL/egl_cache_multifile.cpp
deleted file mode 100644
index 48e557f..0000000
--- a/opengl/libs/EGL/egl_cache_multifile.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- ** Copyright 2022, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- **     http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-// #define LOG_NDEBUG 0
-
-#include "egl_cache_multifile.h"
-
-#include <android-base/properties.h>
-#include <dirent.h>
-#include <fcntl.h>
-#include <inttypes.h>
-#include <log/log.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <utime.h>
-
-#include <algorithm>
-#include <chrono>
-#include <fstream>
-#include <limits>
-#include <locale>
-#include <map>
-#include <sstream>
-#include <unordered_map>
-
-#include <utils/JenkinsHash.h>
-
-static std::string multifileDirName = "";
-
-using namespace std::literals;
-
-namespace {
-
-// Create a directory for tracking multiple files
-void setupMultifile(const std::string& baseDir) {
-    // If we've already set up the multifile dir in this base directory, we're done
-    if (!multifileDirName.empty() && multifileDirName.find(baseDir) != std::string::npos) {
-        return;
-    }
-
-    // Otherwise, create it
-    multifileDirName = baseDir + ".multifile";
-    if (mkdir(multifileDirName.c_str(), 0755) != 0 && (errno != EEXIST)) {
-        ALOGW("Unable to create directory (%s), errno (%i)", multifileDirName.c_str(), errno);
-    }
-}
-
-// Create a filename that is based on the hash of the key
-std::string getCacheEntryFilename(const void* key, EGLsizeiANDROID keySize,
-                                  const std::string& baseDir) {
-    // Hash the key into a string
-    std::stringstream keyName;
-    keyName << android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
-
-    // Build a filename using dir and hash
-    return baseDir + "/" + keyName.str();
-}
-
-// Determine file age based on stat modification time
-// Newer files have a higher age (time since epoch)
-time_t getFileAge(const std::string& filePath) {
-    struct stat st;
-    if (stat(filePath.c_str(), &st) == 0) {
-        ALOGD("getFileAge returning %" PRId64 " for file age", static_cast<uint64_t>(st.st_mtime));
-        return st.st_mtime;
-    } else {
-        ALOGW("Failed to stat %s", filePath.c_str());
-        return 0;
-    }
-}
-
-size_t getFileSize(const std::string& filePath) {
-    struct stat st;
-    if (stat(filePath.c_str(), &st) != 0) {
-        ALOGE("Unable to stat %s", filePath.c_str());
-        return 0;
-    }
-    return st.st_size;
-}
-
-// Walk through directory entries and track age and size
-// Then iterate through the entries, oldest first, and remove them until under the limit.
-// This will need to be updated if we move to a multilevel cache dir.
-bool applyLRU(size_t cacheLimit) {
-    // Build a multimap of files indexed by age.
-    // They will be automatically sorted smallest (oldest) to largest (newest)
-    std::multimap<time_t, std::string> agesToFiles;
-
-    // Map files to sizes
-    std::unordered_map<std::string, size_t> filesToSizes;
-
-    size_t totalCacheSize = 0;
-
-    DIR* dir;
-    struct dirent* entry;
-    if ((dir = opendir(multifileDirName.c_str())) != nullptr) {
-        while ((entry = readdir(dir)) != nullptr) {
-            if (entry->d_name == "."s || entry->d_name == ".."s) {
-                continue;
-            }
-
-            // Look up each file age
-            std::string fullPath = multifileDirName + "/" + entry->d_name;
-            time_t fileAge = getFileAge(fullPath);
-
-            // Track the files, sorted by age
-            agesToFiles.insert(std::make_pair(fileAge, fullPath));
-
-            // Also track the size so we know how much room we have freed
-            size_t fileSize = getFileSize(fullPath);
-            filesToSizes[fullPath] = fileSize;
-            totalCacheSize += fileSize;
-        }
-        closedir(dir);
-    } else {
-        ALOGE("Unable to open filename: %s", multifileDirName.c_str());
-        return false;
-    }
-
-    if (totalCacheSize <= cacheLimit) {
-        // If LRU was called on a sufficiently small cache, no need to remove anything
-        return true;
-    }
-
-    // Walk through the map of files until we're under the cache size
-    for (const auto& cacheEntryIter : agesToFiles) {
-        time_t entryAge = cacheEntryIter.first;
-        const std::string entryPath = cacheEntryIter.second;
-
-        ALOGD("Removing %s with age %ld", entryPath.c_str(), entryAge);
-        if (std::remove(entryPath.c_str()) != 0) {
-            ALOGE("Error removing %s: %s", entryPath.c_str(), std::strerror(errno));
-            return false;
-        }
-
-        totalCacheSize -= filesToSizes[entryPath];
-        if (totalCacheSize <= cacheLimit) {
-            // Success
-            ALOGV("Reduced cache to %zu", totalCacheSize);
-            return true;
-        } else {
-            ALOGD("Cache size is still too large (%zu), removing more files", totalCacheSize);
-        }
-    }
-
-    // Should never reach this return
-    return false;
-}
-
-} // namespace
-
-namespace android {
-
-void setBlobMultifile(const void* key, EGLsizeiANDROID keySize, const void* value,
-                      EGLsizeiANDROID valueSize, const std::string& baseDir) {
-    if (baseDir.empty()) {
-        return;
-    }
-
-    setupMultifile(baseDir);
-    std::string filename = getCacheEntryFilename(key, keySize, multifileDirName);
-
-    ALOGD("Attempting to open filename for set: %s", filename.c_str());
-    std::ofstream outfile(filename, std::ofstream::binary);
-    if (outfile.fail()) {
-        ALOGW("Unable to open filename: %s", filename.c_str());
-        return;
-    }
-
-    // First write the key
-    outfile.write(static_cast<const char*>(key), keySize);
-    if (outfile.bad()) {
-        ALOGW("Unable to write key to filename: %s", filename.c_str());
-        outfile.close();
-        return;
-    }
-    ALOGD("Wrote %i bytes to out file for key", static_cast<int>(outfile.tellp()));
-
-    // Then write the value
-    outfile.write(static_cast<const char*>(value), valueSize);
-    if (outfile.bad()) {
-        ALOGW("Unable to write value to filename: %s", filename.c_str());
-        outfile.close();
-        return;
-    }
-    ALOGD("Wrote %i bytes to out file for full entry", static_cast<int>(outfile.tellp()));
-
-    outfile.close();
-}
-
-EGLsizeiANDROID getBlobMultifile(const void* key, EGLsizeiANDROID keySize, void* value,
-                                 EGLsizeiANDROID valueSize, const std::string& baseDir) {
-    if (baseDir.empty()) {
-        return 0;
-    }
-
-    setupMultifile(baseDir);
-    std::string filename = getCacheEntryFilename(key, keySize, multifileDirName);
-
-    // Open the hashed filename path
-    ALOGD("Attempting to open filename for get: %s", filename.c_str());
-    int fd = open(filename.c_str(), O_RDONLY);
-
-    // File doesn't exist, this is a MISS, return zero bytes read
-    if (fd == -1) {
-        ALOGD("Cache MISS - failed to open filename: %s, error: %s", filename.c_str(),
-              std::strerror(errno));
-        return 0;
-    }
-
-    ALOGD("Cache HIT - opened filename: %s", filename.c_str());
-
-    // Get the size of the file
-    size_t entrySize = getFileSize(filename);
-    if (keySize > entrySize) {
-        ALOGW("keySize (%lu) is larger than entrySize (%zu). This is a hash collision or modified "
-              "file",
-              keySize, entrySize);
-        close(fd);
-        return 0;
-    }
-
-    // Memory map the file
-    uint8_t* cacheEntry =
-            reinterpret_cast<uint8_t*>(mmap(nullptr, entrySize, PROT_READ, MAP_PRIVATE, fd, 0));
-    if (cacheEntry == MAP_FAILED) {
-        ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
-        close(fd);
-        return 0;
-    }
-
-    // Compare the incoming key with our stored version (the beginning of the entry)
-    int compare = memcmp(cacheEntry, key, keySize);
-    if (compare != 0) {
-        ALOGW("Cached key and new key do not match! This is a hash collision or modified file");
-        munmap(cacheEntry, entrySize);
-        close(fd);
-        return 0;
-    }
-
-    // Keys matched, so remaining cache is value size
-    size_t cachedValueSize = entrySize - keySize;
-
-    // Return actual value size if valueSize is not large enough
-    if (cachedValueSize > valueSize) {
-        ALOGD("Skipping file read, not enough room provided (valueSize): %lu, "
-              "returning required space as %zu",
-              valueSize, cachedValueSize);
-        munmap(cacheEntry, entrySize);
-        close(fd);
-        return cachedValueSize;
-    }
-
-    // Remaining entry following the key is the value
-    uint8_t* cachedValue = cacheEntry + keySize;
-    memcpy(value, cachedValue, cachedValueSize);
-    munmap(cacheEntry, entrySize);
-    close(fd);
-
-    ALOGD("Read %zu bytes from %s", cachedValueSize, filename.c_str());
-    return cachedValueSize;
-}
-
-// Walk through the files in our flat directory, checking the size of each one.
-// Return the total size of normal files in the directory.
-// This will need to be updated if we move to a multilevel cache dir.
-size_t getMultifileCacheSize() {
-    if (multifileDirName.empty()) {
-        return 0;
-    }
-
-    DIR* dir;
-    struct dirent* entry;
-    size_t size = 0;
-
-    ALOGD("Using %s as the multifile cache dir ", multifileDirName.c_str());
-
-    if ((dir = opendir(multifileDirName.c_str())) != nullptr) {
-        while ((entry = readdir(dir)) != nullptr) {
-            if (entry->d_name == "."s || entry->d_name == ".."s) {
-                continue;
-            }
-
-            // Add up the size of all files in the dir
-            std::string fullPath = multifileDirName + "/" + entry->d_name;
-            size += getFileSize(fullPath);
-        }
-        closedir(dir);
-    } else {
-        ALOGW("Unable to open filename: %s", multifileDirName.c_str());
-        return 0;
-    }
-
-    return size;
-}
-
-// When removing files, what fraction of the overall limit should be reached when removing files
-// A divisor of two will decrease the cache to 50%, four to 25% and so on
-constexpr uint32_t kCacheLimitDivisor = 2;
-
-// Calculate the cache size and remove old entries until under the limit
-void checkMultifileCacheSize(size_t cacheByteLimit) {
-    // Start with the value provided by egl_cache
-    size_t limit = cacheByteLimit;
-
-    // Check for a debug value
-    int debugCacheSize = base::GetIntProperty("debug.egl.blobcache.bytelimit", -1);
-    if (debugCacheSize >= 0) {
-        ALOGV("Overriding cache limit %zu with %i from debug.egl.blobcache.bytelimit", limit,
-              debugCacheSize);
-        limit = debugCacheSize;
-    }
-
-    // Tally up the initial amount of cache in use
-    size_t size = getMultifileCacheSize();
-    ALOGD("Multifile cache dir size: %zu", size);
-
-    // If size is larger than the threshold, remove files using LRU
-    if (size > limit) {
-        ALOGV("Multifile cache size is larger than %zu, removing old entries", cacheByteLimit);
-        if (!applyLRU(limit / kCacheLimitDivisor)) {
-            ALOGE("Error when clearing multifile shader cache");
-            return;
-        }
-    }
-    ALOGD("Multifile cache size after reduction: %zu", getMultifileCacheSize());
-}
-
-}; // namespace android
\ No newline at end of file
diff --git a/opengl/libs/EGL/egl_cache_multifile.h b/opengl/libs/EGL/egl_cache_multifile.h
deleted file mode 100644
index ee5fe81..0000000
--- a/opengl/libs/EGL/egl_cache_multifile.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- ** Copyright 2022, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- **     http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-#ifndef ANDROID_EGL_CACHE_MULTIFILE_H
-#define ANDROID_EGL_CACHE_MULTIFILE_H
-
-#include <EGL/egl.h>
-#include <EGL/eglext.h>
-
-#include <string>
-
-namespace android {
-
-void setBlobMultifile(const void* key, EGLsizeiANDROID keySize, const void* value,
-                      EGLsizeiANDROID valueSize, const std::string& baseDir);
-EGLsizeiANDROID getBlobMultifile(const void* key, EGLsizeiANDROID keySize, void* value,
-                                 EGLsizeiANDROID valueSize, const std::string& baseDir);
-size_t getMultifileCacheSize();
-void checkMultifileCacheSize(size_t cacheByteLimit);
-
-}; // namespace android
-
-#endif // ANDROID_EGL_CACHE_MULTIFILE_H
diff --git a/opengl/tests/EGLTest/egl_cache_test.cpp b/opengl/tests/EGLTest/egl_cache_test.cpp
index 265bec4..2b3e3a4 100644
--- a/opengl/tests/EGLTest/egl_cache_test.cpp
+++ b/opengl/tests/EGLTest/egl_cache_test.cpp
@@ -24,7 +24,7 @@
 #include <android-base/test_utils.h>
 
 #include "egl_cache.h"
-#include "egl_cache_multifile.h"
+#include "MultifileBlobCache.h"
 #include "egl_display.h"
 
 #include <memory>
@@ -33,12 +33,16 @@
 
 namespace android {
 
-class EGLCacheTest : public ::testing::Test {
+class EGLCacheTest : public ::testing::TestWithParam<egl_cache_t::EGLCacheMode> {
 protected:
     virtual void SetUp() {
-        mCache = egl_cache_t::get();
+        // Terminate to clean up any previous cache in this process
+        mCache->terminate();
+
         mTempFile.reset(new TemporaryFile());
         mCache->setCacheFilename(&mTempFile->path[0]);
+        mCache->setCacheLimit(1024);
+        mCache->setCacheMode(mCacheMode);
     }
 
     virtual void TearDown() {
@@ -49,11 +53,12 @@
 
     std::string getCachefileName();
 
-    egl_cache_t* mCache;
+    egl_cache_t* mCache = egl_cache_t::get();
     std::unique_ptr<TemporaryFile> mTempFile;
+    egl_cache_t::EGLCacheMode mCacheMode = GetParam();
 };
 
-TEST_F(EGLCacheTest, UninitializedCacheAlwaysMisses) {
+TEST_P(EGLCacheTest, UninitializedCacheAlwaysMisses) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->setBlob("abcd", 4, "efgh", 4);
     ASSERT_EQ(0, mCache->getBlob("abcd", 4, buf, 4));
@@ -63,7 +68,7 @@
     ASSERT_EQ(0xee, buf[3]);
 }
 
-TEST_F(EGLCacheTest, InitializedCacheAlwaysHits) {
+TEST_P(EGLCacheTest, InitializedCacheAlwaysHits) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
     mCache->setBlob("abcd", 4, "efgh", 4);
@@ -74,7 +79,7 @@
     ASSERT_EQ('h', buf[3]);
 }
 
-TEST_F(EGLCacheTest, TerminatedCacheAlwaysMisses) {
+TEST_P(EGLCacheTest, TerminatedCacheAlwaysMisses) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
     mCache->setBlob("abcd", 4, "efgh", 4);
@@ -86,7 +91,7 @@
     ASSERT_EQ(0xee, buf[3]);
 }
 
-TEST_F(EGLCacheTest, ReinitializedCacheContainsValues) {
+TEST_P(EGLCacheTest, ReinitializedCacheContainsValues) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
     mCache->setBlob("abcd", 4, "efgh", 4);
@@ -101,12 +106,12 @@
 
 std::string EGLCacheTest::getCachefileName() {
     // Return the monolithic filename unless we find the multifile dir
-    std::string cachefileName = &mTempFile->path[0];
-    std::string multifileDirName = cachefileName + ".multifile";
+    std::string cachePath = &mTempFile->path[0];
+    std::string multifileDirName = cachePath + ".multifile";
+    std::string cachefileName = "";
 
     struct stat info;
     if (stat(multifileDirName.c_str(), &info) == 0) {
-
         // Ensure we only have one file to manage
         int realFileCount = 0;
 
@@ -121,6 +126,9 @@
                 cachefileName = multifileDirName + "/" + entry->d_name;
                 realFileCount++;
             }
+        } else {
+            printf("Unable to open %s, error: %s\n",
+                   multifileDirName.c_str(), std::strerror(errno));
         }
 
         if (realFileCount != 1) {
@@ -128,14 +136,19 @@
             // violates test assumptions
             cachefileName = "";
         }
+    } else {
+        printf("Unable to stat %s, error: %s\n",
+               multifileDirName.c_str(), std::strerror(errno));
     }
 
     return cachefileName;
 }
 
-TEST_F(EGLCacheTest, ModifiedCacheMisses) {
-    // Turn this back on if multifile becomes the default
-    GTEST_SKIP() << "Skipping test designed for multifile, see b/263574392 and b/246966894";
+TEST_P(EGLCacheTest, ModifiedCacheMisses) {
+    // Skip if not in multifile mode
+    if (mCacheMode == egl_cache_t::EGLCacheMode::Monolithic) {
+        GTEST_SKIP() << "Skipping test designed for multifile";
+    }
 
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
@@ -147,13 +160,13 @@
     ASSERT_EQ('g', buf[2]);
     ASSERT_EQ('h', buf[3]);
 
+    // Ensure the cache file is written to disk
+    mCache->terminate();
+
     // Depending on the cache mode, the file will be in different locations
     std::string cachefileName = getCachefileName();
     ASSERT_TRUE(cachefileName.length() > 0);
 
-    // Ensure the cache file is written to disk
-    mCache->terminate();
-
     // Stomp on the beginning of the cache file, breaking the key match
     const long stomp = 0xbadf00d;
     FILE *file = fopen(cachefileName.c_str(), "w");
@@ -164,14 +177,15 @@
     // Ensure no cache hit
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
     uint8_t buf2[4] = { 0xee, 0xee, 0xee, 0xee };
-    ASSERT_EQ(0, mCache->getBlob("abcd", 4, buf2, 4));
+    // getBlob may return junk for required size, but should not return a cache hit
+    mCache->getBlob("abcd", 4, buf2, 4);
     ASSERT_EQ(0xee, buf2[0]);
     ASSERT_EQ(0xee, buf2[1]);
     ASSERT_EQ(0xee, buf2[2]);
     ASSERT_EQ(0xee, buf2[3]);
 }
 
-TEST_F(EGLCacheTest, TerminatedCacheBelowCacheLimit) {
+TEST_P(EGLCacheTest, TerminatedCacheBelowCacheLimit) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
 
@@ -204,4 +218,8 @@
     ASSERT_LE(mCache->getCacheSize(), 4);
 }
 
+INSTANTIATE_TEST_CASE_P(MonolithicCacheTests,
+        EGLCacheTest, ::testing::Values(egl_cache_t::EGLCacheMode::Monolithic));
+INSTANTIATE_TEST_CASE_P(MultifileCacheTests,
+        EGLCacheTest, ::testing::Values(egl_cache_t::EGLCacheMode::Multifile));
 }
diff --git a/services/inputflinger/dispatcher/CancelationOptions.h b/services/inputflinger/dispatcher/CancelationOptions.h
index 48f9f2b..83e6a60 100644
--- a/services/inputflinger/dispatcher/CancelationOptions.h
+++ b/services/inputflinger/dispatcher/CancelationOptions.h
@@ -30,6 +30,7 @@
         CANCEL_POINTER_EVENTS = 1,
         CANCEL_NON_POINTER_EVENTS = 2,
         CANCEL_FALLBACK_EVENTS = 3,
+        ftl_last = CANCEL_FALLBACK_EVENTS,
     };
 
     // The criterion to use to determine which events should be canceled.
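The ftl_last enumerator added above is what lets InputDispatcher.cpp print the mode via ftl::enum_string() further down in this patch. A minimal, self-contained sketch of that pattern follows; the Fruit enum is purely illustrative and assumes AOSP's <ftl/enum.h> behaves as it is used in this change.

#include <ftl/enum.h>
#include <string>

enum class Fruit { Apple, Banana, Cherry, ftl_last = Cherry };  // ftl_last bounds the enum range

std::string describe(Fruit f) {
    // Returns "Apple", "Banana" or "Cherry"; ftl_last tells the library where the range ends.
    return ftl::enum_string(f);
}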
diff --git a/services/inputflinger/dispatcher/InputDispatcher.cpp b/services/inputflinger/dispatcher/InputDispatcher.cpp
index 079b80d..9d5bbbd 100644
--- a/services/inputflinger/dispatcher/InputDispatcher.cpp
+++ b/services/inputflinger/dispatcher/InputDispatcher.cpp
@@ -2185,8 +2185,9 @@
     const bool isFromMouse = isFromSource(entry.source, AINPUT_SOURCE_MOUSE);
 
     if (newGesture) {
-        bool down = maskedAction == AMOTION_EVENT_ACTION_DOWN;
-        if (switchedDevice && tempTouchState.isDown() && !down && !isHoverAction) {
+        // If pointers are already down, let's finish the current gesture and ignore the new events
+        // from another device.
+        if (switchedDevice && wasDown) {
             ALOGI("Dropping event because a pointer for a different device is already down "
                   "in display %" PRId32,
                   displayId);
@@ -3761,9 +3762,9 @@
     }
     if (DEBUG_OUTBOUND_EVENT_DETAILS) {
         ALOGD("channel '%s' ~ Synthesized %zu cancelation events to bring channel back in sync "
-              "with reality: %s, mode=%d.",
+              "with reality: %s, mode=%s.",
               connection->getInputChannelName().c_str(), cancelationEvents.size(), options.reason,
-              options.mode);
+              ftl::enum_string(options.mode).c_str());
     }
 
     std::string reason = std::string("reason=").append(options.reason);
@@ -4185,6 +4186,17 @@
     bool needWake = false;
     { // acquire lock
         mLock.lock();
+        if (!(policyFlags & POLICY_FLAG_PASS_TO_USER)) {
+            // Set the flag anyway if we already have an ongoing gesture. That would allow us to
+            // complete the processing of the current stroke.
+            const auto touchStateIt = mTouchStatesByDisplay.find(args->displayId);
+            if (touchStateIt != mTouchStatesByDisplay.end()) {
+                const TouchState& touchState = touchStateIt->second;
+                if (touchState.deviceId == args->deviceId && touchState.isDown()) {
+                    policyFlags |= POLICY_FLAG_PASS_TO_USER;
+                }
+            }
+        }
 
         if (shouldSendMotionToInputFilterLocked(args)) {
             ui::Transform displayTransform;
diff --git a/services/inputflinger/dispatcher/TouchState.cpp b/services/inputflinger/dispatcher/TouchState.cpp
index acfd0a2..4258471 100644
--- a/services/inputflinger/dispatcher/TouchState.cpp
+++ b/services/inputflinger/dispatcher/TouchState.cpp
@@ -33,8 +33,7 @@
 
 void TouchState::removeTouchedPointer(int32_t pointerId) {
     for (TouchedWindow& touchedWindow : windows) {
-        touchedWindow.pointerIds.reset(pointerId);
-        touchedWindow.pilferedPointerIds.reset(pointerId);
+        touchedWindow.removeTouchingPointer(pointerId);
     }
 }
 
@@ -42,8 +41,7 @@
         int32_t pointerId, const sp<android::gui::WindowInfoHandle>& windowHandle) {
     for (TouchedWindow& touchedWindow : windows) {
         if (touchedWindow.windowHandle == windowHandle) {
-            touchedWindow.pointerIds.reset(pointerId);
-            touchedWindow.pilferedPointerIds.reset(pointerId);
+            touchedWindow.removeTouchingPointer(pointerId);
             return;
         }
     }
@@ -164,17 +162,7 @@
     std::for_each(windows.begin(), windows.end(), [&allPilferedPointerIds](TouchedWindow& w) {
         std::bitset<MAX_POINTER_ID + 1> pilferedByOtherWindows =
                 w.pilferedPointerIds ^ allPilferedPointerIds;
-        // TODO(b/211379801) : convert pointerIds to use std::bitset, which would allow us to
-        // replace the loop below with a bitwise operation. Currently, the XOR operation above is
-        // redundant, but is done to make the code more explicit / easier to convert later.
-        for (std::size_t i = 0; i < pilferedByOtherWindows.size(); i++) {
-            if (pilferedByOtherWindows.test(i) && !w.pilferedPointerIds.test(i)) {
-                // Pointer is pilfered by other windows, but not by this one! Remove it from here.
-                // We could call 'removeTouchedPointerFromWindow' here, but it's faster to directly
-                // manipulate it.
-                w.pointerIds.reset(i);
-            }
-        }
+        w.pointerIds &= ~pilferedByOtherWindows;
     });
     std::erase_if(windows, [](const TouchedWindow& w) { return w.pointerIds.none(); });
 }
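The loop removed above collapses into two bitwise operations because each window's pilferedPointerIds is a subset of allPilferedPointerIds, so the XOR yields exactly the pointers pilfered by other windows. A standalone worked example (the names and 4-bit width are illustrative, not from the patch):

#include <bitset>
#include <cassert>

int main() {
    std::bitset<4> pointerIds("1111");            // pointers touching this window
    std::bitset<4> pilferedByThisWindow("0001");  // subset of allPilfered
    std::bitset<4> allPilfered("0111");

    // Pointers pilfered by *other* windows: 0001 ^ 0111 == 0110.
    std::bitset<4> pilferedByOtherWindows = pilferedByThisWindow ^ allPilfered;

    // Drop them from this window: 1111 & ~0110 == 1001.
    pointerIds &= ~pilferedByOtherWindows;
    assert(pointerIds == std::bitset<4>("1001"));
    return 0;
}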
diff --git a/services/inputflinger/dispatcher/TouchedWindow.cpp b/services/inputflinger/dispatcher/TouchedWindow.cpp
index 92f62b5..99c4769 100644
--- a/services/inputflinger/dispatcher/TouchedWindow.cpp
+++ b/services/inputflinger/dispatcher/TouchedWindow.cpp
@@ -50,6 +50,14 @@
     it->second.set(pointerId);
 }
 
+void TouchedWindow::removeTouchingPointer(int32_t pointerId) {
+    pointerIds.reset(pointerId);
+    pilferedPointerIds.reset(pointerId);
+    if (pointerIds.none()) {
+        firstDownTimeInTarget.reset();
+    }
+}
+
 void TouchedWindow::removeHoveringPointer(int32_t deviceId, int32_t pointerId) {
     const auto it = mHoveringPointerIdsByDevice.find(deviceId);
     if (it == mHoveringPointerIdsByDevice.end()) {
diff --git a/services/inputflinger/dispatcher/TouchedWindow.h b/services/inputflinger/dispatcher/TouchedWindow.h
index e59e781..aa2e9dd 100644
--- a/services/inputflinger/dispatcher/TouchedWindow.h
+++ b/services/inputflinger/dispatcher/TouchedWindow.h
@@ -43,6 +43,7 @@
     bool hasHoveringPointer(int32_t deviceId, int32_t pointerId) const;
     void addHoveringPointer(int32_t deviceId, int32_t pointerId);
     void removeHoveringPointer(int32_t deviceId, int32_t pointerId);
+    void removeTouchingPointer(int32_t pointerId);
     void clearHoveringPointers();
     std::string dump() const;
 
diff --git a/services/inputflinger/reader/mapper/TouchInputMapper.cpp b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
index b789156..31fdac9 100644
--- a/services/inputflinger/reader/mapper/TouchInputMapper.cpp
+++ b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
@@ -2156,6 +2156,53 @@
     return out;
 }
 
+std::list<NotifyArgs> TouchInputMapper::dispatchGestureButtonRelease(nsecs_t when,
+                                                                     uint32_t policyFlags,
+                                                                     BitSet32 idBits,
+                                                                     nsecs_t readTime) {
+    std::list<NotifyArgs> out;
+    BitSet32 releasedButtons(mLastCookedState.buttonState & ~mCurrentCookedState.buttonState);
+    const int32_t metaState = getContext()->getGlobalMetaState();
+    int32_t buttonState = mLastCookedState.buttonState;
+
+    while (!releasedButtons.isEmpty()) {
+        int32_t actionButton = BitSet32::valueForBit(releasedButtons.clearFirstMarkedBit());
+        buttonState &= ~actionButton;
+        out.push_back(dispatchMotion(when, readTime, policyFlags, mSource,
+                                     AMOTION_EVENT_ACTION_BUTTON_RELEASE, actionButton, 0,
+                                     metaState, buttonState, 0,
+                                     mPointerGesture.lastGestureProperties,
+                                     mPointerGesture.lastGestureCoords,
+                                     mPointerGesture.lastGestureIdToIndex, idBits, -1,
+                                     mOrientedXPrecision, mOrientedYPrecision,
+                                     mPointerGesture.downTime, MotionClassification::NONE));
+    }
+    return out;
+}
+
+std::list<NotifyArgs> TouchInputMapper::dispatchGestureButtonPress(nsecs_t when,
+                                                                   uint32_t policyFlags,
+                                                                   BitSet32 idBits,
+                                                                   nsecs_t readTime) {
+    std::list<NotifyArgs> out;
+    BitSet32 pressedButtons(mCurrentCookedState.buttonState & ~mLastCookedState.buttonState);
+    const int32_t metaState = getContext()->getGlobalMetaState();
+    int32_t buttonState = mLastCookedState.buttonState;
+
+    while (!pressedButtons.isEmpty()) {
+        int32_t actionButton = BitSet32::valueForBit(pressedButtons.clearFirstMarkedBit());
+        buttonState |= actionButton;
+        out.push_back(dispatchMotion(when, readTime, policyFlags, mSource,
+                                     AMOTION_EVENT_ACTION_BUTTON_PRESS, actionButton, 0, metaState,
+                                     buttonState, 0, mPointerGesture.currentGestureProperties,
+                                     mPointerGesture.currentGestureCoords,
+                                     mPointerGesture.currentGestureIdToIndex, idBits, -1,
+                                     mOrientedXPrecision, mOrientedYPrecision,
+                                     mPointerGesture.downTime, MotionClassification::NONE));
+    }
+    return out;
+}
+
 const BitSet32& TouchInputMapper::findActiveIdBits(const CookedPointerData& cookedPointerData) {
     if (!cookedPointerData.touchingIdBits.isEmpty()) {
         return cookedPointerData.touchingIdBits;
@@ -2540,8 +2587,13 @@
                         dispatchedGestureIdBits.value & ~mPointerGesture.currentGestureIdBits.value;
             }
             while (!upGestureIdBits.isEmpty()) {
-                uint32_t id = upGestureIdBits.clearFirstMarkedBit();
-
+                if (((mLastCookedState.buttonState & AMOTION_EVENT_BUTTON_PRIMARY) != 0 ||
+                     (mLastCookedState.buttonState & AMOTION_EVENT_BUTTON_SECONDARY) != 0) &&
+                    mPointerGesture.lastGestureMode == PointerGesture::Mode::BUTTON_CLICK_OR_DRAG) {
+                    out += dispatchGestureButtonRelease(when, policyFlags, dispatchedGestureIdBits,
+                                                        readTime);
+                }
+                const uint32_t id = upGestureIdBits.clearFirstMarkedBit();
                 out.push_back(dispatchMotion(when, readTime, policyFlags, mSource,
                                              AMOTION_EVENT_ACTION_POINTER_UP, 0, flags, metaState,
                                              buttonState, AMOTION_EVENT_EDGE_FLAG_NONE,
@@ -2586,6 +2638,12 @@
                                          mPointerGesture.currentGestureIdToIndex,
                                          dispatchedGestureIdBits, id, 0, 0,
                                          mPointerGesture.downTime, classification));
+            if (((buttonState & AMOTION_EVENT_BUTTON_PRIMARY) != 0 ||
+                 (buttonState & AMOTION_EVENT_BUTTON_SECONDARY) != 0) &&
+                mPointerGesture.currentGestureMode == PointerGesture::Mode::BUTTON_CLICK_OR_DRAG) {
+                out += dispatchGestureButtonPress(when, policyFlags, dispatchedGestureIdBits,
+                                                  readTime);
+            }
         }
     }
 
diff --git a/services/inputflinger/reader/mapper/TouchInputMapper.h b/services/inputflinger/reader/mapper/TouchInputMapper.h
index 87deb39..7b464ef 100644
--- a/services/inputflinger/reader/mapper/TouchInputMapper.h
+++ b/services/inputflinger/reader/mapper/TouchInputMapper.h
@@ -735,6 +735,14 @@
                                                               uint32_t policyFlags);
     [[nodiscard]] std::list<NotifyArgs> dispatchButtonPress(nsecs_t when, nsecs_t readTime,
                                                             uint32_t policyFlags);
+    [[nodiscard]] std::list<NotifyArgs> dispatchGestureButtonPress(nsecs_t when,
+                                                                   uint32_t policyFlags,
+                                                                   BitSet32 idBits,
+                                                                   nsecs_t readTime);
+    [[nodiscard]] std::list<NotifyArgs> dispatchGestureButtonRelease(nsecs_t when,
+                                                                     uint32_t policyFlags,
+                                                                     BitSet32 idBits,
+                                                                     nsecs_t readTime);
     const BitSet32& findActiveIdBits(const CookedPointerData& cookedPointerData);
     void cookPointerData();
     [[nodiscard]] std::list<NotifyArgs> abortTouches(nsecs_t when, nsecs_t readTime,
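Both helpers declared above walk a BitSet32 of changed buttons derived from the last and current cooked button state. A small standalone sketch of that walk (the function and its printf output are illustrative; BitSet32 is the libutils type the mapper already uses):

#include <utils/BitSet.h>
#include <cstdint>
#include <cstdio>

void walkButtonChanges(uint32_t lastButtonState, uint32_t currentButtonState) {
    android::BitSet32 pressed(currentButtonState & ~lastButtonState);   // newly set bits
    android::BitSet32 released(lastButtonState & ~currentButtonState);  // newly cleared bits

    while (!pressed.isEmpty()) {
        uint32_t actionButton = android::BitSet32::valueForBit(pressed.clearFirstMarkedBit());
        printf("dispatch BUTTON_PRESS   0x%08x\n", actionButton);
    }
    while (!released.isEmpty()) {
        uint32_t actionButton = android::BitSet32::valueForBit(released.clearFirstMarkedBit());
        printf("dispatch BUTTON_RELEASE 0x%08x\n", actionButton);
    }
}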
diff --git a/services/inputflinger/tests/InputDispatcher_test.cpp b/services/inputflinger/tests/InputDispatcher_test.cpp
index b1b6e05..e71cdce 100644
--- a/services/inputflinger/tests/InputDispatcher_test.cpp
+++ b/services/inputflinger/tests/InputDispatcher_test.cpp
@@ -1567,6 +1567,113 @@
     std::vector<PointerBuilder> mPointers;
 };
 
+class MotionArgsBuilder {
+public:
+    MotionArgsBuilder(int32_t action, int32_t source) {
+        mAction = action;
+        mSource = source;
+        mEventTime = systemTime(SYSTEM_TIME_MONOTONIC);
+        mDownTime = mEventTime;
+    }
+
+    MotionArgsBuilder& deviceId(int32_t deviceId) {
+        mDeviceId = deviceId;
+        return *this;
+    }
+
+    MotionArgsBuilder& downTime(nsecs_t downTime) {
+        mDownTime = downTime;
+        return *this;
+    }
+
+    MotionArgsBuilder& eventTime(nsecs_t eventTime) {
+        mEventTime = eventTime;
+        return *this;
+    }
+
+    MotionArgsBuilder& displayId(int32_t displayId) {
+        mDisplayId = displayId;
+        return *this;
+    }
+
+    MotionArgsBuilder& policyFlags(int32_t policyFlags) {
+        mPolicyFlags = policyFlags;
+        return *this;
+    }
+
+    MotionArgsBuilder& actionButton(int32_t actionButton) {
+        mActionButton = actionButton;
+        return *this;
+    }
+
+    MotionArgsBuilder& buttonState(int32_t buttonState) {
+        mButtonState = buttonState;
+        return *this;
+    }
+
+    MotionArgsBuilder& rawXCursorPosition(float rawXCursorPosition) {
+        mRawXCursorPosition = rawXCursorPosition;
+        return *this;
+    }
+
+    MotionArgsBuilder& rawYCursorPosition(float rawYCursorPosition) {
+        mRawYCursorPosition = rawYCursorPosition;
+        return *this;
+    }
+
+    MotionArgsBuilder& pointer(PointerBuilder pointer) {
+        mPointers.push_back(pointer);
+        return *this;
+    }
+
+    MotionArgsBuilder& addFlag(uint32_t flags) {
+        mFlags |= flags;
+        return *this;
+    }
+
+    NotifyMotionArgs build() {
+        std::vector<PointerProperties> pointerProperties;
+        std::vector<PointerCoords> pointerCoords;
+        for (const PointerBuilder& pointer : mPointers) {
+            pointerProperties.push_back(pointer.buildProperties());
+            pointerCoords.push_back(pointer.buildCoords());
+        }
+
+        // Set mouse cursor position for the most common cases to avoid boilerplate.
+        if (mSource == AINPUT_SOURCE_MOUSE &&
+            !MotionEvent::isValidCursorPosition(mRawXCursorPosition, mRawYCursorPosition) &&
+            mPointers.size() == 1) {
+            mRawXCursorPosition = pointerCoords[0].getX();
+            mRawYCursorPosition = pointerCoords[0].getY();
+        }
+
+        NotifyMotionArgs args(InputEvent::nextId(), mEventTime, /*readTime=*/mEventTime, mDeviceId,
+                              mSource, mDisplayId, mPolicyFlags, mAction, mActionButton, mFlags,
+                              AMETA_NONE, mButtonState, MotionClassification::NONE, /*edgeFlags=*/0,
+                              mPointers.size(), pointerProperties.data(), pointerCoords.data(),
+                              /*xPrecision=*/0, /*yPrecision=*/0, mRawXCursorPosition,
+                              mRawYCursorPosition, mDownTime, /*videoFrames=*/{});
+
+        return args;
+    }
+
+private:
+    int32_t mAction;
+    int32_t mDeviceId = DEVICE_ID;
+    int32_t mSource;
+    nsecs_t mDownTime;
+    nsecs_t mEventTime;
+    int32_t mDisplayId{ADISPLAY_ID_DEFAULT};
+    int32_t mPolicyFlags = DEFAULT_POLICY_FLAGS;
+    int32_t mActionButton{0};
+    int32_t mButtonState{0};
+    int32_t mFlags{0};
+    float mRawXCursorPosition{AMOTION_EVENT_INVALID_CURSOR_POSITION};
+    float mRawYCursorPosition{AMOTION_EVENT_INVALID_CURSOR_POSITION};
+
+    std::vector<PointerBuilder> mPointers;
+};
+
 static InputEventInjectionResult injectMotionEvent(
         const std::unique_ptr<InputDispatcher>& dispatcher, const MotionEvent& event,
         std::chrono::milliseconds injectionTimeout = INJECT_EVENT_TIMEOUT,
@@ -2055,6 +2162,92 @@
 }
 
 /**
+ * The policy typically sets POLICY_FLAG_PASS_TO_USER on events. But when the display is not
+ * interactive, it might stop sending this flag.
+ * In this test, we check that if the policy stops sending this flag mid-gesture, the dispatcher
+ * still produces a consistent input stream.
+ *
+ * Test procedure:
+ * DOWN -> POINTER_DOWN -> (stop sending POLICY_FLAG_PASS_TO_USER) -> CANCEL.
+ * DOWN (new gesture).
+ *
+ * In a buggy implementation, we could potentially drop the CANCEL event and get an inconsistent
+ * state in the dispatcher. This would cause the final DOWN event to not be delivered to the app.
+ *
+ * We technically just need a single window here, but we are using two windows (spy on top and a
+ * regular window below) to emulate the actual situation where it happens on the device.
+ */
+TEST_F(InputDispatcherTest, TwoPointerCancelInconsistentPolicy) {
+    std::shared_ptr<FakeApplicationHandle> application = std::make_shared<FakeApplicationHandle>();
+    sp<FakeWindowHandle> spyWindow =
+            sp<FakeWindowHandle>::make(application, mDispatcher, "Spy", ADISPLAY_ID_DEFAULT);
+    spyWindow->setFrame(Rect(0, 0, 200, 200));
+    spyWindow->setTrustedOverlay(true);
+    spyWindow->setSpy(true);
+
+    sp<FakeWindowHandle> window =
+            sp<FakeWindowHandle>::make(application, mDispatcher, "Window", ADISPLAY_ID_DEFAULT);
+    window->setFrame(Rect(0, 0, 200, 200));
+
+    mDispatcher->setInputWindows({{ADISPLAY_ID_DEFAULT, {spyWindow, window}}});
+    const int32_t touchDeviceId = 4;
+    NotifyMotionArgs args;
+
+    // Two pointers down
+    mDispatcher->notifyMotion(&(
+            args = MotionArgsBuilder(AMOTION_EVENT_ACTION_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+                           .deviceId(touchDeviceId)
+                           .policyFlags(DEFAULT_POLICY_FLAGS)
+                           .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER).x(100).y(100))
+                           .build()));
+
+    mDispatcher->notifyMotion(&(
+            args = MotionArgsBuilder(POINTER_1_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+                           .deviceId(touchDeviceId)
+                           .policyFlags(DEFAULT_POLICY_FLAGS)
+                           .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER).x(100).y(100))
+                           .pointer(PointerBuilder(1, AMOTION_EVENT_TOOL_TYPE_FINGER).x(120).y(120))
+                           .build()));
+    spyWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+    spyWindow->consumeMotionEvent(WithMotionAction(POINTER_1_DOWN));
+    window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+    window->consumeMotionEvent(WithMotionAction(POINTER_1_DOWN));
+
+    // Cancel the current gesture. Send the cancel without the default policy flags.
+    mDispatcher->notifyMotion(&(
+            args = MotionArgsBuilder(AMOTION_EVENT_ACTION_CANCEL, AINPUT_SOURCE_TOUCHSCREEN)
+                           .deviceId(touchDeviceId)
+                           .policyFlags(0)
+                           .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER).x(100).y(100))
+                           .pointer(PointerBuilder(1, AMOTION_EVENT_TOOL_TYPE_FINGER).x(120).y(120))
+                           .build()));
+    spyWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_CANCEL));
+    window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_CANCEL));
+
+    // We don't need to reset the device to reproduce the issue, but the reset event typically
+    // follows, so we keep it here to model the actual listener behaviour more closely.
+    NotifyDeviceResetArgs resetArgs;
+    resetArgs.id = 1; // arbitrary id
+    resetArgs.eventTime = systemTime(SYSTEM_TIME_MONOTONIC);
+    resetArgs.deviceId = touchDeviceId;
+    mDispatcher->notifyDeviceReset(&resetArgs);
+
+    // Start new gesture
+    mDispatcher->notifyMotion(&(
+            args = MotionArgsBuilder(AMOTION_EVENT_ACTION_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+                           .deviceId(touchDeviceId)
+                           .policyFlags(DEFAULT_POLICY_FLAGS)
+                           .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER).x(100).y(100))
+                           .build()));
+    spyWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+    window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+
+    // No more events
+    spyWindow->assertNoEvents();
+    window->assertNoEvents();
+}
+
+/**
  * Two windows: a window on the left and a window on the right.
  * Mouse is hovered from the right window into the left window.
  * Next, we tap on the left window, where the cursor was last seen.
@@ -2172,6 +2365,182 @@
 }
 
 /**
+ * This test is similar to the test above, but the sequence of injected events is different.
+ *
+ * Two windows: a window on the left and a window on the right.
+ * Mouse is hovered over the left window.
+ * Next, we tap on the left window, where the cursor was last seen.
+ *
+ * After that, we inject one finger down onto the right window, and then a second finger down onto
+ * the left window.
+ * The touch is split, so this last gesture should cause 2 ACTION_DOWN events, one in the right
+ * window (first), and then another on the left window (second).
+ * This test reproduces a crash where there is a mismatch between the downTime and eventTime.
+ * In the buggy implementation, the second finger down on the left window would cause a crash.
+ */
+TEST_F(InputDispatcherTest, HoverTapAndSplitTouch) {
+    std::shared_ptr<FakeApplicationHandle> application = std::make_shared<FakeApplicationHandle>();
+    sp<FakeWindowHandle> leftWindow =
+            sp<FakeWindowHandle>::make(application, mDispatcher, "Left", ADISPLAY_ID_DEFAULT);
+    leftWindow->setFrame(Rect(0, 0, 200, 200));
+
+    sp<FakeWindowHandle> rightWindow =
+            sp<FakeWindowHandle>::make(application, mDispatcher, "Right", ADISPLAY_ID_DEFAULT);
+    rightWindow->setFrame(Rect(200, 0, 400, 200));
+
+    mDispatcher->setInputWindows({{ADISPLAY_ID_DEFAULT, {leftWindow, rightWindow}}});
+
+    const int32_t mouseDeviceId = 6;
+    const int32_t touchDeviceId = 4;
+    // Hover over the left window. Keep the cursor there.
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(AMOTION_EVENT_ACTION_HOVER_ENTER,
+                                                   AINPUT_SOURCE_MOUSE)
+                                        .deviceId(mouseDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_MOUSE)
+                                                         .x(50)
+                                                         .y(50))
+                                        .build()));
+    leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_ENTER));
+
+    // Tap on left window
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(AMOTION_EVENT_ACTION_DOWN,
+                                                   AINPUT_SOURCE_TOUCHSCREEN)
+                                        .deviceId(touchDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+                                                         .x(100)
+                                                         .y(100))
+                                        .build()));
+
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(AMOTION_EVENT_ACTION_UP,
+                                                   AINPUT_SOURCE_TOUCHSCREEN)
+                                        .deviceId(touchDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+                                                         .x(100)
+                                                         .y(100))
+                                        .build()));
+    leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_EXIT));
+    leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+    leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_UP));
+
+    // First finger down on right window
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(AMOTION_EVENT_ACTION_DOWN,
+                                                   AINPUT_SOURCE_TOUCHSCREEN)
+                                        .deviceId(touchDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+                                                         .x(300)
+                                                         .y(100))
+                                        .build()));
+    rightWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+
+    // Second finger down on the left window
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(POINTER_1_DOWN, AINPUT_SOURCE_TOUCHSCREEN)
+                                        .deviceId(touchDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+                                                         .x(300)
+                                                         .y(100))
+                                        .pointer(PointerBuilder(1, AMOTION_EVENT_TOOL_TYPE_FINGER)
+                                                         .x(100)
+                                                         .y(100))
+                                        .build()));
+    leftWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+    rightWindow->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_MOVE));
+
+    // No more events
+    leftWindow->assertNoEvents();
+    rightWindow->assertNoEvents();
+}
+
+/**
+ * Start hovering with a stylus device, and then tap with a touch device. Ensure no crash occurs.
+ * While the touch is down, new hover events from the stylus device should be ignored. After the
+ * touch is gone, stylus hovering should start working again.
+ */
+TEST_F(InputDispatcherTest, StylusHoverAndTouchTap) {
+    std::shared_ptr<FakeApplicationHandle> application = std::make_shared<FakeApplicationHandle>();
+    sp<FakeWindowHandle> window =
+            sp<FakeWindowHandle>::make(application, mDispatcher, "Window", ADISPLAY_ID_DEFAULT);
+    window->setFrame(Rect(0, 0, 200, 200));
+
+    mDispatcher->setInputWindows({{ADISPLAY_ID_DEFAULT, {window}}});
+
+    const int32_t stylusDeviceId = 5;
+    const int32_t touchDeviceId = 4;
+    // Start hovering with stylus
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(AMOTION_EVENT_ACTION_HOVER_ENTER,
+                                                   AINPUT_SOURCE_STYLUS)
+                                        .deviceId(stylusDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_STYLUS)
+                                                         .x(50)
+                                                         .y(50))
+                                        .build()));
+    window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_ENTER));
+
+    // Finger down on the window
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(AMOTION_EVENT_ACTION_DOWN,
+                                                   AINPUT_SOURCE_TOUCHSCREEN)
+                                        .deviceId(touchDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+                                                         .x(100)
+                                                         .y(100))
+                                        .build()));
+    window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_EXIT));
+    window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_DOWN));
+
+    // Try to continue hovering with stylus. Since we are already down, injection should fail
+    ASSERT_EQ(InputEventInjectionResult::FAILED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(AMOTION_EVENT_ACTION_HOVER_MOVE,
+                                                   AINPUT_SOURCE_STYLUS)
+                                        .deviceId(stylusDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_STYLUS)
+                                                         .x(50)
+                                                         .y(50))
+                                        .build()));
+    // No event should be sent. This event should be ignored because a pointer from another device
+    // is already down.
+
+    // Lift up the finger
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(AMOTION_EVENT_ACTION_UP,
+                                                   AINPUT_SOURCE_TOUCHSCREEN)
+                                        .deviceId(touchDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_FINGER)
+                                                         .x(100)
+                                                         .y(100))
+                                        .build()));
+    window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_UP));
+
+    // Now that the touch is gone, stylus hovering should start working again
+    ASSERT_EQ(InputEventInjectionResult::SUCCEEDED,
+              injectMotionEvent(mDispatcher,
+                                MotionEventBuilder(AMOTION_EVENT_ACTION_HOVER_MOVE,
+                                                   AINPUT_SOURCE_STYLUS)
+                                        .deviceId(stylusDeviceId)
+                                        .pointer(PointerBuilder(0, AMOTION_EVENT_TOOL_TYPE_STYLUS)
+                                                         .x(50)
+                                                         .y(50))
+                                        .build()));
+    window->consumeMotionEvent(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_ENTER));
+    // No more events
+    window->assertNoEvents();
+}
+
+/**
  * On the display, have a single window, and also an area where there's no window.
  * First pointer touches the "no window" area of the screen. Second pointer touches the window.
  * Make sure that the window receives the second pointer, and first pointer is simply ignored.
diff --git a/services/inputflinger/tests/InputReader_test.cpp b/services/inputflinger/tests/InputReader_test.cpp
index 1b04375..fe7af80 100644
--- a/services/inputflinger/tests/InputReader_test.cpp
+++ b/services/inputflinger/tests/InputReader_test.cpp
@@ -10084,6 +10084,26 @@
     ASSERT_EQ(AMOTION_EVENT_ACTION_HOVER_MOVE, args.action);
     ASSERT_NO_FATAL_FAILURE(assertPointerCoords(args.pointerCoords[0], 100 * scale, 100 * scale, 0,
                                                 0, 0, 0, 0, 0, 0, 0));
+
+    // BUTTON DOWN
+    processKey(mapper, BTN_LEFT, 1);
+    processSync(mapper);
+
+    // touchinputmapper design sends a move before button press
+    ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(&args));
+    ASSERT_EQ(AMOTION_EVENT_ACTION_DOWN, args.action);
+    ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(&args));
+    ASSERT_EQ(AMOTION_EVENT_ACTION_BUTTON_PRESS, args.action);
+
+    // BUTTON UP
+    processKey(mapper, BTN_LEFT, 0);
+    processSync(mapper);
+
+    // By design, TouchInputMapper sends a pointer UP after the BUTTON_RELEASE
+    ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(&args));
+    ASSERT_EQ(AMOTION_EVENT_ACTION_BUTTON_RELEASE, args.action);
+    ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(&args));
+    ASSERT_EQ(AMOTION_EVENT_ACTION_UP, args.action);
 }
 
 TEST_F(MultiTouchInputMapperTest, WhenCapturedAndNotCaptured_GetSources) {
diff --git a/services/sensorservice/SensorInterface.cpp b/services/sensorservice/SensorInterface.cpp
index 398cdf9..e9c8335 100644
--- a/services/sensorservice/SensorInterface.cpp
+++ b/services/sensorservice/SensorInterface.cpp
@@ -87,14 +87,15 @@
 
 // ---------------------------------------------------------------------------
 
-RuntimeSensor::RuntimeSensor(const sensor_t& sensor, sp<StateChangeCallback> callback)
+RuntimeSensor::RuntimeSensor(const sensor_t& sensor, sp<SensorCallback> callback)
   : BaseSensor(sensor), mCallback(std::move(callback)) {
 }
 
 status_t RuntimeSensor::activate(void*, bool enabled) {
     if (enabled != mEnabled) {
         mEnabled = enabled;
-        mCallback->onStateChanged(mEnabled, mSamplingPeriodNs, mBatchReportLatencyNs);
+        return mCallback->onConfigurationChanged(mSensor.getHandle(), mEnabled, mSamplingPeriodNs,
+                mBatchReportLatencyNs);
     }
     return OK;
 }
@@ -105,7 +106,8 @@
         mSamplingPeriodNs = samplingPeriodNs;
         mBatchReportLatencyNs = maxBatchReportLatencyNs;
         if (mEnabled) {
-            mCallback->onStateChanged(mEnabled, mSamplingPeriodNs, mBatchReportLatencyNs);
+            return mCallback->onConfigurationChanged(mSensor.getHandle(), mEnabled,
+                    mSamplingPeriodNs, mBatchReportLatencyNs);
         }
     }
     return OK;
@@ -115,7 +117,8 @@
     if (mSamplingPeriodNs != ns) {
         mSamplingPeriodNs = ns;
         if (mEnabled) {
-            mCallback->onStateChanged(mEnabled, mSamplingPeriodNs, mBatchReportLatencyNs);
+            return mCallback->onConfigurationChanged(mSensor.getHandle(), mEnabled,
+                    mSamplingPeriodNs, mBatchReportLatencyNs);
         }
     }
     return OK;
diff --git a/services/sensorservice/SensorInterface.h b/services/sensorservice/SensorInterface.h
index 5ee5e12..c446d61 100644
--- a/services/sensorservice/SensorInterface.h
+++ b/services/sensorservice/SensorInterface.h
@@ -108,12 +108,12 @@
 public:
     static constexpr int DEFAULT_DEVICE_ID = 0;
 
-    class StateChangeCallback : public virtual RefBase {
+    class SensorCallback : public virtual RefBase {
       public:
-        virtual void onStateChanged(bool enabled, int64_t samplingPeriodNs,
-                                    int64_t batchReportLatencyNs) = 0;
+        virtual status_t onConfigurationChanged(int handle, bool enabled, int64_t samplingPeriodNs,
+                                                int64_t batchReportLatencyNs) = 0;
     };
-    RuntimeSensor(const sensor_t& sensor, sp<StateChangeCallback> callback);
+    RuntimeSensor(const sensor_t& sensor, sp<SensorCallback> callback);
     virtual status_t activate(void* ident, bool enabled) override;
     virtual status_t batch(void* ident, int handle, int flags, int64_t samplingPeriodNs,
                            int64_t maxBatchReportLatencyNs) override;
@@ -125,7 +125,7 @@
     bool mEnabled = false;
     int64_t mSamplingPeriodNs = 0;
     int64_t mBatchReportLatencyNs = 0;
-    sp<StateChangeCallback> mCallback;
+    sp<SensorCallback> mCallback;
 };
 
 // ---------------------------------------------------------------------------
diff --git a/services/sensorservice/SensorService.cpp b/services/sensorservice/SensorService.cpp
index 5c98614..3a0329c 100644
--- a/services/sensorservice/SensorService.cpp
+++ b/services/sensorservice/SensorService.cpp
@@ -116,16 +116,17 @@
     return nextHandle++;
 }
 
-class RuntimeSensorCallbackProxy : public RuntimeSensor::StateChangeCallback {
+class RuntimeSensorCallbackProxy : public RuntimeSensor::SensorCallback {
  public:
-    RuntimeSensorCallbackProxy(sp<SensorService::RuntimeSensorStateChangeCallback> callback)
+    RuntimeSensorCallbackProxy(sp<SensorService::RuntimeSensorCallback> callback)
         : mCallback(std::move(callback)) {}
-    void onStateChanged(bool enabled, int64_t samplingPeriodNs,
-                        int64_t batchReportLatencyNs) override {
-        mCallback->onStateChanged(enabled, samplingPeriodNs, batchReportLatencyNs);
+    status_t onConfigurationChanged(int handle, bool enabled, int64_t samplingPeriodNs,
+                                    int64_t batchReportLatencyNs) override {
+        return mCallback->onConfigurationChanged(handle, enabled, samplingPeriodNs,
+                batchReportLatencyNs);
     }
  private:
-    sp<SensorService::RuntimeSensorStateChangeCallback> mCallback;
+    sp<SensorService::RuntimeSensorCallback> mCallback;
 };
 
 } // namespace
@@ -166,7 +167,7 @@
 }
 
 int SensorService::registerRuntimeSensor(
-    const sensor_t& sensor, int deviceId, sp<RuntimeSensorStateChangeCallback> callback) {
+        const sensor_t& sensor, int deviceId, sp<RuntimeSensorCallback> callback) {
     int handle = 0;
     while (handle == 0 || !mSensors.isNewHandle(handle)) {
         handle = nextRuntimeSensorHandle();
@@ -179,7 +180,7 @@
     ALOGI("Registering runtime sensor handle 0x%x, type %d, name %s",
             handle, sensor.type, sensor.name);
 
-    sp<RuntimeSensor::StateChangeCallback> runtimeSensorCallback(
+    sp<RuntimeSensor::SensorCallback> runtimeSensorCallback(
         new RuntimeSensorCallbackProxy(std::move(callback)));
     sensor_t runtimeSensor = sensor;
     // force the handle to be consistent
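
For illustration only, a minimal sketch of how a client might use the renamed callback: only SensorService::RuntimeSensorCallback, registerRuntimeSensor, and the onConfigurationChanged signature come from this change; the LoggingRuntimeSensorCallback class, the sensorDesc descriptor, and the log format are hypothetical, and <inttypes.h> is assumed for PRId64.

// Hypothetical example (not part of this change): a callback that receives the
// per-handle configuration updates introduced above.
class LoggingRuntimeSensorCallback : public SensorService::RuntimeSensorCallback {
  public:
    status_t onConfigurationChanged(int handle, bool enabled, int64_t samplingPeriodNs,
                                    int64_t batchReportLatencyNs) override {
        ALOGD("runtime sensor 0x%x enabled=%d period=%" PRId64 "ns latency=%" PRId64 "ns",
              handle, enabled, samplingPeriodNs, batchReportLatencyNs);
        return OK;  // a non-OK status now propagates back through activate()/batch()
    }
};

// Registration sketch: 'sensorService' and 'sensorDesc' are assumed to exist in the caller.
sp<SensorService::RuntimeSensorCallback> cb = new LoggingRuntimeSensorCallback();
int handle = sensorService->registerRuntimeSensor(sensorDesc, /*deviceId=*/1, cb);
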
diff --git a/services/sensorservice/SensorService.h b/services/sensorservice/SensorService.h
index 0798279..3f6a895 100644
--- a/services/sensorservice/SensorService.h
+++ b/services/sensorservice/SensorService.h
@@ -147,12 +147,13 @@
         virtual void onProximityActive(bool isActive) = 0;
     };
 
-    class RuntimeSensorStateChangeCallback : public virtual RefBase {
+    class RuntimeSensorCallback : public virtual RefBase {
     public:
         // Note that the callback is invoked from an async thread and can interact with the
         // SensorService directly.
-        virtual void onStateChanged(bool enabled, int64_t samplingPeriodNanos,
-                                    int64_t batchReportLatencyNanos) = 0;
+        virtual status_t onConfigurationChanged(int handle, bool enabled,
+                                                int64_t samplingPeriodNanos,
+                                                int64_t batchReportLatencyNanos) = 0;
     };
 
     static char const* getServiceName() ANDROID_API { return "sensorservice"; }
@@ -182,7 +183,7 @@
     status_t removeProximityActiveListener(const sp<ProximityActiveListener>& callback) ANDROID_API;
 
     int registerRuntimeSensor(const sensor_t& sensor, int deviceId,
-                              sp<RuntimeSensorStateChangeCallback> callback) ANDROID_API;
+                              sp<RuntimeSensorCallback> callback) ANDROID_API;
     status_t unregisterRuntimeSensor(int handle) ANDROID_API;
     status_t sendRuntimeSensorEvent(const sensors_event_t& event) ANDROID_API;
 
diff --git a/services/stats/StatsAidl.cpp b/services/stats/StatsAidl.cpp
index 1348548..0f01507 100644
--- a/services/stats/StatsAidl.cpp
+++ b/services/stats/StatsAidl.cpp
@@ -17,19 +17,72 @@
 #define DEBUG false  // STOPSHIP if true
 #define LOG_TAG "StatsAidl"
 
+#define VLOG(...) \
+    if (DEBUG) ALOGD(__VA_ARGS__);
+
 #include "StatsAidl.h"
 
 #include <log/log.h>
+#include <stats_annotations.h>
+#include <stats_event.h>
 #include <statslog.h>
 
+#include <unordered_map>
+
 namespace aidl {
 namespace android {
 namespace frameworks {
 namespace stats {
 
+template <typename E>
+constexpr typename std::underlying_type<E>::type to_underlying(E e) noexcept {
+    return static_cast<typename std::underlying_type<E>::type>(e);
+}
+
 StatsHal::StatsHal() {
 }
 
+bool write_annotation(AStatsEvent* event, const Annotation& annotation) {
+    switch (annotation.value.getTag()) {
+        case AnnotationValue::boolValue: {
+            AStatsEvent_addBoolAnnotation(event, to_underlying(annotation.annotationId),
+                                          annotation.value.get<AnnotationValue::boolValue>());
+            break;
+        }
+        case AnnotationValue::intValue: {
+            AStatsEvent_addInt32Annotation(event, to_underlying(annotation.annotationId),
+                                           annotation.value.get<AnnotationValue::intValue>());
+            break;
+        }
+        default: {
+            return false;
+        }
+    }
+    return true;
+}
+
+bool write_atom_annotations(AStatsEvent* event,
+                            const std::vector<std::optional<Annotation>>& annotations) {
+    for (const auto& atomAnnotation : annotations) {
+        if (!atomAnnotation) {
+            return false;
+        }
+        if (!write_annotation(event, *atomAnnotation)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+bool write_field_annotations(AStatsEvent* event, const std::vector<Annotation>& annotations) {
+    for (const auto& fieldAnnotation : annotations) {
+        if (!write_annotation(event, fieldAnnotation)) {
+            return false;
+        }
+    }
+    return true;
+}
+
 ndk::ScopedAStatus StatsHal::reportVendorAtom(const VendorAtom& vendorAtom) {
     if (vendorAtom.atomId < 100000 || vendorAtom.atomId >= 200000) {
         ALOGE("Atom ID %ld is not a valid vendor atom ID", (long)vendorAtom.atomId);
@@ -44,7 +97,30 @@
     }
     AStatsEvent* event = AStatsEvent_obtain();
     AStatsEvent_setAtomId(event, vendorAtom.atomId);
+
+    if (vendorAtom.atomAnnotations) {
+        if (!write_atom_annotations(event, *vendorAtom.atomAnnotations)) {
+            ALOGE("Atom ID %ld has incompatible atom level annotation", (long)vendorAtom.atomId);
+            AStatsEvent_release(event);
+            return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                    -1, "invalid atom annotation");
+        }
+    }
+
+    // Populate a map for quicker lookup of VendorAtomValue-associated annotations by value index
+    std::unordered_map<int, int> fieldIndexToAnnotationSetMap;
+    if (vendorAtom.valuesAnnotations) {
+        const std::vector<std::optional<AnnotationSet>>& valuesAnnotations =
+                *vendorAtom.valuesAnnotations;
+        for (int i = 0; i < valuesAnnotations.size(); i++) {
+            if (valuesAnnotations[i]) {
+                fieldIndexToAnnotationSetMap[valuesAnnotations[i]->valueIndex] = i;
+            }
+        }
+    }
+
     AStatsEvent_writeString(event, vendorAtom.reverseDomainName.c_str());
+    size_t atomValueIdx = 0;
     for (const auto& atomValue : vendorAtom.values) {
         switch (atomValue.getTag()) {
             case VendorAtomValue::intValue:
@@ -143,12 +219,37 @@
                 AStatsEvent_writeByteArray(event, byteArrayValue->data(), byteArrayValue->size());
                 break;
             }
+            default: {
+                AStatsEvent_release(event);
+                ALOGE("Atom ID %ld has invalid atomValue.getTag", (long)vendorAtom.atomId);
+                return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                        -1, "invalid atomValue.getTag");
+                break;
+            }
         }
+
+        const auto& valueAnnotationIndex = fieldIndexToAnnotationSetMap.find(atomValueIdx);
+        if (valueAnnotationIndex != fieldIndexToAnnotationSetMap.end()) {
+            const std::vector<Annotation>& fieldAnnotations =
+                    (*vendorAtom.valuesAnnotations)[valueAnnotationIndex->second]->annotations;
+            VLOG("Atom ID %ld has %ld annotations for field #%ld", (long)vendorAtom.atomId,
+                 (long)fieldAnnotations.size(), (long)atomValueIdx + 2);
+            if (!write_field_annotations(event, fieldAnnotations)) {
+                ALOGE("Atom ID %ld has incompatible field level annotation for field #%ld",
+                      (long)vendorAtom.atomId, (long)atomValueIdx + 2);
+                AStatsEvent_release(event);
+                return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+                        -1, "invalid atom field annotation");
+            }
+        }
+        atomValueIdx++;
     }
     AStatsEvent_build(event);
     const int ret = AStatsEvent_write(event);
     AStatsEvent_release(event);
-
+    if (ret <= 0) {
+        ALOGE("Error writing Atom ID %ld. Result: %d", (long)vendorAtom.atomId, ret);
+    }
     return ret <= 0 ? ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(ret,
                                                                               "report atom failed")
                     : ndk::ScopedAStatus::ok();
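
As a usage sketch only: building a VendorAtom that exercises the new annotation path. The field and type names (values, atomAnnotations, valuesAnnotations, AnnotationSet, AnnotationValue) are taken from the code above; the atom id, reverse-domain name, AnnotationId::IS_UID, and the statsHal instance are illustrative assumptions of this example.

// Hypothetical vendor atom with one int field annotated as a UID.
VendorAtom atom;
atom.atomId = 100001;                    // must be in [100000, 200000)
atom.reverseDomainName = "com.example";  // illustrative reverse-domain name
atom.values.push_back(VendorAtomValue::make<VendorAtomValue::intValue>(1234));

Annotation uidAnnotation;
uidAnnotation.annotationId = AnnotationId::IS_UID;  // assumed annotation id, for illustration
uidAnnotation.value = AnnotationValue::make<AnnotationValue::boolValue>(true);

AnnotationSet set;
set.valueIndex = 0;                      // annotates values[0]
set.annotations.push_back(uidAnnotation);

atom.valuesAnnotations = std::vector<std::optional<AnnotationSet>>{set};
// Atom-level annotations go into atom.atomAnnotations in the same way.
const ndk::ScopedAStatus status = statsHal.reportVendorAtom(atom);
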
diff --git a/services/surfaceflinger/CompositionEngine/src/OutputLayer.cpp b/services/surfaceflinger/CompositionEngine/src/OutputLayer.cpp
index 6b69ce7..1b86cd3 100644
--- a/services/surfaceflinger/CompositionEngine/src/OutputLayer.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/OutputLayer.cpp
@@ -585,6 +585,7 @@
         case Composition::CURSOR:
         case Composition::DEVICE:
         case Composition::DISPLAY_DECORATION:
+        case Composition::REFRESH_RATE_INDICATOR:
             writeBufferStateToHWC(hwcLayer, outputIndependentState, skipLayer);
             break;
         case Composition::INVALID:
@@ -780,6 +781,7 @@
         case Composition::CURSOR:
         case Composition::SIDEBAND:
         case Composition::DISPLAY_DECORATION:
+        case Composition::REFRESH_RATE_INDICATOR:
             result = (to == Composition::CLIENT || to == Composition::DEVICE);
             break;
     }
diff --git a/services/surfaceflinger/CompositionEngine/src/planner/Predictor.cpp b/services/surfaceflinger/CompositionEngine/src/planner/Predictor.cpp
index 2fc029f..6064126 100644
--- a/services/surfaceflinger/CompositionEngine/src/planner/Predictor.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/planner/Predictor.cpp
@@ -151,6 +151,10 @@
                 // A for "Alpha", since the decoration is an alpha layer.
                 result.append("A");
                 break;
+            case aidl::android::hardware::graphics::composer3::Composition::REFRESH_RATE_INDICATOR:
+                // R for "Refresh", since the layer is the refresh rate overlay.
+                result.append("R");
+                break;
         }
     }
     return result;
diff --git a/services/surfaceflinger/DisplayDevice.h b/services/surfaceflinger/DisplayDevice.h
index 6b5d1d7..b86d9be 100644
--- a/services/surfaceflinger/DisplayDevice.h
+++ b/services/surfaceflinger/DisplayDevice.h
@@ -24,6 +24,7 @@
 #include <android-base/thread_annotations.h>
 #include <android/native_window.h>
 #include <binder/IBinder.h>
+#include <ftl/concat.h>
 #include <gui/LayerState.h>
 #include <math/mat4.h>
 #include <renderengine/RenderEngine.h>
@@ -300,8 +301,8 @@
 
     mutable std::mutex mActiveModeLock;
     ActiveModeInfo mDesiredActiveMode GUARDED_BY(mActiveModeLock);
-    TracedOrdinal<bool> mDesiredActiveModeChanged
-            GUARDED_BY(mActiveModeLock) = {"DesiredActiveModeChanged", false};
+    TracedOrdinal<bool> mDesiredActiveModeChanged GUARDED_BY(mActiveModeLock) =
+            {ftl::Concat("DesiredActiveModeChanged-", getId().value).c_str(), false};
     ActiveModeInfo mUpcomingActiveMode GUARDED_BY(kMainThreadContext);
 };
 
diff --git a/services/surfaceflinger/DisplayHardware/AidlComposerHal.cpp b/services/surfaceflinger/DisplayHardware/AidlComposerHal.cpp
index 9470552..ba9aed8 100644
--- a/services/surfaceflinger/DisplayHardware/AidlComposerHal.cpp
+++ b/services/surfaceflinger/DisplayHardware/AidlComposerHal.cpp
@@ -208,6 +208,12 @@
         return ::ndk::ScopedAStatus::ok();
     }
 
+    ::ndk::ScopedAStatus onRefreshRateChangedDebug(
+            const RefreshRateChangedDebugData& refreshRateChangedDebugData) override {
+        mCallback.onRefreshRateChangedDebug(refreshRateChangedDebugData);
+        return ::ndk::ScopedAStatus::ok();
+    }
+
 private:
     HWC2::ComposerCallback& mCallback;
 };
diff --git a/services/surfaceflinger/DisplayHardware/HWC2.h b/services/surfaceflinger/DisplayHardware/HWC2.h
index c1c7070..23dd3e5 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2.h
+++ b/services/surfaceflinger/DisplayHardware/HWC2.h
@@ -44,6 +44,7 @@
 #include <aidl/android/hardware/graphics/composer3/Composition.h>
 #include <aidl/android/hardware/graphics/composer3/DisplayCapability.h>
 #include <aidl/android/hardware/graphics/composer3/OverlayProperties.h>
+#include <aidl/android/hardware/graphics/composer3/RefreshRateChangedDebugData.h>
 
 namespace android {
 
@@ -63,6 +64,8 @@
 
 namespace hal = android::hardware::graphics::composer::hal;
 
+using aidl::android::hardware::graphics::composer3::RefreshRateChangedDebugData;
+
 // Implement this interface to receive hardware composer events.
 //
 // These callback functions will generally be called on a hwbinder thread, but
@@ -77,6 +80,7 @@
                                                        const hal::VsyncPeriodChangeTimeline&) = 0;
     virtual void onComposerHalSeamlessPossible(hal::HWDisplayId) = 0;
     virtual void onComposerHalVsyncIdle(hal::HWDisplayId) = 0;
+    virtual void onRefreshRateChangedDebug(const RefreshRateChangedDebugData&) = 0;
 
 protected:
     ~ComposerCallback() = default;
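
A hedged sketch of what implementers of HWC2::ComposerCallback now have to provide. MyComposerObserver is hypothetical, and the RefreshRateChangedDebugData field names (display, vsyncPeriodNanos) are assumptions about the composer3 AIDL struct rather than something shown in this diff.

// Hypothetical ComposerCallback implementation fragment for the new hook.
void MyComposerObserver::onRefreshRateChangedDebug(const RefreshRateChangedDebugData& data) {
    // Assumed fields: data.display (int64_t) and data.vsyncPeriodNanos (int32_t).
    ALOGD("refresh rate changed on display %lld: %d ns", (long long)data.display,
          data.vsyncPeriodNanos);
}
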
diff --git a/services/surfaceflinger/DisplayHardware/HWComposer.cpp b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
index 8e74716..6d94079 100644
--- a/services/surfaceflinger/DisplayHardware/HWComposer.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
@@ -175,8 +175,8 @@
         displayData.lastPresentTimestamp = timestamp;
     }
 
-    const ftl::Concat tag("HW_VSYNC_", displayIdOpt->value);
-    ATRACE_INT(tag.c_str(), displayData.vsyncTraceToggle);
+    ATRACE_INT(ftl::Concat("HW_VSYNC_", displayIdOpt->value).c_str(),
+               displayData.vsyncTraceToggle);
     displayData.vsyncTraceToggle = !displayData.vsyncTraceToggle;
 
     return displayIdOpt;
@@ -377,8 +377,8 @@
 
     displayData.vsyncEnabled = enabled;
 
-    const auto tag = "HW_VSYNC_ON_" + to_string(displayId);
-    ATRACE_INT(tag.c_str(), enabled == hal::Vsync::ENABLE ? 1 : 0);
+    ATRACE_INT(ftl::Concat("HW_VSYNC_ON_", displayId.value).c_str(),
+               enabled == hal::Vsync::ENABLE ? 1 : 0);
 }
 
 status_t HWComposer::setClientTarget(HalDisplayId displayId, uint32_t slot,
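
The two ATRACE_INT changes above inline an ftl::Concat temporary instead of naming a local or building a std::string. A small sketch of the idiom, assuming ftl::Concat from <ftl/concat.h> as used above; the function and tag are made up for illustration.

#define ATRACE_TAG ATRACE_TAG_GRAPHICS  // as in the SurfaceFlinger sources
#include <ftl/concat.h>
#include <utils/Trace.h>

void traceVsyncCounter(uint64_t displayId, bool enabled) {
    // The ftl::Concat temporary holds the concatenation in a fixed-size stack buffer
    // and lives until the end of the full expression, so its c_str() is safe to pass
    // directly to ATRACE_INT without a heap-allocating std::string.
    ATRACE_INT(ftl::Concat("HW_VSYNC_ON_", displayId).c_str(), enabled ? 1 : 0);
}
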
diff --git a/services/surfaceflinger/DisplayHardware/Hal.h b/services/surfaceflinger/DisplayHardware/Hal.h
index 537d545..bf3089f 100644
--- a/services/surfaceflinger/DisplayHardware/Hal.h
+++ b/services/surfaceflinger/DisplayHardware/Hal.h
@@ -113,6 +113,8 @@
             return "Sideband";
         case aidl::android::hardware::graphics::composer3::Composition::DISPLAY_DECORATION:
             return "DisplayDecoration";
+        case aidl::android::hardware::graphics::composer3::Composition::REFRESH_RATE_INDICATOR:
+            return "RefreshRateIndicator";
         default:
             return "Unknown";
     }
diff --git a/services/surfaceflinger/FrontEnd/TransactionHandler.h b/services/surfaceflinger/FrontEnd/TransactionHandler.h
index a06b870..7fc825e 100644
--- a/services/surfaceflinger/FrontEnd/TransactionHandler.h
+++ b/services/surfaceflinger/FrontEnd/TransactionHandler.h
@@ -34,7 +34,7 @@
 class TransactionHandler {
 public:
     struct TransactionFlushState {
-        const TransactionState* transaction;
+        TransactionState* transaction;
         bool firstTransaction = true;
         nsecs_t queueProcessTime = 0;
         // Layer handles that have transactions with buffers that are ready to be applied.
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 31ee91e..704f336 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -228,9 +228,7 @@
     if (mBufferInfo.mBuffer != nullptr) {
         callReleaseBufferCallback(mDrawingState.releaseBufferListener,
                                   mBufferInfo.mBuffer->getBuffer(), mBufferInfo.mFrameNumber,
-                                  mBufferInfo.mFence,
-                                  mFlinger->getMaxAcquiredBufferCountForCurrentRefreshRate(
-                                          mOwnerUid));
+                                  mBufferInfo.mFence);
     }
     if (!isClone()) {
         // The original layer and the clone layer share the same texture. Therefore, only one of
@@ -734,11 +732,45 @@
     return (p != nullptr) ? p->isSecure() : false;
 }
 
+void Layer::transferAvailableJankData(const std::deque<sp<CallbackHandle>>& handles,
+                                      std::vector<JankData>& jankData) {
+    if (mPendingJankClassifications.empty() ||
+        !mPendingJankClassifications.front()->getJankType()) {
+        return;
+    }
+
+    bool includeJankData = false;
+    for (const auto& handle : handles) {
+        for (const auto& cb : handle->callbackIds) {
+            if (cb.includeJankData) {
+                includeJankData = true;
+                break;
+            }
+        }
+
+        if (includeJankData) {
+            jankData.reserve(mPendingJankClassifications.size());
+            break;
+        }
+    }
+
+    while (!mPendingJankClassifications.empty() &&
+           mPendingJankClassifications.front()->getJankType()) {
+        if (includeJankData) {
+            std::shared_ptr<frametimeline::SurfaceFrame> surfaceFrame =
+                    mPendingJankClassifications.front();
+            jankData.emplace_back(
+                    JankData(surfaceFrame->getToken(), surfaceFrame->getJankType().value()));
+        }
+        mPendingJankClassifications.pop_front();
+    }
+}
+
 // ----------------------------------------------------------------------------
 // transaction
 // ----------------------------------------------------------------------------
 
-uint32_t Layer::doTransaction(uint32_t flags) {
+uint32_t Layer::doTransaction(uint32_t flags, nsecs_t latchTime) {
     ATRACE_CALL();
 
     // TODO: This is unfortunate.
@@ -766,23 +798,24 @@
         mFlinger->mUpdateInputInfo = true;
     }
 
-    commitTransaction(mDrawingState);
+    commitTransaction(mDrawingState, latchTime);
 
     return flags;
 }
 
-void Layer::commitTransaction(State&) {
+void Layer::commitTransaction(State&, nsecs_t currentLatchTime) {
     // Set the present state for all bufferlessSurfaceFramesTX to Presented. The
     // bufferSurfaceFrameTX will be presented in latchBuffer.
     for (auto& [token, surfaceFrame] : mDrawingState.bufferlessSurfaceFramesTX) {
         if (surfaceFrame->getPresentState() != PresentState::Presented) {
             // With applyPendingStates, we could end up having presented surfaceframes from previous
             // states
-            surfaceFrame->setPresentState(PresentState::Presented);
+            surfaceFrame->setPresentState(PresentState::Presented, mLastLatchTime);
             mFlinger->mFrameTimeline->addSurfaceFrame(surfaceFrame);
         }
     }
     mDrawingState.bufferlessSurfaceFramesTX.clear();
+    mLastLatchTime = currentLatchTime;
 }
 
 uint32_t Layer::clearTransactionFlags(uint32_t mask) {
@@ -2698,12 +2731,13 @@
 
 void Layer::callReleaseBufferCallback(const sp<ITransactionCompletedListener>& listener,
                                       const sp<GraphicBuffer>& buffer, uint64_t framenumber,
-                                      const sp<Fence>& releaseFence,
-                                      uint32_t currentMaxAcquiredBufferCount) {
+                                      const sp<Fence>& releaseFence) {
     if (!listener) {
         return;
     }
     ATRACE_FORMAT_INSTANT("callReleaseBufferCallback %s - %" PRIu64, getDebugName(), framenumber);
+    uint32_t currentMaxAcquiredBufferCount =
+            mFlinger->getMaxAcquiredBufferCountForCurrentRefreshRate(mOwnerUid);
     listener->onReleaseBuffer({buffer->getId(), framenumber},
                               releaseFence ? releaseFence : Fence::NO_FENCE,
                               currentMaxAcquiredBufferCount);
@@ -2798,16 +2832,7 @@
     }
 
     std::vector<JankData> jankData;
-    jankData.reserve(mPendingJankClassifications.size());
-    while (!mPendingJankClassifications.empty() &&
-           mPendingJankClassifications.front()->getJankType()) {
-        std::shared_ptr<frametimeline::SurfaceFrame> surfaceFrame =
-                mPendingJankClassifications.front();
-        mPendingJankClassifications.pop_front();
-        jankData.emplace_back(
-                JankData(surfaceFrame->getToken(), surfaceFrame->getJankType().value()));
-    }
-
+    transferAvailableJankData(mDrawingState.callbackHandles, jankData);
     mFlinger->getTransactionCallbackInvoker().addCallbackHandles(mDrawingState.callbackHandles,
                                                                  jankData);
     mDrawingState.callbackHandles = {};
@@ -2963,9 +2988,7 @@
             // call any release buffer callbacks if set.
             callReleaseBufferCallback(mDrawingState.releaseBufferListener,
                                       mDrawingState.buffer->getBuffer(), mDrawingState.frameNumber,
-                                      mDrawingState.acquireFence,
-                                      mFlinger->getMaxAcquiredBufferCountForCurrentRefreshRate(
-                                              mOwnerUid));
+                                      mDrawingState.acquireFence);
             decrementPendingBufferCount();
             if (mDrawingState.bufferSurfaceFrameTX != nullptr &&
                 mDrawingState.bufferSurfaceFrameTX->getPresentState() != PresentState::Presented) {
@@ -2975,13 +2998,12 @@
         } else if (EARLY_RELEASE_ENABLED && mLastClientCompositionFence != nullptr) {
             callReleaseBufferCallback(mDrawingState.releaseBufferListener,
                                       mDrawingState.buffer->getBuffer(), mDrawingState.frameNumber,
-                                      mLastClientCompositionFence,
-                                      mFlinger->getMaxAcquiredBufferCountForCurrentRefreshRate(
-                                              mOwnerUid));
+                                      mLastClientCompositionFence);
             mLastClientCompositionFence = nullptr;
         }
     }
 
+    mDrawingState.producerId = bufferData.producerId;
     mDrawingState.frameNumber = frameNumber;
     mDrawingState.releaseBufferListener = bufferData.releaseBufferListener;
     mDrawingState.buffer = std::move(buffer);
@@ -3106,6 +3128,7 @@
         return false;
     }
 
+    std::deque<sp<CallbackHandle>> remainingHandles;
     for (const auto& handle : handles) {
         // If this transaction set a buffer on this layer, release its previous buffer
         handle->releasePreviousBuffer = mReleasePreviousBuffer;
@@ -3120,11 +3143,19 @@
             mDrawingState.callbackHandles.push_back(handle);
 
         } else { // If this layer will NOT need to be relatched and presented this frame
-            // Notify the transaction completed thread this handle is done
-            mFlinger->getTransactionCallbackInvoker().registerUnpresentedCallbackHandle(handle);
+            // Queue this handle to be notified below.
+            remainingHandles.push_back(handle);
         }
     }
 
+    if (!remainingHandles.empty()) {
+        // Notify the transaction completed thread that these handles are done. These are only
+        // the handles that were not added to mDrawingState; those will be notified later.
+        std::vector<JankData> jankData;
+        transferAvailableJankData(remainingHandles, jankData);
+        mFlinger->getTransactionCallbackInvoker().addCallbackHandles(remainingHandles, jankData);
+    }
+
     mReleasePreviousBuffer = false;
     mCallbackHandleAcquireTimeOrFence = -1;
 
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 3d4f03f..2955daf 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -141,6 +141,8 @@
 
         uint64_t frameNumber;
         ui::Transform transform;
+
+        uint32_t producerId = 0;
         uint32_t bufferTransform;
         bool transformToDisplayInverse;
         Region transparentRegionHint;
@@ -618,7 +620,7 @@
      * doTransaction - process the transaction. This is a good place to figure
      * out which attributes of the surface have changed.
      */
-    virtual uint32_t doTransaction(uint32_t transactionFlags);
+    virtual uint32_t doTransaction(uint32_t transactionFlags, nsecs_t currentLatchTime);
 
     /*
      * Remove relative z for the layer if its relative parent is not part of the
@@ -838,6 +840,10 @@
                                         std::unordered_set<Layer*>& visited);
     bool willPresentCurrentTransaction() const;
 
+    void callReleaseBufferCallback(const sp<ITransactionCompletedListener>& listener,
+                                   const sp<GraphicBuffer>& buffer, uint64_t framenumber,
+                                   const sp<Fence>& releaseFence);
+
 protected:
     // For unit tests
     friend class TestableSurfaceFlinger;
@@ -851,7 +857,7 @@
     void preparePerFrameCompositionState();
     void preparePerFrameBufferCompositionState();
     void preparePerFrameEffectsCompositionState();
-    virtual void commitTransaction(State& stateToCommit);
+    virtual void commitTransaction(State& stateToCommit, nsecs_t currentLatchTime = 0);
     void gatherBufferInfo();
     void onSurfaceFrameCreated(const std::shared_ptr<frametimeline::SurfaceFrame>&);
 
@@ -1047,6 +1053,10 @@
                                    const sp<Fence>& releaseFence,
                                    uint32_t currentMaxAcquiredBufferCount);
 
+    // Returns true if the transformed buffer size does not match the layer size and we need
+    // to apply filtering.
+    bool bufferNeedsFiltering() const;
+
     // Returns true if there is a valid color to fill.
     bool fillsColor() const;
     // Returns true if this layer has a blur value.
@@ -1064,6 +1074,11 @@
 
     void updateChildrenSnapshots(bool updateGeometry);
 
+    // Fills the provided vector with the currently available JankData and removes the processed
+    // JankData from the pending list.
+    void transferAvailableJankData(const std::deque<sp<CallbackHandle>>& handles,
+                                   std::vector<JankData>& jankData);
+
     // Cached properties computed from drawing state
     // Effective transform taking into account parent transforms and any parent scaling, which is
     // a transform from the current layer coordinate space to display(screen) coordinate space.
diff --git a/services/surfaceflinger/Scheduler/EventThread.cpp b/services/surfaceflinger/Scheduler/EventThread.cpp
index 5e79a5c..eb6d7e4 100644
--- a/services/surfaceflinger/Scheduler/EventThread.cpp
+++ b/services/surfaceflinger/Scheduler/EventThread.cpp
@@ -238,19 +238,29 @@
 
 namespace impl {
 
-EventThread::EventThread(const char* name, std::shared_ptr<scheduler::VsyncSchedule> vsyncSchedule,
-                         IEventThreadCallback& eventThreadCallback,
+EventThread::EventThread(const char* name, scheduler::VsyncSchedule& vsyncSchedule,
                          android::frametimeline::TokenManager* tokenManager,
+                         ThrottleVsyncCallback throttleVsyncCallback,
+                         GetVsyncPeriodFunction getVsyncPeriodFunction,
                          std::chrono::nanoseconds workDuration,
                          std::chrono::nanoseconds readyDuration)
       : mThreadName(name),
         mVsyncTracer(base::StringPrintf("VSYNC-%s", name), 0),
         mWorkDuration(base::StringPrintf("VsyncWorkDuration-%s", name), workDuration),
         mReadyDuration(readyDuration),
-        mVsyncSchedule(std::move(vsyncSchedule)),
-        mVsyncRegistration(mVsyncSchedule->getDispatch(), createDispatchCallback(), name),
+        mVsyncSchedule(vsyncSchedule),
+        mVsyncRegistration(
+                vsyncSchedule.getDispatch(),
+                [this](nsecs_t vsyncTime, nsecs_t wakeupTime, nsecs_t readyTime) {
+                    onVsync(vsyncTime, wakeupTime, readyTime);
+                },
+                name),
         mTokenManager(tokenManager),
-        mEventThreadCallback(eventThreadCallback) {
+        mThrottleVsyncCallback(std::move(throttleVsyncCallback)),
+        mGetVsyncPeriodFunction(std::move(getVsyncPeriodFunction)) {
+    LOG_ALWAYS_FATAL_IF(getVsyncPeriodFunction == nullptr,
+            "getVsyncPeriodFunction must not be null");
+
     mThread = std::thread([this]() NO_THREAD_SAFETY_ANALYSIS {
         std::unique_lock<std::mutex> lock(mMutex);
         threadMain(lock);
@@ -361,16 +371,16 @@
     }
 
     VsyncEventData vsyncEventData;
-    const Fps frameInterval = mEventThreadCallback.getLeaderRenderFrameRate(connection->mOwnerUid);
-    vsyncEventData.frameInterval = frameInterval.getPeriodNsecs();
+    nsecs_t frameInterval = mGetVsyncPeriodFunction(connection->mOwnerUid);
+    vsyncEventData.frameInterval = frameInterval;
     const auto [presentTime, deadline] = [&]() -> std::pair<nsecs_t, nsecs_t> {
         std::lock_guard<std::mutex> lock(mMutex);
-        const auto vsyncTime = mVsyncSchedule->getTracker().nextAnticipatedVSyncTimeFrom(
+        const auto vsyncTime = mVsyncSchedule.getTracker().nextAnticipatedVSyncTimeFrom(
                 systemTime() + mWorkDuration.get().count() + mReadyDuration.count());
         return {vsyncTime, vsyncTime - mReadyDuration.count()};
     }();
-    generateFrameTimeline(vsyncEventData, frameInterval.getPeriodNsecs(),
-                          systemTime(SYSTEM_TIME_MONOTONIC), presentTime, deadline);
+    generateFrameTimeline(vsyncEventData, frameInterval, systemTime(SYSTEM_TIME_MONOTONIC),
+                          presentTime, deadline);
     return vsyncEventData;
 }
 
@@ -533,15 +543,14 @@
     const auto throttleVsync = [&] {
         const auto& vsyncData = event.vsync.vsyncData;
         if (connection->frameRate.isValid()) {
-            return !mVsyncSchedule->getTracker()
+            return !mVsyncSchedule.getTracker()
                             .isVSyncInPhase(vsyncData.preferredExpectedPresentationTime(),
                                             connection->frameRate);
         }
 
-        const auto expectedPresentTime =
-                TimePoint::fromNs(vsyncData.preferredExpectedPresentationTime());
-        return !mEventThreadCallback.isVsyncTargetForUid(expectedPresentTime,
-                                                         connection->mOwnerUid);
+        return mThrottleVsyncCallback &&
+                mThrottleVsyncCallback(event.vsync.vsyncData.preferredExpectedPresentationTime(),
+                                       connection->mOwnerUid);
     };
 
     switch (event.header.type) {
@@ -629,11 +638,9 @@
     for (const auto& consumer : consumers) {
         DisplayEventReceiver::Event copy = event;
         if (event.header.type == DisplayEventReceiver::DISPLAY_EVENT_VSYNC) {
-            const Fps frameInterval =
-                    mEventThreadCallback.getLeaderRenderFrameRate(consumer->mOwnerUid);
-            copy.vsync.vsyncData.frameInterval = frameInterval.getPeriodNsecs();
-            generateFrameTimeline(copy.vsync.vsyncData, frameInterval.getPeriodNsecs(),
-                                  copy.header.timestamp,
+            const int64_t frameInterval = mGetVsyncPeriodFunction(consumer->mOwnerUid);
+            copy.vsync.vsyncData.frameInterval = frameInterval;
+            generateFrameTimeline(copy.vsync.vsyncData, frameInterval, copy.header.timestamp,
                                   event.vsync.vsyncData.preferredExpectedPresentationTime(),
                                   event.vsync.vsyncData.preferredDeadlineTimestamp());
         }
@@ -699,26 +706,6 @@
     }
 }
 
-void EventThread::onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule> schedule) {
-    std::lock_guard<std::mutex> lock(mMutex);
-    const bool reschedule = mVsyncRegistration.cancel() == scheduler::CancelResult::Cancelled;
-    mVsyncSchedule = std::move(schedule);
-    mVsyncRegistration =
-            scheduler::VSyncCallbackRegistration(mVsyncSchedule->getDispatch(),
-                                                 createDispatchCallback(), mThreadName);
-    if (reschedule) {
-        mVsyncRegistration.schedule({.workDuration = mWorkDuration.get().count(),
-                                     .readyDuration = mReadyDuration.count(),
-                                     .earliestVsync = mLastVsyncCallbackTime.ns()});
-    }
-}
-
-scheduler::VSyncDispatch::Callback EventThread::createDispatchCallback() {
-    return [this](nsecs_t vsyncTime, nsecs_t wakeupTime, nsecs_t readyTime) {
-        onVsync(vsyncTime, wakeupTime, readyTime);
-    };
-}
-
 } // namespace impl
 } // namespace android
 
diff --git a/services/surfaceflinger/Scheduler/EventThread.h b/services/surfaceflinger/Scheduler/EventThread.h
index aa27091..347dc4a 100644
--- a/services/surfaceflinger/Scheduler/EventThread.h
+++ b/services/surfaceflinger/Scheduler/EventThread.h
@@ -23,7 +23,6 @@
 #include <sys/types.h>
 #include <utils/Errors.h>
 
-#include <scheduler/Fps.h>
 #include <scheduler/FrameRateMode.h>
 #include <condition_variable>
 #include <cstdint>
@@ -68,15 +67,6 @@
     // Subsequent values are periods.
 };
 
-class IEventThreadCallback {
-public:
-    virtual ~IEventThreadCallback() = default;
-
-    virtual bool isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const = 0;
-
-    virtual Fps getLeaderRenderFrameRate(uid_t uid) const = 0;
-};
-
 class EventThreadConnection : public gui::BnDisplayEventConnection {
 public:
     EventThreadConnection(EventThread*, uid_t callingUid, ResyncCallback,
@@ -146,17 +136,18 @@
 
     // Retrieves the number of event connections tracked by this EventThread.
     virtual size_t getEventThreadConnectionCount() = 0;
-
-    virtual void onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule>) = 0;
 };
 
 namespace impl {
 
 class EventThread : public android::EventThread {
 public:
-    EventThread(const char* name, std::shared_ptr<scheduler::VsyncSchedule>, IEventThreadCallback&,
-                frametimeline::TokenManager*, std::chrono::nanoseconds workDuration,
-                std::chrono::nanoseconds readyDuration);
+    using ThrottleVsyncCallback = std::function<bool(nsecs_t, uid_t)>;
+    using GetVsyncPeriodFunction = std::function<nsecs_t(uid_t)>;
+
+    EventThread(const char* name, scheduler::VsyncSchedule&, frametimeline::TokenManager*,
+                ThrottleVsyncCallback, GetVsyncPeriodFunction,
+                std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration);
     ~EventThread();
 
     sp<EventThreadConnection> createEventConnection(
@@ -188,8 +179,6 @@
 
     size_t getEventThreadConnectionCount() override;
 
-    void onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule>) override;
-
 private:
     friend EventThreadTest;
 
@@ -213,19 +202,17 @@
                                nsecs_t timestamp, nsecs_t preferredExpectedPresentationTime,
                                nsecs_t preferredDeadlineTimestamp) const;
 
-    scheduler::VSyncDispatch::Callback createDispatchCallback();
-
     const char* const mThreadName;
     TracedOrdinal<int> mVsyncTracer;
     TracedOrdinal<std::chrono::nanoseconds> mWorkDuration GUARDED_BY(mMutex);
     std::chrono::nanoseconds mReadyDuration GUARDED_BY(mMutex);
-    std::shared_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
+    scheduler::VsyncSchedule& mVsyncSchedule;
     TimePoint mLastVsyncCallbackTime GUARDED_BY(mMutex) = TimePoint::now();
     scheduler::VSyncCallbackRegistration mVsyncRegistration GUARDED_BY(mMutex);
     frametimeline::TokenManager* const mTokenManager;
 
-    // mEventThreadCallback will outlive the EventThread.
-    IEventThreadCallback& mEventThreadCallback;
+    const ThrottleVsyncCallback mThrottleVsyncCallback;
+    const GetVsyncPeriodFunction mGetVsyncPeriodFunction;
 
     std::thread mThread;
     mutable std::mutex mMutex;
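
For orientation, a minimal construction sketch using the two callback types reintroduced above. The vsyncSchedule reference, tokenManager pointer, and the concrete durations and period are assumptions of this example, not part of the change; Scheduler::createEventThread shows the real wiring.

using namespace std::chrono_literals;

// Hypothetical wiring of the callback-based constructor.
auto thread = std::make_unique<impl::EventThread>(
        "app", vsyncSchedule, tokenManager,
        /* ThrottleVsyncCallback */
        [](nsecs_t /*expectedVsyncTime*/, uid_t /*uid*/) { return false; },  // never throttle
        /* GetVsyncPeriodFunction */
        [](uid_t /*uid*/) -> nsecs_t { return 16'666'667; },                 // ~60 Hz period
        /* workDuration */ 16ms, /* readyDuration */ 0ms);
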
diff --git a/services/surfaceflinger/Scheduler/ISchedulerCallback.h b/services/surfaceflinger/Scheduler/ISchedulerCallback.h
index 92c2189..c4de749 100644
--- a/services/surfaceflinger/Scheduler/ISchedulerCallback.h
+++ b/services/surfaceflinger/Scheduler/ISchedulerCallback.h
@@ -18,14 +18,12 @@
 
 #include <vector>
 
-#include <ui/DisplayId.h>
-
 #include "Display/DisplayModeRequest.h"
 
 namespace android::scheduler {
 
 struct ISchedulerCallback {
-    virtual void setVsyncEnabled(PhysicalDisplayId, bool) = 0;
+    virtual void setVsyncEnabled(bool) = 0;
     virtual void requestDisplayModes(std::vector<display::DisplayModeRequest>) = 0;
     virtual void kernelTimerChanged(bool expired) = 0;
     virtual void triggerOnFrameRateOverridesChanged() = 0;
diff --git a/services/surfaceflinger/Scheduler/MessageQueue.cpp b/services/surfaceflinger/Scheduler/MessageQueue.cpp
index 925f739..dec8f59 100644
--- a/services/surfaceflinger/Scheduler/MessageQueue.cpp
+++ b/services/surfaceflinger/Scheduler/MessageQueue.cpp
@@ -75,37 +75,19 @@
     mHandler->dispatchFrame(vsyncId, expectedVsyncTime);
 }
 
-void MessageQueue::initVsync(std::shared_ptr<scheduler::VSyncDispatch> dispatch,
+void MessageQueue::initVsync(scheduler::VSyncDispatch& dispatch,
                              frametimeline::TokenManager& tokenManager,
                              std::chrono::nanoseconds workDuration) {
     std::lock_guard lock(mVsync.mutex);
     mVsync.workDuration = workDuration;
     mVsync.tokenManager = &tokenManager;
-    updateVsyncRegistrationLocked(std::move(dispatch));
-}
-
-void MessageQueue::updateVsyncRegistration(std::shared_ptr<scheduler::VSyncDispatch> dispatch) {
-    std::lock_guard lock(mVsync.mutex);
-    updateVsyncRegistrationLocked(std::move(dispatch));
-}
-
-void MessageQueue::updateVsyncRegistrationLocked(
-        std::shared_ptr<scheduler::VSyncDispatch> dispatch) {
-    const bool reschedule = mVsync.registration &&
-            mVsync.registration->cancel() == scheduler::CancelResult::Cancelled;
     mVsync.registration = std::make_unique<
-            scheduler::VSyncCallbackRegistration>(std::move(dispatch),
+            scheduler::VSyncCallbackRegistration>(dispatch,
                                                   std::bind(&MessageQueue::vsyncCallback, this,
                                                             std::placeholders::_1,
                                                             std::placeholders::_2,
                                                             std::placeholders::_3),
                                                   "sf");
-    if (reschedule) {
-        mVsync.scheduledFrameTime =
-                mVsync.registration->schedule({.workDuration = mVsync.workDuration.get().count(),
-                                               .readyDuration = 0,
-                                               .earliestVsync = mVsync.lastCallbackTime.ns()});
-    }
 }
 
 void MessageQueue::destroyVsync() {
diff --git a/services/surfaceflinger/Scheduler/MessageQueue.h b/services/surfaceflinger/Scheduler/MessageQueue.h
index ecb237d..0d59337 100644
--- a/services/surfaceflinger/Scheduler/MessageQueue.h
+++ b/services/surfaceflinger/Scheduler/MessageQueue.h
@@ -65,7 +65,7 @@
 public:
     virtual ~MessageQueue() = default;
 
-    virtual void initVsync(std::shared_ptr<scheduler::VSyncDispatch>, frametimeline::TokenManager&,
+    virtual void initVsync(scheduler::VSyncDispatch&, frametimeline::TokenManager&,
                            std::chrono::nanoseconds workDuration) = 0;
     virtual void destroyVsync() = 0;
     virtual void setDuration(std::chrono::nanoseconds workDuration) = 0;
@@ -106,8 +106,6 @@
 
     void vsyncCallback(nsecs_t vsyncTime, nsecs_t targetWakeupTime, nsecs_t readyTime);
 
-    void updateVsyncRegistration(std::shared_ptr<scheduler::VSyncDispatch>) EXCLUDES(mVsync.mutex);
-
 private:
     virtual void onFrameSignal(ICompositor&, VsyncId, TimePoint expectedVsyncTime) = 0;
 
@@ -129,13 +127,10 @@
 
     Vsync mVsync;
 
-    void updateVsyncRegistrationLocked(std::shared_ptr<scheduler::VSyncDispatch>)
-            REQUIRES(mVsync.mutex);
-
 public:
     explicit MessageQueue(ICompositor&);
 
-    void initVsync(std::shared_ptr<scheduler::VSyncDispatch>, frametimeline::TokenManager&,
+    void initVsync(scheduler::VSyncDispatch&, frametimeline::TokenManager&,
                    std::chrono::nanoseconds workDuration) override;
     void destroyVsync() override;
     void setDuration(std::chrono::nanoseconds workDuration) override;
diff --git a/services/surfaceflinger/Scheduler/OneShotTimer.h b/services/surfaceflinger/Scheduler/OneShotTimer.h
index 02e8719..f95646c 100644
--- a/services/surfaceflinger/Scheduler/OneShotTimer.h
+++ b/services/surfaceflinger/Scheduler/OneShotTimer.h
@@ -40,7 +40,7 @@
 
     OneShotTimer(std::string name, const Interval& interval, const ResetCallback& resetCallback,
                  const TimeoutCallback& timeoutCallback,
-                 std::unique_ptr<android::Clock> clock = std::make_unique<SteadyClock>());
+                 std::unique_ptr<Clock> clock = std::make_unique<SteadyClock>());
     ~OneShotTimer();
 
     Duration interval() const { return mInterval; }
@@ -82,7 +82,7 @@
     std::thread mThread;
 
     // Clock object for the timer. Mocked in unit tests.
-    std::unique_ptr<android::Clock> mClock;
+    std::unique_ptr<Clock> mClock;
 
     // Semaphore to keep mThread synchronized.
     sem_t mSemaphore;
diff --git a/services/surfaceflinger/Scheduler/RefreshRateSelector.cpp b/services/surfaceflinger/Scheduler/RefreshRateSelector.cpp
index c5b3e14..f6fe468 100644
--- a/services/surfaceflinger/Scheduler/RefreshRateSelector.cpp
+++ b/services/surfaceflinger/Scheduler/RefreshRateSelector.cpp
@@ -238,7 +238,6 @@
         std::string name = to_string(frameRateMode);
 
         ALOGV("%s sorting scores %.2f", name.c_str(), overallScore);
-        ATRACE_INT(name.c_str(), static_cast<int>(std::round(overallScore * 100)));
 
         if (!ScoredFrameRate::scoresEqual(overallScore, rhs.overallScore)) {
             return overallScore > rhs.overallScore;
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index eed57ef..17cdff9 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -28,10 +28,10 @@
 #include <ftl/enum.h>
 #include <ftl/fake_guard.h>
 #include <ftl/small_map.h>
+#include <gui/TraceUtils.h>
 #include <gui/WindowInfo.h>
 #include <system/window.h>
 #include <utils/Timers.h>
-#include <utils/Trace.h>
 
 #include <FrameTimeline/FrameTimeline.h>
 #include <scheduler/interface/ICompositor.h>
@@ -114,18 +114,10 @@
 }
 
 void Scheduler::registerDisplay(PhysicalDisplayId displayId, RefreshRateSelectorPtr selectorPtr) {
-    registerDisplayInternal(displayId, std::move(selectorPtr),
-                            std::make_shared<VsyncSchedule>(displayId, mFeatures));
-}
-
-void Scheduler::registerDisplayInternal(PhysicalDisplayId displayId,
-                                        RefreshRateSelectorPtr selectorPtr,
-                                        std::shared_ptr<VsyncSchedule> vsyncSchedule) {
     demoteLeaderDisplay();
 
     std::scoped_lock lock(mDisplayLock);
     mRefreshRateSelectors.emplace_or_replace(displayId, std::move(selectorPtr));
-    mVsyncSchedules.emplace_or_replace(displayId, std::move(vsyncSchedule));
 
     promoteLeaderDisplay();
 }
@@ -135,7 +127,6 @@
 
     std::scoped_lock lock(mDisplayLock);
     mRefreshRateSelectors.erase(displayId);
-    mVsyncSchedules.erase(displayId);
 
     // Do not allow removing the final display. Code in the scheduler expects
     // there to be at least one display. (This may be relaxed in the future with
@@ -163,49 +154,53 @@
     compositor.sample();
 }
 
-std::optional<Fps> Scheduler::getFrameRateOverride(uid_t uid) const {
-    std::scoped_lock lock(mDisplayLock);
-    return getFrameRateOverrideLocked(uid);
+void Scheduler::createVsyncSchedule(FeatureFlags features) {
+    mVsyncSchedule = std::make_unique<VsyncSchedule>(features);
 }
 
-std::optional<Fps> Scheduler::getFrameRateOverrideLocked(uid_t uid) const {
+std::optional<Fps> Scheduler::getFrameRateOverride(uid_t uid) const {
     const bool supportsFrameRateOverrideByContent =
-            leaderSelectorPtrLocked()->supportsAppFrameRateOverrideByContent();
+            leaderSelectorPtr()->supportsAppFrameRateOverrideByContent();
     return mFrameRateOverrideMappings
             .getFrameRateOverrideForUid(uid, supportsFrameRateOverrideByContent);
 }
 
-bool Scheduler::isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const {
+bool Scheduler::isVsyncValid(TimePoint expectedVsyncTimestamp, uid_t uid) const {
     const auto frameRate = getFrameRateOverride(uid);
     if (!frameRate.has_value()) {
         return true;
     }
 
-    return isVsyncInPhase(expectedVsyncTime, *frameRate);
+    ATRACE_FORMAT("%s uid: %d frameRate: %s", __func__, uid, to_string(*frameRate).c_str());
+    return mVsyncSchedule->getTracker().isVSyncInPhase(expectedVsyncTimestamp.ns(), *frameRate);
 }
 
-bool Scheduler::isVsyncInPhase(TimePoint expectedVsyncTime, Fps frameRate) const {
-    return getVsyncSchedule()->getTracker().isVSyncInPhase(expectedVsyncTime.ns(), frameRate);
+bool Scheduler::isVsyncInPhase(TimePoint timePoint, const Fps frameRate) const {
+    return mVsyncSchedule->getTracker().isVSyncInPhase(timePoint.ns(), frameRate);
 }
 
-Fps Scheduler::getLeaderRenderFrameRate(uid_t uid) const {
-    std::scoped_lock lock(mDisplayLock);
-    ftl::FakeGuard guard(kMainThreadContext);
-    auto vsyncSchedule = getVsyncScheduleLocked();
+impl::EventThread::ThrottleVsyncCallback Scheduler::makeThrottleVsyncCallback() const {
+    return [this](nsecs_t expectedVsyncTimestamp, uid_t uid) {
+        return !isVsyncValid(TimePoint::fromNs(expectedVsyncTimestamp), uid);
+    };
+}
 
-    const Fps refreshRate = leaderSelectorPtrLocked()->getActiveMode().fps;
-    const nsecs_t currentPeriod = vsyncSchedule->period().ns() ?: refreshRate.getPeriodNsecs();
+impl::EventThread::GetVsyncPeriodFunction Scheduler::makeGetVsyncPeriodFunction() const {
+    return [this](uid_t uid) {
+        const Fps refreshRate = leaderSelectorPtr()->getActiveMode().fps;
+        const nsecs_t currentPeriod = mVsyncSchedule->period().ns() ?: refreshRate.getPeriodNsecs();
 
-    const auto frameRate = getFrameRateOverrideLocked(uid);
-    if (!frameRate.has_value()) {
-        return Fps::fromPeriodNsecs(currentPeriod);
-    }
+        const auto frameRate = getFrameRateOverride(uid);
+        if (!frameRate.has_value()) {
+            return currentPeriod;
+        }
 
-    const auto divisor = RefreshRateSelector::getFrameRateDivisor(refreshRate, *frameRate);
-    if (divisor <= 1) {
-        return Fps::fromPeriodNsecs(currentPeriod);
-    }
-    return Fps::fromPeriodNsecs(currentPeriod * divisor);
+        const auto divisor = RefreshRateSelector::getFrameRateDivisor(refreshRate, *frameRate);
+        if (divisor <= 1) {
+            return currentPeriod;
+        }
+        return currentPeriod * divisor;
+    };
 }
 
 ConnectionHandle Scheduler::createEventThread(Cycle cycle,
@@ -213,7 +208,9 @@
                                               std::chrono::nanoseconds workDuration,
                                               std::chrono::nanoseconds readyDuration) {
     auto eventThread = std::make_unique<impl::EventThread>(cycle == Cycle::Render ? "app" : "appSf",
-                                                           getVsyncSchedule(), *this, tokenManager,
+                                                           *mVsyncSchedule, tokenManager,
+                                                           makeThrottleVsyncCallback(),
+                                                           makeGetVsyncPeriodFunction(),
                                                            workDuration, readyDuration);
 
     auto& handle = cycle == Cycle::Render ? mAppConnectionHandle : mSfConnectionHandle;
@@ -396,57 +393,27 @@
     setDuration(config.sfWorkDuration);
 }
 
-void Scheduler::enableHardwareVsync(PhysicalDisplayId id) {
-    auto schedule = getVsyncSchedule(id);
-    schedule->enableHardwareVsync(mSchedulerCallback);
+void Scheduler::enableHardwareVsync() {
+    mVsyncSchedule->enableHardwareVsync(mSchedulerCallback);
 }
 
-void Scheduler::disableHardwareVsync(PhysicalDisplayId id, bool disallow) {
-    auto schedule = getVsyncSchedule(id);
-    schedule->disableHardwareVsync(mSchedulerCallback, disallow);
+void Scheduler::disableHardwareVsync(bool disallow) {
+    mVsyncSchedule->disableHardwareVsync(mSchedulerCallback, disallow);
 }
 
-void Scheduler::resyncAllToHardwareVsync(bool allowToEnable) {
-    std::scoped_lock lock(mDisplayLock);
-    ftl::FakeGuard guard(kMainThreadContext);
-
-    for (const auto& [id, _] : mRefreshRateSelectors) {
-        resyncToHardwareVsyncLocked(id, allowToEnable);
+void Scheduler::resyncToHardwareVsync(bool allowToEnable, Fps refreshRate) {
+    if (mVsyncSchedule->isHardwareVsyncAllowed(allowToEnable) && refreshRate.isValid()) {
+        mVsyncSchedule->startPeriodTransition(mSchedulerCallback, refreshRate.getPeriod());
     }
 }
 
-void Scheduler::resyncToHardwareVsyncLocked(PhysicalDisplayId id, bool allowToEnable,
-                                            std::optional<Fps> refreshRate) {
-    if (!refreshRate) {
-        auto selectorPtr = mRefreshRateSelectors.get(id);
-        LOG_ALWAYS_FATAL_IF(!selectorPtr);
-        refreshRate = selectorPtr->get()->getActiveMode().modePtr->getFps();
-    }
-    auto schedule = getVsyncScheduleLocked(id);
-    if (allowToEnable) {
-        schedule->allowHardwareVsync();
-    } else if (!schedule->isHardwareVsyncAllowed()) {
-        // Hardware vsync is not currently allowed, so abort the resync
-        // attempt for now.
-        return;
-    }
-
-    setVsyncPeriod(schedule, refreshRate->getPeriodNsecs(), false /* force */);
-}
-
-void Scheduler::setRenderRate(PhysicalDisplayId id, Fps renderFrameRate) {
-    std::scoped_lock lock(mDisplayLock);
-    ftl::FakeGuard guard(kMainThreadContext);
-
-    auto selectorPtr = mRefreshRateSelectors.get(id);
-    LOG_ALWAYS_FATAL_IF(!selectorPtr);
-    const auto mode = selectorPtr->get()->getActiveMode();
+void Scheduler::setRenderRate(Fps renderFrameRate) {
+    const auto mode = leaderSelectorPtr()->getActiveMode();
 
     using fps_approx_ops::operator!=;
     LOG_ALWAYS_FATAL_IF(renderFrameRate != mode.fps,
-                        "Mismatch in render frame rates. Selector: %s, Scheduler: %s, Display: "
-                        "%" PRIu64,
-                        to_string(mode.fps).c_str(), to_string(renderFrameRate).c_str(), id.value);
+                        "Mismatch in render frame rates. Selector: %s, Scheduler: %s",
+                        to_string(mode.fps).c_str(), to_string(renderFrameRate).c_str());
 
     ALOGV("%s %s (%s)", __func__, to_string(mode.fps).c_str(),
           to_string(mode.modePtr->getFps()).c_str());
@@ -455,7 +422,7 @@
     LOG_ALWAYS_FATAL_IF(divisor == 0, "%s <> %s -- not divisors", to_string(mode.fps).c_str(),
                         to_string(mode.fps).c_str());
 
-    getVsyncScheduleLocked(id)->getTracker().setDivisor(static_cast<unsigned>(divisor));
+    mVsyncSchedule->getTracker().setDivisor(static_cast<unsigned>(divisor));
 }
 
 void Scheduler::resync() {
@@ -465,43 +432,24 @@
     const nsecs_t last = mLastResyncTime.exchange(now);
 
     if (now - last > kIgnoreDelay) {
-        resyncAllToHardwareVsync(false /* allowToEnable */);
+        const auto refreshRate = leaderSelectorPtr()->getActiveMode().modePtr->getFps();
+        resyncToHardwareVsync(false, refreshRate);
     }
 }
 
-void Scheduler::setVsyncPeriod(const std::shared_ptr<VsyncSchedule>& schedule, nsecs_t period,
-                               bool force) {
-    ALOGD("Scheduler::setVsyncPeriod");
-    if (period <= 0) return;
-
-    // TODO (b/266712910):The old code held mHWVsyncLock before calling
-    // startPeriodTransition. Move these into a new method on VsyncSchedule that
-    // encapsulates this behavior there and allows holding the lock the whole
-    // time.
-    schedule->getController().startPeriodTransition(period, force);
-    schedule->enableHardwareVsync(mSchedulerCallback);
+bool Scheduler::addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriodIn) {
+    const auto hwcVsyncPeriod = ftl::Optional(hwcVsyncPeriodIn).transform([](nsecs_t nanos) {
+        return Period::fromNs(nanos);
+    });
+    return mVsyncSchedule->addResyncSample(mSchedulerCallback, TimePoint::fromNs(timestamp),
+                                           hwcVsyncPeriod);
 }
 
-bool Scheduler::addResyncSample(PhysicalDisplayId id, nsecs_t timestamp,
-                                std::optional<nsecs_t> hwcVsyncPeriod) {
-    bool periodFlushed = false;
-    auto schedule = getVsyncSchedule(id);
-    if (schedule->getController().addHwVsyncTimestamp(timestamp, hwcVsyncPeriod, &periodFlushed)) {
-        schedule->enableHardwareVsync(mSchedulerCallback);
+void Scheduler::addPresentFence(std::shared_ptr<FenceTime> fence) {
+    if (mVsyncSchedule->getController().addPresentFence(std::move(fence))) {
+        enableHardwareVsync();
     } else {
-        schedule->disableHardwareVsync(mSchedulerCallback, false /* disallow */);
-    }
-
-    return periodFlushed;
-}
-
-void Scheduler::addPresentFence(PhysicalDisplayId id, std::shared_ptr<FenceTime> fence) {
-    auto schedule = getVsyncSchedule(id);
-    const bool needMoreSignals = schedule->getController().addPresentFence(std::move(fence));
-    if (needMoreSignals) {
-        schedule->enableHardwareVsync(mSchedulerCallback);
-    } else {
-        schedule->disableHardwareVsync(mSchedulerCallback, false /* disallow */);
+        disableHardwareVsync(false /* disallow */);
     }
 }
 
@@ -553,22 +501,12 @@
     }
 }
 
-void Scheduler::setDisplayPowerMode(PhysicalDisplayId id, hal::PowerMode powerMode) {
-    const bool isLeader = [this, id]() REQUIRES(kMainThreadContext) {
-        ftl::FakeGuard guard(mDisplayLock);
-        return id == mLeaderDisplayId;
-    }();
-    if (isLeader) {
-        // TODO (b/255657128): This needs to be handled per display.
+void Scheduler::setDisplayPowerMode(hal::PowerMode powerMode) {
+    {
         std::lock_guard<std::mutex> lock(mPolicyLock);
         mPolicy.displayPowerMode = powerMode;
     }
-    {
-        std::scoped_lock lock(mDisplayLock);
-        auto vsyncSchedule = getVsyncScheduleLocked(id);
-        vsyncSchedule->getController().setDisplayPowerMode(powerMode);
-    }
-    if (!isLeader) return;
+    mVsyncSchedule->getController().setDisplayPowerMode(powerMode);
 
     if (mDisplayPowerTimer) {
         mDisplayPowerTimer->reset();
@@ -579,24 +517,6 @@
     mLayerHistory.clear();
 }
 
-std::shared_ptr<const VsyncSchedule> Scheduler::getVsyncSchedule(
-        std::optional<PhysicalDisplayId> idOpt) const {
-    std::scoped_lock lock(mDisplayLock);
-    return getVsyncScheduleLocked(idOpt);
-}
-
-std::shared_ptr<const VsyncSchedule> Scheduler::getVsyncScheduleLocked(
-        std::optional<PhysicalDisplayId> idOpt) const {
-    ftl::FakeGuard guard(kMainThreadContext);
-    if (!idOpt) {
-        LOG_ALWAYS_FATAL_IF(!mLeaderDisplayId, "Missing a leader!");
-        idOpt = mLeaderDisplayId;
-    }
-    auto scheduleOpt = mVsyncSchedules.get(*idOpt);
-    LOG_ALWAYS_FATAL_IF(!scheduleOpt);
-    return std::const_pointer_cast<const VsyncSchedule>(scheduleOpt->get());
-}
-
 void Scheduler::kernelIdleTimerCallback(TimerState state) {
     ATRACE_INT("ExpiredKernelIdleTimer", static_cast<int>(state));
 
@@ -611,17 +531,12 @@
         // If we're not in performance mode then the kernel timer shouldn't do
         // anything, as the refresh rate during DPU power collapse will be the
         // same.
-        resyncAllToHardwareVsync(true /* allowToEnable */);
+        resyncToHardwareVsync(true /* allowToEnable */, refreshRate);
     } else if (state == TimerState::Expired && refreshRate <= FPS_THRESHOLD_FOR_KERNEL_TIMER) {
         // Disable HW VSYNC if the timer expired, as we don't need it enabled if
         // we're not pushing frames, and if we're in PERFORMANCE mode then we'll
         // need to update the VsyncController model anyway.
-        std::scoped_lock lock(mDisplayLock);
-        ftl::FakeGuard guard(kMainThreadContext);
-        constexpr bool disallow = false;
-        for (auto& [_, schedule] : mVsyncSchedules) {
-            schedule->disableHardwareVsync(mSchedulerCallback, disallow);
-        }
+        disableHardwareVsync(false /* disallow */);
     }
 
     mSchedulerCallback.kernelTimerChanged(state == TimerState::Expired);
@@ -678,20 +593,7 @@
 }
 
 void Scheduler::dumpVsync(std::string& out) const {
-    std::scoped_lock lock(mDisplayLock);
-    ftl::FakeGuard guard(kMainThreadContext);
-    if (mLeaderDisplayId) {
-        base::StringAppendF(&out, "VsyncSchedule for leader %s:\n",
-                            to_string(*mLeaderDisplayId).c_str());
-        getVsyncScheduleLocked()->dump(out);
-    }
-    for (auto& [id, vsyncSchedule] : mVsyncSchedules) {
-        if (id == mLeaderDisplayId) {
-            continue;
-        }
-        base::StringAppendF(&out, "VsyncSchedule for follower %s:\n", to_string(id).c_str());
-        vsyncSchedule->dump(out);
-    }
+    mVsyncSchedule->dump(out);
 }
 
 bool Scheduler::updateFrameRateOverrides(GlobalSignals consideredSignals, Fps displayRefreshRate) {
@@ -711,7 +613,6 @@
     mLeaderDisplayId = leaderIdOpt.value_or(mRefreshRateSelectors.begin()->first);
     ALOGI("Display %s is the leader", to_string(*mLeaderDisplayId).c_str());
 
-    auto vsyncSchedule = getVsyncScheduleLocked(*mLeaderDisplayId);
     if (const auto leaderPtr = leaderSelectorPtrLocked()) {
         leaderPtr->setIdleTimerCallbacks(
                 {.platform = {.onReset = [this] { idleTimerCallback(TimerState::Reset); },
@@ -721,17 +622,6 @@
                                     [this] { kernelIdleTimerCallback(TimerState::Expired); }}});
 
         leaderPtr->startIdleTimer();
-
-        const Fps refreshRate = leaderPtr->getActiveMode().modePtr->getFps();
-        setVsyncPeriod(vsyncSchedule, refreshRate.getPeriodNsecs(), true /* force */);
-    }
-
-    updateVsyncRegistration(vsyncSchedule->getDispatch());
-    {
-        std::lock_guard<std::mutex> lock(mConnectionsLock);
-        for (auto& [_, connection] : mConnections) {
-            connection.thread->onNewVsyncSchedule(vsyncSchedule);
-        }
     }
 }
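
The reworked Scheduler::resync() above still throttles how often a resync can reach the single VsyncSchedule by atomically swapping mLastResyncTime. A minimal standalone sketch of that pattern; the class name, member names, and the threshold are placeholders rather than the real kIgnoreDelay:

```cpp
#include <atomic>
#include <chrono>
#include <cstdint>

// Illustrative rate limiter in the spirit of Scheduler::resync(); names and
// values here are stand-ins, not the real Scheduler members.
class ResyncThrottle {
public:
    explicit ResyncThrottle(std::chrono::nanoseconds ignoreDelay) : mIgnoreDelay(ignoreDelay) {}

    // Returns true if enough time has passed since the previous attempt.
    bool shouldResync(std::chrono::nanoseconds now) {
        // exchange() records this attempt and returns the previous one in a
        // single atomic step, so concurrent callers cannot both win.
        const int64_t last = mLastResyncTime.exchange(now.count());
        return now.count() - last > mIgnoreDelay.count();
    }

private:
    const std::chrono::nanoseconds mIgnoreDelay;
    std::atomic<int64_t> mLastResyncTime{0};
};
```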
 
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index 8c8fc21..a340919 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -93,7 +93,7 @@
 
 using GlobalSignals = RefreshRateSelector::GlobalSignals;
 
-class Scheduler : android::impl::MessageQueue, public IEventThreadCallback {
+class Scheduler : android::impl::MessageQueue {
     using Impl = android::impl::MessageQueue;
 
 public:
@@ -114,6 +114,8 @@
 
     void run();
 
+    void createVsyncSchedule(FeatureFlags);
+
     using Impl::initVsync;
 
     using Impl::getScheduledFrameTime;
@@ -167,21 +169,9 @@
 
     const VsyncModulator& vsyncModulator() const { return *mVsyncModulator; }
 
-    // In some cases, we should only modulate for the leader display. In those
-    // cases, the caller should pass in the relevant display, and the method
-    // will no-op if it's not the leader. Other cases are not specific to a
-    // display.
     template <typename... Args,
               typename Handler = std::optional<VsyncConfig> (VsyncModulator::*)(Args...)>
-    void modulateVsync(std::optional<PhysicalDisplayId> id, Handler handler, Args... args) {
-        if (id) {
-            std::scoped_lock lock(mDisplayLock);
-            ftl::FakeGuard guard(kMainThreadContext);
-            if (id != mLeaderDisplayId) {
-                return;
-            }
-        }
-
+    void modulateVsync(Handler handler, Args... args) {
         if (const auto config = (*mVsyncModulator.*handler)(args...)) {
             setVsyncConfig(*config, getLeaderVsyncPeriod());
         }
@@ -190,32 +180,24 @@
     void setVsyncConfigSet(const VsyncConfigSet&, Period vsyncPeriod);
 
     // Sets the render rate for the scheduler to run at.
-    void setRenderRate(PhysicalDisplayId, Fps);
+    void setRenderRate(Fps);
 
-    void enableHardwareVsync(PhysicalDisplayId);
-    void disableHardwareVsync(PhysicalDisplayId, bool makeUnavailable);
+    void enableHardwareVsync();
+    void disableHardwareVsync(bool disallow);
 
     // Resyncs the scheduler to hardware vsync.
     // If allowToEnable is true, then hardware vsync will be turned on.
     // Otherwise, if hardware vsync is not already enabled then this method will
     // no-op.
-    // If refreshRate is nullopt, use the existing refresh rate of the display.
-    void resyncToHardwareVsync(PhysicalDisplayId id, bool allowToEnable,
-                               std::optional<Fps> refreshRate = std::nullopt)
-            EXCLUDES(mDisplayLock) {
-        std::scoped_lock lock(mDisplayLock);
-        ftl::FakeGuard guard(kMainThreadContext);
-        resyncToHardwareVsyncLocked(id, allowToEnable, refreshRate);
-    }
+    void resyncToHardwareVsync(bool allowToEnable, Fps refreshRate);
     void resync() EXCLUDES(mDisplayLock);
     void forceNextResync() { mLastResyncTime = 0; }
 
     // Passes a vsync sample to VsyncController. Returns true if
     // VsyncController detected that the vsync period changed and false
     // otherwise.
-    bool addResyncSample(PhysicalDisplayId, nsecs_t timestamp,
-                         std::optional<nsecs_t> hwcVsyncPeriod);
-    void addPresentFence(PhysicalDisplayId, std::shared_ptr<FenceTime>) EXCLUDES(mDisplayLock);
+    bool addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod);
+    void addPresentFence(std::shared_ptr<FenceTime>);
 
     // Layers are registered on creation, and unregistered when the weak reference expires.
     void registerLayer(Layer*);
@@ -233,22 +215,20 @@
     // Indicates that touch interaction is taking place.
     void onTouchHint();
 
-    void setDisplayPowerMode(PhysicalDisplayId, hal::PowerMode powerMode)
-            REQUIRES(kMainThreadContext);
+    void setDisplayPowerMode(hal::PowerMode powerMode);
 
-    std::shared_ptr<const VsyncSchedule> getVsyncSchedule(
-            std::optional<PhysicalDisplayId> idOpt = std::nullopt) const EXCLUDES(mDisplayLock);
-    std::shared_ptr<VsyncSchedule> getVsyncSchedule(
-            std::optional<PhysicalDisplayId> idOpt = std::nullopt) EXCLUDES(mDisplayLock) {
-        return std::const_pointer_cast<VsyncSchedule>(
-                static_cast<const Scheduler*>(this)->getVsyncSchedule(idOpt));
-    }
+    VsyncSchedule& getVsyncSchedule() { return *mVsyncSchedule; }
 
-    bool isVsyncInPhase(TimePoint expectedVsyncTime, Fps frameRate) const;
+    // Returns true if the given VSYNC timestamp is considered a valid VSYNC
+    // for the given UID.
+    bool isVsyncValid(TimePoint expectedVsyncTimestamp, uid_t uid) const;
+
+    // Checks whether a VSYNC timestamp is in phase for the given frame rate.
+    bool isVsyncInPhase(TimePoint timePoint, const Fps frameRate) const;
 
     void dump(utils::Dumper&) const;
     void dump(ConnectionHandle, std::string&) const;
-    void dumpVsync(std::string&) const EXCLUDES(mDisplayLock);
+    void dumpVsync(std::string&) const;
 
     // Returns the preferred refresh rate and frame rate for the leader display.
     FrameRateMode getPreferredDisplayMode();
@@ -286,10 +266,6 @@
         return mLayerHistory.getLayerFramerate(now, id);
     }
 
-    // IEventThreadCallback overrides:
-    bool isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const override;
-    Fps getLeaderRenderFrameRate(uid_t uid) const override;
-
 private:
     friend class TestableScheduler;
 
@@ -312,12 +288,6 @@
     void touchTimerCallback(TimerState);
     void displayPowerTimerCallback(TimerState);
 
-    void resyncToHardwareVsyncLocked(PhysicalDisplayId, bool allowToEnable,
-                                     std::optional<Fps> refreshRate = std::nullopt)
-            REQUIRES(kMainThreadContext, mDisplayLock);
-    void resyncAllToHardwareVsync(bool allowToEnable) EXCLUDES(mDisplayLock);
-    void setVsyncPeriod(const std::shared_ptr<VsyncSchedule>&, nsecs_t period, bool force)
-            REQUIRES(mDisplayLock);
     void setVsyncConfig(const VsyncConfig&, Period vsyncPeriod);
 
     // Chooses a leader among the registered displays, unless `leaderIdOpt` is specified. The new
@@ -329,12 +299,6 @@
     // caller on the main thread to avoid deadlock, since the timer thread locks it before exit.
     void demoteLeaderDisplay() REQUIRES(kMainThreadContext) EXCLUDES(mDisplayLock, mPolicyLock);
 
-    void registerDisplayInternal(PhysicalDisplayId, RefreshRateSelectorPtr,
-                                 std::shared_ptr<VsyncSchedule>) REQUIRES(kMainThreadContext)
-            EXCLUDES(mDisplayLock);
-
-    std::optional<Fps> getFrameRateOverrideLocked(uid_t) const REQUIRES(mDisplayLock);
-
     struct Policy;
 
     // Sets the S state of the policy to the T value under mPolicyLock, and chooses a display mode
@@ -372,6 +336,9 @@
 
     void dispatchCachedReportedMode() REQUIRES(mPolicyLock) EXCLUDES(mDisplayLock);
 
+    android::impl::EventThread::ThrottleVsyncCallback makeThrottleVsyncCallback() const;
+    android::impl::EventThread::GetVsyncPeriodFunction makeGetVsyncPeriodFunction() const;
+
     // Stores EventThread associated with a given VSyncSource, and an initial EventThreadConnection.
     struct Connection {
         sp<EventThreadConnection> connection;
@@ -388,6 +355,7 @@
     std::atomic<nsecs_t> mLastResyncTime = 0;
 
     const FeatureFlags mFeatures;
+    std::unique_ptr<VsyncSchedule> mVsyncSchedule;
 
     // Shifts the VSYNC phase during certain transactions and refresh rate changes.
     const sp<VsyncModulator> mVsyncModulator;
@@ -412,10 +380,6 @@
     display::PhysicalDisplayMap<PhysicalDisplayId, RefreshRateSelectorPtr> mRefreshRateSelectors
             GUARDED_BY(mDisplayLock) GUARDED_BY(kMainThreadContext);
 
-    // TODO (b/266715559): Store in the same map as mRefreshRateSelectors.
-    display::PhysicalDisplayMap<PhysicalDisplayId, std::shared_ptr<VsyncSchedule>> mVsyncSchedules
-            GUARDED_BY(mDisplayLock) GUARDED_BY(kMainThreadContext);
-
     ftl::Optional<PhysicalDisplayId> mLeaderDisplayId GUARDED_BY(mDisplayLock)
             GUARDED_BY(kMainThreadContext);
 
@@ -435,14 +399,6 @@
                 .value_or(std::cref(noLeader));
     }
 
-    std::shared_ptr<const VsyncSchedule> getVsyncScheduleLocked(
-            std::optional<PhysicalDisplayId> idOpt = std::nullopt) const REQUIRES(mDisplayLock);
-    std::shared_ptr<VsyncSchedule> getVsyncScheduleLocked(
-            std::optional<PhysicalDisplayId> idOpt = std::nullopt) REQUIRES(mDisplayLock) {
-        return std::const_pointer_cast<VsyncSchedule>(
-                static_cast<const Scheduler*>(this)->getVsyncScheduleLocked(idOpt));
-    }
-
     struct Policy {
         // Policy for choosing the display mode.
         LayerHistory::Summary contentRequirements;
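
modulateVsync above now dispatches directly through a pointer to a VsyncModulator member and only applies a config when one is returned. A toy, self-contained sketch of that pointer-to-member pattern; Modulator and VsyncConfig here are stand-ins, not the real types:

```cpp
#include <optional>

struct VsyncConfig { int sfOffset; };  // stand-in for the real VsyncConfig

struct Modulator {  // stand-in for VsyncModulator
    std::optional<VsyncConfig> onTransactionCommit() { return VsyncConfig{1}; }
    std::optional<VsyncConfig> onDisplayRefresh(bool usedGpu) {
        if (!usedGpu) return std::nullopt;
        return VsyncConfig{2};
    }
};

template <typename... Args,
          typename Handler = std::optional<VsyncConfig> (Modulator::*)(Args...)>
void modulate(Modulator& modulator, Handler handler, Args... args) {
    // Call the member function through the pointer-to-member; only act when
    // the modulator actually produced a new config.
    if (const auto config = (modulator.*handler)(args...)) {
        static_cast<void>(config->sfOffset);  // the real code applies the config here
    }
}

int main() {
    Modulator modulator;
    modulate(modulator, &Modulator::onTransactionCommit);
    modulate(modulator, &Modulator::onDisplayRefresh, true);
}
```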
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatch.h b/services/surfaceflinger/Scheduler/VSyncDispatch.h
index 77875e3..9520131 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatch.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatch.h
@@ -161,8 +161,7 @@
  */
 class VSyncCallbackRegistration {
 public:
-    VSyncCallbackRegistration(std::shared_ptr<VSyncDispatch>, VSyncDispatch::Callback,
-                              std::string callbackName);
+    VSyncCallbackRegistration(VSyncDispatch&, VSyncDispatch::Callback, std::string callbackName);
     ~VSyncCallbackRegistration();
 
     VSyncCallbackRegistration(VSyncCallbackRegistration&&);
@@ -178,7 +177,7 @@
     CancelResult cancel();
 
 private:
-    std::shared_ptr<VSyncDispatch> mDispatch;
+    std::reference_wrapper<VSyncDispatch> mDispatch;
     VSyncDispatch::CallbackToken mToken;
     bool mValidToken;
 };
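
mDispatch changes from shared_ptr ownership to std::reference_wrapper, which keeps the registration move-assignable (a plain `VSyncDispatch&` member could not be reseated). A self-contained sketch of that design, with toy Dispatch/Registration types standing in for VSyncDispatch/VSyncCallbackRegistration:

```cpp
#include <functional>
#include <string>
#include <utility>

struct Dispatch {  // stand-in for VSyncDispatch
    int registerCallback(std::string) { return 1; }
    void unregisterCallback(int) {}
};

class Registration {  // stand-in for VSyncCallbackRegistration
public:
    Registration(Dispatch& dispatch, std::string name)
          : mDispatch(dispatch), mToken(dispatch.registerCallback(std::move(name))) {}

    Registration(Registration&& other) noexcept
          : mDispatch(other.mDispatch), mToken(other.mToken), mValidToken(other.mValidToken) {
        other.mValidToken = false;
    }

    Registration& operator=(Registration&& other) noexcept {
        if (this == &other) return *this;
        if (mValidToken) mDispatch.get().unregisterCallback(mToken);
        mDispatch = other.mDispatch;  // reference_wrapper can be rebound here
        mToken = other.mToken;
        mValidToken = std::exchange(other.mValidToken, false);
        return *this;
    }

    ~Registration() {
        if (mValidToken) mDispatch.get().unregisterCallback(mToken);
    }

private:
    std::reference_wrapper<Dispatch> mDispatch;
    int mToken;
    bool mValidToken = true;
};
```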
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
index 26389eb..73d52cf 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
@@ -215,10 +215,10 @@
 }
 
 VSyncDispatchTimerQueue::VSyncDispatchTimerQueue(std::unique_ptr<TimeKeeper> tk,
-                                                 VsyncSchedule::TrackerPtr tracker,
-                                                 nsecs_t timerSlack, nsecs_t minVsyncDistance)
+                                                 VSyncTracker& tracker, nsecs_t timerSlack,
+                                                 nsecs_t minVsyncDistance)
       : mTimeKeeper(std::move(tk)),
-        mTracker(std::move(tracker)),
+        mTracker(tracker),
         mTimerSlack(timerSlack),
         mMinVsyncDistance(minVsyncDistance) {}
 
@@ -255,7 +255,7 @@
         }
 
         if (it != skipUpdateIt) {
-            callback->update(*mTracker, now);
+            callback->update(mTracker, now);
         }
         auto const wakeupTime = *callback->wakeupTime();
         if (!min || *min > wakeupTime) {
@@ -365,10 +365,10 @@
     auto const rearmImminent = now > mIntendedWakeupTime;
     if (CC_UNLIKELY(rearmImminent)) {
         callback->addPendingWorkloadUpdate(scheduleTiming);
-        return getExpectedCallbackTime(*mTracker, now, scheduleTiming);
+        return getExpectedCallbackTime(mTracker, now, scheduleTiming);
     }
 
-    const ScheduleResult result = callback->schedule(scheduleTiming, *mTracker, now);
+    const ScheduleResult result = callback->schedule(scheduleTiming, mTracker, now);
     if (!result.has_value()) {
         return {};
     }
@@ -434,15 +434,15 @@
     }
 }
 
-VSyncCallbackRegistration::VSyncCallbackRegistration(std::shared_ptr<VSyncDispatch> dispatch,
+VSyncCallbackRegistration::VSyncCallbackRegistration(VSyncDispatch& dispatch,
                                                      VSyncDispatch::Callback callback,
                                                      std::string callbackName)
-      : mDispatch(std::move(dispatch)),
-        mToken(mDispatch->registerCallback(std::move(callback), std::move(callbackName))),
+      : mDispatch(dispatch),
+        mToken(dispatch.registerCallback(std::move(callback), std::move(callbackName))),
         mValidToken(true) {}
 
 VSyncCallbackRegistration::VSyncCallbackRegistration(VSyncCallbackRegistration&& other)
-      : mDispatch(std::move(other.mDispatch)),
+      : mDispatch(other.mDispatch),
         mToken(std::move(other.mToken)),
         mValidToken(std::move(other.mValidToken)) {
     other.mValidToken = false;
@@ -457,28 +457,28 @@
 }
 
 VSyncCallbackRegistration::~VSyncCallbackRegistration() {
-    if (mValidToken) mDispatch->unregisterCallback(mToken);
+    if (mValidToken) mDispatch.get().unregisterCallback(mToken);
 }
 
 ScheduleResult VSyncCallbackRegistration::schedule(VSyncDispatch::ScheduleTiming scheduleTiming) {
     if (!mValidToken) {
         return std::nullopt;
     }
-    return mDispatch->schedule(mToken, scheduleTiming);
+    return mDispatch.get().schedule(mToken, scheduleTiming);
 }
 
 ScheduleResult VSyncCallbackRegistration::update(VSyncDispatch::ScheduleTiming scheduleTiming) {
     if (!mValidToken) {
         return std::nullopt;
     }
-    return mDispatch->update(mToken, scheduleTiming);
+    return mDispatch.get().update(mToken, scheduleTiming);
 }
 
 CancelResult VSyncCallbackRegistration::cancel() {
     if (!mValidToken) {
         return CancelResult::Error;
     }
-    return mDispatch->cancel(mToken);
+    return mDispatch.get().cancel(mToken);
 }
 
 } // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
index 6499d69..c3af136 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
@@ -26,11 +26,11 @@
 #include <android-base/thread_annotations.h>
 
 #include "VSyncDispatch.h"
-#include "VsyncSchedule.h"
 
 namespace android::scheduler {
 
 class TimeKeeper;
+class VSyncTracker;
 
 // VSyncDispatchTimerQueueEntry is a helper class representing internal state for each entry in
 // VSyncDispatchTimerQueue hoisted to public for unit testing.
@@ -120,8 +120,8 @@
     //                                  should be grouped into one wakeup.
     // \param[in] minVsyncDistance      The minimum distance between two vsync estimates before the
     //                                  vsyncs are considered the same vsync event.
-    VSyncDispatchTimerQueue(std::unique_ptr<TimeKeeper>, VsyncSchedule::TrackerPtr,
-                            nsecs_t timerSlack, nsecs_t minVsyncDistance);
+    VSyncDispatchTimerQueue(std::unique_ptr<TimeKeeper>, VSyncTracker&, nsecs_t timerSlack,
+                            nsecs_t minVsyncDistance);
     ~VSyncDispatchTimerQueue();
 
     CallbackToken registerCallback(Callback, std::string callbackName) final;
@@ -148,7 +148,7 @@
 
     static constexpr nsecs_t kInvalidTime = std::numeric_limits<int64_t>::max();
     std::unique_ptr<TimeKeeper> const mTimeKeeper;
-    VsyncSchedule::TrackerPtr mTracker;
+    VSyncTracker& mTracker;
     nsecs_t const mTimerSlack;
     nsecs_t const mMinVsyncDistance;
 
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
index a3b8a56..f8cb323 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -33,7 +33,6 @@
 #include <cutils/properties.h>
 #include <gui/TraceUtils.h>
 #include <utils/Log.h>
-#include <utils/Trace.h>
 
 #include "RefreshRateSelector.h"
 #include "VSyncPredictor.h"
@@ -41,16 +40,14 @@
 namespace android::scheduler {
 
 using base::StringAppendF;
-using base::StringPrintf;
 
 static auto constexpr kMaxPercent = 100u;
 
 VSyncPredictor::~VSyncPredictor() = default;
 
-VSyncPredictor::VSyncPredictor(std::string name, nsecs_t idealPeriod, size_t historySize,
+VSyncPredictor::VSyncPredictor(nsecs_t idealPeriod, size_t historySize,
                                size_t minimumSamplesForPrediction, uint32_t outlierTolerancePercent)
-      : mName(name),
-        mTraceOn(property_get_bool("debug.sf.vsp_trace", false)),
+      : mTraceOn(property_get_bool("debug.sf.vsp_trace", false)),
         kHistorySize(historySize),
         kMinimumSamplesForPrediction(minimumSamplesForPrediction),
         kOutlierTolerancePercent(std::min(outlierTolerancePercent, kMaxPercent)),
@@ -60,14 +57,12 @@
 
 inline void VSyncPredictor::traceInt64If(const char* name, int64_t value) const {
     if (CC_UNLIKELY(mTraceOn)) {
-        traceInt64(name, value);
+        ATRACE_INT64(name, value);
     }
 }
 
 inline void VSyncPredictor::traceInt64(const char* name, int64_t value) const {
-    // TODO (b/266817103): Pass in PhysicalDisplayId and use ftl::Concat to
-    // avoid unnecessary allocations.
-    ATRACE_INT64(StringPrintf("%s %s", name, mName.c_str()).c_str(), value);
+    ATRACE_INT64(name, value);
 }
 
 inline size_t VSyncPredictor::next(size_t i) const {
@@ -219,8 +214,8 @@
 
     it->second = {anticipatedPeriod, intercept};
 
-    ALOGV("model update ts %s: %" PRId64 " slope: %" PRId64 " intercept: %" PRId64, mName.c_str(),
-          timestamp, anticipatedPeriod, intercept);
+    ALOGV("model update ts: %" PRId64 " slope: %" PRId64 " intercept: %" PRId64, timestamp,
+          anticipatedPeriod, intercept);
     return true;
 }
 
@@ -287,6 +282,13 @@
 }
 
 bool VSyncPredictor::isVSyncInPhaseLocked(nsecs_t timePoint, unsigned divisor) const {
+    const TimePoint now = TimePoint::now();
+    const auto getTimePointIn = [](TimePoint now, nsecs_t timePoint) -> float {
+        return ticks<std::milli, float>(TimePoint::fromNs(timePoint) - now);
+    };
+    ATRACE_FORMAT("%s timePoint in: %.2f divisor: %u", __func__, getTimePointIn(now, timePoint),
+                  divisor);
+
     struct VsyncError {
         nsecs_t vsyncTimestamp;
         float error;
@@ -309,6 +311,7 @@
     if (knownTimestampIter == mRateDivisorKnownTimestampMap.end()) {
         const auto vsync = nextAnticipatedVSyncTimeFromLocked(justBeforeTimePoint);
         mRateDivisorKnownTimestampMap[dividedPeriod] = vsync;
+        ATRACE_FORMAT_INSTANT("(first) knownVsync in: %.2f", getTimePointIn(now, vsync));
         return true;
     }
 
@@ -328,11 +331,13 @@
 
     const auto minVsyncError = std::min_element(vsyncs.begin(), vsyncs.end());
     mRateDivisorKnownTimestampMap[dividedPeriod] = minVsyncError->vsyncTimestamp;
+    ATRACE_FORMAT_INSTANT("knownVsync in: %.2f",
+                          getTimePointIn(now, minVsyncError->vsyncTimestamp));
     return std::abs(minVsyncError->vsyncTimestamp - timePoint) < period / 2;
 }
 
 void VSyncPredictor::setDivisor(unsigned divisor) {
-    ALOGV("%s %s: %d", __func__, mName.c_str(), divisor);
+    ALOGV("%s: %d", __func__, divisor);
     std::lock_guard lock(mMutex);
     mDivisor = divisor;
 }
@@ -348,7 +353,7 @@
 }
 
 void VSyncPredictor::setPeriod(nsecs_t period) {
-    ATRACE_FORMAT("%s %s", __func__, mName.c_str());
+    ATRACE_CALL();
     traceInt64("VSP-setPeriod", period);
 
     std::lock_guard lock(mMutex);
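
isVSyncInPhaseLocked above decides whether a candidate vsync lands on one of the vsyncs that a divided-down render rate would actually use, by comparing against a remembered in-phase vsync for that divisor. A simplified standalone model of the same question, taking the anchor vsync and period explicitly (the real class tracks these internally per divisor):

```cpp
#include <cstdint>

using nsecs_t = int64_t;

// Simplified, illustrative model of "is this vsync in phase for a divisor".
bool isVsyncInPhase(nsecs_t vsync, nsecs_t knownInPhaseVsync, nsecs_t period, unsigned divisor) {
    if (divisor <= 1) return true;
    const nsecs_t dividedPeriod = period * static_cast<nsecs_t>(divisor);

    // Fold the distance from the known in-phase vsync into one divided period.
    nsecs_t offset = (vsync - knownInPhaseVsync) % dividedPeriod;
    if (offset < 0) offset += dividedPeriod;

    // In phase if we are within half a display period of a multiple of the
    // divided period, i.e. of a vsync the divided rate would present on.
    return offset < period / 2 || dividedPeriod - offset < period / 2;
}
```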
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
index 1ded54f..305cdb0 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.h
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -29,15 +29,14 @@
 class VSyncPredictor : public VSyncTracker {
 public:
     /*
-     * \param [in] name The name of the display this corresponds to.
      * \param [in] idealPeriod  The initial ideal period to use.
      * \param [in] historySize  The internal amount of entries to store in the model.
      * \param [in] minimumSamplesForPrediction The minimum number of samples to collect before
      * predicting. \param [in] outlierTolerancePercent a number 0 to 100 that will be used to filter
      * samples that fall outlierTolerancePercent from an anticipated vsync event.
      */
-    VSyncPredictor(std::string name, nsecs_t idealPeriod, size_t historySize,
-                   size_t minimumSamplesForPrediction, uint32_t outlierTolerancePercent);
+    VSyncPredictor(nsecs_t idealPeriod, size_t historySize, size_t minimumSamplesForPrediction,
+                   uint32_t outlierTolerancePercent);
     ~VSyncPredictor();
 
     bool addVsyncTimestamp(nsecs_t timestamp) final EXCLUDES(mMutex);
@@ -77,8 +76,6 @@
     VSyncPredictor& operator=(VSyncPredictor const&) = delete;
     void clearTimestamps() REQUIRES(mMutex);
 
-    const std::string mName;
-
     inline void traceInt64If(const char* name, int64_t value) const;
     inline void traceInt64(const char* name, int64_t value) const;
     bool const mTraceOn;
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.cpp b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
index a831f66..b5f212e 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
@@ -21,7 +21,6 @@
 
 #include <assert.h>
 #include <cutils/properties.h>
-#include <gui/TraceUtils.h>
 #include <log/log.h>
 #include <utils/Trace.h>
 
@@ -33,7 +32,6 @@
 namespace android::scheduler {
 
 using base::StringAppendF;
-using base::StringPrintf;
 
 VsyncController::~VsyncController() = default;
 
@@ -41,12 +39,12 @@
     return systemTime(SYSTEM_TIME_MONOTONIC);
 }
 
-VSyncReactor::VSyncReactor(std::string name, std::unique_ptr<Clock> clock, VSyncTracker& tracker,
+VSyncReactor::VSyncReactor(std::unique_ptr<Clock> clock, VSyncTracker& tracker,
                            size_t pendingFenceLimit, bool supportKernelIdleTimer)
-      : mName(name),
-        mClock(std::move(clock)),
+      : mClock(std::move(clock)),
         mTracker(tracker),
         mPendingLimit(pendingFenceLimit),
+        // TODO(adyabr): change mSupportKernelIdleTimer when the active display changes
         mSupportKernelIdleTimer(supportKernelIdleTimer) {}
 
 VSyncReactor::~VSyncReactor() = default;
@@ -116,7 +114,7 @@
 }
 
 void VSyncReactor::startPeriodTransitionInternal(nsecs_t newPeriod) {
-    ATRACE_FORMAT("%s %s", __func__, mName.c_str());
+    ATRACE_CALL();
     mPeriodConfirmationInProgress = true;
     mPeriodTransitioningTo = newPeriod;
     mMoreSamplesNeeded = true;
@@ -124,20 +122,18 @@
 }
 
 void VSyncReactor::endPeriodTransition() {
-    ATRACE_FORMAT("%s %s", __func__, mName.c_str());
+    ATRACE_CALL();
     mPeriodTransitioningTo.reset();
     mPeriodConfirmationInProgress = false;
     mLastHwVsync.reset();
 }
 
-void VSyncReactor::startPeriodTransition(nsecs_t period, bool force) {
-    // TODO (b/266817103): Pass in PhysicalDisplayId and use ftl::Concat to
-    // avoid unnecessary allocations.
-    ATRACE_INT64(StringPrintf("VSR-startPeriodTransition %s", mName.c_str()).c_str(), period);
+void VSyncReactor::startPeriodTransition(nsecs_t period) {
+    ATRACE_INT64("VSR-startPeriodTransition", period);
     std::lock_guard lock(mMutex);
     mLastHwVsync.reset();
 
-    if (!mSupportKernelIdleTimer && period == mTracker.currentPeriod() && !force) {
+    if (!mSupportKernelIdleTimer && period == mTracker.currentPeriod()) {
         endPeriodTransition();
         setIgnorePresentFencesInternal(false);
         mMoreSamplesNeeded = false;
@@ -185,7 +181,7 @@
 
     std::lock_guard lock(mMutex);
     if (periodConfirmed(timestamp, hwcVsyncPeriod)) {
-        ATRACE_FORMAT("VSR %s: period confirmed", mName.c_str());
+        ATRACE_NAME("VSR: period confirmed");
         if (mPeriodTransitioningTo) {
             mTracker.setPeriod(*mPeriodTransitioningTo);
             *periodFlushed = true;
@@ -199,12 +195,12 @@
         endPeriodTransition();
         mMoreSamplesNeeded = mTracker.needsMoreSamples();
     } else if (mPeriodConfirmationInProgress) {
-        ATRACE_FORMAT("VSR %s: still confirming period", mName.c_str());
+        ATRACE_NAME("VSR: still confirming period");
         mLastHwVsync = timestamp;
         mMoreSamplesNeeded = true;
         *periodFlushed = false;
     } else {
-        ATRACE_FORMAT("VSR %s: adding sample", mName.c_str());
+        ATRACE_NAME("VSR: adding sample");
         *periodFlushed = false;
         mTracker.addVsyncTimestamp(timestamp);
         mMoreSamplesNeeded = mTracker.needsMoreSamples();
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.h b/services/surfaceflinger/Scheduler/VSyncReactor.h
index fd9ca42..4501487 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.h
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.h
@@ -22,7 +22,6 @@
 #include <vector>
 
 #include <android-base/thread_annotations.h>
-#include <ui/DisplayId.h>
 #include <ui/FenceTime.h>
 
 #include <scheduler/TimeKeeper.h>
@@ -38,14 +37,14 @@
 // TODO (b/145217110): consider renaming.
 class VSyncReactor : public VsyncController {
 public:
-    VSyncReactor(std::string name, std::unique_ptr<Clock> clock, VSyncTracker& tracker,
-                 size_t pendingFenceLimit, bool supportKernelIdleTimer);
+    VSyncReactor(std::unique_ptr<Clock> clock, VSyncTracker& tracker, size_t pendingFenceLimit,
+                 bool supportKernelIdleTimer);
     ~VSyncReactor();
 
     bool addPresentFence(std::shared_ptr<FenceTime>) final;
     void setIgnorePresentFences(bool ignore) final;
 
-    void startPeriodTransition(nsecs_t period, bool force) final;
+    void startPeriodTransition(nsecs_t period) final;
 
     bool addHwVsyncTimestamp(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
                              bool* periodFlushed) final;
@@ -62,7 +61,6 @@
     bool periodConfirmed(nsecs_t vsync_timestamp, std::optional<nsecs_t> hwcVsyncPeriod)
             REQUIRES(mMutex);
 
-    const std::string mName;
     std::unique_ptr<Clock> const mClock;
     VSyncTracker& mTracker;
     size_t const mPendingLimit;
diff --git a/services/surfaceflinger/Scheduler/VsyncController.h b/services/surfaceflinger/Scheduler/VsyncController.h
index 9177899..726a420 100644
--- a/services/surfaceflinger/Scheduler/VsyncController.h
+++ b/services/surfaceflinger/Scheduler/VsyncController.h
@@ -63,9 +63,8 @@
      * itself. The controller will end the period transition internally.
      *
      * \param [in] period   The period that the system is changing into.
-     * \param [in] force    True to recalibrate even if period matches the existing period.
      */
-    virtual void startPeriodTransition(nsecs_t period, bool force) = 0;
+    virtual void startPeriodTransition(nsecs_t period) = 0;
 
     /*
      * Tells the tracker to stop using present fences to get a vsync signal.
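
The startPeriodTransition contract above (the controller recalibrates and ends the transition internally, now without a force flag) is carried out by VSyncReactor's confirmation loop earlier in this change. A toy, self-contained model of that handshake; the tolerance and all names are illustrative, not the VSyncReactor values:

```cpp
#include <cstdint>
#include <cstdlib>
#include <optional>

using nsecs_t = int64_t;

// Toy model: after startPeriodTransition(), incoming HW vsync timestamps are
// only used to confirm the new period; once the gap between two consecutive
// samples is roughly one new period, the transition ends internally.
class PeriodTransitionModel {
public:
    void startPeriodTransition(nsecs_t period) {
        mPendingPeriod = period;
        mLastHwVsync.reset();
    }

    // Returns true when the pending period has been confirmed ("flushed").
    bool addHwVsyncTimestamp(nsecs_t timestamp) {
        if (!mPendingPeriod) return false;
        if (mLastHwVsync) {
            const nsecs_t observed = timestamp - *mLastHwVsync;
            const nsecs_t tolerance = *mPendingPeriod / 20;  // ~5%, arbitrary here
            if (std::abs(observed - *mPendingPeriod) < tolerance) {
                mPendingPeriod.reset();  // the transition ends internally
                mLastHwVsync.reset();
                return true;
            }
        }
        mLastHwVsync = timestamp;
        return false;
    }

private:
    std::optional<nsecs_t> mPendingPeriod;
    std::optional<nsecs_t> mLastHwVsync;
};
```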
diff --git a/services/surfaceflinger/Scheduler/VsyncSchedule.cpp b/services/surfaceflinger/Scheduler/VsyncSchedule.cpp
index 951c1ec..5245556 100644
--- a/services/surfaceflinger/Scheduler/VsyncSchedule.cpp
+++ b/services/surfaceflinger/Scheduler/VsyncSchedule.cpp
@@ -16,13 +16,13 @@
 
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 
+#include <ftl/fake_guard.h>
 #include <scheduler/Fps.h>
 #include <scheduler/Timer.h>
 
 #include "VsyncSchedule.h"
 
 #include "ISchedulerCallback.h"
-#include "Scheduler.h"
 #include "Utils/Dumper.h"
 #include "VSyncDispatchTimerQueue.h"
 #include "VSyncPredictor.h"
@@ -42,8 +42,8 @@
     }
 
 public:
-    explicit PredictedVsyncTracer(std::shared_ptr<VsyncDispatch> dispatch)
-          : mRegistration(std::move(dispatch), makeVsyncCallback(), __func__) {
+    explicit PredictedVsyncTracer(VsyncDispatch& dispatch)
+          : mRegistration(dispatch, makeVsyncCallback(), __func__) {
         schedule();
     }
 
@@ -54,20 +54,16 @@
     VSyncCallbackRegistration mRegistration;
 };
 
-VsyncSchedule::VsyncSchedule(PhysicalDisplayId id, FeatureFlags features)
-      : mId(id),
-        mTracker(createTracker(id)),
-        mDispatch(createDispatch(mTracker)),
-        mController(createController(id, *mTracker, features)) {
-    if (features.test(Feature::kTracePredictedVsync)) {
-        mTracer = std::make_unique<PredictedVsyncTracer>(mDispatch);
-    }
-}
+VsyncSchedule::VsyncSchedule(FeatureFlags features)
+      : mTracker(createTracker()),
+        mDispatch(createDispatch(*mTracker)),
+        mController(createController(*mTracker, features)),
+        mTracer(features.test(Feature::kTracePredictedVsync)
+                        ? std::make_unique<PredictedVsyncTracer>(*mDispatch)
+                        : nullptr) {}
 
-VsyncSchedule::VsyncSchedule(PhysicalDisplayId id, TrackerPtr tracker, DispatchPtr dispatch,
-                             ControllerPtr controller)
-      : mId(id),
-        mTracker(std::move(tracker)),
+VsyncSchedule::VsyncSchedule(TrackerPtr tracker, DispatchPtr dispatch, ControllerPtr controller)
+      : mTracker(std::move(tracker)),
         mDispatch(std::move(dispatch)),
         mController(std::move(controller)) {}
 
@@ -86,7 +82,10 @@
     {
         std::lock_guard<std::mutex> lock(mHwVsyncLock);
         dumper.dump("hwVsyncState", ftl::enum_string(mHwVsyncState));
-        dumper.dump("lastHwVsyncState", ftl::enum_string(mLastHwVsyncState));
+
+        ftl::FakeGuard guard(kMainThreadContext);
+        dumper.dump("pendingHwVsyncState", ftl::enum_string(mPendingHwVsyncState));
+        dumper.eol();
     }
 
     out.append("VsyncController:\n");
@@ -96,72 +95,103 @@
     mDispatch->dump(out);
 }
 
-VsyncSchedule::TrackerPtr VsyncSchedule::createTracker(PhysicalDisplayId id) {
+VsyncSchedule::TrackerPtr VsyncSchedule::createTracker() {
     // TODO(b/144707443): Tune constants.
     constexpr nsecs_t kInitialPeriod = (60_Hz).getPeriodNsecs();
     constexpr size_t kHistorySize = 20;
     constexpr size_t kMinSamplesForPrediction = 6;
     constexpr uint32_t kDiscardOutlierPercent = 20;
 
-    return std::make_unique<VSyncPredictor>(to_string(id), kInitialPeriod, kHistorySize,
-                                            kMinSamplesForPrediction, kDiscardOutlierPercent);
+    return std::make_unique<VSyncPredictor>(kInitialPeriod, kHistorySize, kMinSamplesForPrediction,
+                                            kDiscardOutlierPercent);
 }
 
-VsyncSchedule::DispatchPtr VsyncSchedule::createDispatch(TrackerPtr tracker) {
+VsyncSchedule::DispatchPtr VsyncSchedule::createDispatch(VsyncTracker& tracker) {
     using namespace std::chrono_literals;
 
     // TODO(b/144707443): Tune constants.
     constexpr std::chrono::nanoseconds kGroupDispatchWithin = 500us;
     constexpr std::chrono::nanoseconds kSnapToSameVsyncWithin = 3ms;
 
-    return std::make_unique<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), std::move(tracker),
+    return std::make_unique<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
                                                      kGroupDispatchWithin.count(),
                                                      kSnapToSameVsyncWithin.count());
 }
 
-VsyncSchedule::ControllerPtr VsyncSchedule::createController(PhysicalDisplayId id,
-                                                             VsyncTracker& tracker,
+VsyncSchedule::ControllerPtr VsyncSchedule::createController(VsyncTracker& tracker,
                                                              FeatureFlags features) {
     // TODO(b/144707443): Tune constants.
     constexpr size_t kMaxPendingFences = 20;
     const bool hasKernelIdleTimer = features.test(Feature::kKernelIdleTimer);
 
-    auto reactor = std::make_unique<VSyncReactor>(to_string(id), std::make_unique<SystemClock>(),
-                                                  tracker, kMaxPendingFences, hasKernelIdleTimer);
+    auto reactor = std::make_unique<VSyncReactor>(std::make_unique<SystemClock>(), tracker,
+                                                  kMaxPendingFences, hasKernelIdleTimer);
 
     reactor->setIgnorePresentFences(!features.test(Feature::kPresentFences));
     return reactor;
 }
 
+void VsyncSchedule::startPeriodTransition(ISchedulerCallback& callback, Period period) {
+    std::lock_guard<std::mutex> lock(mHwVsyncLock);
+    mController->startPeriodTransition(period.ns());
+    enableHardwareVsyncLocked(callback);
+}
+
+bool VsyncSchedule::addResyncSample(ISchedulerCallback& callback, TimePoint timestamp,
+                                    ftl::Optional<Period> hwcVsyncPeriod) {
+    bool needsHwVsync = false;
+    bool periodFlushed = false;
+    {
+        std::lock_guard<std::mutex> lock(mHwVsyncLock);
+        if (mHwVsyncState == HwVsyncState::Enabled) {
+            needsHwVsync = mController->addHwVsyncTimestamp(timestamp.ns(),
+                                                            hwcVsyncPeriod.transform(&Period::ns),
+                                                            &periodFlushed);
+        }
+    }
+    if (needsHwVsync) {
+        enableHardwareVsync(callback);
+    } else {
+        disableHardwareVsync(callback, false /* disallow */);
+    }
+    return periodFlushed;
+}
+
 void VsyncSchedule::enableHardwareVsync(ISchedulerCallback& callback) {
     std::lock_guard<std::mutex> lock(mHwVsyncLock);
+    enableHardwareVsyncLocked(callback);
+}
+
+void VsyncSchedule::enableHardwareVsyncLocked(ISchedulerCallback& callback) {
     if (mHwVsyncState == HwVsyncState::Disabled) {
         getTracker().resetModel();
-        callback.setVsyncEnabled(mId, true);
+        callback.setVsyncEnabled(true);
         mHwVsyncState = HwVsyncState::Enabled;
-        mLastHwVsyncState = HwVsyncState::Enabled;
     }
 }
 
 void VsyncSchedule::disableHardwareVsync(ISchedulerCallback& callback, bool disallow) {
     std::lock_guard<std::mutex> lock(mHwVsyncLock);
     if (mHwVsyncState == HwVsyncState::Enabled) {
-        callback.setVsyncEnabled(mId, false);
-        mLastHwVsyncState = HwVsyncState::Disabled;
+        callback.setVsyncEnabled(false);
     }
     mHwVsyncState = disallow ? HwVsyncState::Disallowed : HwVsyncState::Disabled;
 }
 
-bool VsyncSchedule::isHardwareVsyncAllowed() const {
+bool VsyncSchedule::isHardwareVsyncAllowed(bool makeAllowed) {
     std::lock_guard<std::mutex> lock(mHwVsyncLock);
+    if (makeAllowed && mHwVsyncState == HwVsyncState::Disallowed) {
+        mHwVsyncState = HwVsyncState::Disabled;
+    }
     return mHwVsyncState != HwVsyncState::Disallowed;
 }
 
-void VsyncSchedule::allowHardwareVsync() {
-    std::lock_guard<std::mutex> lock(mHwVsyncLock);
-    if (mHwVsyncState == HwVsyncState::Disallowed) {
-        mHwVsyncState = HwVsyncState::Disabled;
-    }
+void VsyncSchedule::setPendingHardwareVsyncState(bool enabled) {
+    mPendingHwVsyncState = enabled ? HwVsyncState::Enabled : HwVsyncState::Disabled;
+}
+
+bool VsyncSchedule::getPendingHardwareVsyncState() const {
+    return mPendingHwVsyncState == HwVsyncState::Enabled;
 }
 
 } // namespace android::scheduler
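
The enable/disable/allow logic above reduces to a small state machine over HwVsyncState. The compact model below mirrors those transitions; it deliberately omits the mHwVsyncLock guard, the tracker reset, and the ISchedulerCallback notification that the real methods perform:

```cpp
// Compact model of the hardware VSYNC state transitions in VsyncSchedule.
enum class HwVsyncState { Enabled, Disabled, Disallowed };

HwVsyncState onEnable(HwVsyncState state) {
    // Enabling only takes effect from Disabled; Disallowed stays Disallowed.
    return state == HwVsyncState::Disabled ? HwVsyncState::Enabled : state;
}

HwVsyncState onDisable(HwVsyncState, bool disallow) {
    // Disabling always lands in Disabled, or Disallowed if requested.
    return disallow ? HwVsyncState::Disallowed : HwVsyncState::Disabled;
}

bool isAllowed(HwVsyncState& state, bool makeAllowed) {
    // isHardwareVsyncAllowed(makeAllowed) folds in the old allowHardwareVsync().
    if (makeAllowed && state == HwVsyncState::Disallowed) state = HwVsyncState::Disabled;
    return state != HwVsyncState::Disallowed;
}
```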
diff --git a/services/surfaceflinger/Scheduler/VsyncSchedule.h b/services/surfaceflinger/Scheduler/VsyncSchedule.h
index ffb7ad5..d88f1d1 100644
--- a/services/surfaceflinger/Scheduler/VsyncSchedule.h
+++ b/services/surfaceflinger/Scheduler/VsyncSchedule.h
@@ -19,10 +19,11 @@
 #include <memory>
 #include <string>
 
+#include <ThreadContext.h>
 #include <ftl/enum.h>
+#include <ftl/optional.h>
 #include <scheduler/Features.h>
 #include <scheduler/Time.h>
-#include <ui/DisplayId.h>
 
 namespace android {
 class EventThreadTest;
@@ -47,67 +48,79 @@
 // Schedule that synchronizes to hardware VSYNC of a physical display.
 class VsyncSchedule {
 public:
-    VsyncSchedule(PhysicalDisplayId, FeatureFlags);
+    explicit VsyncSchedule(FeatureFlags);
     ~VsyncSchedule();
 
     Period period() const;
     TimePoint vsyncDeadlineAfter(TimePoint) const;
 
+    // Inform the schedule that the period is changing and the schedule needs to recalibrate
+    // itself. The schedule will end the period transition internally. This will
+    // enable hardware VSYNCs in order to calibrate.
+    //
+    // \param [in] period   The period that the system is changing into.
+    void startPeriodTransition(ISchedulerCallback&, Period period);
+
+    // Pass a VSYNC sample to VsyncController. Return true if
+    // VsyncController detected that the VSYNC period changed. Enable or disable
+    // hardware VSYNCs depending on whether more samples are needed.
+    bool addResyncSample(ISchedulerCallback&, TimePoint timestamp,
+                         ftl::Optional<Period> hwcVsyncPeriod);
+
     // TODO(b/185535769): Hide behind API.
     const VsyncTracker& getTracker() const { return *mTracker; }
     VsyncTracker& getTracker() { return *mTracker; }
     VsyncController& getController() { return *mController; }
 
-    // TODO(b/185535769): Once these are hidden behind the API, they may no
-    // longer need to be shared_ptrs.
-    using DispatchPtr = std::shared_ptr<VsyncDispatch>;
-    using TrackerPtr = std::shared_ptr<VsyncTracker>;
-
     // TODO(b/185535769): Remove once VsyncSchedule owns all registrations.
-    DispatchPtr getDispatch() { return mDispatch; }
+    VsyncDispatch& getDispatch() { return *mDispatch; }
 
     void dump(std::string&) const;
 
-    // Turn on hardware vsyncs, unless mHwVsyncState is Disallowed, in which
+    // Turn on hardware VSYNCs, unless mHwVsyncState is Disallowed, in which
     // case this call is ignored.
     void enableHardwareVsync(ISchedulerCallback&) EXCLUDES(mHwVsyncLock);
 
-    // Disable hardware vsyncs. If `disallow` is true, future calls to
+    // Disable hardware VSYNCs. If `disallow` is true, future calls to
     // enableHardwareVsync are ineffective until allowHardwareVsync is called.
     void disableHardwareVsync(ISchedulerCallback&, bool disallow) EXCLUDES(mHwVsyncLock);
 
-    // Restore the ability to enable hardware vsync.
-    void allowHardwareVsync() EXCLUDES(mHwVsyncLock);
-
-    // If true, enableHardwareVsync can enable hardware vsync (if not already
-    // enabled). If false, enableHardwareVsync does nothing.
-    bool isHardwareVsyncAllowed() const EXCLUDES(mHwVsyncLock);
+    // Returns true if enableHardwareVsync can enable hardware VSYNC (if not
+    // already enabled); returns false if enableHardwareVsync does nothing. If
+    // makeAllowed is true, a prior Disallowed state is reset to Disabled first.
+    bool isHardwareVsyncAllowed(bool makeAllowed) EXCLUDES(mHwVsyncLock);
 
-protected:
-    using ControllerPtr = std::unique_ptr<VsyncController>;
+    void setPendingHardwareVsyncState(bool enabled) REQUIRES(kMainThreadContext);
 
-    // For tests.
-    VsyncSchedule(PhysicalDisplayId, TrackerPtr, DispatchPtr, ControllerPtr);
+    bool getPendingHardwareVsyncState() const REQUIRES(kMainThreadContext);
 
 private:
     friend class TestableScheduler;
     friend class android::EventThreadTest;
     friend class android::fuzz::SchedulerFuzzer;
 
-    static TrackerPtr createTracker(PhysicalDisplayId);
-    static DispatchPtr createDispatch(TrackerPtr);
-    static ControllerPtr createController(PhysicalDisplayId, VsyncTracker&, FeatureFlags);
+    using TrackerPtr = std::unique_ptr<VsyncTracker>;
+    using DispatchPtr = std::unique_ptr<VsyncDispatch>;
+    using ControllerPtr = std::unique_ptr<VsyncController>;
+
+    // For tests.
+    VsyncSchedule(TrackerPtr, DispatchPtr, ControllerPtr);
+
+    static TrackerPtr createTracker();
+    static DispatchPtr createDispatch(VsyncTracker&);
+    static ControllerPtr createController(VsyncTracker&, FeatureFlags);
+
+    void enableHardwareVsyncLocked(ISchedulerCallback&) REQUIRES(mHwVsyncLock);
 
     mutable std::mutex mHwVsyncLock;
     enum class HwVsyncState {
-        // Hardware vsyncs are currently enabled.
+        // Hardware VSYNCs are currently enabled.
         Enabled,
 
-        // Hardware vsyncs are currently disabled. They can be enabled by a call
+        // Hardware VSYNCs are currently disabled. They can be enabled by a call
         // to `enableHardwareVsync`.
         Disabled,
 
-        // Hardware vsyncs are not currently allowed (e.g. because the display
+        // Hardware VSYNCs are not currently allowed (e.g. because the display
         // is off).
         Disallowed,
 
@@ -115,19 +128,17 @@
     };
     HwVsyncState mHwVsyncState GUARDED_BY(mHwVsyncLock) = HwVsyncState::Disallowed;
 
-    // The last state, which may be the current state, or the state prior to setting to Disallowed.
-    HwVsyncState mLastHwVsyncState GUARDED_BY(mHwVsyncLock) = HwVsyncState::Disabled;
+    // Pending state, in case an attempt is made to set the state while the
+    // device is off.
+    HwVsyncState mPendingHwVsyncState GUARDED_BY(kMainThreadContext) = HwVsyncState::Disabled;
 
     class PredictedVsyncTracer;
     using TracerPtr = std::unique_ptr<PredictedVsyncTracer>;
 
-    const PhysicalDisplayId mId;
-
-    // Effectively const except in move constructor.
-    TrackerPtr mTracker;
-    DispatchPtr mDispatch;
-    ControllerPtr mController;
-    TracerPtr mTracer;
+    const TrackerPtr mTracker;
+    const DispatchPtr mDispatch;
+    const ControllerPtr mController;
+    const TracerPtr mTracer;
 };
 
 } // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/include/scheduler/Time.h b/services/surfaceflinger/Scheduler/include/scheduler/Time.h
index bd4e3c2..ba1459a 100644
--- a/services/surfaceflinger/Scheduler/include/scheduler/Time.h
+++ b/services/surfaceflinger/Scheduler/include/scheduler/Time.h
@@ -26,7 +26,7 @@
 namespace scheduler {
 
 // TODO(b/185535769): Pull Clock.h to libscheduler to reuse this.
-using SchedulerClock = std::chrono::high_resolution_clock;
+using SchedulerClock = std::chrono::steady_clock;
 static_assert(SchedulerClock::is_steady);
 
 } // namespace scheduler
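
high_resolution_clock is allowed to alias system_clock, which is not steady, so the static_assert on SchedulerClock::is_steady was only guaranteed to hold once the alias moved to steady_clock. A small self-contained illustration of the guarantee this buys:

```cpp
#include <chrono>

using SchedulerClock = std::chrono::steady_clock;
static_assert(SchedulerClock::is_steady, "scheduler timestamps must be monotonic");

// A steady clock never runs backwards, so elapsed durations stay non-negative
// even if the wall clock is adjusted while we measure.
std::chrono::nanoseconds elapsedSince(SchedulerClock::time_point start) {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(SchedulerClock::now() - start);
}
```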
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index a0c3eb0..b42576f 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1130,33 +1130,21 @@
     return NO_ERROR;
 }
 
-status_t SurfaceFlinger::getDisplayStats(const sp<IBinder>& displayToken,
-                                         DisplayStatInfo* outStats) {
+status_t SurfaceFlinger::getDisplayStats(const sp<IBinder>&, DisplayStatInfo* outStats) {
     if (!outStats) {
         return BAD_VALUE;
     }
 
-    std::optional<PhysicalDisplayId> displayIdOpt;
-    {
-        Mutex::Autolock lock(mStateLock);
-        displayIdOpt = getPhysicalDisplayIdLocked(displayToken);
-    }
-
-    if (!displayIdOpt) {
-        ALOGE("%s: Invalid physical display token %p", __func__, displayToken.get());
-        return NAME_NOT_FOUND;
-    }
-    const auto schedule = mScheduler->getVsyncSchedule(displayIdOpt);
-    outStats->vsyncTime = schedule->vsyncDeadlineAfter(TimePoint::now()).ns();
-    outStats->vsyncPeriod = schedule->period().ns();
+    const auto& schedule = mScheduler->getVsyncSchedule();
+    outStats->vsyncTime = schedule.vsyncDeadlineAfter(TimePoint::now()).ns();
+    outStats->vsyncPeriod = schedule.period().ns();
     return NO_ERROR;
 }
 
 void SurfaceFlinger::setDesiredActiveMode(display::DisplayModeRequest&& request, bool force) {
     ATRACE_CALL();
 
-    const auto displayId = request.mode.modePtr->getPhysicalDisplayId();
-    const auto display = getDisplayDeviceLocked(displayId);
+    auto display = getDisplayDeviceLocked(request.mode.modePtr->getPhysicalDisplayId());
     if (!display) {
         ALOGW("%s: display is no longer valid", __func__);
         return;
@@ -1169,25 +1157,23 @@
                                           force)) {
         case DisplayDevice::DesiredActiveModeAction::InitiateDisplayModeSwitch:
             // Set the render rate as setDesiredActiveMode updated it.
-            mScheduler->setRenderRate(displayId,
-                                      display->refreshRateSelector().getActiveMode().fps);
+            mScheduler->setRenderRate(display->refreshRateSelector().getActiveMode().fps);
 
             // Schedule a new frame to initiate the display mode switch.
             scheduleComposite(FrameHint::kNone);
 
             // Start receiving vsync samples now, so that we can detect a period
             // switch.
-            mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */,
-                                              mode.modePtr->getFps());
-
+            mScheduler->resyncToHardwareVsync(true /* allowToEnable */, mode.modePtr->getFps());
             // As we called to set period, we will call to onRefreshRateChangeCompleted once
             // VsyncController model is locked.
-            mScheduler->modulateVsync(displayId, &VsyncModulator::onRefreshRateChangeInitiated);
+            mScheduler->modulateVsync(&VsyncModulator::onRefreshRateChangeInitiated);
+
             updatePhaseConfiguration(mode.fps);
             mScheduler->setModeChangePending(true);
             break;
         case DisplayDevice::DesiredActiveModeAction::InitiateRenderRateSwitch:
-            mScheduler->setRenderRate(displayId, mode.fps);
+            mScheduler->setRenderRate(mode.fps);
             updatePhaseConfiguration(mode.fps);
             mRefreshRateStats->setRefreshRate(mode.fps);
             if (display->getPhysicalId() == mActiveDisplayId && emitEvent) {
@@ -1303,14 +1289,11 @@
 }
 
 void SurfaceFlinger::desiredActiveModeChangeDone(const sp<DisplayDevice>& display) {
-    const auto desiredActiveMode = display->getDesiredActiveMode();
-    const auto& modeOpt = desiredActiveMode->modeOpt;
-    const auto displayId = modeOpt->modePtr->getPhysicalDisplayId();
-    const auto displayFps = modeOpt->modePtr->getFps();
-    const auto renderFps = modeOpt->fps;
+    const auto displayFps = display->getDesiredActiveMode()->modeOpt->modePtr->getFps();
+    const auto renderFps = display->getDesiredActiveMode()->modeOpt->fps;
     clearDesiredActiveModeState(display);
-    mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, displayFps);
-    mScheduler->setRenderRate(displayId, renderFps);
+    mScheduler->resyncToHardwareVsync(true /* allowToEnable */, displayFps);
+    mScheduler->setRenderRate(renderFps);
     updatePhaseConfiguration(renderFps);
 }
 
@@ -2047,11 +2030,16 @@
     ATRACE_FORMAT("onComposerHalVsync%s", tracePeriod.c_str());
 
     Mutex::Autolock lock(mStateLock);
-    if (const auto displayIdOpt = getHwComposer().onVsync(hwcDisplayId, timestamp)) {
-        if (mScheduler->addResyncSample(*displayIdOpt, timestamp, vsyncPeriod)) {
-            // period flushed
-            mScheduler->modulateVsync(displayIdOpt, &VsyncModulator::onRefreshRateChangeCompleted);
-        }
+
+    if (const auto displayIdOpt = getHwComposer().onVsync(hwcDisplayId, timestamp);
+        displayIdOpt != mActiveDisplayId) {
+        // Ignore VSYNC for invalid/inactive displays.
+        return;
+    }
+
+    const bool periodFlushed = mScheduler->addResyncSample(timestamp, vsyncPeriod);
+    if (periodFlushed) {
+        mScheduler->modulateVsync(&VsyncModulator::onRefreshRateChangeCompleted);
     }
 }
 
@@ -2092,15 +2080,23 @@
     mScheduler->forceNextResync();
 }
 
-void SurfaceFlinger::setVsyncEnabled(PhysicalDisplayId id, bool enabled) {
-    const char* const whence = __func__;
-    ATRACE_FORMAT("%s (%d) for %" PRIu64, whence, enabled, id.value);
+void SurfaceFlinger::onRefreshRateChangedDebug(const RefreshRateChangedDebugData&) {
+    // TODO(b/202734676) update refresh rate value on the RefreshRateOverlay
+}
+
+void SurfaceFlinger::setVsyncEnabled(bool enabled) {
+    ATRACE_CALL();
 
     // On main thread to avoid race conditions with display power state.
     static_cast<void>(mScheduler->schedule([=]() FTL_FAKE_GUARD(mStateLock) {
-        ATRACE_FORMAT("%s (%d) for %" PRIu64 " (main thread)", whence, enabled, id.value);
-        if (const auto display = getDisplayDeviceLocked(id); display && display->isPoweredOn()) {
-            setHWCVsyncEnabled(id, enabled);
+        {
+            ftl::FakeGuard guard(kMainThreadContext);
+            mScheduler->getVsyncSchedule().setPendingHardwareVsyncState(enabled);
+        }
+
+        if (const auto display = getDefaultDisplayDeviceLocked();
+            display && display->isPoweredOn()) {
+            setHWCVsyncEnabled(display->getPhysicalId(), enabled);
         }
     }));
 }
@@ -2127,13 +2123,13 @@
 TimePoint SurfaceFlinger::calculateExpectedPresentTime(TimePoint frameTime) const {
     const auto& schedule = mScheduler->getVsyncSchedule();
 
-    const TimePoint vsyncDeadline = schedule->vsyncDeadlineAfter(frameTime);
+    const TimePoint vsyncDeadline = schedule.vsyncDeadlineAfter(frameTime);
     if (mScheduler->vsyncModulator().getVsyncConfig().sfOffset > 0) {
         return vsyncDeadline;
     }
 
     // Inflate the expected present time if we're targeting the next vsync.
-    return vsyncDeadline + schedule->period();
+    return vsyncDeadline + schedule.period();
 }
 
 void SurfaceFlinger::configure() FTL_FAKE_GUARD(kMainThreadContext) {
@@ -2264,7 +2260,7 @@
                   ticks<std::milli, float>(mExpectedPresentTime - TimePoint::now()),
                   mExpectedPresentTime == expectedVsyncTime ? "" : " (adjusted)");
 
-    const Period vsyncPeriod = mScheduler->getVsyncSchedule()->period();
+    const Period vsyncPeriod = mScheduler->getVsyncSchedule().period();
     const FenceTimePtr& previousPresentFence = getPreviousPresentFence(frameTime, vsyncPeriod);
 
     // When backpressure propagation is enabled, we want to give a small grace period of 1ms
@@ -2514,7 +2510,7 @@
         refreshArgs.devOptFlashDirtyRegionsDelay = std::chrono::milliseconds(mDebugFlashDelay);
     }
 
-    const auto prevVsyncTime = mExpectedPresentTime - mScheduler->getVsyncSchedule()->period();
+    const auto prevVsyncTime = mExpectedPresentTime - mScheduler->getVsyncSchedule().period();
     const auto hwcMinWorkDuration = mVsyncConfiguration->getCurrentConfigs().hwcMinWorkDuration;
 
     refreshArgs.earliestPresentTime = prevVsyncTime - hwcMinWorkDuration;
@@ -2596,7 +2592,7 @@
     // TODO(b/160583065): Enable skip validation when SF caches all client composition layers.
     const bool hasGpuUseOrReuse =
             mCompositionCoverage.any(CompositionCoverage::Gpu | CompositionCoverage::GpuReuse);
-    mScheduler->modulateVsync({}, &VsyncModulator::onDisplayRefresh, hasGpuUseOrReuse);
+    mScheduler->modulateVsync(&VsyncModulator::onDisplayRefresh, hasGpuUseOrReuse);
 
     mLayersWithQueuedFrames.clear();
     if (mLayerTracingEnabled && mLayerTracing.flagIsSet(LayerTracing::TRACE_COMPOSITION)) {
@@ -2740,9 +2736,9 @@
             ? mPresentLatencyTracker.trackPendingFrame(compositeTime, presentFenceTime)
             : Duration::zero();
 
-    const auto schedule = mScheduler->getVsyncSchedule();
-    const TimePoint vsyncDeadline = schedule->vsyncDeadlineAfter(presentTime);
-    const Period vsyncPeriod = schedule->period();
+    const auto& schedule = mScheduler->getVsyncSchedule();
+    const TimePoint vsyncDeadline = schedule.vsyncDeadlineAfter(presentTime);
+    const Period vsyncPeriod = schedule.period();
     const nsecs_t vsyncPhase = mVsyncConfiguration->getCurrentConfigs().late.sfOffset;
 
     const CompositorTiming compositorTiming(vsyncDeadline.ns(), vsyncPeriod.ns(), vsyncPhase,
@@ -2817,19 +2813,15 @@
     mTimeStats->incrementTotalFrames();
     mTimeStats->setPresentFenceGlobal(presentFenceTime);
 
-    {
-        ftl::FakeGuard guard(mStateLock);
-        for (const auto& [id, physicalDisplay] : mPhysicalDisplays) {
-            if (auto displayDevice = getDisplayDeviceLocked(id);
-                displayDevice && displayDevice->isPoweredOn() && physicalDisplay.isInternal()) {
-                auto presentFenceTimeI = defaultDisplay && defaultDisplay->getPhysicalId() == id
-                        ? std::move(presentFenceTime)
-                        : std::make_shared<FenceTime>(getHwComposer().getPresentFence(id));
-                if (presentFenceTimeI->isValid()) {
-                    mScheduler->addPresentFence(id, std::move(presentFenceTimeI));
-                }
-            }
-        }
+    const bool isInternalDisplay = defaultDisplay &&
+            FTL_FAKE_GUARD(mStateLock, mPhysicalDisplays)
+                    .get(defaultDisplay->getPhysicalId())
+                    .transform(&PhysicalDisplay::isInternal)
+                    .value_or(false);
+
+    if (isInternalDisplay && defaultDisplay && defaultDisplay->getPowerMode() == hal::PowerMode::ON &&
+        presentFenceTime->isValid()) {
+        mScheduler->addPresentFence(std::move(presentFenceTime));
     }
 
     const bool isDisplayConnected =
@@ -2837,7 +2829,7 @@
 
     if (!hasSyncFramework) {
         if (isDisplayConnected && defaultDisplay->isPoweredOn()) {
-            mScheduler->enableHardwareVsync(defaultDisplay->getPhysicalId());
+            mScheduler->enableHardwareVsync();
         }
     }
 
@@ -2948,7 +2940,7 @@
     // so we can call commitTransactionsLocked unconditionally.
     // We clear the flags with mStateLock held to guarantee that
     // mCurrentState won't change until the transaction is committed.
-    mScheduler->modulateVsync({}, &VsyncModulator::onTransactionCommit);
+    mScheduler->modulateVsync(&VsyncModulator::onTransactionCommit);
     commitTransactionsLocked(clearTransactionFlags(eTransactionMask));
 
     mDebugInTransaction = 0;
@@ -3787,9 +3779,10 @@
     mScheduler = std::make_unique<Scheduler>(static_cast<ICompositor&>(*this),
                                              static_cast<ISchedulerCallback&>(*this), features,
                                              std::move(modulatorPtr));
+    mScheduler->createVsyncSchedule(features);
     mScheduler->registerDisplay(display->getPhysicalId(), display->holdRefreshRateSelector());
 
-    setVsyncEnabled(display->getPhysicalId(), false);
+    setVsyncEnabled(false);
     mScheduler->startTimers();
 
     const auto configs = mVsyncConfiguration->getCurrentConfigs();
@@ -3805,7 +3798,7 @@
                                           /* workDuration */ activeRefreshRate.getPeriod(),
                                           /* readyDuration */ configs.late.sfWorkDuration);
 
-    mScheduler->initVsync(mScheduler->getVsyncSchedule()->getDispatch(),
+    mScheduler->initVsync(mScheduler->getVsyncSchedule().getDispatch(),
                           *mFrameTimeline->getTokenManager(), configs.late.sfWorkDuration);
 
     mRegionSamplingThread =
@@ -3870,7 +3863,7 @@
     for (Layer* offscreenLayer : mOffscreenLayers) {
         offscreenLayer->traverse(LayerVector::StateSet::Drawing, [](Layer* layer) {
             if (layer->clearTransactionFlags(eTransactionNeeded)) {
-                layer->doTransaction(0);
+                layer->doTransaction(0, 0);
                 layer->commitChildList();
             }
         });
@@ -3906,7 +3899,7 @@
     // second frame. But layer 0's second frame could be waiting on display.
     mDrawingState.traverse([&](Layer* layer) {
         if (layer->clearTransactionFlags(eTransactionNeeded) || mForceTransactionDisplayChange) {
-            const uint32_t flags = layer->doTransaction(0);
+            const uint32_t flags = layer->doTransaction(0, latchTime);
             if (flags & Layer::eVisibleRegion) {
                 mVisibleRegionsDirty = true;
             }
@@ -4019,7 +4012,7 @@
 
 void SurfaceFlinger::setTransactionFlags(uint32_t mask, TransactionSchedule schedule,
                                          const sp<IBinder>& applyToken, FrameHint frameHint) {
-    mScheduler->modulateVsync({}, &VsyncModulator::setTransactionSchedule, schedule, applyToken);
+    mScheduler->modulateVsync(&VsyncModulator::setTransactionSchedule, schedule, applyToken);
     uint32_t transactionFlags = mTransactionFlags.fetch_or(mask);
     ATRACE_INT("mTransactionFlags", transactionFlags);
 
@@ -4048,7 +4041,7 @@
         return TransactionReadiness::NotReady;
     }
 
-    if (!mScheduler->isVsyncTargetForUid(mExpectedPresentTime, transaction.originUid)) {
+    if (!mScheduler->isVsyncValid(mExpectedPresentTime, transaction.originUid)) {
         ATRACE_NAME("!isVsyncValid");
         return TransactionReadiness::NotReady;
     }
@@ -4071,16 +4064,30 @@
         sp<Layer> layer = LayerHandle::getLayer(s.surface);
         const auto& transaction = *flushState.transaction;
         // check for barrier frames
-        if (s.bufferData->hasBarrier &&
-            ((layer->getDrawingState().frameNumber) < s.bufferData->barrierFrameNumber)) {
-            const bool willApplyBarrierFrame =
-                    flushState.bufferLayersReadyToPresent.contains(s.surface.get()) &&
-                    (flushState.bufferLayersReadyToPresent.get(s.surface.get()) >=
-                     s.bufferData->barrierFrameNumber);
-            if (!willApplyBarrierFrame) {
-                ATRACE_NAME("NotReadyBarrier");
-                ready = TransactionReadiness::NotReadyBarrier;
-                return false;
+        if (s.bufferData->hasBarrier) {
+            // The current producerId is already a newer producer than the buffer that has a
+            // barrier. This means the incoming buffer is older and we can release it here. We
+            // don't wait on the barrier since we know that's stale information.
+            if (layer->getDrawingState().producerId > s.bufferData->producerId) {
+                layer->callReleaseBufferCallback(s.bufferData->releaseBufferListener,
+                                                 s.bufferData->buffer, s.bufferData->frameNumber,
+                                                 s.bufferData->acquireFence);
+                // Delete the entire state at this point and not just release the buffer because
+                // everything associated with the Layer in this Transaction is now out of date.
+                ATRACE_NAME("DeleteStaleBuffer");
+                return TraverseBuffersReturnValues::DELETE_AND_CONTINUE_TRAVERSAL;
+            }
+
+            if (layer->getDrawingState().frameNumber < s.bufferData->barrierFrameNumber) {
+                const bool willApplyBarrierFrame =
+                        flushState.bufferLayersReadyToPresent.contains(s.surface.get()) &&
+                        ((flushState.bufferLayersReadyToPresent.get(s.surface.get()) >=
+                          s.bufferData->barrierFrameNumber));
+                if (!willApplyBarrierFrame) {
+                    ATRACE_NAME("NotReadyBarrier");
+                    ready = TransactionReadiness::NotReadyBarrier;
+                    return TraverseBuffersReturnValues::STOP_TRAVERSAL;
+                }
             }
         }
 
@@ -4091,7 +4098,7 @@
         if (layer->backpressureEnabled() && hasPendingBuffer && transaction.isAutoTimestamp) {
             ATRACE_NAME("hasPendingBuffer");
             ready = TransactionReadiness::NotReady;
-            return false;
+            return TraverseBuffersReturnValues::STOP_TRAVERSAL;
         }
 
         // check fence status
@@ -4118,14 +4125,14 @@
                                                        "Buffer processing hung up due to stuck "
                                                        "fence. Indicates GPU hang");
                 }
-                return false;
+                return TraverseBuffersReturnValues::STOP_TRAVERSAL;
             }
 
             ready = enableLatchUnsignaledConfig == LatchUnsignaledConfig::AutoSingleLayer
                     ? TransactionReadiness::ReadyUnsignaledSingle
                     : TransactionReadiness::ReadyUnsignaled;
         }
-        return true;
+        return TraverseBuffersReturnValues::CONTINUE_TRAVERSAL;
     });
     ATRACE_INT("TransactionReadiness", static_cast<int>(ready));
     return ready;
@@ -4192,7 +4199,7 @@
         return false;
     }
 
-    const Duration earlyLatchVsyncThreshold = mScheduler->getVsyncSchedule()->period() / 2;
+    const Duration earlyLatchVsyncThreshold = mScheduler->getVsyncSchedule().period() / 2;
 
     return predictedPresentTime >= expectedPresentTime &&
             predictedPresentTime - expectedPresentTime >= earlyLatchVsyncThreshold;
@@ -4565,8 +4572,10 @@
     }
     if (layer == nullptr) {
         for (auto& [listener, callbackIds] : s.listeners) {
-            mTransactionCallbackInvoker.registerUnpresentedCallbackHandle(
-                    sp<CallbackHandle>::make(listener, callbackIds, s.surface));
+            mTransactionCallbackInvoker.addCallbackHandle(sp<CallbackHandle>::make(listener,
+                                                                                   callbackIds,
+                                                                                   s.surface),
+                                                          std::vector<JankData>());
         }
         return 0;
     }
@@ -4843,6 +4852,10 @@
                                           s.trustedPresentationListener);
     }
 
+    if (what & layer_state_t::eFlushJankData) {
+        // Do nothing. Processing the transaction completed listeners currently causes the flush.
+    }
+
     if (layer->setTransactionCompletedListeners(callbackHandles,
                                                 layer->willPresentCurrentTransaction())) {
         flags |= eTraversalNeeded;
@@ -4900,8 +4913,10 @@
     }
     if (layer == nullptr) {
         for (auto& [listener, callbackIds] : s.listeners) {
-            mTransactionCallbackInvoker.registerUnpresentedCallbackHandle(
-                    sp<CallbackHandle>::make(listener, callbackIds, s.surface));
+            mTransactionCallbackInvoker.addCallbackHandle(sp<CallbackHandle>::make(listener,
+                                                                                   callbackIds,
+                                                                                   s.surface),
+                                                          std::vector<JankData>());
         }
         return 0;
     }
@@ -5233,11 +5248,11 @@
             ALOGW("Couldn't set SCHED_FIFO on display on: %s\n", strerror(errno));
         }
         getHwComposer().setPowerMode(displayId, mode);
-        if (mode != hal::PowerMode::DOZE_SUSPEND) {
-            if (isActiveDisplay) {
-                mScheduler->onScreenAcquired(mAppConnectionHandle);
-            }
-            mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, refreshRate);
+        if (isActiveDisplay && mode != hal::PowerMode::DOZE_SUSPEND) {
+            setHWCVsyncEnabled(displayId,
+                               mScheduler->getVsyncSchedule().getPendingHardwareVsyncState());
+            mScheduler->onScreenAcquired(mAppConnectionHandle);
+            mScheduler->resyncToHardwareVsync(true, refreshRate);
         }
 
         mVisibleRegionsDirty = true;
@@ -5250,34 +5265,33 @@
         if (SurfaceFlinger::setSchedAttr(false) != NO_ERROR) {
             ALOGW("Couldn't set uclamp.min on display off: %s\n", strerror(errno));
         }
-        if (*currentModeOpt != hal::PowerMode::DOZE_SUSPEND) {
-            mScheduler->disableHardwareVsync(displayId, true);
-            if (isActiveDisplay) {
-                mScheduler->onScreenReleased(mAppConnectionHandle);
-            }
+        if (isActiveDisplay && *currentModeOpt != hal::PowerMode::DOZE_SUSPEND) {
+            mScheduler->disableHardwareVsync(true);
+            mScheduler->onScreenReleased(mAppConnectionHandle);
         }
 
+        // Make sure HWVsync is disabled before turning off the display
+        setHWCVsyncEnabled(displayId, false);
+
         getHwComposer().setPowerMode(displayId, mode);
         mVisibleRegionsDirty = true;
         // from this point on, SF will stop drawing on this display
     } else if (mode == hal::PowerMode::DOZE || mode == hal::PowerMode::ON) {
         // Update display while dozing
         getHwComposer().setPowerMode(displayId, mode);
-        if (*currentModeOpt == hal::PowerMode::DOZE_SUSPEND) {
-            if (isActiveDisplay) {
-                mScheduler->onScreenAcquired(mAppConnectionHandle);
-            }
+        if (isActiveDisplay && *currentModeOpt == hal::PowerMode::DOZE_SUSPEND) {
             ALOGI("Force repainting for DOZE_SUSPEND -> DOZE or ON.");
             mVisibleRegionsDirty = true;
             scheduleRepaint();
-            mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, refreshRate);
+            mScheduler->onScreenAcquired(mAppConnectionHandle);
+            mScheduler->resyncToHardwareVsync(true, refreshRate);
         }
     } else if (mode == hal::PowerMode::DOZE_SUSPEND) {
         // Leave display going to doze
         if (isActiveDisplay) {
+            mScheduler->disableHardwareVsync(true);
             mScheduler->onScreenReleased(mAppConnectionHandle);
         }
-        mScheduler->disableHardwareVsync(displayId, true);
         getHwComposer().setPowerMode(displayId, mode);
     } else {
         ALOGE("Attempting to set unknown power mode: %d\n", mode);
@@ -5287,8 +5301,8 @@
     if (isActiveDisplay) {
         mTimeStats->setPowerMode(mode);
         mRefreshRateStats->setPowerMode(mode);
+        mScheduler->setDisplayPowerMode(mode);
     }
-    mScheduler->setDisplayPowerMode(displayId, mode);
 
     ALOGD("Finished setting power mode %d on display %s", mode, to_string(displayId).c_str());
 }
@@ -7740,12 +7754,12 @@
     }
     if (mLegacyFrontEndEnabled && !mLayerLifecycleManagerEnabled) {
         mDrawingState.traverseInZOrder([&refreshArgs, cursorOnly, &layers](Layer* layer) {
-            if (auto layerFE = layer->getCompositionEngineLayerFE()) {
+            if (const auto& layerFE = layer->getCompositionEngineLayerFE()) {
                 if (cursorOnly &&
                     layer->getLayerSnapshot()->compositionType !=
                             aidl::android::hardware::graphics::composer3::Composition::CURSOR)
                     return;
-                layer->updateSnapshot(/* refreshArgs.updatingGeometryThisFrame */ true);
+                layer->updateSnapshot(refreshArgs.updatingGeometryThisFrame);
                 layerFE->mSnapshot = layer->stealLayerSnapshot();
                 refreshArgs.layers.push_back(layerFE);
                 layers.emplace_back(layer, layerFE.get());
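
The isInternalDisplay change in the hunks above collapses a map lookup into one expression: fetch the default display's entry, map it through isInternal, and fall back to false when the entry is missing. Below is a minimal standalone sketch of that lookup-and-map shape; it uses std::optional and hypothetical names (PhysicalDisplay, lookup) rather than SurfaceFlinger's ftl containers, so it illustrates the pattern only, not the actual API.

#include <iostream>
#include <optional>
#include <unordered_map>

struct PhysicalDisplay {
    bool internal = false;
    bool isInternal() const { return internal; }
};

// Map lookup that yields an optional value instead of an iterator.
std::optional<PhysicalDisplay> lookup(const std::unordered_map<int, PhysicalDisplay>& displays,
                                      int id) {
    const auto it = displays.find(id);
    if (it == displays.end()) return std::nullopt;
    return it->second;
}

int main() {
    std::unordered_map<int, PhysicalDisplay> displays;
    displays[1] = PhysicalDisplay{true};   // internal panel
    displays[2] = PhysicalDisplay{false};  // external display

    // Equivalent in spirit to .get(id).transform(&PhysicalDisplay::isInternal).value_or(false).
    const auto isInternal = [&](int id) {
        const auto display = lookup(displays, id);
        return display ? display->isInternal() : false;
    };

    std::cout << isInternal(1) << ' ' << isInternal(2) << ' ' << isInternal(3) << '\n';  // 1 0 0
    return 0;
}
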
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 0bd15dc..aded52a 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -105,6 +105,7 @@
 #include <vector>
 
 #include <aidl/android/hardware/graphics/common/DisplayDecorationSupport.h>
+#include <aidl/android/hardware/graphics/composer3/RefreshRateChangedDebugData.h>
 #include "Client.h"
 
 using namespace android::surfaceflinger;
@@ -128,6 +129,7 @@
 class ScreenCapturer;
 class WindowInfosListenerInvoker;
 
+using ::aidl::android::hardware::graphics::composer3::RefreshRateChangedDebugData;
 using frontend::TransactionHandler;
 using gui::CaptureArgs;
 using gui::DisplayCaptureArgs;
@@ -631,6 +633,7 @@
                                                const hal::VsyncPeriodChangeTimeline&) override;
     void onComposerHalSeamlessPossible(hal::HWDisplayId) override;
     void onComposerHalVsyncIdle(hal::HWDisplayId) override;
+    void onRefreshRateChangedDebug(const RefreshRateChangedDebugData&) override;
 
     // ICompositor overrides:
     void configure() override;
@@ -642,7 +645,7 @@
 
     // Toggles hardware VSYNC by calling into HWC.
     // TODO(b/241286146): Rename for self-explanatory API.
-    void setVsyncEnabled(PhysicalDisplayId, bool) override;
+    void setVsyncEnabled(bool) override;
     void requestDisplayModes(std::vector<display::DisplayModeRequest>) override;
     void kernelTimerChanged(bool expired) override;
     void triggerOnFrameRateOverridesChanged() override;
@@ -1130,15 +1133,7 @@
     pid_t mPid;
     std::future<void> mRenderEnginePrimeCacheFuture;
 
-    // mStateLock has conventions related to the current thread, because only
-    // the main thread should modify variables protected by mStateLock.
-    // - read access from a non-main thread must lock mStateLock, since the main
-    // thread may modify these variables.
-    // - write access from a non-main thread is not permitted.
-    // - read access from the main thread can use an ftl::FakeGuard, since other
-    // threads must not modify these variables.
-    // - write access from the main thread must lock mStateLock, since another
-    // thread may be reading these variables.
+    // access must be protected by mStateLock
     mutable Mutex mStateLock;
     State mCurrentState{LayerVector::StateSet::Current};
     std::atomic<int32_t> mTransactionFlags = 0;
diff --git a/services/surfaceflinger/TransactionCallbackInvoker.cpp b/services/surfaceflinger/TransactionCallbackInvoker.cpp
index 3da98d4..3587a72 100644
--- a/services/surfaceflinger/TransactionCallbackInvoker.cpp
+++ b/services/surfaceflinger/TransactionCallbackInvoker.cpp
@@ -92,11 +92,6 @@
     return NO_ERROR;
 }
 
-status_t TransactionCallbackInvoker::registerUnpresentedCallbackHandle(
-        const sp<CallbackHandle>& handle) {
-    return addCallbackHandle(handle, std::vector<JankData>());
-}
-
 status_t TransactionCallbackInvoker::findOrCreateTransactionStats(
         const sp<IBinder>& listener, const std::vector<CallbackId>& callbackIds,
         TransactionStats** outTransactionStats) {
diff --git a/services/surfaceflinger/TransactionCallbackInvoker.h b/services/surfaceflinger/TransactionCallbackInvoker.h
index 61ff9bc..3074795 100644
--- a/services/surfaceflinger/TransactionCallbackInvoker.h
+++ b/services/surfaceflinger/TransactionCallbackInvoker.h
@@ -66,9 +66,6 @@
     status_t addOnCommitCallbackHandles(const std::deque<sp<CallbackHandle>>& handles,
                                              std::deque<sp<CallbackHandle>>& outRemainingHandles);
 
-    // Adds the Transaction CallbackHandle from a layer that does not need to be relatched and
-    // presented this frame.
-    status_t registerUnpresentedCallbackHandle(const sp<CallbackHandle>& handle);
     void addEmptyTransaction(const ListenerCallbacks& listenerCallbacks);
 
     void addPresentFence(sp<Fence>);
diff --git a/services/surfaceflinger/TransactionState.h b/services/surfaceflinger/TransactionState.h
index 5025c49..6c5a8b2 100644
--- a/services/surfaceflinger/TransactionState.h
+++ b/services/surfaceflinger/TransactionState.h
@@ -27,6 +27,12 @@
 
 namespace android {
 
+enum TraverseBuffersReturnValues {
+    CONTINUE_TRAVERSAL,
+    STOP_TRAVERSAL,
+    DELETE_AND_CONTINUE_TRAVERSAL,
+};
+
 // Extends the client side composer state by resolving buffer.
 class ResolvedComposerState : public ComposerState {
 public:
@@ -75,12 +81,18 @@
     }
 
     template <typename Visitor>
-    void traverseStatesWithBuffersWhileTrue(Visitor&& visitor) const {
-        for (const auto& state : states) {
-            if (state.state.hasBufferChanges() && state.state.hasValidBuffer() &&
-                state.state.surface) {
-                if (!visitor(state.state)) return;
+    void traverseStatesWithBuffersWhileTrue(Visitor&& visitor) {
+        for (auto state = states.begin(); state != states.end();) {
+            if (state->state.hasBufferChanges() && state->state.hasValidBuffer() &&
+                state->state.surface) {
+                int result = visitor(state->state);
+                if (result == STOP_TRAVERSAL) return;
+                if (result == DELETE_AND_CONTINUE_TRAVERSAL) {
+                    state = states.erase(state);
+                    continue;
+                }
             }
+            state++;
         }
     }
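
The reworked traverseStatesWithBuffersWhileTrue above treats the visitor's return value as one of the TraverseBuffersReturnValues entries, so a visitor can stop the walk early or erase the state it just inspected and keep going. A small self-contained sketch of that erase-while-iterating contract follows; TraversalResult and traverseWhileTrue are hypothetical stand-ins, not the real SurfaceFlinger types.

#include <iostream>
#include <vector>

// Hypothetical stand-in for TraverseBuffersReturnValues.
enum class TraversalResult { Continue, Stop, DeleteAndContinue };

template <typename T, typename Visitor>
void traverseWhileTrue(std::vector<T>& items, Visitor&& visitor) {
    for (auto it = items.begin(); it != items.end();) {
        switch (visitor(*it)) {
            case TraversalResult::Stop:
                return;                    // abort the whole traversal
            case TraversalResult::DeleteAndContinue:
                it = items.erase(it);      // erase() hands back the next valid iterator
                break;
            case TraversalResult::Continue:
                ++it;
                break;
        }
    }
}

int main() {
    std::vector<int> frames{1, 2, 3, 4, 5};
    traverseWhileTrue(frames, [](int frame) {
        if (frame == 2) return TraversalResult::DeleteAndContinue;  // drop a stale entry
        if (frame == 4) return TraversalResult::Stop;               // e.g. a barrier is not ready
        return TraversalResult::Continue;
    });
    for (int frame : frames) std::cout << frame << ' ';  // prints: 1 3 4 5
    std::cout << '\n';
    return 0;
}
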
 
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_displayhardware_fuzzer_utils.h b/services/surfaceflinger/fuzzer/surfaceflinger_displayhardware_fuzzer_utils.h
index 6a6e3db..1a951b3 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_displayhardware_fuzzer_utils.h
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_displayhardware_fuzzer_utils.h
@@ -41,6 +41,7 @@
 
 namespace android::hardware::graphics::composer::hal {
 
+using aidl::android::hardware::graphics::composer3::RefreshRateChangedDebugData;
 using ::android::hardware::Return;
 using ::android::hardware::Void;
 using ::android::HWC2::ComposerCallback;
@@ -99,6 +100,7 @@
     void onComposerHalVsyncPeriodTimingChanged(HWDisplayId, const VsyncPeriodChangeTimeline&) {}
     void onComposerHalSeamlessPossible(HWDisplayId) {}
     void onComposerHalVsyncIdle(HWDisplayId) {}
+    void onRefreshRateChangedDebug(const RefreshRateChangedDebugData&) {}
 };
 
 } // namespace android::hardware::graphics::composer::hal
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h b/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
index 5303db3..609fd33 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
@@ -226,21 +226,19 @@
     TestableScheduler(const std::shared_ptr<scheduler::RefreshRateSelector>& selectorPtr,
                       sp<VsyncModulator> modulatorPtr, ISchedulerCallback& callback)
           : TestableScheduler(std::make_unique<android::mock::VsyncController>(),
-                              std::make_shared<android::mock::VSyncTracker>(), selectorPtr,
+                              std::make_unique<android::mock::VSyncTracker>(), selectorPtr,
                               std::move(modulatorPtr), callback) {}
 
     TestableScheduler(std::unique_ptr<VsyncController> controller,
-                      VsyncSchedule::TrackerPtr tracker,
+                      std::unique_ptr<VSyncTracker> tracker,
                       std::shared_ptr<RefreshRateSelector> selectorPtr,
                       sp<VsyncModulator> modulatorPtr, ISchedulerCallback& callback)
           : Scheduler(*this, callback, Feature::kContentDetection, std::move(modulatorPtr)) {
+        mVsyncSchedule = std::unique_ptr<VsyncSchedule>(
+                new VsyncSchedule(std::move(tracker), nullptr, std::move(controller)));
+
         const auto displayId = selectorPtr->getActiveMode().modePtr->getPhysicalDisplayId();
         registerDisplay(displayId, std::move(selectorPtr));
-        mVsyncSchedules.emplace_or_replace(displayId,
-                                           std::shared_ptr<VsyncSchedule>(
-                                                   new VsyncSchedule(displayId, std::move(tracker),
-                                                                     nullptr,
-                                                                     std::move(controller))));
     }
 
     ConnectionHandle createConnection(std::unique_ptr<EventThread> eventThread) {
@@ -649,10 +647,10 @@
 
     // The ISchedulerCallback argument can be nullptr for a no-op implementation.
     void setupScheduler(std::unique_ptr<scheduler::VsyncController> vsyncController,
-                        std::shared_ptr<scheduler::VSyncTracker> vsyncTracker,
+                        std::unique_ptr<scheduler::VSyncTracker> vsyncTracker,
                         std::unique_ptr<EventThread> appEventThread,
                         std::unique_ptr<EventThread> sfEventThread,
-                        scheduler::ISchedulerCallback* callback = nullptr,
+                        scheduler::ISchedulerCallback *callback = nullptr,
                         bool hasMultipleModes = false) {
         constexpr DisplayModeId kModeId60{0};
         DisplayModes modes = makeModes(mock::createDisplayMode(kModeId60, 60_Hz));
@@ -791,7 +789,7 @@
     }
 
 private:
-    void setVsyncEnabled(PhysicalDisplayId, bool) override {}
+    void setVsyncEnabled(bool) override {}
     void requestDisplayModes(std::vector<display::DisplayModeRequest>) override {}
     void kernelTimerChanged(bool) override {}
     void triggerOnFrameRateOverridesChanged() override {}
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp b/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp
index b7b42ab..61fb29a 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp
@@ -47,23 +47,19 @@
                                      PowerMode::DOZE_SUSPEND, PowerMode::ON_SUSPEND};
 
 constexpr uint16_t kRandomStringLength = 256;
+constexpr std::chrono::duration kSyncPeriod(16ms);
+
 template <typename T>
 void dump(T* component, FuzzedDataProvider* fdp) {
     std::string res = fdp->ConsumeRandomLengthString(kRandomStringLength);
     component->dump(res);
 }
 
-class SchedulerFuzzer : public IEventThreadCallback {
+class SchedulerFuzzer {
 public:
     SchedulerFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
     void process();
 
-    // IEventThreadCallback overrides.
-    bool isVsyncTargetForUid(TimePoint /* expectedVsyncTime */, uid_t) const override {
-        return true;
-    }
-    Fps getLeaderRenderFrameRate(uid_t) const override { return 60_Hz; }
-
 private:
     void fuzzRefreshRateSelection();
     void fuzzRefreshRateSelector();
@@ -80,7 +76,7 @@
 
     FuzzedDataProvider mFdp;
 
-    std::shared_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
+    std::unique_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
 };
 
 PhysicalDisplayId SchedulerFuzzer::getPhysicalDisplayId() {
@@ -94,12 +90,12 @@
 }
 
 void SchedulerFuzzer::fuzzEventThread() {
-    mVsyncSchedule = std::shared_ptr<scheduler::VsyncSchedule>(
-            new scheduler::VsyncSchedule(getPhysicalDisplayId(),
-                                         std::make_shared<mock::VSyncTracker>(),
-                                         std::make_shared<mock::VSyncDispatch>(), nullptr));
+    mVsyncSchedule = std::unique_ptr<scheduler::VsyncSchedule>(
+            new scheduler::VsyncSchedule(std::make_unique<mock::VSyncTracker>(),
+                                         std::make_unique<mock::VSyncDispatch>(), nullptr));
+    const auto getVsyncPeriod = [](uid_t /* uid */) { return kSyncPeriod.count(); };
     std::unique_ptr<android::impl::EventThread> thread = std::make_unique<
-            android::impl::EventThread>("fuzzer", mVsyncSchedule, *this, nullptr /* TokenManager */,
+            android::impl::EventThread>("fuzzer", *mVsyncSchedule, nullptr, nullptr, getVsyncPeriod,
                                         (std::chrono::nanoseconds)mFdp.ConsumeIntegral<uint64_t>(),
                                         (std::chrono::nanoseconds)mFdp.ConsumeIntegral<uint64_t>());
 
@@ -136,7 +132,7 @@
 }
 
 void SchedulerFuzzer::fuzzVSyncDispatchTimerQueue() {
-    auto stubTracker = std::make_shared<FuzzImplVSyncTracker>(mFdp.ConsumeIntegral<nsecs_t>());
+    FuzzImplVSyncTracker stubTracker{mFdp.ConsumeIntegral<nsecs_t>()};
     scheduler::VSyncDispatchTimerQueue
             mDispatch{std::make_unique<scheduler::ControllableClock>(), stubTracker,
                       mFdp.ConsumeIntegral<nsecs_t>() /*dispatchGroupThreshold*/,
@@ -149,17 +145,17 @@
     scheduler::VSyncDispatchTimerQueueEntry entry(
             "fuzz", [](auto, auto, auto) {},
             mFdp.ConsumeIntegral<nsecs_t>() /*vSyncMoveThreshold*/);
-    entry.update(*stubTracker, 0);
+    entry.update(stubTracker, 0);
     entry.schedule({.workDuration = mFdp.ConsumeIntegral<nsecs_t>(),
                     .readyDuration = mFdp.ConsumeIntegral<nsecs_t>(),
                     .earliestVsync = mFdp.ConsumeIntegral<nsecs_t>()},
-                   *stubTracker, 0);
+                   stubTracker, 0);
     entry.disarm();
     entry.ensureNotRunning();
     entry.schedule({.workDuration = mFdp.ConsumeIntegral<nsecs_t>(),
                     .readyDuration = mFdp.ConsumeIntegral<nsecs_t>(),
                     .earliestVsync = mFdp.ConsumeIntegral<nsecs_t>()},
-                   *stubTracker, 0);
+                   stubTracker, 0);
     auto const wakeup = entry.wakeupTime();
     auto const ready = entry.readyTime();
     entry.callback(entry.executing(), *wakeup, *ready);
@@ -173,8 +169,8 @@
     uint16_t now = mFdp.ConsumeIntegral<uint16_t>();
     uint16_t historySize = mFdp.ConsumeIntegralInRange<uint16_t>(1, UINT16_MAX);
     uint16_t minimumSamplesForPrediction = mFdp.ConsumeIntegralInRange<uint16_t>(1, UINT16_MAX);
-    scheduler::VSyncPredictor tracker{"predictor", mFdp.ConsumeIntegral<uint16_t>() /*period*/,
-                                      historySize, minimumSamplesForPrediction,
+    scheduler::VSyncPredictor tracker{mFdp.ConsumeIntegral<uint16_t>() /*period*/, historySize,
+                                      minimumSamplesForPrediction,
                                       mFdp.ConsumeIntegral<uint32_t>() /*outlierTolerancePercent*/};
     uint16_t period = mFdp.ConsumeIntegral<uint16_t>();
     tracker.setPeriod(period);
@@ -246,15 +242,13 @@
 
 void SchedulerFuzzer::fuzzVSyncReactor() {
     std::shared_ptr<FuzzImplVSyncTracker> vSyncTracker = std::make_shared<FuzzImplVSyncTracker>();
-    scheduler::VSyncReactor reactor("fuzzer_reactor",
-                                    std::make_unique<ClockWrapper>(
+    scheduler::VSyncReactor reactor(std::make_unique<ClockWrapper>(
                                             std::make_shared<FuzzImplClock>()),
                                     *vSyncTracker, mFdp.ConsumeIntegral<uint8_t>() /*pendingLimit*/,
                                     false);
 
-    reactor.startPeriodTransition(mFdp.ConsumeIntegral<nsecs_t>(), mFdp.ConsumeBool());
-    bool periodFlushed = false; // Value does not matter, since this is an out
-                                // param from addHwVsyncTimestamp.
+    reactor.startPeriodTransition(mFdp.ConsumeIntegral<nsecs_t>());
+    bool periodFlushed = mFdp.ConsumeBool();
     reactor.addHwVsyncTimestamp(0, std::nullopt, &periodFlushed);
     reactor.addHwVsyncTimestamp(mFdp.ConsumeIntegral<nsecs_t>() /*newPeriod*/, std::nullopt,
                                 &periodFlushed);
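
The fuzzer edits above keep to the usual libFuzzer shape: carve typed values out of the raw input with FuzzedDataProvider and feed them into the component under test. A minimal sketch of that shape, assuming a libFuzzer build where <fuzzer/FuzzedDataProvider.h> is available; the body is a placeholder rather than the scheduler harness.

#include <fuzzer/FuzzedDataProvider.h>

#include <cstddef>
#include <cstdint>
#include <string>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    FuzzedDataProvider fdp(data, size);

    // Carve typed values out of the input, as the scheduler fuzzer does with nsecs_t and strings.
    const int64_t period = fdp.ConsumeIntegral<int64_t>();
    const bool periodFlushed = fdp.ConsumeBool();
    const std::string dumpArg = fdp.ConsumeRandomLengthString(256);

    // A real target would pass these into the object under test (e.g. a VSyncReactor).
    (void)period;
    (void)periodFlushed;
    (void)dumpArg;
    return 0;
}
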
diff --git a/services/surfaceflinger/main_surfaceflinger.cpp b/services/surfaceflinger/main_surfaceflinger.cpp
index fedd71e..0495678 100644
--- a/services/surfaceflinger/main_surfaceflinger.cpp
+++ b/services/surfaceflinger/main_surfaceflinger.cpp
@@ -139,11 +139,6 @@
 
     set_sched_policy(0, SP_FOREGROUND);
 
-    // Put most SurfaceFlinger threads in the system-background cpuset
-    // Keeps us from unnecessarily using big cores
-    // Do this after the binder thread pool init
-    if (cpusets_enabled()) set_cpuset_policy(0, SP_SYSTEM);
-
     // initialize before clients can connect
     flinger->init();
 
diff --git a/services/surfaceflinger/tests/ReleaseBufferCallback_test.cpp b/services/surfaceflinger/tests/ReleaseBufferCallback_test.cpp
index 16076ea..c23fb9b 100644
--- a/services/surfaceflinger/tests/ReleaseBufferCallback_test.cpp
+++ b/services/surfaceflinger/tests/ReleaseBufferCallback_test.cpp
@@ -85,7 +85,8 @@
                              sp<Fence> fence, CallbackHelper& callback, const ReleaseCallbackId& id,
                              ReleaseBufferCallbackHelper& releaseCallback) {
         Transaction t;
-        t.setBuffer(layer, buffer, fence, id.framenumber, releaseCallback.getCallback());
+        t.setBuffer(layer, buffer, fence, id.framenumber, 0 /* producerId */,
+                    releaseCallback.getCallback());
         t.addTransactionCompletedCallback(callback.function, callback.getContext());
         t.apply();
     }
@@ -301,7 +302,7 @@
 
     Transaction t;
     t.setBuffer(layer, firstBuffer, std::nullopt, firstBufferCallbackId.framenumber,
-                releaseCallback->getCallback());
+                0 /* producerId */, releaseCallback->getCallback());
     t.addTransactionCompletedCallback(transactionCallback.function,
                                       transactionCallback.getContext());
     t.setDesiredPresentTime(time);
@@ -317,7 +318,7 @@
     sp<GraphicBuffer> secondBuffer = getBuffer();
     ReleaseCallbackId secondBufferCallbackId(secondBuffer->getId(), generateFrameNumber());
     t.setBuffer(layer, secondBuffer, std::nullopt, secondBufferCallbackId.framenumber,
-                releaseCallback->getCallback());
+                0 /* producerId */, releaseCallback->getCallback());
     t.addTransactionCompletedCallback(transactionCallback.function,
                                       transactionCallback.getContext());
     t.setDesiredPresentTime(time);
@@ -362,7 +363,7 @@
 
     Transaction transaction1;
     transaction1.setBuffer(layer, secondBuffer, std::nullopt, secondBufferCallbackId.framenumber,
-                           releaseCallback->getCallback());
+                           0 /* producerId */, releaseCallback->getCallback());
     transaction1.addTransactionCompletedCallback(callback1.function, callback1.getContext());
 
     // Set a different TransactionCompletedListener to mimic a second process
@@ -397,14 +398,14 @@
     // Create transaction with a buffer.
     Transaction transaction;
     transaction.setBuffer(layer, firstBuffer, std::nullopt, firstBufferCallbackId.framenumber,
-                          releaseCallback->getCallback());
+                          0 /* producerId */, releaseCallback->getCallback());
 
     sp<GraphicBuffer> secondBuffer = getBuffer();
     ReleaseCallbackId secondBufferCallbackId(secondBuffer->getId(), generateFrameNumber());
 
     // Call setBuffer on the same transaction with a different buffer.
     transaction.setBuffer(layer, secondBuffer, std::nullopt, secondBufferCallbackId.framenumber,
-                          releaseCallback->getCallback());
+                          0 /* producerId */, releaseCallback->getCallback());
 
     ASSERT_NO_FATAL_FAILURE(waitForReleaseBufferCallback(*releaseCallback, firstBufferCallbackId));
 }
@@ -419,7 +420,7 @@
     // Create transaction with a buffer.
     Transaction transaction1;
     transaction1.setBuffer(layer, firstBuffer, std::nullopt, firstBufferCallbackId.framenumber,
-                           releaseCallback->getCallback());
+                           0 /* producerId */, releaseCallback->getCallback());
 
     sp<GraphicBuffer> secondBuffer = getBuffer();
     ReleaseCallbackId secondBufferCallbackId(secondBuffer->getId(), generateFrameNumber());
@@ -427,7 +428,7 @@
     // Create a second transaction with a new buffer for the same layer.
     Transaction transaction2;
     transaction2.setBuffer(layer, secondBuffer, std::nullopt, secondBufferCallbackId.framenumber,
-                           releaseCallback->getCallback());
+                           0 /* producerId */, releaseCallback->getCallback());
 
     // merge transaction1 into transaction2 so ensure we get a proper buffer release callback.
     transaction1.merge(std::move(transaction2));
@@ -450,7 +451,7 @@
 
     Transaction transaction1;
     transaction1.setBuffer(layer, firstBuffer, std::nullopt, firstBufferCallbackId.framenumber,
-                           releaseCallback->getCallback());
+                           0 /* producerId */, releaseCallback->getCallback());
 
     // Sent a second buffer to allow the first buffer to get released.
     sp<GraphicBuffer> secondBuffer = getBuffer();
@@ -458,7 +459,7 @@
 
     Transaction transaction2;
     transaction2.setBuffer(layer, secondBuffer, std::nullopt, secondBufferCallbackId.framenumber,
-                           releaseCallback->getCallback());
+                           0 /* producerId */, releaseCallback->getCallback());
 
     // Set a different TransactionCompletedListener to mimic a second process
     TransactionCompletedListener::setInstance(secondCompletedListener);
@@ -479,10 +480,11 @@
     // Create transaction with a buffer.
     Transaction transaction;
     transaction.setBuffer(layer, firstBuffer, std::nullopt, firstBufferCallbackId.framenumber,
-                          releaseCallback->getCallback());
+                          0 /* producerId */, releaseCallback->getCallback());
 
     // Call setBuffer on the same transaction with a null buffer.
-    transaction.setBuffer(layer, nullptr, std::nullopt, 0, releaseCallback->getCallback());
+    transaction.setBuffer(layer, nullptr, std::nullopt, 0, 0 /* producerId */,
+                          releaseCallback->getCallback());
 
     ASSERT_NO_FATAL_FAILURE(waitForReleaseBufferCallback(*releaseCallback, firstBufferCallbackId));
 }
diff --git a/services/surfaceflinger/tests/unittests/CompositionTest.cpp b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
index 419c818..0416e93 100644
--- a/services/surfaceflinger/tests/unittests/CompositionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
@@ -139,7 +139,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index 214b028..e0b508a 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -74,8 +74,8 @@
                                                              mock::EventThread::kCallingUid,
                                                              ResyncCallback())));
 
-    mFlinger.setupScheduler(std::make_unique<mock::VsyncController>(),
-                            std::make_shared<mock::VSyncTracker>(),
+    mFlinger.setupScheduler(std::unique_ptr<scheduler::VsyncController>(mVsyncController),
+                            std::unique_ptr<scheduler::VSyncTracker>(mVSyncTracker),
                             std::unique_ptr<EventThread>(mEventThread),
                             std::unique_ptr<EventThread>(mSFEventThread),
                             TestableSurfaceFlinger::SchedulerCallbackImpl::kMock);
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h b/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
index c9245d6..223f4db 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
@@ -128,6 +128,8 @@
     renderengine::mock::RenderEngine* mRenderEngine = new renderengine::mock::RenderEngine();
     Hwc2::mock::Composer* mComposer = nullptr;
 
+    mock::VsyncController* mVsyncController = new mock::VsyncController;
+    mock::VSyncTracker* mVSyncTracker = new mock::VSyncTracker;
     mock::EventThread* mEventThread = new mock::EventThread;
     mock::EventThread* mSFEventThread = new mock::EventThread;
 
diff --git a/services/surfaceflinger/tests/unittests/EventThreadTest.cpp b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
index 5cecb8e..f6bcadc 100644
--- a/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
+++ b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
@@ -52,9 +52,11 @@
 constexpr PhysicalDisplayId DISPLAY_ID_64BIT =
         PhysicalDisplayId::fromEdid(0xffu, 0xffffu, 0xffff'ffffu);
 
+constexpr std::chrono::duration VSYNC_PERIOD(16ms);
+
 } // namespace
 
-class EventThreadTest : public testing::Test, public IEventThreadCallback {
+class EventThreadTest : public testing::Test {
 protected:
     static constexpr std::chrono::nanoseconds kWorkDuration = 0ms;
     static constexpr std::chrono::nanoseconds kReadyDuration = 3ms;
@@ -95,7 +97,7 @@
     void expectConfigChangedEventReceivedByConnection(PhysicalDisplayId expectedDisplayId,
                                                       int32_t expectedConfigId,
                                                       nsecs_t expectedVsyncPeriod);
-    void expectThrottleVsyncReceived(TimePoint expectedTimestamp, uid_t);
+    void expectThrottleVsyncReceived(nsecs_t expectedTimestamp, uid_t);
     void expectUidFrameRateMappingEventReceivedByConnection(PhysicalDisplayId expectedDisplayId,
                                                             std::vector<FrameRateOverride>);
 
@@ -104,14 +106,6 @@
         mThread->onVsync(expectedPresentationTime, timestamp, deadlineTimestamp);
     }
 
-    // IEventThreadCallback overrides.
-    bool isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const override {
-        mThrottleVsyncCallRecorder.getInvocable()(expectedVsyncTime, uid);
-        return uid != mThrottledConnectionUid;
-    }
-
-    Fps getLeaderRenderFrameRate(uid_t uid) const override { return 60_Hz; }
-
     AsyncCallRecorderWithCannedReturn<
             scheduler::ScheduleResult (*)(scheduler::VSyncDispatch::CallbackToken,
                                           scheduler::VSyncDispatch::ScheduleTiming)>
@@ -127,11 +121,11 @@
     AsyncCallRecorder<void (*)(scheduler::VSyncDispatch::CallbackToken)>
             mVSyncCallbackUnregisterRecorder;
     AsyncCallRecorder<void (*)()> mResyncCallRecorder;
-    mutable AsyncCallRecorder<void (*)(TimePoint, uid_t)> mThrottleVsyncCallRecorder;
+    AsyncCallRecorder<void (*)(nsecs_t, uid_t)> mThrottleVsyncCallRecorder;
     ConnectionEventRecorder mConnectionEventCallRecorder{0};
     ConnectionEventRecorder mThrottledConnectionEventCallRecorder{0};
 
-    std::shared_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
+    std::unique_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
     std::unique_ptr<impl::EventThread> mThread;
     sp<MockEventThreadConnection> mConnection;
     sp<MockEventThreadConnection> mThrottledConnection;
@@ -146,12 +140,12 @@
             ::testing::UnitTest::GetInstance()->current_test_info();
     ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
 
-    auto mockDispatchPtr = std::make_shared<mock::VSyncDispatch>();
-    mVsyncSchedule = std::shared_ptr<scheduler::VsyncSchedule>(
-            new scheduler::VsyncSchedule(INTERNAL_DISPLAY_ID,
-                                         std::make_shared<mock::VSyncTracker>(), mockDispatchPtr,
-                                         nullptr));
-    mock::VSyncDispatch& mockDispatch = *mockDispatchPtr;
+    mVsyncSchedule = std::unique_ptr<scheduler::VsyncSchedule>(
+            new scheduler::VsyncSchedule(std::make_unique<mock::VSyncTracker>(),
+                                         std::make_unique<mock::VSyncDispatch>(), nullptr));
+
+    mock::VSyncDispatch& mockDispatch =
+            *static_cast<mock::VSyncDispatch*>(&mVsyncSchedule->getDispatch());
     EXPECT_CALL(mockDispatch, registerCallback(_, _))
             .WillRepeatedly(Invoke(mVSyncCallbackRegisterRecorder.getInvocable()));
     EXPECT_CALL(mockDispatch, schedule(_, _))
@@ -186,10 +180,19 @@
 }
 
 void EventThreadTest::createThread() {
+    const auto throttleVsync = [&](nsecs_t expectedVsyncTimestamp, uid_t uid) {
+        mThrottleVsyncCallRecorder.getInvocable()(expectedVsyncTimestamp, uid);
+        return (uid == mThrottledConnectionUid);
+    };
+    const auto getVsyncPeriod = [](uid_t uid) {
+        return VSYNC_PERIOD.count();
+    };
+
     mTokenManager = std::make_unique<frametimeline::impl::TokenManager>();
     mThread =
-            std::make_unique<impl::EventThread>("EventThreadTest", mVsyncSchedule, *this,
-                                                mTokenManager.get(), kWorkDuration, kReadyDuration);
+            std::make_unique<impl::EventThread>(/*std::move(source), */ "EventThreadTest",
+                                                *mVsyncSchedule, mTokenManager.get(), throttleVsync,
+                                                getVsyncPeriod, kWorkDuration, kReadyDuration);
 
     // EventThread should register itself as VSyncSource callback.
     EXPECT_TRUE(mVSyncCallbackRegisterRecorder.waitForCall().has_value());
@@ -222,7 +225,7 @@
     EXPECT_EQ(expectedReadyDuration.count(), std::get<1>(args.value()).readyDuration);
 }
 
-void EventThreadTest::expectThrottleVsyncReceived(TimePoint expectedTimestamp, uid_t uid) {
+void EventThreadTest::expectThrottleVsyncReceived(nsecs_t expectedTimestamp, uid_t uid) {
     auto args = mThrottleVsyncCallRecorder.waitForCall();
     ASSERT_TRUE(args.has_value());
     EXPECT_EQ(expectedTimestamp, std::get<0>(args.value()));
@@ -373,7 +376,7 @@
     // Use the received callback to signal a first vsync event.
     // The throttler should receive the event, as well as the connection.
     onVSyncEvent(123, 456, 789);
-    expectThrottleVsyncReceived(TimePoint::fromNs(456), mConnectionUid);
+    expectThrottleVsyncReceived(456, mConnectionUid);
     expectVsyncEventReceivedByConnection(123, 1u);
 
     // EventThread is requesting one more callback due to VsyncRequest::SingleSuppressCallback
@@ -491,17 +494,17 @@
     // Send a vsync event. EventThread should then make a call to the
     // throttler, and the connection.
     onVSyncEvent(123, 456, 789);
-    expectThrottleVsyncReceived(TimePoint::fromNs(456), mConnectionUid);
+    expectThrottleVsyncReceived(456, mConnectionUid);
     expectVsyncEventReceivedByConnection(123, 1u);
 
     // A second event should go to the same places.
     onVSyncEvent(456, 123, 0);
-    expectThrottleVsyncReceived(TimePoint::fromNs(123), mConnectionUid);
+    expectThrottleVsyncReceived(123, mConnectionUid);
     expectVsyncEventReceivedByConnection(456, 2u);
 
     // A third event should go to the same places.
     onVSyncEvent(789, 777, 111);
-    expectThrottleVsyncReceived(TimePoint::fromNs(777), mConnectionUid);
+    expectThrottleVsyncReceived(777, mConnectionUid);
     expectVsyncEventReceivedByConnection(789, 3u);
 }
 
@@ -740,7 +743,7 @@
     // Use the received callback to signal a first vsync event.
     // The throttler should receive the event, but not the connection.
     onVSyncEvent(123, 456, 789);
-    expectThrottleVsyncReceived(TimePoint::fromNs(456), mThrottledConnectionUid);
+    expectThrottleVsyncReceived(456, mThrottledConnectionUid);
     mThrottledConnectionEventCallRecorder.waitForUnexpectedCall();
     expectVSyncCallbackScheduleReceived(true);
 
@@ -748,7 +751,7 @@
     // The throttler should receive the event, but the connection should
     // not as it was only interested in the first.
     onVSyncEvent(456, 123, 0);
-    expectThrottleVsyncReceived(TimePoint::fromNs(123), mThrottledConnectionUid);
+    expectThrottleVsyncReceived(123, mThrottledConnectionUid);
     EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
     expectVSyncCallbackScheduleReceived(true);
 
diff --git a/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp b/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp
index 248061c..1cd9e49 100644
--- a/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp
@@ -137,7 +137,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp b/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
index ff7c2c9..ac63a0e 100644
--- a/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
@@ -125,7 +125,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/GameModeTest.cpp b/services/surfaceflinger/tests/unittests/GameModeTest.cpp
index ddf871b..29aa717 100644
--- a/services/surfaceflinger/tests/unittests/GameModeTest.cpp
+++ b/services/surfaceflinger/tests/unittests/GameModeTest.cpp
@@ -76,7 +76,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/HWComposerTest.cpp b/services/surfaceflinger/tests/unittests/HWComposerTest.cpp
index 9534f3b..da00377 100644
--- a/services/surfaceflinger/tests/unittests/HWComposerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/HWComposerTest.cpp
@@ -52,6 +52,7 @@
 
 using Hwc2::Config;
 
+using ::aidl::android::hardware::graphics::composer3::RefreshRateChangedDebugData;
 using ::testing::_;
 using ::testing::DoAll;
 using ::testing::ElementsAreArray;
@@ -155,6 +156,7 @@
                  void(hal::HWDisplayId, const hal::VsyncPeriodChangeTimeline&));
     MOCK_METHOD1(onComposerHalSeamlessPossible, void(hal::HWDisplayId));
     MOCK_METHOD1(onComposerHalVsyncIdle, void(hal::HWDisplayId));
+    MOCK_METHOD(void, onRefreshRateChangedDebug, (const RefreshRateChangedDebugData&), (override));
 };
 
 struct HWComposerSetCallbackTest : HWComposerTest {
diff --git a/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp b/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp
index 23506b1..ee42e19 100644
--- a/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp
+++ b/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp
@@ -64,7 +64,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
index 8f1b450..7aa5201 100644
--- a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
@@ -67,12 +67,12 @@
 
 struct MessageQueueTest : testing::Test {
     void SetUp() override {
-        EXPECT_CALL(*mVSyncDispatch, registerCallback(_, "sf")).WillOnce(Return(mCallbackToken));
+        EXPECT_CALL(mVSyncDispatch, registerCallback(_, "sf")).WillOnce(Return(mCallbackToken));
         EXPECT_NO_FATAL_FAILURE(mEventQueue.initVsync(mVSyncDispatch, mTokenManager, kDuration));
-        EXPECT_CALL(*mVSyncDispatch, unregisterCallback(mCallbackToken)).Times(1);
+        EXPECT_CALL(mVSyncDispatch, unregisterCallback(mCallbackToken)).Times(1);
     }
 
-    std::shared_ptr<mock::VSyncDispatch> mVSyncDispatch = std::make_shared<mock::VSyncDispatch>();
+    mock::VSyncDispatch mVSyncDispatch;
     MockTokenManager mTokenManager;
     TestableMessageQueue mEventQueue;
 
@@ -90,7 +90,7 @@
                                                                  .earliestVsync = 0};
     EXPECT_FALSE(mEventQueue.getScheduledFrameTime());
 
-    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
+    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
@@ -103,13 +103,13 @@
                                                                  .readyDuration = 0,
                                                                  .earliestVsync = 0};
 
-    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
+    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
     EXPECT_EQ(1234, mEventQueue.getScheduledFrameTime()->time_since_epoch().count());
 
-    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(4567));
+    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(4567));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
@@ -122,7 +122,7 @@
                                                                  .readyDuration = 0,
                                                                  .earliestVsync = 0};
 
-    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
+    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
@@ -149,7 +149,7 @@
                                                      .readyDuration = 0,
                                                      .earliestVsync = kPresentTime.ns()};
 
-    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timingAfterCallback)).WillOnce(Return(0));
+    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timingAfterCallback)).WillOnce(Return(0));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 }
 
@@ -161,7 +161,7 @@
                                                      .readyDuration = 0,
                                                      .earliestVsync = 0};
 
-    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(0));
+    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(0));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 }
 
diff --git a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
index f0dd06d..4b15385 100644
--- a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-#include <ftl/fake_guard.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 #include <log/log.h>
@@ -177,7 +176,7 @@
     ASSERT_EQ(0u, mScheduler->getNumActiveLayers());
 
     constexpr hal::PowerMode kPowerModeOn = hal::PowerMode::ON;
-    FTL_FAKE_GUARD(kMainThreadContext, mScheduler->setDisplayPowerMode(kDisplayId1, kPowerModeOn));
+    mScheduler->setDisplayPowerMode(kPowerModeOn);
 
     constexpr uint32_t kDisplayArea = 999'999;
     mScheduler->onActiveDisplayAreaChanged(kDisplayArea);
@@ -249,7 +248,7 @@
     mScheduler->recordLayerHistory(layer.get(), 0, LayerHistory::LayerUpdateType::Buffer);
 
     constexpr hal::PowerMode kPowerModeOn = hal::PowerMode::ON;
-    FTL_FAKE_GUARD(kMainThreadContext, mScheduler->setDisplayPowerMode(kDisplayId1, kPowerModeOn));
+    mScheduler->setDisplayPowerMode(kPowerModeOn);
 
     constexpr uint32_t kDisplayArea = 999'999;
     mScheduler->onActiveDisplayAreaChanged(kDisplayArea);
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
index 31f948f..ad3bd35 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
@@ -100,7 +100,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
index 98644aa..f553a23 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
@@ -44,10 +44,7 @@
     // We expect a scheduled commit for the display transaction.
     EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
-    EXPECT_CALL(static_cast<mock::VSyncTracker&>(
-                        mFlinger.scheduler()->getVsyncSchedule()->getTracker()),
-                nextAnticipatedVSyncTimeFrom(_))
-            .WillRepeatedly(Return(0));
+    EXPECT_CALL(*mVSyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
 
     // --------------------------------------------------------------------
     // Invocation
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp
index 2a0f2ef..622717f 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp
@@ -116,7 +116,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
index 80ad22c..88ddb0f 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
@@ -61,7 +61,7 @@
 struct EventThreadBaseSupportedVariant {
     static void setupVsyncAndEventThreadNoCallExpectations(DisplayTransactionTest* test) {
         // The callback should not be notified to toggle VSYNC.
-        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, _)).Times(0);
+        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_)).Times(0);
 
         // The event thread should not be notified.
         EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
@@ -71,28 +71,24 @@
 
 struct EventThreadNotSupportedVariant : public EventThreadBaseSupportedVariant {
     static void setupAcquireAndEnableVsyncCallExpectations(DisplayTransactionTest* test) {
-        // The callback should be notified to enable VSYNC.
-        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, true)).Times(1);
+        // These calls are only expected for the primary display.
 
-        // The event thread should not be notified.
-        EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
-        EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(0);
+        // Instead, expect no calls here.
+        setupVsyncAndEventThreadNoCallExpectations(test);
     }
 
     static void setupReleaseAndDisableVsyncCallExpectations(DisplayTransactionTest* test) {
-        // The callback should be notified to disable VSYNC.
-        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, false)).Times(1);
+        // These calls are only expected for the primary display.
 
-        // The event thread should not be notified.
-        EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
-        EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(0);
+        // Instead, expect no calls here.
+        setupVsyncAndEventThreadNoCallExpectations(test);
     }
 };
 
 struct EventThreadIsSupportedVariant : public EventThreadBaseSupportedVariant {
     static void setupAcquireAndEnableVsyncCallExpectations(DisplayTransactionTest* test) {
         // The callback should be notified to enable VSYNC.
-        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, true)).Times(1);
+        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(true)).Times(1);
 
         // The event thread should be notified that the screen was acquired.
         EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(1);
@@ -100,7 +96,7 @@
 
     static void setupReleaseAndDisableVsyncCallExpectations(DisplayTransactionTest* test) {
         // The callback should be notified to disable VSYNC.
-        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, false)).Times(1);
+        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(false)).Times(1);
 
         // The event thread should not be notified that the screen was released.
         EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(1);
@@ -109,12 +105,8 @@
 
 struct DispSyncIsSupportedVariant {
     static void setupResetModelCallExpectations(DisplayTransactionTest* test) {
-        auto vsyncSchedule = test->mFlinger.scheduler()->getVsyncSchedule();
-        EXPECT_CALL(static_cast<mock::VsyncController&>(vsyncSchedule->getController()),
-                    startPeriodTransition(DEFAULT_VSYNC_PERIOD, false))
-                .Times(1);
-        EXPECT_CALL(static_cast<mock::VSyncTracker&>(vsyncSchedule->getTracker()), resetModel())
-                .Times(1);
+        EXPECT_CALL(*test->mVsyncController, startPeriodTransition(DEFAULT_VSYNC_PERIOD)).Times(1);
+        EXPECT_CALL(*test->mVSyncTracker, resetModel()).Times(1);
     }
 };
 
@@ -270,9 +262,8 @@
         return display;
     }
 
-    static void setInitialHwVsyncEnabled(DisplayTransactionTest* test, PhysicalDisplayId id,
-                                         bool enabled) {
-        test->mFlinger.scheduler()->setInitialHwVsyncEnabled(id, enabled);
+    static void setInitialHwVsyncEnabled(DisplayTransactionTest* test, bool enabled) {
+        test->mFlinger.scheduler()->setInitialHwVsyncEnabled(enabled);
     }
 
     static void setupRepaintEverythingCallExpectations(DisplayTransactionTest* test) {
@@ -309,11 +300,6 @@
 // A sample configuration for the external display.
 // In addition to not having event thread support, we emulate not having doze
 // support.
-// FIXME (b/267483230): ExternalDisplay supports the features tracked in
-// DispSyncIsSupportedVariant, but is the follower, so the
-// expectations set by DispSyncIsSupportedVariant don't match (wrong schedule).
-// We need a way to retrieve the proper DisplayId from
-// setupResetModelCallExpectations (or pass it in).
 template <typename TransitionVariant>
 using ExternalDisplayPowerCase =
         DisplayPowerCase<ExternalDisplayVariant, DozeNotSupportedVariant<ExternalDisplayVariant>,
@@ -343,12 +329,9 @@
     Case::Doze::setupComposerCallExpectations(this);
     auto display =
             Case::injectDisplayWithInitialPowerMode(this, Case::Transition::INITIAL_POWER_MODE);
-    auto displayId = display->getId();
-    if (auto physicalDisplayId = PhysicalDisplayId::tryCast(displayId)) {
-        Case::setInitialHwVsyncEnabled(this, *physicalDisplayId,
-                                       PowerModeInitialVSyncEnabled<
-                                               Case::Transition::INITIAL_POWER_MODE>::value);
-    }
+    Case::setInitialHwVsyncEnabled(this,
+                                   PowerModeInitialVSyncEnabled<
+                                           Case::Transition::INITIAL_POWER_MODE>::value);
 
     // --------------------------------------------------------------------
     // Call Expectations
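[Illustrative aside, not part of the patch: the *Variant structs above are policy types whose static helpers install expectations, so one templated power-transition case can be stamped out per display configuration. A minimal sketch of that shape, with hypothetical names; the real variants cover event-thread, doze, and dispsync support.]

    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    struct MockCallback {
        MOCK_METHOD(void, setVsyncEnabled, (bool enabled));
    };

    struct VariantFixture : testing::Test {
        MockCallback mCallback;
    };

    // Each variant installs the expectations the shared test body should satisfy.
    struct VsyncToggledVariant {
        static constexpr bool kTogglesVsync = true;
        static void setupExpectations(VariantFixture* test) {
            EXPECT_CALL(test->mCallback, setVsyncEnabled(true)).Times(1);
        }
    };

    struct VsyncUntouchedVariant {
        static constexpr bool kTogglesVsync = false;
        static void setupExpectations(VariantFixture* test) {
            EXPECT_CALL(test->mCallback, setVsyncEnabled(testing::_)).Times(0);
        }
    };

    template <typename Variant>
    void runTransition(VariantFixture* test) {
        Variant::setupExpectations(test);
        if (Variant::kTogglesVsync) {
            test->mCallback.setVsyncEnabled(true);  // stand-in for the code under test
        }
    }

    TEST_F(VariantFixture, ToggledVariant)   { runTransition<VsyncToggledVariant>(this); }
    TEST_F(VariantFixture, UntouchedVariant) { runTransition<VsyncUntouchedVariant>(this); }
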
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp
index 7e14588..fed6a1a 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp
@@ -34,7 +34,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/TestableScheduler.h b/services/surfaceflinger/tests/unittests/TestableScheduler.h
index c360f93..bd3f3ca 100644
--- a/services/surfaceflinger/tests/unittests/TestableScheduler.h
+++ b/services/surfaceflinger/tests/unittests/TestableScheduler.h
@@ -37,16 +37,19 @@
 public:
     TestableScheduler(RefreshRateSelectorPtr selectorPtr, ISchedulerCallback& callback)
           : TestableScheduler(std::make_unique<mock::VsyncController>(),
-                              std::make_shared<mock::VSyncTracker>(), std::move(selectorPtr),
+                              std::make_unique<mock::VSyncTracker>(), std::move(selectorPtr),
                               /* modulatorPtr */ nullptr, callback) {}
 
     TestableScheduler(std::unique_ptr<VsyncController> controller,
-                      std::shared_ptr<VSyncTracker> tracker, RefreshRateSelectorPtr selectorPtr,
+                      std::unique_ptr<VSyncTracker> tracker, RefreshRateSelectorPtr selectorPtr,
                       sp<VsyncModulator> modulatorPtr, ISchedulerCallback& callback)
           : Scheduler(*this, callback, Feature::kContentDetection, std::move(modulatorPtr)) {
+        mVsyncSchedule = std::unique_ptr<VsyncSchedule>(
+                new VsyncSchedule(std::move(tracker), std::make_unique<mock::VSyncDispatch>(),
+                                  std::move(controller)));
+
         const auto displayId = selectorPtr->getActiveMode().modePtr->getPhysicalDisplayId();
-        registerDisplay(displayId, std::move(selectorPtr), std::move(controller),
-                        std::move(tracker));
+        registerDisplay(displayId, std::move(selectorPtr));
 
         ON_CALL(*this, postMessage).WillByDefault([](sp<MessageHandler>&& handler) {
             // Execute task to prevent broken promise exception on destruction.
@@ -70,21 +73,8 @@
     }
 
     void registerDisplay(PhysicalDisplayId displayId, RefreshRateSelectorPtr selectorPtr) {
-        registerDisplay(displayId, std::move(selectorPtr),
-                        std::make_unique<mock::VsyncController>(),
-                        std::make_shared<mock::VSyncTracker>());
-    }
-
-    void registerDisplay(PhysicalDisplayId displayId, RefreshRateSelectorPtr selectorPtr,
-                         std::unique_ptr<VsyncController> controller,
-                         std::shared_ptr<VSyncTracker> tracker) {
         ftl::FakeGuard guard(kMainThreadContext);
-        Scheduler::registerDisplayInternal(displayId, std::move(selectorPtr),
-                                           std::shared_ptr<VsyncSchedule>(
-                                                   new VsyncSchedule(displayId, std::move(tracker),
-                                                                     std::make_shared<
-                                                                             mock::VSyncDispatch>(),
-                                                                     std::move(controller))));
+        Scheduler::registerDisplay(displayId, std::move(selectorPtr));
     }
 
     void unregisterDisplay(PhysicalDisplayId displayId) {
@@ -160,11 +150,10 @@
         Scheduler::onNonPrimaryDisplayModeChanged(handle, mode);
     }
 
-    void setInitialHwVsyncEnabled(PhysicalDisplayId id, bool enabled) {
-        auto schedule = getVsyncSchedule(id);
-        std::lock_guard<std::mutex> lock(schedule->mHwVsyncLock);
-        schedule->mHwVsyncState = enabled ? VsyncSchedule::HwVsyncState::Enabled
-                                          : VsyncSchedule::HwVsyncState::Disabled;
+    void setInitialHwVsyncEnabled(bool enabled) {
+        std::lock_guard<std::mutex> lock(mVsyncSchedule->mHwVsyncLock);
+        mVsyncSchedule->mHwVsyncState = enabled ? VsyncSchedule::HwVsyncState::Enabled
+                                                : VsyncSchedule::HwVsyncState::Disabled;
     }
 
 private:
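[Illustrative aside, not part of the patch: with this change TestableScheduler owns a single schedule assembled from moved-in unique_ptr mocks, and its test-only setter writes the schedule's guarded state directly under the lock. A minimal sketch of that ownership and locking shape, using hypothetical Tracker/Schedule/TestableOwner names.]

    #include <memory>
    #include <mutex>

    struct Tracker {
        virtual ~Tracker() = default;
    };

    class Schedule {
    public:
        explicit Schedule(std::unique_ptr<Tracker> tracker) : mTracker(std::move(tracker)) {}

    private:
        friend class TestableOwner;  // lets the test helper reach the guarded state
        std::unique_ptr<Tracker> mTracker;
        std::mutex mHwVsyncLock;
        bool mHwVsyncEnabled = false;
    };

    class TestableOwner {
    public:
        explicit TestableOwner(std::unique_ptr<Tracker> tracker)
              : mSchedule(std::make_unique<Schedule>(std::move(tracker))) {}

        // Test-only backdoor: seed the initial HW VSYNC state under the schedule's lock.
        void setInitialHwVsyncEnabled(bool enabled) {
            std::lock_guard<std::mutex> lock(mSchedule->mHwVsyncLock);
            mSchedule->mHwVsyncEnabled = enabled;
        }

    private:
        std::unique_ptr<Schedule> mSchedule;
    };
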
diff --git a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
index 63b79a4..68c738f 100644
--- a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
+++ b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
@@ -201,7 +201,7 @@
             std::variant<OneDisplayMode, TwoDisplayModes, RefreshRateSelectorPtr>;
 
     void setupScheduler(std::unique_ptr<scheduler::VsyncController> vsyncController,
-                        std::shared_ptr<scheduler::VSyncTracker> vsyncTracker,
+                        std::unique_ptr<scheduler::VSyncTracker> vsyncTracker,
                         std::unique_ptr<EventThread> appEventThread,
                         std::unique_ptr<EventThread> sfEventThread,
                         SchedulerCallbackImpl callbackImpl = SchedulerCallbackImpl::kNoOp,
@@ -253,7 +253,7 @@
                                                           std::move(modulatorPtr), callback);
         }
 
-        mScheduler->initVsync(mScheduler->getVsyncSchedule()->getDispatch(), *mTokenManager, 0ms);
+        mScheduler->initVsync(mScheduler->getVsyncSchedule().getDispatch(), *mTokenManager, 0ms);
 
         mFlinger->mAppConnectionHandle = mScheduler->createConnection(std::move(appEventThread));
         mFlinger->mSfConnectionHandle = mScheduler->createConnection(std::move(sfEventThread));
diff --git a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
index a9a617b..859f702 100644
--- a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
@@ -83,14 +83,15 @@
 
         mFlinger.setupComposer(std::make_unique<Hwc2::mock::Composer>());
         mFlinger.setupScheduler(std::unique_ptr<mock::VsyncController>(mVsyncController),
-                                mVSyncTracker, std::move(eventThread), std::move(sfEventThread));
+                                std::unique_ptr<mock::VSyncTracker>(mVSyncTracker),
+                                std::move(eventThread), std::move(sfEventThread));
         mFlinger.flinger()->addTransactionReadyFilters();
     }
 
     TestableSurfaceFlinger mFlinger;
 
     mock::VsyncController* mVsyncController = new mock::VsyncController();
-    std::shared_ptr<mock::VSyncTracker> mVSyncTracker = std::make_shared<mock::VSyncTracker>();
+    mock::VSyncTracker* mVSyncTracker = new mock::VSyncTracker();
 
     struct TransactionInfo {
         Vector<ComposerState> states;
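[Illustrative aside, not part of the patch: TransactionApplicationTest now news the mock tracker, keeps the raw pointer for later EXPECT_CALLs, and hands ownership to the flinger through a unique_ptr. A minimal sketch of that hand-off, with hypothetical Tracker/Consumer names; the raw pointer stays valid because the consumer owns the mock for the rest of the test.]

    #include <memory>
    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    struct Tracker {
        virtual ~Tracker() = default;
        virtual long nextVsyncFrom(long t) = 0;
    };

    struct MockTracker : Tracker {
        MOCK_METHOD(long, nextVsyncFrom, (long t), (override));
    };

    struct Consumer {
        explicit Consumer(std::unique_ptr<Tracker> tracker) : mTracker(std::move(tracker)) {}
        long query() { return mTracker->nextVsyncFrom(0); }
        std::unique_ptr<Tracker> mTracker;
    };

    TEST(OwnershipSketch, RawPointerOutlivesHandOff) {
        MockTracker* tracker = new MockTracker();               // non-owning handle kept by the test
        Consumer consumer{std::unique_ptr<Tracker>(tracker)};   // ownership moves to the consumer
        EXPECT_CALL(*tracker, nextVsyncFrom(0)).WillOnce(testing::Return(16'666'667));
        EXPECT_EQ(16'666'667, consumer.query());
    }
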
diff --git a/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp b/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
index b228bcb..1173d1c 100644
--- a/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
@@ -85,7 +85,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
index bfebecd..ae03db4 100644
--- a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
@@ -84,7 +84,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp b/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
index aa33716..da87f1d 100644
--- a/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
@@ -117,7 +117,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
index fcd2f56..47c2dee 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
@@ -109,8 +109,7 @@
 
 class RepeatingCallbackReceiver {
 public:
-    RepeatingCallbackReceiver(std::shared_ptr<VSyncDispatch> dispatch, nsecs_t workload,
-                              nsecs_t readyDuration)
+    RepeatingCallbackReceiver(VSyncDispatch& dispatch, nsecs_t workload, nsecs_t readyDuration)
           : mWorkload(workload),
             mReadyDuration(readyDuration),
             mCallback(
@@ -167,10 +166,9 @@
 };
 
 TEST_F(VSyncDispatchRealtimeTest, triple_alarm) {
-    auto tracker = std::make_shared<FixedRateIdealStubTracker>();
-    auto dispatch =
-            std::make_shared<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
-                                                      mDispatchGroupThreshold, mVsyncMoveThreshold);
+    FixedRateIdealStubTracker tracker;
+    VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
+                                     mVsyncMoveThreshold);
 
     static size_t constexpr num_clients = 3;
     std::array<RepeatingCallbackReceiver, num_clients>
@@ -197,15 +195,14 @@
 // starts at 333hz, slides down to 43hz
 TEST_F(VSyncDispatchRealtimeTest, vascillating_vrr) {
     auto next_vsync_interval = toNs(3ms);
-    auto tracker = std::make_shared<VRRStubTracker>(next_vsync_interval);
-    auto dispatch =
-            std::make_shared<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
-                                                      mDispatchGroupThreshold, mVsyncMoveThreshold);
+    VRRStubTracker tracker(next_vsync_interval);
+    VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
+                                     mVsyncMoveThreshold);
 
     RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms), toNs(5ms));
 
     auto const on_each_frame = [&](nsecs_t last_known) {
-        tracker->set_interval(next_vsync_interval += toNs(1ms), last_known);
+        tracker.set_interval(next_vsync_interval += toNs(1ms), last_known);
     };
 
     std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
@@ -216,10 +213,9 @@
 
 // starts at 333hz, jumps to 200hz at frame 10
 TEST_F(VSyncDispatchRealtimeTest, fixed_jump) {
-    auto tracker = std::make_shared<VRRStubTracker>(toNs(3ms));
-    auto dispatch =
-            std::make_shared<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
-                                                      mDispatchGroupThreshold, mVsyncMoveThreshold);
+    VRRStubTracker tracker(toNs(3ms));
+    VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
+                                     mVsyncMoveThreshold);
 
     RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms), toNs(5ms));
 
@@ -227,7 +223,7 @@
     auto constexpr jump_frame_at = 10u;
     auto const on_each_frame = [&](nsecs_t last_known) {
         if (jump_frame_counter++ == jump_frame_at) {
-            tracker->set_interval(toNs(5ms), last_known);
+            tracker.set_interval(toNs(5ms), last_known);
         }
     };
     std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
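[Illustrative aside, not part of the patch: once the dispatch is a plain object rather than a shared_ptr, helpers such as RepeatingCallbackReceiver here and CountingCallback in the next file can hold it by reference and pair registerCallback/unregisterCallback in their constructor and destructor. A minimal RAII sketch with hypothetical Dispatch/Token types; the reference means the dispatch must outlive the receiver.]

    #include <functional>
    #include <string>
    #include <vector>

    struct Dispatch {
        using Token = size_t;
        Token registerCallback(std::function<void(long)> cb, std::string /*name*/) {
            mCallbacks.push_back(std::move(cb));
            return mCallbacks.size() - 1;
        }
        void unregisterCallback(Token token) { mCallbacks[token] = nullptr; }
        std::vector<std::function<void(long)>> mCallbacks;
    };

    class CountingReceiver {
    public:
        explicit CountingReceiver(Dispatch& dispatch)
              : mDispatch(dispatch),
                mToken(dispatch.registerCallback([this](long when) { mCalls.push_back(when); },
                                                 "test")) {}
        ~CountingReceiver() { mDispatch.unregisterCallback(mToken); }  // RAII unregistration

        Dispatch& mDispatch;  // reference, not shared ownership
        Dispatch::Token mToken;
        std::vector<long> mCalls;
    };
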
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
index 82daffd..14a2860 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
@@ -116,14 +116,13 @@
 
 class CountingCallback {
 public:
-    CountingCallback(std::shared_ptr<VSyncDispatch> dispatch)
-          : mDispatch(std::move(dispatch)),
-            mToken(mDispatch->registerCallback(std::bind(&CountingCallback::counter, this,
-                                                         std::placeholders::_1,
-                                                         std::placeholders::_2,
-                                                         std::placeholders::_3),
-                                               "test")) {}
-    ~CountingCallback() { mDispatch->unregisterCallback(mToken); }
+    CountingCallback(VSyncDispatch& dispatch)
+          : mDispatch(dispatch),
+            mToken(dispatch.registerCallback(std::bind(&CountingCallback::counter, this,
+                                                       std::placeholders::_1, std::placeholders::_2,
+                                                       std::placeholders::_3),
+                                             "test")) {}
+    ~CountingCallback() { mDispatch.unregisterCallback(mToken); }
 
     operator VSyncDispatch::CallbackToken() const { return mToken; }
 
@@ -133,7 +132,7 @@
         mReadyTime.push_back(readyTime);
     }
 
-    std::shared_ptr<VSyncDispatch> mDispatch;
+    VSyncDispatch& mDispatch;
     VSyncDispatch::CallbackToken mToken;
     std::vector<nsecs_t> mCalls;
     std::vector<nsecs_t> mWakeupTime;
@@ -142,12 +141,12 @@
 
 class PausingCallback {
 public:
-    PausingCallback(std::shared_ptr<VSyncDispatch> dispatch, std::chrono::milliseconds pauseAmount)
-          : mDispatch(std::move(dispatch)),
-            mToken(mDispatch->registerCallback(std::bind(&PausingCallback::pause, this,
-                                                         std::placeholders::_1,
-                                                         std::placeholders::_2),
-                                               "test")),
+    PausingCallback(VSyncDispatch& dispatch, std::chrono::milliseconds pauseAmount)
+          : mDispatch(dispatch),
+            mToken(dispatch.registerCallback(std::bind(&PausingCallback::pause, this,
+                                                       std::placeholders::_1,
+                                                       std::placeholders::_2),
+                                             "test")),
             mRegistered(true),
             mPauseAmount(pauseAmount) {}
     ~PausingCallback() { unregister(); }
@@ -182,12 +181,12 @@
 
     void unregister() {
         if (mRegistered) {
-            mDispatch->unregisterCallback(mToken);
+            mDispatch.unregisterCallback(mToken);
             mRegistered = false;
         }
     }
 
-    std::shared_ptr<VSyncDispatch> mDispatch;
+    VSyncDispatch& mDispatch;
     VSyncDispatch::CallbackToken mToken;
     bool mRegistered = true;
 
@@ -232,26 +231,22 @@
     static nsecs_t constexpr mDispatchGroupThreshold = 5;
     nsecs_t const mPeriod = 1000;
     nsecs_t const mVsyncMoveThreshold = 300;
-    std::shared_ptr<NiceMock<MockVSyncTracker>> mStubTracker =
-            std::make_shared<NiceMock<MockVSyncTracker>>(mPeriod);
-    std::shared_ptr<VSyncDispatch> mDispatch =
-            std::make_shared<VSyncDispatchTimerQueue>(createTimeKeeper(), mStubTracker,
-                                                      mDispatchGroupThreshold, mVsyncMoveThreshold);
+    NiceMock<MockVSyncTracker> mStubTracker{mPeriod};
+    VSyncDispatchTimerQueue mDispatch{createTimeKeeper(), mStubTracker, mDispatchGroupThreshold,
+                                      mVsyncMoveThreshold};
 };
 
 TEST_F(VSyncDispatchTimerQueueTest, unregistersSetAlarmOnDestruction) {
     EXPECT_CALL(mMockClock, alarmAt(_, 900));
     EXPECT_CALL(mMockClock, alarmCancel());
     {
-        std::shared_ptr<VSyncDispatch> mDispatch =
-                std::make_shared<VSyncDispatchTimerQueue>(createTimeKeeper(), mStubTracker,
-                                                          mDispatchGroupThreshold,
-                                                          mVsyncMoveThreshold);
+        VSyncDispatchTimerQueue mDispatch{createTimeKeeper(), mStubTracker, mDispatchGroupThreshold,
+                                          mVsyncMoveThreshold};
         CountingCallback cb(mDispatch);
-        const auto result = mDispatch->schedule(cb,
-                                                {.workDuration = 100,
-                                                 .readyDuration = 0,
-                                                 .earliestVsync = 1000});
+        const auto result = mDispatch.schedule(cb,
+                                               {.workDuration = 100,
+                                                .readyDuration = 0,
+                                                .earliestVsync = 1000});
         EXPECT_TRUE(result.has_value());
         EXPECT_EQ(900, *result);
     }
@@ -262,10 +257,10 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 900));
 
     CountingCallback cb(mDispatch);
-    const auto result = mDispatch->schedule(cb,
-                                            {.workDuration = 100,
-                                             .readyDuration = 0,
-                                             .earliestVsync = intended});
+    const auto result = mDispatch.schedule(cb,
+                                           {.workDuration = 100,
+                                            .readyDuration = 0,
+                                            .earliestVsync = intended});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
 
@@ -282,15 +277,14 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 700)).InSequence(seq);
 
     CountingCallback cb(mDispatch);
-    auto result = mDispatch->schedule(cb,
-                                      {.workDuration = 100,
-                                       .readyDuration = 0,
-                                       .earliestVsync = intended});
+    auto result = mDispatch.schedule(cb,
+                                     {.workDuration = 100,
+                                      .readyDuration = 0,
+                                      .earliestVsync = intended});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
 
-    result =
-            mDispatch->update(cb,
+    result = mDispatch.update(cb,
                               {.workDuration = 300, .readyDuration = 0, .earliestVsync = intended});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(700, *result);
@@ -308,17 +302,17 @@
 
     CountingCallback cb(mDispatch);
     const auto result =
-            mDispatch->update(cb,
-                              {.workDuration = 300, .readyDuration = 0, .earliestVsync = intended});
+            mDispatch.update(cb,
+                             {.workDuration = 300, .readyDuration = 0, .earliestVsync = intended});
     EXPECT_FALSE(result.has_value());
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, basicAlarmSettingFutureWithAdjustmentToTrueVsync) {
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000)).WillOnce(Return(1150));
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000)).WillOnce(Return(1150));
     EXPECT_CALL(mMockClock, alarmAt(_, 1050));
 
     CountingCallback cb(mDispatch);
-    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
     advanceToNextCallback();
 
     ASSERT_THAT(cb.mCalls.size(), Eq(1));
@@ -329,15 +323,15 @@
     auto const now = 234;
     mMockClock.advanceBy(234);
     auto const workDuration = 10 * mPeriod;
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(now + workDuration))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(now + workDuration))
             .WillOnce(Return(mPeriod * 11));
     EXPECT_CALL(mMockClock, alarmAt(_, mPeriod));
 
     CountingCallback cb(mDispatch);
-    const auto result = mDispatch->schedule(cb,
-                                            {.workDuration = workDuration,
-                                             .readyDuration = 0,
-                                             .earliestVsync = mPeriod});
+    const auto result = mDispatch.schedule(cb,
+                                           {.workDuration = workDuration,
+                                            .readyDuration = 0,
+                                            .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod, *result);
 }
@@ -347,13 +341,12 @@
     EXPECT_CALL(mMockClock, alarmCancel());
 
     CountingCallback cb(mDispatch);
-    const auto result = mDispatch->schedule(cb,
-                                            {.workDuration = 100,
-                                             .readyDuration = 0,
-                                             .earliestVsync = mPeriod});
+    const auto result =
+            mDispatch.schedule(cb,
+                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod - 100, *result);
-    EXPECT_EQ(mDispatch->cancel(cb), CancelResult::Cancelled);
+    EXPECT_EQ(mDispatch.cancel(cb), CancelResult::Cancelled);
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, basicAlarmCancelTooLate) {
@@ -361,14 +354,13 @@
     EXPECT_CALL(mMockClock, alarmCancel());
 
     CountingCallback cb(mDispatch);
-    const auto result = mDispatch->schedule(cb,
-                                            {.workDuration = 100,
-                                             .readyDuration = 0,
-                                             .earliestVsync = mPeriod});
+    const auto result =
+            mDispatch.schedule(cb,
+                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod - 100, *result);
     mMockClock.advanceBy(950);
-    EXPECT_EQ(mDispatch->cancel(cb), CancelResult::TooLate);
+    EXPECT_EQ(mDispatch.cancel(cb), CancelResult::TooLate);
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, basicAlarmCancelTooLateWhenRunning) {
@@ -376,16 +368,15 @@
     EXPECT_CALL(mMockClock, alarmCancel());
 
     PausingCallback cb(mDispatch, std::chrono::duration_cast<std::chrono::milliseconds>(1s));
-    const auto result = mDispatch->schedule(cb,
-                                            {.workDuration = 100,
-                                             .readyDuration = 0,
-                                             .earliestVsync = mPeriod});
+    const auto result =
+            mDispatch.schedule(cb,
+                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod - 100, *result);
 
     std::thread pausingThread([&] { mMockClock.advanceToNextCallback(); });
     EXPECT_TRUE(cb.waitForPause());
-    EXPECT_EQ(mDispatch->cancel(cb), CancelResult::TooLate);
+    EXPECT_EQ(mDispatch.cancel(cb), CancelResult::TooLate);
     cb.unpause();
     pausingThread.join();
 }
@@ -398,10 +389,9 @@
 
     PausingCallback cb(mDispatch, 50ms);
     cb.stashResource(resource);
-    const auto result = mDispatch->schedule(cb,
-                                            {.workDuration = 100,
-                                             .readyDuration = 0,
-                                             .earliestVsync = mPeriod});
+    const auto result =
+            mDispatch.schedule(cb,
+                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod - 100, *result);
 
@@ -418,7 +408,7 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, basicTwoAlarmSetting) {
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
             .Times(4)
             .WillOnce(Return(1055))
             .WillOnce(Return(1063))
@@ -433,8 +423,8 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch->schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
-    mDispatch->schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
+    mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+    mDispatch.schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
 
     advanceToNextCallback();
     advanceToNextCallback();
@@ -446,7 +436,7 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, noCloseCallbacksAfterPeriodChange) {
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
             .Times(4)
             .WillOnce(Return(1000))
             .WillOnce(Return(2000))
@@ -460,21 +450,21 @@
 
     CountingCallback cb(mDispatch);
 
-    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 0});
+    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 0});
 
     advanceToNextCallback();
 
     ASSERT_THAT(cb.mCalls.size(), Eq(1));
     EXPECT_THAT(cb.mCalls[0], Eq(1000));
 
-    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
 
     ASSERT_THAT(cb.mCalls.size(), Eq(2));
     EXPECT_THAT(cb.mCalls[1], Eq(2000));
 
-    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
 
     advanceToNextCallback();
 
@@ -483,7 +473,7 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, rearmsFaroutTimeoutWhenCancellingCloseOne) {
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
             .Times(4)
             .WillOnce(Return(10000))
             .WillOnce(Return(1000))
@@ -498,10 +488,10 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch->schedule(cb0,
-                        {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod * 10});
-    mDispatch->schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
-    mDispatch->cancel(cb1);
+    mDispatch.schedule(cb0,
+                       {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod * 10});
+    mDispatch.schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
+    mDispatch.cancel(cb1);
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, noUnnecessaryRearmsWhenRescheduling) {
@@ -512,9 +502,9 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb1, {.workDuration = 300, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb1, {.workDuration = 300, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
 }
 
@@ -527,9 +517,9 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
 }
 
@@ -547,10 +537,10 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb1,
-                        {.workDuration = closeOffset, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb1,
+                       {.workDuration = closeOffset, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
     ASSERT_THAT(cb0.mCalls.size(), Eq(1));
@@ -558,11 +548,9 @@
     ASSERT_THAT(cb1.mCalls.size(), Eq(1));
     EXPECT_THAT(cb1.mCalls[0], Eq(mPeriod));
 
-    mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 2000});
-    mDispatch->schedule(cb1,
-                        {.workDuration = notCloseOffset,
-                         .readyDuration = 0,
-                         .earliestVsync = 2000});
+    mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch.schedule(cb1,
+                       {.workDuration = notCloseOffset, .readyDuration = 0, .earliestVsync = 2000});
     advanceToNextCallback();
     ASSERT_THAT(cb1.mCalls.size(), Eq(2));
     EXPECT_THAT(cb1.mCalls[1], Eq(2000));
@@ -582,32 +570,32 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch->schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
-    EXPECT_EQ(mDispatch->cancel(cb0), CancelResult::Cancelled);
+    EXPECT_EQ(mDispatch.cancel(cb0), CancelResult::Cancelled);
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, setAlarmCallsAtCorrectTimeWithChangingVsync) {
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
             .Times(3)
             .WillOnce(Return(950))
             .WillOnce(Return(1975))
             .WillOnce(Return(2950));
 
     CountingCallback cb(mDispatch);
-    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 920});
+    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 920});
 
     mMockClock.advanceBy(850);
     EXPECT_THAT(cb.mCalls.size(), Eq(1));
 
-    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1900});
+    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1900});
     mMockClock.advanceBy(900);
     EXPECT_THAT(cb.mCalls.size(), Eq(1));
     mMockClock.advanceBy(125);
     EXPECT_THAT(cb.mCalls.size(), Eq(2));
 
-    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2900});
+    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2900});
     mMockClock.advanceBy(975);
     EXPECT_THAT(cb.mCalls.size(), Eq(3));
 }
@@ -618,48 +606,48 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 1900)).InSequence(seq);
 
     VSyncDispatch::CallbackToken tmp;
-    tmp = mDispatch->registerCallback(
+    tmp = mDispatch.registerCallback(
             [&](auto, auto, auto) {
-                mDispatch->schedule(tmp,
-                                    {.workDuration = 100,
-                                     .readyDuration = 0,
-                                     .earliestVsync = 2000});
+                mDispatch.schedule(tmp,
+                                   {.workDuration = 100,
+                                    .readyDuration = 0,
+                                    .earliestVsync = 2000});
             },
             "o.o");
 
-    mDispatch->schedule(tmp, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(tmp, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, callbackReentrantWithPastWakeup) {
     VSyncDispatch::CallbackToken tmp;
     std::optional<nsecs_t> lastTarget;
-    tmp = mDispatch->registerCallback(
+    tmp = mDispatch.registerCallback(
             [&](auto timestamp, auto, auto) {
                 auto result =
-                        mDispatch->schedule(tmp,
+                        mDispatch.schedule(tmp,
+                                           {.workDuration = 400,
+                                            .readyDuration = 0,
+                                            .earliestVsync = timestamp - mVsyncMoveThreshold});
+                EXPECT_TRUE(result.has_value());
+                EXPECT_EQ(mPeriod + timestamp - 400, *result);
+                result = mDispatch.schedule(tmp,
                                             {.workDuration = 400,
                                              .readyDuration = 0,
-                                             .earliestVsync = timestamp - mVsyncMoveThreshold});
+                                             .earliestVsync = timestamp});
                 EXPECT_TRUE(result.has_value());
                 EXPECT_EQ(mPeriod + timestamp - 400, *result);
-                result = mDispatch->schedule(tmp,
-                                             {.workDuration = 400,
-                                              .readyDuration = 0,
-                                              .earliestVsync = timestamp});
-                EXPECT_TRUE(result.has_value());
-                EXPECT_EQ(mPeriod + timestamp - 400, *result);
-                result = mDispatch->schedule(tmp,
-                                             {.workDuration = 400,
-                                              .readyDuration = 0,
-                                              .earliestVsync = timestamp + mVsyncMoveThreshold});
+                result = mDispatch.schedule(tmp,
+                                            {.workDuration = 400,
+                                             .readyDuration = 0,
+                                             .earliestVsync = timestamp + mVsyncMoveThreshold});
                 EXPECT_TRUE(result.has_value());
                 EXPECT_EQ(mPeriod + timestamp - 400, *result);
                 lastTarget = timestamp;
             },
             "oo");
 
-    mDispatch->schedule(tmp, {.workDuration = 999, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(tmp, {.workDuration = 999, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
     EXPECT_THAT(lastTarget, Eq(1000));
 
@@ -675,16 +663,16 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 1900)).InSequence(seq);
 
     CountingCallback cb(mDispatch);
-    mDispatch->schedule(cb, {.workDuration = 0, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb, {.workDuration = 0, .readyDuration = 0, .earliestVsync = 1000});
 
     mMockClock.advanceBy(750);
-    mDispatch->schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
-    mDispatch->schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch.schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 2000});
 
     mMockClock.advanceBy(800);
-    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, lateModifications) {
@@ -697,12 +685,12 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch->schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb1, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb1, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
-    mDispatch->schedule(cb0, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 2000});
-    mDispatch->schedule(cb1, {.workDuration = 150, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb0, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch.schedule(cb1, {.workDuration = 150, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
     advanceToNextCallback();
@@ -714,8 +702,8 @@
 
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
-    mDispatch->schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 20000});
+    mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 20000});
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, setsTimerAfterCancellation) {
@@ -725,30 +713,29 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 900)).InSequence(seq);
 
     CountingCallback cb0(mDispatch);
-    mDispatch->schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->cancel(cb0);
-    mDispatch->schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.cancel(cb0);
+    mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, makingUpIdsError) {
     VSyncDispatch::CallbackToken token(100);
-    EXPECT_FALSE(
-            mDispatch
-                    ->schedule(token,
-                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000})
-                    .has_value());
-    EXPECT_THAT(mDispatch->cancel(token), Eq(CancelResult::Error));
+    EXPECT_FALSE(mDispatch
+                         .schedule(token,
+                                   {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000})
+                         .has_value());
+    EXPECT_THAT(mDispatch.cancel(token), Eq(CancelResult::Error));
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, canMoveCallbackBackwardsInTime) {
     CountingCallback cb0(mDispatch);
     auto result =
-            mDispatch->schedule(cb0,
-                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb0,
+                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
-    result = mDispatch->schedule(cb0,
-                                 {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch.schedule(cb0,
+                                {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
 }
@@ -758,14 +745,14 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 500));
     CountingCallback cb(mDispatch);
     auto result =
-            mDispatch->schedule(cb,
-                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb,
+                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
     mMockClock.advanceBy(400);
 
-    result = mDispatch->schedule(cb,
-                                 {.workDuration = 800, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch.schedule(cb,
+                                {.workDuration = 800, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1200, *result);
     advanceToNextCallback();
@@ -773,19 +760,19 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, targetOffsetMovingBackALittleCanStillSchedule) {
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
             .Times(2)
             .WillOnce(Return(1000))
             .WillOnce(Return(1002));
     CountingCallback cb(mDispatch);
     auto result =
-            mDispatch->schedule(cb,
-                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb,
+                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
     mMockClock.advanceBy(400);
-    result = mDispatch->schedule(cb,
-                                 {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch.schedule(cb,
+                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(602, *result);
 }
@@ -793,13 +780,13 @@
 TEST_F(VSyncDispatchTimerQueueTest, canScheduleNegativeOffsetAgainstDifferentPeriods) {
     CountingCallback cb0(mDispatch);
     auto result =
-            mDispatch->schedule(cb0,
-                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb0,
+                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
     advanceToNextCallback();
-    result = mDispatch->schedule(cb0,
-                                 {.workDuration = 1100, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch.schedule(cb0,
+                                {.workDuration = 1100, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
 }
@@ -810,13 +797,13 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 1100)).InSequence(seq);
     CountingCallback cb0(mDispatch);
     auto result =
-            mDispatch->schedule(cb0,
-                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb0,
+                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
     advanceToNextCallback();
-    result = mDispatch->schedule(cb0,
-                                 {.workDuration = 1900, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch.schedule(cb0,
+                                {.workDuration = 1900, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1100, *result);
 }
@@ -826,13 +813,13 @@
 
     CountingCallback cb(mDispatch);
     auto result =
-            mDispatch->schedule(cb,
-                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb,
+                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
 
-    result = mDispatch->schedule(cb,
-                                 {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch.schedule(cb,
+                                {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
 
@@ -878,16 +865,16 @@
     CountingCallback cb2(mDispatch);
 
     auto result =
-            mDispatch->schedule(cb1,
-                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb1,
+                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
 
     mMockClock.setLag(100);
     mMockClock.advanceBy(620);
 
-    result = mDispatch->schedule(cb2,
-                                 {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch.schedule(cb2,
+                                {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1900, *result);
     mMockClock.advanceBy(80);
@@ -906,16 +893,16 @@
     CountingCallback cb(mDispatch);
 
     auto result =
-            mDispatch->schedule(cb,
-                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb,
+                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
 
     mMockClock.setLag(100);
     mMockClock.advanceBy(620);
 
-    result = mDispatch->schedule(cb,
-                                 {.workDuration = 370, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch.schedule(cb,
+                                {.workDuration = 370, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1630, *result);
     mMockClock.advanceBy(80);
@@ -932,19 +919,19 @@
     CountingCallback cb2(mDispatch);
 
     auto result =
-            mDispatch->schedule(cb1,
-                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb1,
+                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
-    result = mDispatch->schedule(cb2,
-                                 {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch.schedule(cb2,
+                                {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1900, *result);
 
     mMockClock.setLag(100);
     mMockClock.advanceBy(620);
 
-    EXPECT_EQ(mDispatch->cancel(cb2), CancelResult::Cancelled);
+    EXPECT_EQ(mDispatch.cancel(cb2), CancelResult::Cancelled);
 
     mMockClock.advanceBy(80);
 
@@ -961,19 +948,19 @@
     CountingCallback cb2(mDispatch);
 
     auto result =
-            mDispatch->schedule(cb1,
-                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb1,
+                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
-    result = mDispatch->schedule(cb2,
-                                 {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch.schedule(cb2,
+                                {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1900, *result);
 
     mMockClock.setLag(100);
     mMockClock.advanceBy(620);
 
-    EXPECT_EQ(mDispatch->cancel(cb1), CancelResult::Cancelled);
+    EXPECT_EQ(mDispatch.cancel(cb1), CancelResult::Cancelled);
 
     EXPECT_THAT(cb1.mCalls.size(), Eq(0));
     EXPECT_THAT(cb2.mCalls.size(), Eq(0));
@@ -988,21 +975,21 @@
     CountingCallback cb2(mDispatch);
 
     Sequence seq;
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
             .InSequence(seq)
             .WillOnce(Return(1000));
     EXPECT_CALL(mMockClock, alarmAt(_, 600)).InSequence(seq);
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
             .InSequence(seq)
             .WillOnce(Return(1000));
 
     auto result =
-            mDispatch->schedule(cb1,
-                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch.schedule(cb1,
+                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
-    result = mDispatch->schedule(cb2,
-                                 {.workDuration = 390, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch.schedule(cb2,
+                                {.workDuration = 390, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(610, *result);
 
@@ -1024,10 +1011,10 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 900));
 
     CountingCallback cb(mDispatch);
-    const auto result = mDispatch->schedule(cb,
-                                            {.workDuration = 70,
-                                             .readyDuration = 30,
-                                             .earliestVsync = intended});
+    const auto result = mDispatch.schedule(cb,
+                                           {.workDuration = 70,
+                                            .readyDuration = 30,
+                                            .earliestVsync = intended});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
     advanceToNextCallback();
@@ -1046,8 +1033,8 @@
 
     CountingCallback cb(mDispatch);
 
-    mDispatch->schedule(cb, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch->schedule(cb, {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch.schedule(cb, {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
 
@@ -1065,8 +1052,7 @@
 protected:
     nsecs_t const mPeriod = 1000;
     nsecs_t const mVsyncMoveThreshold = 200;
-    std::shared_ptr<NiceMock<MockVSyncTracker>> mStubTracker =
-            std::make_shared<NiceMock<MockVSyncTracker>>(mPeriod);
+    NiceMock<MockVSyncTracker> mStubTracker{mPeriod};
 };
 
 TEST_F(VSyncDispatchTimerQueueEntryTest, stateAfterInitialization) {
@@ -1084,7 +1070,7 @@
 
     EXPECT_FALSE(entry.wakeupTime());
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
@@ -1098,7 +1084,7 @@
     auto const duration = 500;
     auto const now = 8750;
 
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(now + duration))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(now + duration))
             .Times(1)
             .WillOnce(Return(10000));
     VSyncDispatchTimerQueueEntry entry(
@@ -1106,7 +1092,7 @@
 
     EXPECT_FALSE(entry.wakeupTime());
     EXPECT_TRUE(entry.schedule({.workDuration = 500, .readyDuration = 0, .earliestVsync = 994},
-                               *mStubTracker.get(), now)
+                               mStubTracker, now)
                         .has_value());
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
@@ -1129,7 +1115,7 @@
             mVsyncMoveThreshold);
 
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
@@ -1151,7 +1137,7 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueEntryTest, updateCallback) {
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
             .Times(2)
             .WillOnce(Return(1000))
             .WillOnce(Return(1020));
@@ -1160,17 +1146,17 @@
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
 
     EXPECT_FALSE(entry.wakeupTime());
-    entry.update(*mStubTracker.get(), 0);
+    entry.update(mStubTracker, 0);
     EXPECT_FALSE(entry.wakeupTime());
 
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     auto wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
     EXPECT_THAT(wakeup, Eq(900));
 
-    entry.update(*mStubTracker.get(), 0);
+    entry.update(mStubTracker, 0);
     wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
     EXPECT_THAT(*wakeup, Eq(920));
@@ -1180,9 +1166,9 @@
     VSyncDispatchTimerQueueEntry entry(
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
-    entry.update(*mStubTracker.get(), 0);
+    entry.update(mStubTracker, 0);
 
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
@@ -1193,24 +1179,24 @@
     VSyncDispatchTimerQueueEntry entry(
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     entry.executing(); // 1000 is executing
     // had 1000 not been executing, this could have been scheduled for time 800.
     EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     EXPECT_THAT(*entry.wakeupTime(), Eq(1800));
     EXPECT_THAT(*entry.readyTime(), Eq(2000));
 
     EXPECT_TRUE(entry.schedule({.workDuration = 50, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     EXPECT_THAT(*entry.wakeupTime(), Eq(1950));
     EXPECT_THAT(*entry.readyTime(), Eq(2000));
 
     EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 1001},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     EXPECT_THAT(*entry.wakeupTime(), Eq(1800));
     EXPECT_THAT(*entry.readyTime(), Eq(2000));
@@ -1222,24 +1208,24 @@
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
 
     Sequence seq;
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(500))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(500))
             .InSequence(seq)
             .WillOnce(Return(1000));
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(500))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(500))
             .InSequence(seq)
             .WillOnce(Return(1000));
-    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000 + mVsyncMoveThreshold))
+    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000 + mVsyncMoveThreshold))
             .InSequence(seq)
             .WillOnce(Return(2000));
 
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
 
     entry.executing(); // 1000 is executing
 
     EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
 }
 
@@ -1247,16 +1233,16 @@
     VSyncDispatchTimerQueueEntry entry(
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     EXPECT_TRUE(entry.schedule({.workDuration = 50, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     EXPECT_TRUE(entry.schedule({.workDuration = 1200, .readyDuration = 0, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
 }
 
@@ -1269,7 +1255,7 @@
     entry.addPendingWorkloadUpdate(
             {.workDuration = effectualOffset, .readyDuration = 0, .earliestVsync = 400});
     EXPECT_TRUE(entry.hasPendingWorkloadUpdate());
-    entry.update(*mStubTracker.get(), 0);
+    entry.update(mStubTracker, 0);
     EXPECT_FALSE(entry.hasPendingWorkloadUpdate());
     EXPECT_THAT(*entry.wakeupTime(), Eq(mPeriod - effectualOffset));
 }
@@ -1290,7 +1276,7 @@
             mVsyncMoveThreshold);
 
     EXPECT_TRUE(entry.schedule({.workDuration = 70, .readyDuration = 30, .earliestVsync = 500},
-                               *mStubTracker.get(), 0)
+                               mStubTracker, 0)
                         .has_value());
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
index 7947a5e..3095e8a 100644
--- a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -55,7 +55,7 @@
     static constexpr size_t kOutlierTolerancePercent = 25;
     static constexpr nsecs_t mMaxRoundingError = 100;
 
-    VSyncPredictor tracker{"tracker", mPeriod, kHistorySize, kMinimumSamplesForPrediction,
+    VSyncPredictor tracker{mPeriod, kHistorySize, kMinimumSamplesForPrediction,
                            kOutlierTolerancePercent};
 };
 
@@ -376,8 +376,7 @@
 
 // See b/151146131
 TEST_F(VSyncPredictorTest, hasEnoughPrecision) {
-    VSyncPredictor tracker{"tracker", mPeriod, 20, kMinimumSamplesForPrediction,
-                           kOutlierTolerancePercent};
+    VSyncPredictor tracker{mPeriod, 20, kMinimumSamplesForPrediction, kOutlierTolerancePercent};
     std::vector<nsecs_t> const simulatedVsyncs{840873348817, 840890049444, 840906762675,
                                                840923581635, 840940161584, 840956868096,
                                                840973702473, 840990256277, 841007116851,
diff --git a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
index a2de136..1fb2709 100644
--- a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
@@ -96,8 +96,8 @@
     VSyncReactorTest()
           : mMockTracker(std::make_shared<NiceMock<MockVSyncTracker>>()),
             mMockClock(std::make_shared<NiceMock<MockClock>>()),
-            mReactor("reactor", std::make_unique<ClockWrapper>(mMockClock), *mMockTracker,
-                     kPendingLimit, false /* supportKernelIdleTimer */) {
+            mReactor(std::make_unique<ClockWrapper>(mMockClock), *mMockTracker, kPendingLimit,
+                     false /* supportKernelIdleTimer */) {
         ON_CALL(*mMockClock, now()).WillByDefault(Return(mFakeNow));
         ON_CALL(*mMockTracker, currentPeriod()).WillByDefault(Return(period));
     }
@@ -192,7 +192,7 @@
     mReactor.setIgnorePresentFences(true);
 
     nsecs_t const newPeriod = 5000;
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
 
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(0, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
@@ -205,7 +205,7 @@
 TEST_F(VSyncReactorTest, setPeriodCalledOnceConfirmedChange) {
     nsecs_t const newPeriod = 5000;
     EXPECT_CALL(*mMockTracker, setPeriod(_)).Times(0);
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
 
     bool periodFlushed = true;
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(10000, std::nullopt, &periodFlushed));
@@ -224,7 +224,7 @@
 TEST_F(VSyncReactorTest, changingPeriodBackAbortsConfirmationProcess) {
     nsecs_t sampleTime = 0;
     nsecs_t const newPeriod = 5000;
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
     bool periodFlushed = true;
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
@@ -232,7 +232,7 @@
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
 
-    mReactor.startPeriodTransition(period, false);
+    mReactor.startPeriodTransition(period);
     EXPECT_FALSE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
 }
@@ -242,13 +242,13 @@
     nsecs_t const secondPeriod = 5000;
     nsecs_t const thirdPeriod = 2000;
 
-    mReactor.startPeriodTransition(secondPeriod, false);
+    mReactor.startPeriodTransition(secondPeriod);
     bool periodFlushed = true;
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
-    mReactor.startPeriodTransition(thirdPeriod, false);
+    mReactor.startPeriodTransition(thirdPeriod);
     EXPECT_TRUE(
             mReactor.addHwVsyncTimestamp(sampleTime += secondPeriod, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
@@ -289,14 +289,14 @@
 
 TEST_F(VSyncReactorTest, presentFenceAdditionDoesNotInterruptConfirmationProcess) {
     nsecs_t const newPeriod = 5000;
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
     EXPECT_TRUE(mReactor.addPresentFence(generateSignalledFenceWithTime(0)));
 }
 
 TEST_F(VSyncReactorTest, setPeriodCalledFirstTwoEventsNewPeriod) {
     nsecs_t const newPeriod = 5000;
     EXPECT_CALL(*mMockTracker, setPeriod(_)).Times(0);
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
 
     bool periodFlushed = true;
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(5000, std::nullopt, &periodFlushed));
@@ -321,7 +321,7 @@
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
 
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
 
     auto time = 0;
     auto constexpr numTimestampSubmissions = 10;
@@ -346,7 +346,7 @@
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
 
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
 
     auto time = 0;
     // If the power mode is not DOZE or DOZE_SUSPEND, it is still collecting timestamps.
@@ -363,7 +363,7 @@
     auto time = 0;
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
 
     time += period;
     mReactor.addHwVsyncTimestamp(time, std::nullopt, &periodFlushed);
@@ -379,7 +379,7 @@
     auto time = 0;
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
 
     static auto constexpr numSamplesWithNewPeriod = 4;
     Sequence seq;
@@ -406,7 +406,7 @@
     auto time = 0;
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
 
     Sequence seq;
     EXPECT_CALL(*mMockTracker, needsMoreSamples())
@@ -426,7 +426,7 @@
     nsecs_t const newPeriod1 = 4000;
     nsecs_t const newPeriod2 = 7000;
 
-    mReactor.startPeriodTransition(newPeriod1, false);
+    mReactor.startPeriodTransition(newPeriod1);
 
     Sequence seq;
     EXPECT_CALL(*mMockTracker, needsMoreSamples())
@@ -445,7 +445,7 @@
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod1, std::nullopt, &periodFlushed));
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod1, std::nullopt, &periodFlushed));
 
-    mReactor.startPeriodTransition(newPeriod2, false);
+    mReactor.startPeriodTransition(newPeriod2);
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod1, std::nullopt, &periodFlushed));
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod2, std::nullopt, &periodFlushed));
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod2, std::nullopt, &periodFlushed));
@@ -458,7 +458,7 @@
     mReactor.setIgnorePresentFences(true);
 
     nsecs_t const newPeriod = 5000;
-    mReactor.startPeriodTransition(newPeriod, false);
+    mReactor.startPeriodTransition(newPeriod);
 
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(0, 0, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
@@ -472,9 +472,8 @@
 
 TEST_F(VSyncReactorTest, periodIsMeasuredIfIgnoringComposer) {
     // Create a reactor which supports the kernel idle timer
-    auto idleReactor =
-            VSyncReactor("reactor", std::make_unique<ClockWrapper>(mMockClock), *mMockTracker,
-                         kPendingLimit, true /* supportKernelIdleTimer */);
+    auto idleReactor = VSyncReactor(std::make_unique<ClockWrapper>(mMockClock), *mMockTracker,
+                                    kPendingLimit, true /* supportKernelIdleTimer */);
 
     bool periodFlushed = true;
     EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(4);
@@ -482,7 +481,7 @@
 
     // First, set the same period, which should only be confirmed when we receive two
     // matching callbacks
-    idleReactor.startPeriodTransition(10000, false);
+    idleReactor.startPeriodTransition(10000);
     EXPECT_TRUE(idleReactor.addHwVsyncTimestamp(0, 0, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
     // Correct period but incorrect timestamp delta
@@ -495,7 +494,7 @@
     // Then, set a new period, which should be confirmed as soon as we receive a callback
     // reporting the new period
     nsecs_t const newPeriod = 5000;
-    idleReactor.startPeriodTransition(newPeriod, false);
+    idleReactor.startPeriodTransition(newPeriod);
     // Incorrect timestamp delta and period
     EXPECT_TRUE(idleReactor.addHwVsyncTimestamp(20000, 10000, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
diff --git a/services/surfaceflinger/tests/unittests/mock/MockEventThread.h b/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
index 3a6068a..f8567bd 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
@@ -29,29 +29,27 @@
     EventThread();
     ~EventThread() override;
 
-    MOCK_METHOD(sp<EventThreadConnection>, createEventConnection,
-                (ResyncCallback, EventRegistrationFlags), (const, override));
-    MOCK_METHOD(void, onScreenReleased, (), (override));
-    MOCK_METHOD(void, onScreenAcquired, (), (override));
-    MOCK_METHOD(void, onHotplugReceived, (PhysicalDisplayId, bool), (override));
-    MOCK_METHOD(void, onModeChanged, (const scheduler::FrameRateMode&), (override));
-    MOCK_METHOD(void, onFrameRateOverridesChanged,
-                (PhysicalDisplayId, std::vector<FrameRateOverride>), (override));
-    MOCK_METHOD(void, dump, (std::string&), (const, override));
-    MOCK_METHOD(void, setDuration,
-                (std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration),
-                (override));
-    MOCK_METHOD(status_t, registerDisplayEventConnection,
-                (const sp<android::EventThreadConnection>&), (override));
-    MOCK_METHOD(void, setVsyncRate, (uint32_t, const sp<android::EventThreadConnection>&),
-                (override));
-    MOCK_METHOD(void, requestNextVsync, (const sp<android::EventThreadConnection>&), (override));
+    MOCK_CONST_METHOD2(createEventConnection,
+                       sp<EventThreadConnection>(ResyncCallback, EventRegistrationFlags));
+    MOCK_METHOD0(onScreenReleased, void());
+    MOCK_METHOD0(onScreenAcquired, void());
+    MOCK_METHOD2(onHotplugReceived, void(PhysicalDisplayId, bool));
+    MOCK_METHOD1(onModeChanged, void(const scheduler::FrameRateMode &));
+    MOCK_METHOD2(onFrameRateOverridesChanged,
+                 void(PhysicalDisplayId, std::vector<FrameRateOverride>));
+    MOCK_CONST_METHOD1(dump, void(std::string&));
+    MOCK_METHOD2(setDuration,
+                 void(std::chrono::nanoseconds workDuration,
+                      std::chrono::nanoseconds readyDuration));
+    MOCK_METHOD1(registerDisplayEventConnection,
+                 status_t(const sp<android::EventThreadConnection> &));
+    MOCK_METHOD2(setVsyncRate, void(uint32_t, const sp<android::EventThreadConnection> &));
+    MOCK_METHOD1(requestNextVsync, void(const sp<android::EventThreadConnection> &));
     MOCK_METHOD(VsyncEventData, getLatestVsyncEventData,
-                (const sp<android::EventThreadConnection>&), (const, override));
-    MOCK_METHOD(void, requestLatestConfig, (const sp<android::EventThreadConnection>&));
-    MOCK_METHOD(void, pauseVsyncCallback, (bool));
-    MOCK_METHOD(size_t, getEventThreadConnectionCount, (), (override));
-    MOCK_METHOD(void, onNewVsyncSchedule, (std::shared_ptr<scheduler::VsyncSchedule>), (override));
+                (const sp<android::EventThreadConnection> &), (const));
+    MOCK_METHOD1(requestLatestConfig, void(const sp<android::EventThreadConnection> &));
+    MOCK_METHOD1(pauseVsyncCallback, void(bool));
+    MOCK_METHOD0(getEventThreadConnectionCount, size_t());
 };
 
 } // namespace android::mock
diff --git a/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h b/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h
index a8eca21..7d4b159 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h
@@ -18,19 +18,19 @@
 
 #include <gmock/gmock.h>
 
-#include "Scheduler/ISchedulerCallback.h"
+#include "Scheduler/Scheduler.h"
 
 namespace android::scheduler::mock {
 
 struct SchedulerCallback final : ISchedulerCallback {
-    MOCK_METHOD(void, setVsyncEnabled, (PhysicalDisplayId, bool), (override));
+    MOCK_METHOD(void, setVsyncEnabled, (bool), (override));
     MOCK_METHOD(void, requestDisplayModes, (std::vector<display::DisplayModeRequest>), (override));
     MOCK_METHOD(void, kernelTimerChanged, (bool), (override));
     MOCK_METHOD(void, triggerOnFrameRateOverridesChanged, (), (override));
 };
 
 struct NoOpSchedulerCallback final : ISchedulerCallback {
-    void setVsyncEnabled(PhysicalDisplayId, bool) override {}
+    void setVsyncEnabled(bool) override {}
     void requestDisplayModes(std::vector<display::DisplayModeRequest>) override {}
     void kernelTimerChanged(bool) override {}
     void triggerOnFrameRateOverridesChanged() override {}
diff --git a/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h b/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h
index 69ec60a..4ef91da 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h
@@ -28,12 +28,12 @@
     ~VsyncController() override;
 
     MOCK_METHOD(bool, addPresentFence, (std::shared_ptr<FenceTime>), (override));
-    MOCK_METHOD(bool, addHwVsyncTimestamp, (nsecs_t, std::optional<nsecs_t>, bool*), (override));
-    MOCK_METHOD(void, startPeriodTransition, (nsecs_t, bool), (override));
-    MOCK_METHOD(void, setIgnorePresentFences, (bool), (override));
+    MOCK_METHOD3(addHwVsyncTimestamp, bool(nsecs_t, std::optional<nsecs_t>, bool*));
+    MOCK_METHOD1(startPeriodTransition, void(nsecs_t));
+    MOCK_METHOD1(setIgnorePresentFences, void(bool));
     MOCK_METHOD(void, setDisplayPowerMode, (hal::PowerMode), (override));
 
-    MOCK_METHOD(void, dump, (std::string&), (const, override));
+    MOCK_CONST_METHOD1(dump, void(std::string&));
 };
 
 } // namespace android::mock
diff --git a/vulkan/vkjson/Android.bp b/vulkan/vkjson/Android.bp
index b6d3a0b..b544245 100644
--- a/vulkan/vkjson/Android.bp
+++ b/vulkan/vkjson/Android.bp
@@ -25,10 +25,8 @@
         ".",
     ],
     shared_libs: [
-        "libvulkan",
-    ],
-    whole_static_libs: [
         "libjsoncpp",
+        "libvulkan",
     ],
     export_shared_lib_headers: [
         "libvulkan",