Merge "jpegrecoverymap: Update XMP to match spec."
diff --git a/cmds/dumpstate/DumpstateService.cpp b/cmds/dumpstate/DumpstateService.cpp
index 42e9e0f..a7bc018 100644
--- a/cmds/dumpstate/DumpstateService.cpp
+++ b/cmds/dumpstate/DumpstateService.cpp
@@ -58,6 +58,13 @@
     exit(0);
 }
 
+[[noreturn]] static void* dumpstate_thread_retrieve(void* data) {
+    std::unique_ptr<DumpstateInfo> ds_info(static_cast<DumpstateInfo*>(data));
+    ds_info->ds->Retrieve(ds_info->calling_uid, ds_info->calling_package);
+    MYLOGD("Finished retrieving a bugreport. Exiting.\n");
+    exit(0);
+}
+
 [[noreturn]] static void signalErrorAndExit(sp<IDumpstateListener> listener, int error_code) {
     listener->onError(error_code);
     exit(0);
@@ -192,6 +199,41 @@
     return binder::Status::ok();
 }
 
+binder::Status DumpstateService::retrieveBugreport(
+    int32_t calling_uid, const std::string& calling_package,
+    android::base::unique_fd bugreport_fd,
+    const std::string& bugreport_file,
+    const sp<IDumpstateListener>& listener) {
+
+    ds_ = &(Dumpstate::GetInstance());
+    DumpstateInfo* ds_info = new DumpstateInfo();
+    ds_info->ds = ds_;
+    ds_info->calling_uid = calling_uid;
+    ds_info->calling_package = calling_package;
+    ds_->listener_ = listener;
+    std::unique_ptr<Dumpstate::DumpOptions> options = std::make_unique<Dumpstate::DumpOptions>();
+    // Use a /dev/null FD when initializing options since none is provided.
+    android::base::unique_fd devnull_fd(
+        TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));
+
+    options->Initialize(Dumpstate::BugreportMode::BUGREPORT_DEFAULT,
+                        0, bugreport_fd, devnull_fd, false);
+
+    if (bugreport_fd.get() == -1) {
+        MYLOGE("Invalid filedescriptor");
+        signalErrorAndExit(listener, IDumpstateListener::BUGREPORT_ERROR_INVALID_INPUT);
+    }
+    ds_->SetOptions(std::move(options));
+    ds_->path_ = bugreport_file;
+    pthread_t thread;
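+    // The retrieve thread takes ownership of ds_info (it is wrapped in a unique_ptr there)
+    // and exits the process when retrieval finishes.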
+    status_t err = pthread_create(&thread, nullptr, dumpstate_thread_retrieve, ds_info);
+    if (err != 0) {
+        MYLOGE("Could not create a thread");
+        signalErrorAndExit(listener, IDumpstateListener::BUGREPORT_ERROR_RUNTIME_ERROR);
+    }
+    return binder::Status::ok();
+}
+
 status_t DumpstateService::dump(int fd, const Vector<String16>&) {
     std::lock_guard<std::mutex> lock(lock_);
     if (ds_ == nullptr) {
diff --git a/cmds/dumpstate/DumpstateService.h b/cmds/dumpstate/DumpstateService.h
index 997999c..dd73319 100644
--- a/cmds/dumpstate/DumpstateService.h
+++ b/cmds/dumpstate/DumpstateService.h
@@ -46,6 +46,13 @@
                                   int bugreport_flags, const sp<IDumpstateListener>& listener,
                                   bool is_screenshot_requested) override;
 
+    binder::Status retrieveBugreport(int32_t calling_uid,
+                                     const std::string& calling_package,
+                                     android::base::unique_fd bugreport_fd,
+                                     const std::string& bugreport_file,
+                                     const sp<IDumpstateListener>& listener)
+                                     override;
+
     binder::Status cancelBugreport(int32_t calling_uid,
                                    const std::string& calling_package) override;
 
diff --git a/cmds/dumpstate/binder/android/os/IDumpstate.aidl b/cmds/dumpstate/binder/android/os/IDumpstate.aidl
index d4323af..0dc8f5a 100644
--- a/cmds/dumpstate/binder/android/os/IDumpstate.aidl
+++ b/cmds/dumpstate/binder/android/os/IDumpstate.aidl
@@ -50,7 +50,10 @@
     const int BUGREPORT_MODE_DEFAULT = 6;
 
     // Use pre-dumped data.
-    const int BUGREPORT_FLAG_USE_PREDUMPED_UI_DATA = 1;
+    const int BUGREPORT_FLAG_USE_PREDUMPED_UI_DATA = 0x1;
+
+    // Defer user consent.
+    const int BUGREPORT_FLAG_DEFER_CONSENT = 0x2;
 
     /**
      * Speculatively pre-dumps UI data for a bugreport request that might come later.
@@ -100,4 +103,22 @@
      * @param callingPackage package of the original application that requested the cancellation.
      */
     void cancelBugreport(int callingUid, @utf8InCpp String callingPackage);
+
+    /**
+     * Retrieves a previously generated bugreport.
+     *
+     * <p>The caller must have previously generated a bugreport using
+     * {@link #startBugreport} with the {@link BUGREPORT_FLAG_DEFER_CONSENT}
+     * flag set.
+     *
+     * @param callingUid UID of the original application that requested the report.
+     * @param callingPackage package of the original application that requested the report.
+     * @param bugreportFd the file to which the zipped bugreport should be written
+     * @param bugreportFile the path of the bugreport file
+     * @param listener callback for updates; optional
+     */
+    void retrieveBugreport(int callingUid, @utf8InCpp String callingPackage,
+                           FileDescriptor bugreportFd,
+                           @utf8InCpp String bugreportFile,
+                           IDumpstateListener listener);
 }
diff --git a/cmds/dumpstate/binder/android/os/IDumpstateListener.aidl b/cmds/dumpstate/binder/android/os/IDumpstateListener.aidl
index 50c1624..e8891d3 100644
--- a/cmds/dumpstate/binder/android/os/IDumpstateListener.aidl
+++ b/cmds/dumpstate/binder/android/os/IDumpstateListener.aidl
@@ -50,6 +50,9 @@
     /* There is currently a bugreport running. The caller should try again later. */
     const int BUGREPORT_ERROR_ANOTHER_REPORT_IN_PROGRESS = 5;
 
+    /* There is no bugreport to retrieve for the given caller. */
+    const int BUGREPORT_ERROR_NO_BUGREPORT_TO_RETRIEVE = 6;
+
     /**
      * Called on an error condition with one of the error codes listed above.
      */
@@ -57,8 +60,10 @@
 
     /**
      * Called when taking bugreport finishes successfully.
+     *
+     * @param bugreportFile The location of the bugreport file
      */
-    oneway void onFinished();
+    oneway void onFinished(@utf8InCpp String bugreportFile);
 
     /**
      * Called when screenshot is taken.
diff --git a/cmds/dumpstate/dumpstate.cpp b/cmds/dumpstate/dumpstate.cpp
index d77b458..ecafcfc 100644
--- a/cmds/dumpstate/dumpstate.cpp
+++ b/cmds/dumpstate/dumpstate.cpp
@@ -2825,6 +2825,7 @@
                                         const android::base::unique_fd& screenshot_fd_in,
                                         bool is_screenshot_requested) {
     this->use_predumped_ui_data = bugreport_flags & BugreportFlag::BUGREPORT_USE_PREDUMPED_UI_DATA;
+    this->is_consent_deferred = bugreport_flags & BugreportFlag::BUGREPORT_FLAG_DEFER_CONSENT;
     // Duplicate the fds because the passed in fds don't outlive the binder transaction.
     bugreport_fd.reset(fcntl(bugreport_fd_in.get(), F_DUPFD_CLOEXEC, 0));
     screenshot_fd.reset(fcntl(screenshot_fd_in.get(), F_DUPFD_CLOEXEC, 0));
@@ -2907,10 +2908,64 @@
 
 Dumpstate::RunStatus Dumpstate::Run(int32_t calling_uid, const std::string& calling_package) {
     Dumpstate::RunStatus status = RunInternal(calling_uid, calling_package);
-    if (listener_ != nullptr) {
+    HandleRunStatus(status);
+    return status;
+}
+
+Dumpstate::RunStatus Dumpstate::Retrieve(int32_t calling_uid, const std::string& calling_package) {
+    Dumpstate::RunStatus status = RetrieveInternal(calling_uid, calling_package);
+    HandleRunStatus(status);
+    return status;
+}
+
+Dumpstate::RunStatus  Dumpstate::RetrieveInternal(int32_t calling_uid,
+                                                  const std::string& calling_package) {
+  consent_callback_ = new ConsentCallback();
+  const String16 incidentcompanion("incidentcompanion");
+  sp<android::IBinder> ics(
+      defaultServiceManager()->checkService(incidentcompanion));
+  android::String16 package(calling_package.c_str());
+  if (ics != nullptr) {
+    MYLOGD("Checking user consent via incidentcompanion service\n");
+    android::interface_cast<android::os::IIncidentCompanion>(ics)->authorizeReport(
+        calling_uid, package, String16(), String16(),
+        0x1 /* FLAG_CONFIRMATION_DIALOG */, consent_callback_.get());
+  } else {
+    MYLOGD(
+        "Unable to check user consent; incidentcompanion service unavailable\n");
+    return RunStatus::USER_CONSENT_TIMED_OUT;
+  }
+  UserConsentResult consent_result = consent_callback_->getResult();
+  int timeout_ms = 30 * 1000;
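+  // Poll for the user's consent decision until it arrives or the timeout elapses.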
+  while (consent_result == UserConsentResult::UNAVAILABLE &&
+      consent_callback_->getElapsedTimeMs() < timeout_ms) {
+    sleep(1);
+    consent_result = consent_callback_->getResult();
+  }
+  if (consent_result == UserConsentResult::DENIED) {
+    return RunStatus::USER_CONSENT_DENIED;
+  }
+  if (consent_result == UserConsentResult::UNAVAILABLE) {
+    MYLOGD("Canceling user consent request via incidentcompanion service\n");
+    android::interface_cast<android::os::IIncidentCompanion>(ics)->cancelAuthorization(
+        consent_callback_.get());
+    return RunStatus::USER_CONSENT_TIMED_OUT;
+  }
+
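+  // Consent granted: copy the previously generated report to the caller's fd and remove the
+  // on-device copy.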
+  bool copy_succeeded =
+      android::os::CopyFileToFd(path_, options_->bugreport_fd.get());
+  if (copy_succeeded) {
+    android::os::UnlinkAndLogOnError(path_);
+  }
+  return copy_succeeded ? Dumpstate::RunStatus::OK
+                        : Dumpstate::RunStatus::ERROR;
+}
+
+void Dumpstate::HandleRunStatus(Dumpstate::RunStatus status) {
+    if (listener_ != nullptr) {
         switch (status) {
             case Dumpstate::RunStatus::OK:
-                listener_->onFinished();
+                listener_->onFinished(path_.c_str());
                 break;
             case Dumpstate::RunStatus::HELP:
                 break;
@@ -2928,9 +2983,7 @@
                 break;
         }
     }
-    return status;
 }
-
 void Dumpstate::Cancel() {
     CleanupTmpFiles();
     android::os::UnlinkAndLogOnError(log_path_);
@@ -3181,7 +3234,7 @@
 
     // Share the final file with the caller if the user has consented or Shell is the caller.
     Dumpstate::RunStatus status = Dumpstate::RunStatus::OK;
-    if (CalledByApi()) {
+    if (CalledByApi() && !options_->is_consent_deferred) {
         status = CopyBugreportIfUserConsented(calling_uid);
         if (status != Dumpstate::RunStatus::OK &&
             status != Dumpstate::RunStatus::USER_CONSENT_TIMED_OUT) {
@@ -3270,6 +3323,15 @@
         return;
     }
 
+    // Include the proto logging from WMShell.
+    RunCommand(
+        // Empty name because it's not intended to be classified as a bugreport section.
+        // Actual logging files can be found as "/data/misc/wmtrace/shell_log.winscope"
+        // in the bugreport.
+        "", {"dumpsys", "activity", "service", "SystemUIService",
+             "WMShell", "protolog", "save-for-bugreport"},
+        CommandOptions::WithTimeout(10).Always().DropRoot().RedirectStderr().Build());
+
     // Currently WindowManagerService and InputMethodManagerService support WinScope protocol.
     for (const auto& service : {"input_method", "window"}) {
         RunCommand(
@@ -3326,9 +3388,11 @@
 }
 
 void Dumpstate::MaybeCheckUserConsent(int32_t calling_uid, const std::string& calling_package) {
-    if (multiuser_get_app_id(calling_uid) == AID_SHELL || !CalledByApi()) {
-        // No need to get consent for shell triggered dumpstates, or not through
-        // bugreporting API (i.e. no fd to copy back).
+    if (multiuser_get_app_id(calling_uid) == AID_SHELL ||
+        !CalledByApi() || options_->is_consent_deferred) {
+        // No need to get consent for shell triggered dumpstates, or not
+        // through bugreporting API (i.e. no fd to copy back), or when consent
+        // is deferred.
         return;
     }
     consent_callback_ = new ConsentCallback();
diff --git a/cmds/dumpstate/dumpstate.h b/cmds/dumpstate/dumpstate.h
index 9f894b5..8a31c31 100644
--- a/cmds/dumpstate/dumpstate.h
+++ b/cmds/dumpstate/dumpstate.h
@@ -207,7 +207,9 @@
     // The flags used to customize bugreport requests.
     enum BugreportFlag {
         BUGREPORT_USE_PREDUMPED_UI_DATA =
-          android::os::IDumpstate::BUGREPORT_FLAG_USE_PREDUMPED_UI_DATA
+          android::os::IDumpstate::BUGREPORT_FLAG_USE_PREDUMPED_UI_DATA,
+        BUGREPORT_FLAG_DEFER_CONSENT =
+          android::os::IDumpstate::BUGREPORT_FLAG_DEFER_CONSENT
     };
 
     static android::os::dumpstate::CommandOptions DEFAULT_DUMPSYS;
@@ -353,6 +355,15 @@
      */
     RunStatus Run(int32_t calling_uid, const std::string& calling_package);
 
+    /*
+     * Entry point for retrieving a previously generated bugreport.
+     *
+     * Initialize() dumpstate before calling this method.
+     */
+    RunStatus Retrieve(int32_t calling_uid, const std::string& calling_package);
+
     RunStatus ParseCommandlineAndRun(int argc, char* argv[]);
 
     /* Deletes in-progress files */
@@ -396,6 +407,7 @@
         bool progress_updates_to_socket = false;
         bool do_screenshot = false;
         bool is_screenshot_copied = false;
+        bool is_consent_deferred = false;
         bool is_remote_mode = false;
         bool show_header_only = false;
         bool telephony_only = false;
@@ -548,6 +560,7 @@
 
   private:
     RunStatus RunInternal(int32_t calling_uid, const std::string& calling_package);
+    RunStatus RetrieveInternal(int32_t calling_uid, const std::string& calling_package);
 
     RunStatus DumpstateDefaultAfterCritical();
     RunStatus dumpstate();
@@ -572,6 +585,8 @@
 
     RunStatus HandleUserConsentDenied();
 
+    void HandleRunStatus(RunStatus status);
+
     // Copies bugreport artifacts over to the caller's directories provided there is user consent or
     // called by Shell.
     RunStatus CopyBugreportIfUserConsented(int32_t calling_uid);
diff --git a/cmds/dumpstate/tests/dumpstate_smoke_test.cpp b/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
index b091c8e..ccf64fe 100644
--- a/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
+++ b/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
@@ -160,7 +160,7 @@
         return binder::Status::ok();
     }
 
-    binder::Status onFinished() override {
+    binder::Status onFinished([[maybe_unused]] const std::string& bugreport_file) override {
         std::lock_guard<std::mutex> lock(lock_);
         is_finished_ = true;
         dprintf(out_fd_, "\rFinished");
diff --git a/cmds/dumpstate/tests/dumpstate_test.cpp b/cmds/dumpstate/tests/dumpstate_test.cpp
index 1ffcafa..87f9254 100644
--- a/cmds/dumpstate/tests/dumpstate_test.cpp
+++ b/cmds/dumpstate/tests/dumpstate_test.cpp
@@ -71,7 +71,7 @@
   public:
     MOCK_METHOD1(onProgress, binder::Status(int32_t progress));
     MOCK_METHOD1(onError, binder::Status(int32_t error_code));
-    MOCK_METHOD0(onFinished, binder::Status());
+    MOCK_METHOD1(onFinished, binder::Status(const std::string& bugreport_file));
     MOCK_METHOD1(onScreenshotTaken, binder::Status(bool success));
     MOCK_METHOD0(onUiIntensiveBugreportDumpsFinished, binder::Status());
 
@@ -486,6 +486,20 @@
     EXPECT_TRUE(options_.ValidateOptions());
 }
 
+TEST_F(DumpOptionsTest, InitializeBugreportFlags) {
+    int flags = Dumpstate::BugreportFlag::BUGREPORT_USE_PREDUMPED_UI_DATA |
+                Dumpstate::BugreportFlag::BUGREPORT_FLAG_DEFER_CONSENT;
+    options_.Initialize(
+      Dumpstate::BugreportMode::BUGREPORT_FULL, flags, fd, fd, true);
+    EXPECT_TRUE(options_.is_consent_deferred);
+    EXPECT_TRUE(options_.use_predumped_ui_data);
+
+    options_.Initialize(
+      Dumpstate::BugreportMode::BUGREPORT_FULL, 0, fd, fd, true);
+    EXPECT_FALSE(options_.is_consent_deferred);
+    EXPECT_FALSE(options_.use_predumped_ui_data);
+}
+
 class DumpstateTest : public DumpstateBaseTest {
   public:
     void SetUp() {
diff --git a/cmds/servicemanager/ServiceManager.cpp b/cmds/servicemanager/ServiceManager.cpp
index 695faf8..91bcb8d 100644
--- a/cmds/servicemanager/ServiceManager.cpp
+++ b/cmds/servicemanager/ServiceManager.cpp
@@ -39,6 +39,11 @@
 
 namespace android {
 
+bool is_multiuser_uid_isolated(uid_t uid) {
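+    // Isolated (sandboxed) app processes are assigned AIDs in the
+    // [AID_ISOLATED_START, AID_ISOLATED_END] range.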
+    uid_t appid = multiuser_get_app_id(uid);
+    return appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END;
+}
+
 #ifndef VENDORSERVICEMANAGER
 
 struct ManifestWithDescription {
@@ -273,13 +278,8 @@
     if (auto it = mNameToService.find(name); it != mNameToService.end()) {
         service = &(it->second);
 
-        if (!service->allowIsolated) {
-            uid_t appid = multiuser_get_app_id(ctx.uid);
-            bool isIsolated = appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END;
-
-            if (isIsolated) {
-                return nullptr;
-            }
+        if (!service->allowIsolated && is_multiuser_uid_isolated(ctx.uid)) {
+            return nullptr;
         }
         out = service->binder;
     }
@@ -425,7 +425,17 @@
     auto ctx = mAccess->getCallingContext();
 
     if (!mAccess->canFind(ctx, name)) {
-        return Status::fromExceptionCode(Status::EX_SECURITY);
+        return Status::fromExceptionCode(Status::EX_SECURITY, "SELinux");
+    }
+
+    // note - we could allow isolated apps to get notifications if we kept
+    // track of isolated and non-isolated callbacks separately, but isolated
+    // apps shouldn't be accessing lazy services anyway, so they can use
+    // different APIs and this path stays simple. Here we disallow
+    // registration entirely, because the service might not be registered yet.
+    if (is_multiuser_uid_isolated(ctx.uid)) {
+        return Status::fromExceptionCode(Status::EX_SECURITY, "isolated app");
     }
 
     if (!isValidServiceName(name)) {
diff --git a/cmds/servicemanager/test_sm.cpp b/cmds/servicemanager/test_sm.cpp
index 0fd8d8e..cae32e3 100644
--- a/cmds/servicemanager/test_sm.cpp
+++ b/cmds/servicemanager/test_sm.cpp
@@ -383,6 +383,22 @@
 
     sp<CallbackHistorian> cb = sp<CallbackHistorian>::make();
 
+    EXPECT_EQ(sm->registerForNotifications("foofoo", cb).exceptionCode(), Status::EX_SECURITY);
+}
+
+TEST(GetService, IsolatedCantRegister) {
+    std::unique_ptr<MockAccess> access = std::make_unique<NiceMock<MockAccess>>();
+
+    EXPECT_CALL(*access, getCallingContext())
+            .WillOnce(Return(Access::CallingContext{
+                    .uid = AID_ISOLATED_START,
+            }));
+    EXPECT_CALL(*access, canFind(_, _)).WillOnce(Return(true));
+
+    sp<ServiceManager> sm = sp<ServiceManager>::make(std::move(access));
+
+    sp<CallbackHistorian> cb = sp<CallbackHistorian>::make();
+
     EXPECT_EQ(sm->registerForNotifications("foofoo", cb).exceptionCode(),
         Status::EX_SECURITY);
 }
diff --git a/include/input/Input.h b/include/input/Input.h
index 7573282..608519b 100644
--- a/include/input/Input.h
+++ b/include/input/Input.h
@@ -1113,6 +1113,7 @@
 enum class PointerIconStyle : int32_t {
     TYPE_CUSTOM = -1,
     TYPE_NULL = 0,
+    TYPE_NOT_SPECIFIED = 1,
     TYPE_ARROW = 1000,
     TYPE_CONTEXT_MENU = 1001,
     TYPE_HAND = 1002,
diff --git a/include/private/performance_hint_private.h b/include/private/performance_hint_private.h
index eaf3b5e..d50c5f8 100644
--- a/include/private/performance_hint_private.h
+++ b/include/private/performance_hint_private.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_PRIVATE_NATIVE_PERFORMANCE_HINT_PRIVATE_H
 #define ANDROID_PRIVATE_NATIVE_PERFORMANCE_HINT_PRIVATE_H
 
+#include <stdint.h>
+
 __BEGIN_DECLS
 
 /**
@@ -27,7 +29,7 @@
 /**
  * Hints for the session used to signal upcoming changes in the mode or workload.
  */
-enum SessionHint {
+enum SessionHint: int32_t {
     /**
      * This hint indicates a sudden increase in CPU workload intensity. It means
      * that this hint session needs extra CPU resources immediately to meet the
@@ -61,7 +63,7 @@
  * @return 0 on success
  *         EPIPE if communication with the system service has failed.
  */
-int APerformanceHint_sendHint(void* session, int hint);
+int APerformanceHint_sendHint(void* session, SessionHint hint);
 
 /**
  * Return the list of thread ids, this API should only be used for testing only.
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index 7e7bba3..661f70c 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -287,6 +287,14 @@
     cflags: [
         "-DBINDER_WITH_KERNEL_IPC",
     ],
+    arch: {
+        // TODO(b/254713216): undefined symbol in BufferedTextOutput::getBuffer
+        riscv64: {
+            lto: {
+                thin: false,
+            },
+        },
+    },
 }
 
 cc_library {
diff --git a/libs/gui/BLASTBufferQueue.cpp b/libs/gui/BLASTBufferQueue.cpp
index 60603ba..5d12463 100644
--- a/libs/gui/BLASTBufferQueue.cpp
+++ b/libs/gui/BLASTBufferQueue.cpp
@@ -432,8 +432,8 @@
         mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
     }
 
-    const auto numPendingBuffersToHold =
-            isEGL ? std::max(0u, mMaxAcquiredBuffers - mCurrentMaxAcquiredBufferCount) : 0;
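+    // Use signed arithmetic so the difference clamps to zero instead of underflowing when
+    // mCurrentMaxAcquiredBufferCount exceeds mMaxAcquiredBuffers.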
+    const uint32_t numPendingBuffersToHold =
+            isEGL ? std::max(0, mMaxAcquiredBuffers - (int32_t)mCurrentMaxAcquiredBufferCount) : 0;
 
     auto rb = ReleasedBuffer{id, releaseFence};
     if (std::find(mPendingRelease.begin(), mPendingRelease.end(), rb) == mPendingRelease.end()) {
diff --git a/libs/gui/Choreographer.cpp b/libs/gui/Choreographer.cpp
index 6b25b26..99bf6ba 100644
--- a/libs/gui/Choreographer.cpp
+++ b/libs/gui/Choreographer.cpp
@@ -101,8 +101,9 @@
     return gChoreographer;
 }
 
-Choreographer::Choreographer(const sp<Looper>& looper)
-      : DisplayEventDispatcher(looper, gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp),
+Choreographer::Choreographer(const sp<Looper>& looper, const sp<IBinder>& layerHandle)
+      : DisplayEventDispatcher(looper, gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp, {},
+                               layerHandle),
         mLooper(looper),
         mThreadId(std::this_thread::get_id()) {
     std::lock_guard<std::mutex> _l(gChoreographers.lock);
diff --git a/libs/gui/DisplayEventDispatcher.cpp b/libs/gui/DisplayEventDispatcher.cpp
index 501e69a..8a88377 100644
--- a/libs/gui/DisplayEventDispatcher.cpp
+++ b/libs/gui/DisplayEventDispatcher.cpp
@@ -37,9 +37,10 @@
 
 DisplayEventDispatcher::DisplayEventDispatcher(const sp<Looper>& looper,
                                                gui::ISurfaceComposer::VsyncSource vsyncSource,
-                                               EventRegistrationFlags eventRegistration)
+                                               EventRegistrationFlags eventRegistration,
+                                               const sp<IBinder>& layerHandle)
       : mLooper(looper),
-        mReceiver(vsyncSource, eventRegistration),
+        mReceiver(vsyncSource, eventRegistration, layerHandle),
         mWaitingForVsync(false),
         mLastVsyncCount(0),
         mLastScheduleVsyncTime(0) {
diff --git a/libs/gui/DisplayEventReceiver.cpp b/libs/gui/DisplayEventReceiver.cpp
index c52fb6b..6849a95 100644
--- a/libs/gui/DisplayEventReceiver.cpp
+++ b/libs/gui/DisplayEventReceiver.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "DisplayEventReceiver"
+
 #include <string.h>
 
 #include <utils/Errors.h>
@@ -32,7 +34,8 @@
 // ---------------------------------------------------------------------------
 
 DisplayEventReceiver::DisplayEventReceiver(gui::ISurfaceComposer::VsyncSource vsyncSource,
-                                           EventRegistrationFlags eventRegistration) {
+                                           EventRegistrationFlags eventRegistration,
+                                           const sp<IBinder>& layerHandle) {
     sp<gui::ISurfaceComposer> sf(ComposerServiceAIDL::getComposerService());
     if (sf != nullptr) {
         mEventConnection = nullptr;
@@ -41,8 +44,8 @@
                                                  static_cast<
                                                          gui::ISurfaceComposer::EventRegistration>(
                                                          eventRegistration.get()),
-                                                 &mEventConnection);
-        if (mEventConnection != nullptr) {
+                                                 layerHandle, &mEventConnection);
+        if (status.isOk() && mEventConnection != nullptr) {
             mDataChannel = std::make_unique<gui::BitTube>();
             status = mEventConnection->stealReceiveChannel(mDataChannel.get());
             if (!status.isOk()) {
@@ -51,6 +54,8 @@
                 mDataChannel.reset();
                 mEventConnection.clear();
             }
+        } else {
+            ALOGE("DisplayEventConnection creation failed: status=%s", status.toString8().c_str());
         }
     }
 }
diff --git a/libs/gui/SurfaceControl.cpp b/libs/gui/SurfaceControl.cpp
index 7aee882..c5f9c38 100644
--- a/libs/gui/SurfaceControl.cpp
+++ b/libs/gui/SurfaceControl.cpp
@@ -26,6 +26,7 @@
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/Log.h>
+#include <utils/Looper.h>
 #include <utils/threads.h>
 
 #include <binder/IPCThreadState.h>
@@ -34,8 +35,9 @@
 #include <ui/Rect.h>
 #include <ui/StaticDisplayInfo.h>
 
-#include <gui/BufferQueueCore.h>
 #include <gui/BLASTBufferQueue.h>
+#include <gui/BufferQueueCore.h>
+#include <gui/Choreographer.h>
 #include <gui/ISurfaceComposer.h>
 #include <gui/Surface.h>
 #include <gui/SurfaceComposerClient.h>
@@ -191,6 +193,24 @@
     return mName;
 }
 
+std::shared_ptr<Choreographer> SurfaceControl::getChoreographer() {
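+    // Lazily create a Choreographer tied to this SurfaceControl's layer handle,
+    // using the calling thread's Looper.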
+    if (mChoreographer) {
+        return mChoreographer;
+    }
+    sp<Looper> looper = Looper::getForThread();
+    if (!looper.get()) {
+        ALOGE("%s: No looper prepared for thread", __func__);
+        return nullptr;
+    }
+    mChoreographer = std::make_shared<Choreographer>(looper, getHandle());
+    status_t result = mChoreographer->initialize();
+    if (result != OK) {
+        ALOGE("Failed to initialize choreographer");
+        mChoreographer = nullptr;
+    }
+    return mChoreographer;
+}
+
 sp<IGraphicBufferProducer> SurfaceControl::getIGraphicBufferProducer()
 {
     getSurface();
diff --git a/libs/gui/aidl/android/gui/ISurfaceComposer.aidl b/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
index 597749a..9812142 100644
--- a/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
+++ b/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
@@ -68,9 +68,15 @@
 
     /**
      * Create a display event connection
+     *
+     * layerHandle
+     *     Optional binder handle representing a Layer in SF to associate the new
+     *     DisplayEventConnection with. This handle can be found inside a surface control after
+     *     surface creation, see ISurfaceComposerClient::createSurface. Set to null if no layer
+     *     association should be made.
      */
     @nullable IDisplayEventConnection createDisplayEventConnection(VsyncSource vsyncSource,
-            EventRegistration eventRegistration);
+            EventRegistration eventRegistration, @nullable IBinder layerHandle);
 
     /**
      * Create a connection with SurfaceFlinger.
diff --git a/libs/gui/fuzzer/libgui_fuzzer_utils.h b/libs/gui/fuzzer/libgui_fuzzer_utils.h
index 14a0e39..f01c2a9 100644
--- a/libs/gui/fuzzer/libgui_fuzzer_utils.h
+++ b/libs/gui/fuzzer/libgui_fuzzer_utils.h
@@ -64,7 +64,7 @@
     MOCK_METHOD(binder::Status, bootFinished, (), (override));
     MOCK_METHOD(binder::Status, createDisplayEventConnection,
                 (gui::ISurfaceComposer::VsyncSource, gui::ISurfaceComposer::EventRegistration,
-                 sp<gui::IDisplayEventConnection>*),
+                 const sp<IBinder>& /*layerHandle*/, sp<gui::IDisplayEventConnection>*),
                 (override));
     MOCK_METHOD(binder::Status, createConnection, (sp<gui::ISurfaceComposerClient>*), (override));
     MOCK_METHOD(binder::Status, createDisplay, (const std::string&, bool, float, sp<IBinder>*),
diff --git a/libs/gui/include/gui/Choreographer.h b/libs/gui/include/gui/Choreographer.h
index 89a7058..1df9b11 100644
--- a/libs/gui/include/gui/Choreographer.h
+++ b/libs/gui/include/gui/Choreographer.h
@@ -73,7 +73,8 @@
     };
     static Context gChoreographers;
 
-    explicit Choreographer(const sp<Looper>& looper) EXCLUDES(gChoreographers.lock);
+    explicit Choreographer(const sp<Looper>& looper, const sp<IBinder>& layerHandle = nullptr)
+            EXCLUDES(gChoreographers.lock);
     void postFrameCallbackDelayed(AChoreographer_frameCallback cb,
                                   AChoreographer_frameCallback64 cb64,
                                   AChoreographer_vsyncCallback vsyncCallback, void* data,
diff --git a/libs/gui/include/gui/DisplayEventDispatcher.h b/libs/gui/include/gui/DisplayEventDispatcher.h
index bf3a07b..140efa6 100644
--- a/libs/gui/include/gui/DisplayEventDispatcher.h
+++ b/libs/gui/include/gui/DisplayEventDispatcher.h
@@ -26,7 +26,8 @@
     explicit DisplayEventDispatcher(const sp<Looper>& looper,
                                     gui::ISurfaceComposer::VsyncSource vsyncSource =
                                             gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
-                                    EventRegistrationFlags eventRegistration = {});
+                                    EventRegistrationFlags eventRegistration = {},
+                                    const sp<IBinder>& layerHandle = nullptr);
 
     status_t initialize();
     void dispose();
diff --git a/libs/gui/include/gui/DisplayEventReceiver.h b/libs/gui/include/gui/DisplayEventReceiver.h
index 0f4907f..7fd6c35 100644
--- a/libs/gui/include/gui/DisplayEventReceiver.h
+++ b/libs/gui/include/gui/DisplayEventReceiver.h
@@ -119,7 +119,8 @@
      */
     explicit DisplayEventReceiver(gui::ISurfaceComposer::VsyncSource vsyncSource =
                                           gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
-                                  EventRegistrationFlags eventRegistration = {});
+                                  EventRegistrationFlags eventRegistration = {},
+                                  const sp<IBinder>& layerHandle = nullptr);
 
     /*
      * ~DisplayEventReceiver severs the connection with SurfaceFlinger, new events
diff --git a/libs/gui/include/gui/SurfaceControl.h b/libs/gui/include/gui/SurfaceControl.h
index 1d4fc7f..344b957 100644
--- a/libs/gui/include/gui/SurfaceControl.h
+++ b/libs/gui/include/gui/SurfaceControl.h
@@ -36,6 +36,7 @@
 
 // ---------------------------------------------------------------------------
 
+class Choreographer;
 class IGraphicBufferProducer;
 class Surface;
 class SurfaceComposerClient;
@@ -80,6 +81,9 @@
     int32_t getLayerId() const;
     const std::string& getName() const;
 
+    // TODO(b/267195698): Consider renaming.
+    std::shared_ptr<Choreographer> getChoreographer();
+
     sp<IGraphicBufferProducer> getIGraphicBufferProducer();
 
     status_t clearLayerFrameStats() const;
@@ -130,6 +134,7 @@
     PixelFormat mFormat = PIXEL_FORMAT_NONE;
     uint32_t mCreateFlags = 0;
     uint64_t mFallbackFrameNumber = 100;
+    std::shared_ptr<Choreographer> mChoreographer;
 };
 
 }; // namespace android
diff --git a/libs/gui/tests/Surface_test.cpp b/libs/gui/tests/Surface_test.cpp
index 9b2bf7f..babc197 100644
--- a/libs/gui/tests/Surface_test.cpp
+++ b/libs/gui/tests/Surface_test.cpp
@@ -725,6 +725,7 @@
 
     binder::Status createDisplayEventConnection(
             VsyncSource /*vsyncSource*/, EventRegistration /*eventRegistration*/,
+            const sp<IBinder>& /*layerHandle*/,
             sp<gui::IDisplayEventConnection>* outConnection) override {
         *outConnection = nullptr;
         return binder::Status::ok();
diff --git a/opengl/libs/Android.bp b/opengl/libs/Android.bp
index 750338b..49e1cba 100644
--- a/opengl/libs/Android.bp
+++ b/opengl/libs/Android.bp
@@ -144,6 +144,7 @@
     srcs: [
         "EGL/BlobCache.cpp",
         "EGL/FileBlobCache.cpp",
+        "EGL/MultifileBlobCache.cpp",
     ],
     export_include_dirs: ["EGL"],
 }
@@ -160,7 +161,6 @@
     srcs: [
         "EGL/egl_tls.cpp",
         "EGL/egl_cache.cpp",
-        "EGL/egl_cache_multifile.cpp",
         "EGL/egl_display.cpp",
         "EGL/egl_object.cpp",
         "EGL/egl_layers.cpp",
@@ -205,6 +205,11 @@
     srcs: [
         "EGL/BlobCache.cpp",
         "EGL/BlobCache_test.cpp",
+        "EGL/MultifileBlobCache.cpp",
+        "EGL/MultifileBlobCache_test.cpp",
+    ],
+    shared_libs: [
+        "libutils",
     ],
 }
 
diff --git a/opengl/libs/EGL/MultifileBlobCache.cpp b/opengl/libs/EGL/MultifileBlobCache.cpp
new file mode 100644
index 0000000..48b184b
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache.cpp
@@ -0,0 +1,668 @@
+/*
+ ** Copyright 2022, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ **     http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+
+#include "MultifileBlobCache.h"
+
+#include <android-base/properties.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <log/log.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+#include <utime.h>
+
+#include <algorithm>
+#include <chrono>
+#include <limits>
+#include <locale>
+
+#include <utils/JenkinsHash.h>
+
+using namespace std::literals;
+
+namespace {
+
+// Open the file and determine the size of the value it contains
+size_t getValueSizeFromFile(int fd, const std::string& entryPath) {
+    // Read the beginning of the file to get header
+    android::MultifileHeader header;
+    size_t result = read(fd, static_cast<void*>(&header), sizeof(android::MultifileHeader));
+    if (result != sizeof(android::MultifileHeader)) {
+        ALOGE("Error reading MultifileHeader from cache entry (%s): %s", entryPath.c_str(),
+              std::strerror(errno));
+        return 0;
+    }
+
+    return header.valueSize;
+}
+
+// Helper function to close entries or free them
+void freeHotCacheEntry(android::MultifileHotCache& entry) {
+    if (entry.entryFd != -1) {
+        // If we have an fd, then this entry was added to hot cache via INIT or GET
+        // We need to unmap and close the entry
+        munmap(entry.entryBuffer, entry.entrySize);
+        close(entry.entryFd);
+    } else {
+        // Otherwise, this was added to hot cache during SET, so it was never mapped;
+        // its fd was only used on the deferred write thread.
+        delete[] entry.entryBuffer;
+    }
+}
+
+} // namespace
+
+namespace android {
+
+MultifileBlobCache::MultifileBlobCache(size_t maxTotalSize, size_t maxHotCacheSize,
+                                       const std::string& baseDir)
+      : mInitialized(false),
+        mMaxTotalSize(maxTotalSize),
+        mTotalCacheSize(0),
+        mHotCacheLimit(maxHotCacheSize),
+        mHotCacheSize(0),
+        mWorkerThreadIdle(true) {
+    if (baseDir.empty()) {
+        return;
+    }
+
+    // Establish the name of our multifile directory
+    mMultifileDirName = baseDir + ".multifile";
+
+    // Set a limit for max key and value, ensuring at least one entry can always fit in hot cache
+    mMaxKeySize = mHotCacheLimit / 4;
+    mMaxValueSize = mHotCacheLimit / 2;
+
+    // Initialize our cache with the contents of the directory
+    mTotalCacheSize = 0;
+
+    // See if the dir exists, and initialize using its contents
+    struct stat st;
+    if (stat(mMultifileDirName.c_str(), &st) == 0) {
+        // Read all the files and gather details, then preload their contents
+        DIR* dir;
+        struct dirent* entry;
+        if ((dir = opendir(mMultifileDirName.c_str())) != nullptr) {
+            while ((entry = readdir(dir)) != nullptr) {
+                if (entry->d_name == "."s || entry->d_name == ".."s) {
+                    continue;
+                }
+
+                std::string entryName = entry->d_name;
+                std::string fullPath = mMultifileDirName + "/" + entryName;
+
+                // The filename is the same as the entryHash
+                uint32_t entryHash = static_cast<uint32_t>(strtoul(entry->d_name, nullptr, 10));
+
+                // Look up the details of the file
+                struct stat st;
+                if (stat(fullPath.c_str(), &st) != 0) {
+                    ALOGE("Failed to stat %s", fullPath.c_str());
+                    return;
+                }
+
+                // Open the file so we can read its header
+                int fd = open(fullPath.c_str(), O_RDONLY);
+                if (fd == -1) {
+                    ALOGE("Cache error - failed to open fullPath: %s, error: %s", fullPath.c_str(),
+                          std::strerror(errno));
+                    return;
+                }
+
+                // Look up the details we track about each file
+                size_t valueSize = getValueSizeFromFile(fd, fullPath);
+                size_t fileSize = st.st_size;
+                time_t accessTime = st.st_atime;
+
+                // If the cache entry is damaged or no good, remove it
+                // TODO: Perform any other checks
+                if (valueSize <= 0 || fileSize <= 0 || accessTime <= 0) {
+                    if (remove(fullPath.c_str()) != 0) {
+                        ALOGE("Error removing %s: %s", fullPath.c_str(), std::strerror(errno));
+                    }
+                    continue;
+                }
+
+                // Track details for rapid lookup later
+                trackEntry(entryHash, valueSize, fileSize, accessTime);
+
+                // Track the total size
+                increaseTotalCacheSize(fileSize);
+
+                // Preload the entry for fast retrieval
+                if ((mHotCacheSize + fileSize) < mHotCacheLimit) {
+                    // Memory map the file
+                    uint8_t* mappedEntry = reinterpret_cast<uint8_t*>(
+                            mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0));
+                    if (mappedEntry == MAP_FAILED) {
+                        ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
+                        close(fd);
+                        continue;
+                    }
+
+                    ALOGV("INIT: Populating hot cache with fd = %i, cacheEntry = %p for "
+                          "entryHash %u",
+                          fd, mappedEntry, entryHash);
+
+                    // Track the details of the preload so they can be retrieved later
+                    if (!addToHotCache(entryHash, fd, mappedEntry, fileSize)) {
+                        ALOGE("INIT Failed to add %u to hot cache", entryHash);
+                        munmap(mappedEntry, fileSize);
+                        close(fd);
+                        return;
+                    }
+                } else {
+                    close(fd);
+                }
+            }
+            closedir(dir);
+        } else {
+            ALOGE("Unable to open filename: %s", mMultifileDirName.c_str());
+        }
+    } else {
+        // If the multifile directory does not exist, create it and start from scratch
+        if (mkdir(mMultifileDirName.c_str(), 0755) != 0 && (errno != EEXIST)) {
+            ALOGE("Unable to create directory (%s), errno (%i)", mMultifileDirName.c_str(), errno);
+        }
+    }
+
+    mTaskThread = std::thread(&MultifileBlobCache::processTasks, this);
+
+    mInitialized = true;
+}
+
+MultifileBlobCache::~MultifileBlobCache() {
+    // Inform the worker thread we're done
+    ALOGV("DESCTRUCTOR: Shutting down worker thread");
+    DeferredTask task(TaskCommand::Exit);
+    queueTask(std::move(task));
+
+    // Wait for it to complete
+    ALOGV("DESCTRUCTOR: Waiting for worker thread to complete");
+    waitForWorkComplete();
+    mTaskThread.join();
+}
+
+// Set will add the entry to hot cache and start a deferred process to write it to disk
+void MultifileBlobCache::set(const void* key, EGLsizeiANDROID keySize, const void* value,
+                             EGLsizeiANDROID valueSize) {
+    if (!mInitialized) {
+        return;
+    }
+
+    // Ensure key and value are under their limits
+    if (keySize > mMaxKeySize || valueSize > mMaxValueSize) {
+        ALOGV("SET: keySize (%lu vs %zu) or valueSize (%lu vs %zu) too large", keySize, mMaxKeySize,
+              valueSize, mMaxValueSize);
+        return;
+    }
+
+    // Generate a hash of the key and use it to track this entry
+    uint32_t entryHash = android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
+
+    size_t fileSize = sizeof(MultifileHeader) + keySize + valueSize;
+
+    // If we're going to be over the cache limit, kick off a trim to clear space
+    if (getTotalSize() + fileSize > mMaxTotalSize) {
+        ALOGV("SET: Cache is full, calling trimCache to clear space");
+        trimCache(mMaxTotalSize);
+    }
+
+    ALOGV("SET: Add %u to cache", entryHash);
+
+    uint8_t* buffer = new uint8_t[fileSize];
+
+    // Write the key and value after the header
+    android::MultifileHeader header = {keySize, valueSize};
+    memcpy(static_cast<void*>(buffer), static_cast<const void*>(&header),
+           sizeof(android::MultifileHeader));
+    memcpy(static_cast<void*>(buffer + sizeof(MultifileHeader)), static_cast<const void*>(key),
+           keySize);
+    memcpy(static_cast<void*>(buffer + sizeof(MultifileHeader) + keySize),
+           static_cast<const void*>(value), valueSize);
+
+    std::string fullPath = mMultifileDirName + "/" + std::to_string(entryHash);
+
+    // Track the size and access time for quick recall
+    trackEntry(entryHash, valueSize, fileSize, time(0));
+
+    // Update the overall cache size
+    increaseTotalCacheSize(fileSize);
+
+    // Keep the entry in hot cache for quick retrieval
+    ALOGV("SET: Adding %u to hot cache.", entryHash);
+
+    // Sending -1 as the fd indicates we don't have an fd for this
+    if (!addToHotCache(entryHash, -1, buffer, fileSize)) {
+        ALOGE("GET: Failed to add %u to hot cache", entryHash);
+        return;
+    }
+
+    // Track that we're creating a pending write for this entry
+    // Include the buffer to handle the case when multiple writes are pending for an entry
+    mDeferredWrites.insert(std::make_pair(entryHash, buffer));
+
+    // Create deferred task to write to storage
+    ALOGV("SET: Adding task to queue.");
+    DeferredTask task(TaskCommand::WriteToDisk);
+    task.initWriteToDisk(entryHash, fullPath, buffer, fileSize);
+    queueTask(std::move(task));
+}
+
+// Get will check the hot cache, then load it from disk if needed
+EGLsizeiANDROID MultifileBlobCache::get(const void* key, EGLsizeiANDROID keySize, void* value,
+                                        EGLsizeiANDROID valueSize) {
+    if (!mInitialized) {
+        return 0;
+    }
+
+    // Ensure key and value are under their limits
+    if (keySize > mMaxKeySize || valueSize > mMaxValueSize) {
+        ALOGV("GET: keySize (%lu vs %zu) or valueSize (%lu vs %zu) too large", keySize, mMaxKeySize,
+              valueSize, mMaxValueSize);
+        return 0;
+    }
+
+    // Generate a hash of the key and use it to track this entry
+    uint32_t entryHash = android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
+
+    // See if we have this file
+    if (!contains(entryHash)) {
+        ALOGV("GET: Cache MISS - cache does not contain entry: %u", entryHash);
+        return 0;
+    }
+
+    // Look up the data for this entry
+    MultifileEntryStats entryStats = getEntryStats(entryHash);
+
+    size_t cachedValueSize = entryStats.valueSize;
+    if (cachedValueSize > valueSize) {
+        ALOGV("GET: Cache MISS - valueSize not large enough (%lu) for entry %u, returning required"
+              "size (%zu)",
+              valueSize, entryHash, cachedValueSize);
+        return cachedValueSize;
+    }
+
+    // We have the file and have enough room to write it out, return the entry
+    ALOGV("GET: Cache HIT - cache contains entry: %u", entryHash);
+
+    // Look up the size of the file
+    size_t fileSize = entryStats.fileSize;
+    if (keySize > fileSize) {
+        ALOGW("keySize (%lu) is larger than entrySize (%zu). This is a hash collision or modified "
+              "file",
+              keySize, fileSize);
+        return 0;
+    }
+
+    std::string fullPath = mMultifileDirName + "/" + std::to_string(entryHash);
+
+    // Open the hashed filename path
+    uint8_t* cacheEntry = 0;
+
+    // Check hot cache
+    if (mHotCache.find(entryHash) != mHotCache.end()) {
+        ALOGV("GET: HotCache HIT for entry %u", entryHash);
+        cacheEntry = mHotCache[entryHash].entryBuffer;
+    } else {
+        ALOGV("GET: HotCache MISS for entry: %u", entryHash);
+
+        if (mDeferredWrites.find(entryHash) != mDeferredWrites.end()) {
+            // Wait for writes to complete if there is an outstanding write for this entry
+            ALOGV("GET: Waiting for write to complete for %u", entryHash);
+            waitForWorkComplete();
+        }
+
+        // Open the entry file
+        int fd = open(fullPath.c_str(), O_RDONLY);
+        if (fd == -1) {
+            ALOGE("Cache error - failed to open fullPath: %s, error: %s", fullPath.c_str(),
+                  std::strerror(errno));
+            return 0;
+        }
+
+        // Memory map the file
+        cacheEntry =
+                reinterpret_cast<uint8_t*>(mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0));
+        if (cacheEntry == MAP_FAILED) {
+            ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
+            close(fd);
+            return 0;
+        }
+
+        ALOGV("GET: Adding %u to hot cache", entryHash);
+        if (!addToHotCache(entryHash, fd, cacheEntry, fileSize)) {
+            ALOGE("GET: Failed to add %u to hot cache", entryHash);
+            return 0;
+        }
+
+        cacheEntry = mHotCache[entryHash].entryBuffer;
+    }
+
+    // Ensure the header matches
+    MultifileHeader* header = reinterpret_cast<MultifileHeader*>(cacheEntry);
+    if (header->keySize != keySize || header->valueSize != valueSize) {
+        ALOGW("Mismatch on keySize(%ld vs. cached %ld) or valueSize(%ld vs. cached %ld) compared "
+              "to cache header values for fullPath: %s",
+              keySize, header->keySize, valueSize, header->valueSize, fullPath.c_str());
+        removeFromHotCache(entryHash);
+        return 0;
+    }
+
+    // Compare the incoming key with our stored version (the beginning of the entry)
+    uint8_t* cachedKey = cacheEntry + sizeof(MultifileHeader);
+    int compare = memcmp(cachedKey, key, keySize);
+    if (compare != 0) {
+        ALOGW("Cached key and new key do not match! This is a hash collision or modified file");
+        removeFromHotCache(entryHash);
+        return 0;
+    }
+
+    // Remaining entry following the key is the value
+    uint8_t* cachedValue = cacheEntry + (keySize + sizeof(MultifileHeader));
+    memcpy(value, cachedValue, cachedValueSize);
+
+    return cachedValueSize;
+}
+
+void MultifileBlobCache::finish() {
+    // Wait for all deferred writes to complete
+    ALOGV("FINISH: Waiting for work to complete.");
+    waitForWorkComplete();
+
+    // Close all entries in the hot cache
+    for (auto hotCacheIter = mHotCache.begin(); hotCacheIter != mHotCache.end();) {
+        uint32_t entryHash = hotCacheIter->first;
+        MultifileHotCache entry = hotCacheIter->second;
+
+        ALOGV("FINISH: Closing hot cache entry for %u", entryHash);
+        freeHotCacheEntry(entry);
+
+        mHotCache.erase(hotCacheIter++);
+    }
+}
+
+void MultifileBlobCache::trackEntry(uint32_t entryHash, EGLsizeiANDROID valueSize, size_t fileSize,
+                                    time_t accessTime) {
+    mEntries.insert(entryHash);
+    mEntryStats[entryHash] = {valueSize, fileSize, accessTime};
+}
+
+bool MultifileBlobCache::contains(uint32_t hashEntry) const {
+    return mEntries.find(hashEntry) != mEntries.end();
+}
+
+MultifileEntryStats MultifileBlobCache::getEntryStats(uint32_t entryHash) {
+    return mEntryStats[entryHash];
+}
+
+void MultifileBlobCache::increaseTotalCacheSize(size_t fileSize) {
+    mTotalCacheSize += fileSize;
+}
+
+void MultifileBlobCache::decreaseTotalCacheSize(size_t fileSize) {
+    mTotalCacheSize -= fileSize;
+}
+
+bool MultifileBlobCache::addToHotCache(uint32_t newEntryHash, int newFd, uint8_t* newEntryBuffer,
+                                       size_t newEntrySize) {
+    ALOGV("HOTCACHE(ADD): Adding %u to hot cache", newEntryHash);
+
+    // Clear space if we need to
+    if ((mHotCacheSize + newEntrySize) > mHotCacheLimit) {
+        ALOGV("HOTCACHE(ADD): mHotCacheSize (%zu) + newEntrySize (%zu) is to big for "
+              "mHotCacheLimit "
+              "(%zu), freeing up space for %u",
+              mHotCacheSize, newEntrySize, mHotCacheLimit, newEntryHash);
+
+        // Wait for all the files to complete writing so our hot cache is accurate
+        waitForWorkComplete();
+
+        // Free up old entries until under the limit
+        for (auto hotCacheIter = mHotCache.begin(); hotCacheIter != mHotCache.end();) {
+            uint32_t oldEntryHash = hotCacheIter->first;
+            MultifileHotCache oldEntry = hotCacheIter->second;
+
+            // Move our iterator before deleting the entry
+            hotCacheIter++;
+            if (!removeFromHotCache(oldEntryHash)) {
+                ALOGE("HOTCACHE(ADD): Unable to remove entry %u", oldEntryHash);
+                return false;
+            }
+
+            // Clear at least half the hot cache
+            if ((mHotCacheSize + newEntrySize) <= mHotCacheLimit / 2) {
+                ALOGV("HOTCACHE(ADD): Freed enough space for %zu", mHotCacheSize);
+                break;
+            }
+        }
+    }
+
+    // Track it
+    mHotCache[newEntryHash] = {newFd, newEntryBuffer, newEntrySize};
+    mHotCacheSize += newEntrySize;
+
+    ALOGV("HOTCACHE(ADD): New hot cache size: %zu", mHotCacheSize);
+
+    return true;
+}
+
+bool MultifileBlobCache::removeFromHotCache(uint32_t entryHash) {
+    if (mHotCache.find(entryHash) != mHotCache.end()) {
+        ALOGV("HOTCACHE(REMOVE): Removing %u from hot cache", entryHash);
+
+        // Wait for all the files to complete writing so our hot cache is accurate
+        waitForWorkComplete();
+
+        ALOGV("HOTCACHE(REMOVE): Closing hot cache entry for %u", entryHash);
+        MultifileHotCache entry = mHotCache[entryHash];
+        freeHotCacheEntry(entry);
+
+        // Delete the entry from our tracking
+        mHotCacheSize -= entry.entrySize;
+        size_t count = mHotCache.erase(entryHash);
+
+        return true;
+    }
+
+    return false;
+}
+
+bool MultifileBlobCache::applyLRU(size_t cacheLimit) {
+    // Walk through our map of sorted last access times and remove files until under the limit
+    for (auto cacheEntryIter = mEntryStats.begin(); cacheEntryIter != mEntryStats.end();) {
+        uint32_t entryHash = cacheEntryIter->first;
+
+        ALOGV("LRU: Removing entryHash %u", entryHash);
+
+        // Track the overall size
+        MultifileEntryStats entryStats = getEntryStats(entryHash);
+        decreaseTotalCacheSize(entryStats.fileSize);
+
+        // Remove it from hot cache if present
+        removeFromHotCache(entryHash);
+
+        // Remove it from the system
+        std::string entryPath = mMultifileDirName + "/" + std::to_string(entryHash);
+        if (remove(entryPath.c_str()) != 0) {
+            ALOGE("LRU: Error removing %s: %s", entryPath.c_str(), std::strerror(errno));
+            return false;
+        }
+
+        // Increment the iterator before clearing the entry
+        cacheEntryIter++;
+
+        // Delete the entry from our tracking
+        size_t count = mEntryStats.erase(entryHash);
+        if (count != 1) {
+            ALOGE("LRU: Failed to remove entryHash (%u) from mEntryStats", entryHash);
+            return false;
+        }
+
+        // See if it has been reduced enough
+        size_t totalCacheSize = getTotalSize();
+        if (totalCacheSize <= cacheLimit) {
+            // Success
+            ALOGV("LRU: Reduced cache to %zu", totalCacheSize);
+            return true;
+        }
+    }
+
+    ALOGV("LRU: Cache is emptry");
+    return false;
+}
+
+// When trimming the cache, this determines what fraction of the overall limit it is reduced to.
+// A divisor of two will decrease the cache to 50%, four to 25%, and so on.
+constexpr uint32_t kCacheLimitDivisor = 2;
+
+// Calculate the cache size and remove old entries until under the limit
+void MultifileBlobCache::trimCache(size_t cacheByteLimit) {
+    // Start with the value provided by egl_cache
+    size_t limit = cacheByteLimit;
+
+    // Wait for all deferred writes to complete
+    waitForWorkComplete();
+
+    size_t size = getTotalSize();
+
+    // If size is larger than the threshold, remove files using LRU
+    if (size > limit) {
+        ALOGV("TRIM: Multifile cache size is larger than %zu, removing old entries",
+              cacheByteLimit);
+        if (!applyLRU(limit / kCacheLimitDivisor)) {
+            ALOGE("Error when clearing multifile shader cache");
+            return;
+        }
+    }
+}
+
+// This function performs a task.  It only knows how to write files to disk,
+// but it could be expanded if needed.
+void MultifileBlobCache::processTask(DeferredTask& task) {
+    switch (task.getTaskCommand()) {
+        case TaskCommand::Exit: {
+            ALOGV("DEFERRED: Shutting down");
+            return;
+        }
+        case TaskCommand::WriteToDisk: {
+            uint32_t entryHash = task.getEntryHash();
+            std::string& fullPath = task.getFullPath();
+            uint8_t* buffer = task.getBuffer();
+            size_t bufferSize = task.getBufferSize();
+
+            // Create the file or reset it if already present, read+write for user only
+            int fd = open(fullPath.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
+            if (fd == -1) {
+                ALOGE("Cache error in SET - failed to open fullPath: %s, error: %s",
+                      fullPath.c_str(), std::strerror(errno));
+                return;
+            }
+
+            ALOGV("DEFERRED: Opened fd %i from %s", fd, fullPath.c_str());
+
+            ssize_t result = write(fd, buffer, bufferSize);
+            if (result != bufferSize) {
+                ALOGE("Error writing fileSize to cache entry (%s): %s", fullPath.c_str(),
+                      std::strerror(errno));
+                return;
+            }
+
+            ALOGV("DEFERRED: Completed write for: %s", fullPath.c_str());
+            close(fd);
+
+            // Erase the entry from mDeferredWrites
+            // Since there could be multiple outstanding writes for an entry, find the matching one
+            typedef std::multimap<uint32_t, uint8_t*>::iterator entryIter;
+            std::pair<entryIter, entryIter> iterPair = mDeferredWrites.equal_range(entryHash);
+            for (entryIter it = iterPair.first; it != iterPair.second; ++it) {
+                if (it->second == buffer) {
+                    ALOGV("DEFERRED: Marking write complete for %u at %p", it->first, it->second);
+                    mDeferredWrites.erase(it);
+                    break;
+                }
+            }
+
+            return;
+        }
+        default: {
+            ALOGE("DEFERRED: Unhandled task type");
+            return;
+        }
+    }
+}
+
+// This function will wait until tasks arrive, then execute them
+// If the exit command is submitted, the loop will terminate
+void MultifileBlobCache::processTasksImpl(bool* exitThread) {
+    while (true) {
+        std::unique_lock<std::mutex> lock(mWorkerMutex);
+        if (mTasks.empty()) {
+            ALOGV("WORKER: No tasks available, waiting");
+            mWorkerThreadIdle = true;
+            mWorkerIdleCondition.notify_all();
+            // Only wake if notified and command queue is not empty
+            mWorkAvailableCondition.wait(lock, [this] { return !mTasks.empty(); });
+        }
+
+        ALOGV("WORKER: Task available, waking up.");
+        mWorkerThreadIdle = false;
+        DeferredTask task = std::move(mTasks.front());
+        mTasks.pop();
+
+        if (task.getTaskCommand() == TaskCommand::Exit) {
+            ALOGV("WORKER: Exiting work loop.");
+            *exitThread = true;
+            mWorkerThreadIdle = true;
+            mWorkerIdleCondition.notify_one();
+            return;
+        }
+
+        lock.unlock();
+        processTask(task);
+    }
+}
+
+// Process tasks until the exit task is submitted
+void MultifileBlobCache::processTasks() {
+    while (true) {
+        bool exitThread = false;
+        processTasksImpl(&exitThread);
+        if (exitThread) {
+            break;
+        }
+    }
+}
+
+// Add a task to the queue to be processed by the worker thread
+void MultifileBlobCache::queueTask(DeferredTask&& task) {
+    std::lock_guard<std::mutex> queueLock(mWorkerMutex);
+    mTasks.emplace(std::move(task));
+    mWorkAvailableCondition.notify_one();
+}
+
+// Wait until all tasks have been completed
+void MultifileBlobCache::waitForWorkComplete() {
+    std::unique_lock<std::mutex> lock(mWorkerMutex);
+    mWorkerIdleCondition.wait(lock, [this] { return (mTasks.empty() && mWorkerThreadIdle); });
+}
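+
+// Illustrative flow (not exercised directly here): set() is expected to record the
+// pending buffer in mDeferredWrites and hand the work to the worker thread via
+// something like
+//   DeferredTask task(TaskCommand::WriteToDisk);
+//   task.initWriteToDisk(entryHash, fullPath, buffer, bufferSize);
+//   queueTask(std::move(task));
+// while finish() and the destructor drain the queue with waitForWorkComplete() and
+// then queue a TaskCommand::Exit task so processTasks() returns and mTaskThread can
+// be joined.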
+
+}; // namespace android
\ No newline at end of file
diff --git a/opengl/libs/EGL/MultifileBlobCache.h b/opengl/libs/EGL/MultifileBlobCache.h
new file mode 100644
index 0000000..dcdfe47
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache.h
@@ -0,0 +1,164 @@
+/*
+ ** Copyright 2022, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ **     http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#ifndef ANDROID_MULTIFILE_BLOB_CACHE_H
+#define ANDROID_MULTIFILE_BLOB_CACHE_H
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+
+#include <future>
+#include <map>
+#include <queue>
+#include <string>
+#include <thread>
+#include <unordered_map>
+#include <unordered_set>
+
+namespace android {
+
+struct MultifileHeader {
+    EGLsizeiANDROID keySize;
+    EGLsizeiANDROID valueSize;
+};
+
+struct MultifileEntryStats {
+    EGLsizeiANDROID valueSize;
+    size_t fileSize;
+    time_t accessTime;
+};
+
+struct MultifileHotCache {
+    int entryFd;
+    uint8_t* entryBuffer;
+    size_t entrySize;
+};
+
+enum class TaskCommand {
+    Invalid = 0,
+    WriteToDisk,
+    Exit,
+};
+
+class DeferredTask {
+public:
+    DeferredTask(TaskCommand command) : mCommand(command) {}
+
+    TaskCommand getTaskCommand() { return mCommand; }
+
+    void initWriteToDisk(uint32_t entryHash, std::string fullPath, uint8_t* buffer,
+                         size_t bufferSize) {
+        mCommand = TaskCommand::WriteToDisk;
+        mEntryHash = entryHash;
+        mFullPath = fullPath;
+        mBuffer = buffer;
+        mBufferSize = bufferSize;
+    }
+
+    uint32_t getEntryHash() { return mEntryHash; }
+    std::string& getFullPath() { return mFullPath; }
+    uint8_t* getBuffer() { return mBuffer; }
+    size_t getBufferSize() { return mBufferSize; }
+
+private:
+    TaskCommand mCommand;
+
+    // Parameters for WriteToDisk
+    uint32_t mEntryHash;
+    std::string mFullPath;
+    uint8_t* mBuffer;
+    size_t mBufferSize;
+};
+
+class MultifileBlobCache {
+public:
+    MultifileBlobCache(size_t maxTotalSize, size_t maxHotCacheSize, const std::string& baseDir);
+    ~MultifileBlobCache();
+
+    void set(const void* key, EGLsizeiANDROID keySize, const void* value,
+             EGLsizeiANDROID valueSize);
+    EGLsizeiANDROID get(const void* key, EGLsizeiANDROID keySize, void* value,
+                        EGLsizeiANDROID valueSize);
+
+    void finish();
+
+    size_t getTotalSize() const { return mTotalCacheSize; }
+    void trimCache(size_t cacheByteLimit);
+
+private:
+    void trackEntry(uint32_t entryHash, EGLsizeiANDROID valueSize, size_t fileSize,
+                    time_t accessTime);
+    bool contains(uint32_t entryHash) const;
+    bool removeEntry(uint32_t entryHash);
+    MultifileEntryStats getEntryStats(uint32_t entryHash);
+
+    size_t getFileSize(uint32_t entryHash);
+    size_t getValueSize(uint32_t entryHash);
+
+    void increaseTotalCacheSize(size_t fileSize);
+    void decreaseTotalCacheSize(size_t fileSize);
+
+    bool addToHotCache(uint32_t entryHash, int fd, uint8_t* entryBuffer, size_t entrySize);
+    bool removeFromHotCache(uint32_t entryHash);
+
+    bool applyLRU(size_t cacheLimit);
+
+    bool mInitialized;
+    std::string mMultifileDirName;
+
+    std::unordered_set<uint32_t> mEntries;
+    std::unordered_map<uint32_t, MultifileEntryStats> mEntryStats;
+    std::unordered_map<uint32_t, MultifileHotCache> mHotCache;
+
+    size_t mMaxKeySize;
+    size_t mMaxValueSize;
+    size_t mMaxTotalSize;
+    size_t mTotalCacheSize;
+    size_t mHotCacheLimit;
+    size_t mHotCacheEntryLimit;
+    size_t mHotCacheSize;
+
+    // Below are the components used to allow a deferred write
+
+    // Track whether we have pending writes for an entry
+    std::multimap<uint32_t, uint8_t*> mDeferredWrites;
+
+    // Functions to work through tasks in the queue
+    void processTasks();
+    void processTasksImpl(bool* exitThread);
+    void processTask(DeferredTask& task);
+
+    // Used by main thread to create work for the worker thread
+    void queueTask(DeferredTask&& task);
+
+    // Used by main thread to wait for worker thread to complete all outstanding work.
+    void waitForWorkComplete();
+
+    std::thread mTaskThread;
+    std::queue<DeferredTask> mTasks;
+    std::mutex mWorkerMutex;
+
+    // This condition will block the worker thread until a task is queued
+    std::condition_variable mWorkAvailableCondition;
+
+    // This condition will block the main thread while the worker thread still has tasks
+    std::condition_variable mWorkerIdleCondition;
+
+    // Tracks whether the worker thread is idle, i.e. all queued tasks have completed
+    bool mWorkerThreadIdle;
+};
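+
+// Sketch of intended usage (illustrative; egl_cache.cpp below is the real client):
+//
+//   MultifileBlobCache cache(32 * 1024 * 1024 /* total */, 8 * 1024 * 1024 /* hot */, baseDir);
+//   cache.set(key, keySize, value, valueSize);  // queues a deferred write to disk
+//   cache.get(key, keySize, out, outSize);      // served from the hot cache or a file
+//   cache.finish();                             // flushes pending writes before shutdown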
+
+}; // namespace android
+
+#endif // ANDROID_MULTIFILE_BLOB_CACHE_H
diff --git a/opengl/libs/EGL/MultifileBlobCache_test.cpp b/opengl/libs/EGL/MultifileBlobCache_test.cpp
new file mode 100644
index 0000000..1a55a4f
--- /dev/null
+++ b/opengl/libs/EGL/MultifileBlobCache_test.cpp
@@ -0,0 +1,200 @@
+/*
+ ** Copyright 2023, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ **     http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#include "MultifileBlobCache.h"
+
+#include <android-base/test_utils.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <stdio.h>
+
+#include <memory>
+
+namespace android {
+
+template <typename T>
+using sp = std::shared_ptr<T>;
+
+constexpr size_t kMaxTotalSize = 32 * 1024;
+constexpr size_t kMaxPreloadSize = 8 * 1024;
+
+constexpr size_t kMaxKeySize = kMaxPreloadSize / 4;
+constexpr size_t kMaxValueSize = kMaxPreloadSize / 2;
+
+class MultifileBlobCacheTest : public ::testing::Test {
+protected:
+    virtual void SetUp() {
+        mTempFile.reset(new TemporaryFile());
+        mMBC.reset(new MultifileBlobCache(kMaxTotalSize, kMaxPreloadSize, &mTempFile->path[0]));
+    }
+
+    virtual void TearDown() { mMBC.reset(); }
+
+    std::unique_ptr<TemporaryFile> mTempFile;
+    std::unique_ptr<MultifileBlobCache> mMBC;
+};
+
+TEST_F(MultifileBlobCacheTest, CacheSingleValueSucceeds) {
+    unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+    ASSERT_EQ('e', buf[0]);
+    ASSERT_EQ('f', buf[1]);
+    ASSERT_EQ('g', buf[2]);
+    ASSERT_EQ('h', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, CacheTwoValuesSucceeds) {
+    unsigned char buf[2] = {0xee, 0xee};
+    mMBC->set("ab", 2, "cd", 2);
+    mMBC->set("ef", 2, "gh", 2);
+    ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+    ASSERT_EQ('c', buf[0]);
+    ASSERT_EQ('d', buf[1]);
+    ASSERT_EQ(size_t(2), mMBC->get("ef", 2, buf, 2));
+    ASSERT_EQ('g', buf[0]);
+    ASSERT_EQ('h', buf[1]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetSetTwiceSucceeds) {
+    unsigned char buf[2] = {0xee, 0xee};
+    mMBC->set("ab", 2, "cd", 2);
+    ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+    ASSERT_EQ('c', buf[0]);
+    ASSERT_EQ('d', buf[1]);
+    // Use the same key, but different value
+    mMBC->set("ab", 2, "ef", 2);
+    ASSERT_EQ(size_t(2), mMBC->get("ab", 2, buf, 2));
+    ASSERT_EQ('e', buf[0]);
+    ASSERT_EQ('f', buf[1]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetOnlyWritesInsideBounds) {
+    unsigned char buf[6] = {0xee, 0xee, 0xee, 0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf + 1, 4));
+    ASSERT_EQ(0xee, buf[0]);
+    ASSERT_EQ('e', buf[1]);
+    ASSERT_EQ('f', buf[2]);
+    ASSERT_EQ('g', buf[3]);
+    ASSERT_EQ('h', buf[4]);
+    ASSERT_EQ(0xee, buf[5]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetOnlyWritesIfBufferIsLargeEnough) {
+    unsigned char buf[3] = {0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 3));
+    ASSERT_EQ(0xee, buf[0]);
+    ASSERT_EQ(0xee, buf[1]);
+    ASSERT_EQ(0xee, buf[2]);
+}
+
+TEST_F(MultifileBlobCacheTest, GetDoesntAccessNullBuffer) {
+    mMBC->set("abcd", 4, "efgh", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, nullptr, 0));
+}
+
+TEST_F(MultifileBlobCacheTest, MultipleSetsCacheLatestValue) {
+    unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    mMBC->set("abcd", 4, "ijkl", 4);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+    ASSERT_EQ('i', buf[0]);
+    ASSERT_EQ('j', buf[1]);
+    ASSERT_EQ('k', buf[2]);
+    ASSERT_EQ('l', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, SecondSetKeepsFirstValueIfTooLarge) {
+    unsigned char buf[kMaxValueSize + 1] = {0xee, 0xee, 0xee, 0xee};
+    mMBC->set("abcd", 4, "efgh", 4);
+    mMBC->set("abcd", 4, buf, kMaxValueSize + 1);
+    ASSERT_EQ(size_t(4), mMBC->get("abcd", 4, buf, 4));
+    ASSERT_EQ('e', buf[0]);
+    ASSERT_EQ('f', buf[1]);
+    ASSERT_EQ('g', buf[2]);
+    ASSERT_EQ('h', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, DoesntCacheIfKeyIsTooBig) {
+    char key[kMaxKeySize + 1];
+    unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+    for (int i = 0; i < kMaxKeySize + 1; i++) {
+        key[i] = 'a';
+    }
+    mMBC->set(key, kMaxKeySize + 1, "bbbb", 4);
+    ASSERT_EQ(size_t(0), mMBC->get(key, kMaxKeySize + 1, buf, 4));
+    ASSERT_EQ(0xee, buf[0]);
+    ASSERT_EQ(0xee, buf[1]);
+    ASSERT_EQ(0xee, buf[2]);
+    ASSERT_EQ(0xee, buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, DoesntCacheIfValueIsTooBig) {
+    char buf[kMaxValueSize + 1];
+    for (int i = 0; i < kMaxValueSize + 1; i++) {
+        buf[i] = 'b';
+    }
+    mMBC->set("abcd", 4, buf, kMaxValueSize + 1);
+    for (int i = 0; i < kMaxValueSize + 1; i++) {
+        buf[i] = 0xee;
+    }
+    ASSERT_EQ(size_t(0), mMBC->get("abcd", 4, buf, kMaxValueSize + 1));
+    for (int i = 0; i < kMaxValueSize + 1; i++) {
+        SCOPED_TRACE(i);
+        ASSERT_EQ(0xee, buf[i]);
+    }
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMaxKeySizeSucceeds) {
+    char key[kMaxKeySize];
+    unsigned char buf[4] = {0xee, 0xee, 0xee, 0xee};
+    for (int i = 0; i < kMaxKeySize; i++) {
+        key[i] = 'a';
+    }
+    mMBC->set(key, kMaxKeySize, "wxyz", 4);
+    ASSERT_EQ(size_t(4), mMBC->get(key, kMaxKeySize, buf, 4));
+    ASSERT_EQ('w', buf[0]);
+    ASSERT_EQ('x', buf[1]);
+    ASSERT_EQ('y', buf[2]);
+    ASSERT_EQ('z', buf[3]);
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMaxValueSizeSucceeds) {
+    char buf[kMaxValueSize];
+    for (int i = 0; i < kMaxValueSize; i++) {
+        buf[i] = 'b';
+    }
+    mMBC->set("abcd", 4, buf, kMaxValueSize);
+    for (int i = 0; i < kMaxValueSize; i++) {
+        buf[i] = 0xee;
+    }
+    mMBC->get("abcd", 4, buf, kMaxValueSize);
+    for (int i = 0; i < kMaxValueSize; i++) {
+        SCOPED_TRACE(i);
+        ASSERT_EQ('b', buf[i]);
+    }
+}
+
+TEST_F(MultifileBlobCacheTest, CacheMinKeyAndValueSizeSucceeds) {
+    unsigned char buf[1] = {0xee};
+    mMBC->set("x", 1, "y", 1);
+    ASSERT_EQ(size_t(1), mMBC->get("x", 1, buf, 1));
+    ASSERT_EQ('y', buf[0]);
+}
+
+} // namespace android
diff --git a/opengl/libs/EGL/egl_cache.cpp b/opengl/libs/EGL/egl_cache.cpp
index 1e8a348..b00ee33 100644
--- a/opengl/libs/EGL/egl_cache.cpp
+++ b/opengl/libs/EGL/egl_cache.cpp
@@ -14,6 +14,8 @@
  ** limitations under the License.
  */
 
+// #define LOG_NDEBUG 0
+
 #include "egl_cache.h"
 
 #include <android-base/properties.h>
@@ -25,22 +27,19 @@
 #include <thread>
 
 #include "../egl_impl.h"
-#include "egl_cache_multifile.h"
 #include "egl_display.h"
 
 // Monolithic cache size limits.
-static const size_t maxKeySize = 12 * 1024;
-static const size_t maxValueSize = 64 * 1024;
-static const size_t maxTotalSize = 32 * 1024 * 1024;
+static const size_t kMaxMonolithicKeySize = 12 * 1024;
+static const size_t kMaxMonolithicValueSize = 64 * 1024;
+static const size_t kMaxMonolithicTotalSize = 2 * 1024 * 1024;
 
 // The time in seconds to wait before saving newly inserted monolithic cache entries.
-static const unsigned int deferredSaveDelay = 4;
+static const unsigned int kDeferredMonolithicSaveDelay = 4;
 
-// Multifile cache size limit
-constexpr size_t kMultifileCacheByteLimit = 64 * 1024 * 1024;
-
-// Delay before cleaning up multifile cache entries
-static const unsigned int deferredMultifileCleanupDelaySeconds = 1;
+// Multifile cache size limits
+constexpr uint32_t kMultifileHotCacheLimit = 8 * 1024 * 1024;
+constexpr uint32_t kMultifileCacheByteLimit = 32 * 1024 * 1024;
 
 namespace android {
 
@@ -68,10 +67,7 @@
 // egl_cache_t definition
 //
 egl_cache_t::egl_cache_t()
-      : mInitialized(false),
-        mMultifileMode(false),
-        mCacheByteLimit(maxTotalSize),
-        mMultifileCleanupPending(false) {}
+      : mInitialized(false), mMultifileMode(false), mCacheByteLimit(kMaxMonolithicTotalSize) {}
 
 egl_cache_t::~egl_cache_t() {}
 
@@ -85,7 +81,7 @@
     std::lock_guard<std::mutex> lock(mMutex);
 
     egl_connection_t* const cnx = &gEGLImpl;
-    if (cnx->dso && cnx->major >= 0 && cnx->minor >= 0) {
+    if (display && cnx->dso && cnx->major >= 0 && cnx->minor >= 0) {
         const char* exts = display->disp.queryString.extensions;
         size_t bcExtLen = strlen(BC_EXT_STR);
         size_t extsLen = strlen(exts);
@@ -114,14 +110,36 @@
         }
     }
 
-    // Allow forcing monolithic cache for debug purposes
-    if (base::GetProperty("debug.egl.blobcache.multifilemode", "") == "false") {
-        ALOGD("Forcing monolithic cache due to debug.egl.blobcache.multifilemode == \"false\"");
+    // Check the device config to decide whether multifile should be used
+    if (base::GetBoolProperty("ro.egl.blobcache.multifile", false)) {
+        mMultifileMode = true;
+        ALOGV("Using multifile EGL blobcache");
+    }
+
+    // Allow forcing the mode for debug purposes
+    std::string mode = base::GetProperty("debug.egl.blobcache.multifile", "");
+    if (mode == "true") {
+        ALOGV("Forcing multifile cache due to debug.egl.blobcache.multifile == %s", mode.c_str());
+        mMultifileMode = true;
+    } else if (mode == "false") {
+        ALOGV("Forcing monolithic cache due to debug.egl.blobcache.multifile == %s", mode.c_str());
         mMultifileMode = false;
     }
 
     if (mMultifileMode) {
-        mCacheByteLimit = kMultifileCacheByteLimit;
+        mCacheByteLimit = static_cast<size_t>(
+                base::GetUintProperty<uint32_t>("ro.egl.blobcache.multifile_limit",
+                                                kMultifileCacheByteLimit));
+
+        // Check for a debug value
+        int debugCacheSize = base::GetIntProperty("debug.egl.blobcache.multifile_limit", -1);
+        if (debugCacheSize >= 0) {
+            ALOGV("Overriding cache limit %zu with %i from debug.egl.blobcache.multifile_limit",
+                  mCacheByteLimit, debugCacheSize);
+            mCacheByteLimit = debugCacheSize;
+        }
+
+        ALOGV("Using multifile EGL blobcache limit of %zu bytes", mCacheByteLimit);
     }
 
     mInitialized = true;
@@ -133,10 +151,10 @@
         mBlobCache->writeToFile();
     }
     mBlobCache = nullptr;
-    if (mMultifileMode) {
-        checkMultifileCacheSize(mCacheByteLimit);
+    if (mMultifileBlobCache) {
+        mMultifileBlobCache->finish();
     }
-    mMultifileMode = false;
+    mMultifileBlobCache = nullptr;
     mInitialized = false;
 }
 
@@ -151,20 +169,8 @@
 
     if (mInitialized) {
         if (mMultifileMode) {
-            setBlobMultifile(key, keySize, value, valueSize, mFilename);
-
-            if (!mMultifileCleanupPending) {
-                mMultifileCleanupPending = true;
-                // Kick off a thread to cull cache files below limit
-                std::thread deferredMultifileCleanupThread([this]() {
-                    sleep(deferredMultifileCleanupDelaySeconds);
-                    std::lock_guard<std::mutex> lock(mMutex);
-                    // Check the size of cache and remove entries to stay under limit
-                    checkMultifileCacheSize(mCacheByteLimit);
-                    mMultifileCleanupPending = false;
-                });
-                deferredMultifileCleanupThread.detach();
-            }
+            MultifileBlobCache* mbc = getMultifileBlobCacheLocked();
+            mbc->set(key, keySize, value, valueSize);
         } else {
             BlobCache* bc = getBlobCacheLocked();
             bc->set(key, keySize, value, valueSize);
@@ -172,7 +178,7 @@
             if (!mSavePending) {
                 mSavePending = true;
                 std::thread deferredSaveThread([this]() {
-                    sleep(deferredSaveDelay);
+                    sleep(kDeferredMonolithicSaveDelay);
                     std::lock_guard<std::mutex> lock(mMutex);
                     if (mInitialized && mBlobCache) {
                         mBlobCache->writeToFile();
@@ -196,15 +202,21 @@
 
     if (mInitialized) {
         if (mMultifileMode) {
-            return getBlobMultifile(key, keySize, value, valueSize, mFilename);
+            MultifileBlobCache* mbc = getMultifileBlobCacheLocked();
+            return mbc->get(key, keySize, value, valueSize);
         } else {
             BlobCache* bc = getBlobCacheLocked();
             return bc->get(key, keySize, value, valueSize);
         }
     }
+
     return 0;
 }
 
+void egl_cache_t::setCacheMode(EGLCacheMode cacheMode) {
+    mMultifileMode = (cacheMode == EGLCacheMode::Multifile);
+}
+
 void egl_cache_t::setCacheFilename(const char* filename) {
     std::lock_guard<std::mutex> lock(mMutex);
     mFilename = filename;
@@ -216,7 +228,7 @@
     if (!mMultifileMode) {
         // If we're not in multifile mode, ensure the cache limit is only being lowered,
         // not increasing above the hard coded platform limit
-        if (cacheByteLimit > maxTotalSize) {
+        if (cacheByteLimit > kMaxMonolithicTotalSize) {
             return;
         }
     }
@@ -226,8 +238,8 @@
 
 size_t egl_cache_t::getCacheSize() {
     std::lock_guard<std::mutex> lock(mMutex);
-    if (mMultifileMode) {
-        return getMultifileCacheSize();
+    if (mMultifileBlobCache) {
+        return mMultifileBlobCache->getTotalSize();
     }
     if (mBlobCache) {
         return mBlobCache->getSize();
@@ -237,9 +249,18 @@
 
 BlobCache* egl_cache_t::getBlobCacheLocked() {
     if (mBlobCache == nullptr) {
-        mBlobCache.reset(new FileBlobCache(maxKeySize, maxValueSize, mCacheByteLimit, mFilename));
+        mBlobCache.reset(new FileBlobCache(kMaxMonolithicKeySize, kMaxMonolithicValueSize,
+                                           mCacheByteLimit, mFilename));
     }
     return mBlobCache.get();
 }
 
+MultifileBlobCache* egl_cache_t::getMultifileBlobCacheLocked() {
+    if (mMultifileBlobCache == nullptr) {
+        mMultifileBlobCache.reset(
+                new MultifileBlobCache(mCacheByteLimit, kMultifileHotCacheLimit, mFilename));
+    }
+    return mMultifileBlobCache.get();
+}
+
 }; // namespace android
diff --git a/opengl/libs/EGL/egl_cache.h b/opengl/libs/EGL/egl_cache.h
index 2dcd803..1399368 100644
--- a/opengl/libs/EGL/egl_cache.h
+++ b/opengl/libs/EGL/egl_cache.h
@@ -25,6 +25,7 @@
 #include <string>
 
 #include "FileBlobCache.h"
+#include "MultifileBlobCache.h"
 
 namespace android {
 
@@ -32,6 +33,11 @@
 
 class EGLAPI egl_cache_t {
 public:
+    enum class EGLCacheMode {
+        Monolithic,
+        Multifile,
+    };
+
     // get returns a pointer to the singleton egl_cache_t object.  This
     // singleton object will never be destroyed.
     static egl_cache_t* get();
@@ -64,6 +70,9 @@
     // cache contents from one program invocation to another.
     void setCacheFilename(const char* filename);
 
+    // Allow setting monolithic or multifile modes
+    void setCacheMode(EGLCacheMode cacheMode);
+
     // Allow the fixed cache limit to be overridden
     void setCacheLimit(int64_t cacheByteLimit);
 
@@ -85,6 +94,9 @@
     // possible.
     BlobCache* getBlobCacheLocked();
 
+    // Get or create the multifile blobcache
+    MultifileBlobCache* getMultifileBlobCacheLocked();
+
     // mInitialized indicates whether the egl_cache_t is in the initialized
     // state.  It is initialized to false at construction time, and gets set to
     // true when initialize is called.  It is set back to false when terminate
@@ -98,6 +110,9 @@
     // first time it's needed.
     std::unique_ptr<FileBlobCache> mBlobCache;
 
+    // The multifile version of blobcache allowing larger contents to be stored
+    std::unique_ptr<MultifileBlobCache> mMultifileBlobCache;
+
     // mFilename is the name of the file for storing cache contents in between
     // program invocations.  It is initialized to an empty string at
     // construction time, and can be set with the setCacheFilename method.  An
@@ -123,11 +138,7 @@
     bool mMultifileMode;
 
     // Cache limit
-    int64_t mCacheByteLimit;
-
-    // Whether we've kicked off a side thread that will check the multifile
-    // cache size and remove entries if needed.
-    bool mMultifileCleanupPending;
+    size_t mCacheByteLimit;
 };
 
 }; // namespace android
diff --git a/opengl/libs/EGL/egl_cache_multifile.cpp b/opengl/libs/EGL/egl_cache_multifile.cpp
deleted file mode 100644
index 48e557f..0000000
--- a/opengl/libs/EGL/egl_cache_multifile.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- ** Copyright 2022, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- **     http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-// #define LOG_NDEBUG 0
-
-#include "egl_cache_multifile.h"
-
-#include <android-base/properties.h>
-#include <dirent.h>
-#include <fcntl.h>
-#include <inttypes.h>
-#include <log/log.h>
-#include <stdio.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <utime.h>
-
-#include <algorithm>
-#include <chrono>
-#include <fstream>
-#include <limits>
-#include <locale>
-#include <map>
-#include <sstream>
-#include <unordered_map>
-
-#include <utils/JenkinsHash.h>
-
-static std::string multifileDirName = "";
-
-using namespace std::literals;
-
-namespace {
-
-// Create a directory for tracking multiple files
-void setupMultifile(const std::string& baseDir) {
-    // If we've already set up the multifile dir in this base directory, we're done
-    if (!multifileDirName.empty() && multifileDirName.find(baseDir) != std::string::npos) {
-        return;
-    }
-
-    // Otherwise, create it
-    multifileDirName = baseDir + ".multifile";
-    if (mkdir(multifileDirName.c_str(), 0755) != 0 && (errno != EEXIST)) {
-        ALOGW("Unable to create directory (%s), errno (%i)", multifileDirName.c_str(), errno);
-    }
-}
-
-// Create a filename that is based on the hash of the key
-std::string getCacheEntryFilename(const void* key, EGLsizeiANDROID keySize,
-                                  const std::string& baseDir) {
-    // Hash the key into a string
-    std::stringstream keyName;
-    keyName << android::JenkinsHashMixBytes(0, static_cast<const uint8_t*>(key), keySize);
-
-    // Build a filename using dir and hash
-    return baseDir + "/" + keyName.str();
-}
-
-// Determine file age based on stat modification time
-// Newer files have a higher age (time since epoch)
-time_t getFileAge(const std::string& filePath) {
-    struct stat st;
-    if (stat(filePath.c_str(), &st) == 0) {
-        ALOGD("getFileAge returning %" PRId64 " for file age", static_cast<uint64_t>(st.st_mtime));
-        return st.st_mtime;
-    } else {
-        ALOGW("Failed to stat %s", filePath.c_str());
-        return 0;
-    }
-}
-
-size_t getFileSize(const std::string& filePath) {
-    struct stat st;
-    if (stat(filePath.c_str(), &st) != 0) {
-        ALOGE("Unable to stat %s", filePath.c_str());
-        return 0;
-    }
-    return st.st_size;
-}
-
-// Walk through directory entries and track age and size
-// Then iterate through the entries, oldest first, and remove them until under the limit.
-// This will need to be updated if we move to a multilevel cache dir.
-bool applyLRU(size_t cacheLimit) {
-    // Build a multimap of files indexed by age.
-    // They will be automatically sorted smallest (oldest) to largest (newest)
-    std::multimap<time_t, std::string> agesToFiles;
-
-    // Map files to sizes
-    std::unordered_map<std::string, size_t> filesToSizes;
-
-    size_t totalCacheSize = 0;
-
-    DIR* dir;
-    struct dirent* entry;
-    if ((dir = opendir(multifileDirName.c_str())) != nullptr) {
-        while ((entry = readdir(dir)) != nullptr) {
-            if (entry->d_name == "."s || entry->d_name == ".."s) {
-                continue;
-            }
-
-            // Look up each file age
-            std::string fullPath = multifileDirName + "/" + entry->d_name;
-            time_t fileAge = getFileAge(fullPath);
-
-            // Track the files, sorted by age
-            agesToFiles.insert(std::make_pair(fileAge, fullPath));
-
-            // Also track the size so we know how much room we have freed
-            size_t fileSize = getFileSize(fullPath);
-            filesToSizes[fullPath] = fileSize;
-            totalCacheSize += fileSize;
-        }
-        closedir(dir);
-    } else {
-        ALOGE("Unable to open filename: %s", multifileDirName.c_str());
-        return false;
-    }
-
-    if (totalCacheSize <= cacheLimit) {
-        // If LRU was called on a sufficiently small cache, no need to remove anything
-        return true;
-    }
-
-    // Walk through the map of files until we're under the cache size
-    for (const auto& cacheEntryIter : agesToFiles) {
-        time_t entryAge = cacheEntryIter.first;
-        const std::string entryPath = cacheEntryIter.second;
-
-        ALOGD("Removing %s with age %ld", entryPath.c_str(), entryAge);
-        if (std::remove(entryPath.c_str()) != 0) {
-            ALOGE("Error removing %s: %s", entryPath.c_str(), std::strerror(errno));
-            return false;
-        }
-
-        totalCacheSize -= filesToSizes[entryPath];
-        if (totalCacheSize <= cacheLimit) {
-            // Success
-            ALOGV("Reduced cache to %zu", totalCacheSize);
-            return true;
-        } else {
-            ALOGD("Cache size is still too large (%zu), removing more files", totalCacheSize);
-        }
-    }
-
-    // Should never reach this return
-    return false;
-}
-
-} // namespace
-
-namespace android {
-
-void setBlobMultifile(const void* key, EGLsizeiANDROID keySize, const void* value,
-                      EGLsizeiANDROID valueSize, const std::string& baseDir) {
-    if (baseDir.empty()) {
-        return;
-    }
-
-    setupMultifile(baseDir);
-    std::string filename = getCacheEntryFilename(key, keySize, multifileDirName);
-
-    ALOGD("Attempting to open filename for set: %s", filename.c_str());
-    std::ofstream outfile(filename, std::ofstream::binary);
-    if (outfile.fail()) {
-        ALOGW("Unable to open filename: %s", filename.c_str());
-        return;
-    }
-
-    // First write the key
-    outfile.write(static_cast<const char*>(key), keySize);
-    if (outfile.bad()) {
-        ALOGW("Unable to write key to filename: %s", filename.c_str());
-        outfile.close();
-        return;
-    }
-    ALOGD("Wrote %i bytes to out file for key", static_cast<int>(outfile.tellp()));
-
-    // Then write the value
-    outfile.write(static_cast<const char*>(value), valueSize);
-    if (outfile.bad()) {
-        ALOGW("Unable to write value to filename: %s", filename.c_str());
-        outfile.close();
-        return;
-    }
-    ALOGD("Wrote %i bytes to out file for full entry", static_cast<int>(outfile.tellp()));
-
-    outfile.close();
-}
-
-EGLsizeiANDROID getBlobMultifile(const void* key, EGLsizeiANDROID keySize, void* value,
-                                 EGLsizeiANDROID valueSize, const std::string& baseDir) {
-    if (baseDir.empty()) {
-        return 0;
-    }
-
-    setupMultifile(baseDir);
-    std::string filename = getCacheEntryFilename(key, keySize, multifileDirName);
-
-    // Open the hashed filename path
-    ALOGD("Attempting to open filename for get: %s", filename.c_str());
-    int fd = open(filename.c_str(), O_RDONLY);
-
-    // File doesn't exist, this is a MISS, return zero bytes read
-    if (fd == -1) {
-        ALOGD("Cache MISS - failed to open filename: %s, error: %s", filename.c_str(),
-              std::strerror(errno));
-        return 0;
-    }
-
-    ALOGD("Cache HIT - opened filename: %s", filename.c_str());
-
-    // Get the size of the file
-    size_t entrySize = getFileSize(filename);
-    if (keySize > entrySize) {
-        ALOGW("keySize (%lu) is larger than entrySize (%zu). This is a hash collision or modified "
-              "file",
-              keySize, entrySize);
-        close(fd);
-        return 0;
-    }
-
-    // Memory map the file
-    uint8_t* cacheEntry =
-            reinterpret_cast<uint8_t*>(mmap(nullptr, entrySize, PROT_READ, MAP_PRIVATE, fd, 0));
-    if (cacheEntry == MAP_FAILED) {
-        ALOGE("Failed to mmap cacheEntry, error: %s", std::strerror(errno));
-        close(fd);
-        return 0;
-    }
-
-    // Compare the incoming key with our stored version (the beginning of the entry)
-    int compare = memcmp(cacheEntry, key, keySize);
-    if (compare != 0) {
-        ALOGW("Cached key and new key do not match! This is a hash collision or modified file");
-        munmap(cacheEntry, entrySize);
-        close(fd);
-        return 0;
-    }
-
-    // Keys matched, so remaining cache is value size
-    size_t cachedValueSize = entrySize - keySize;
-
-    // Return actual value size if valueSize is not large enough
-    if (cachedValueSize > valueSize) {
-        ALOGD("Skipping file read, not enough room provided (valueSize): %lu, "
-              "returning required space as %zu",
-              valueSize, cachedValueSize);
-        munmap(cacheEntry, entrySize);
-        close(fd);
-        return cachedValueSize;
-    }
-
-    // Remaining entry following the key is the value
-    uint8_t* cachedValue = cacheEntry + keySize;
-    memcpy(value, cachedValue, cachedValueSize);
-    munmap(cacheEntry, entrySize);
-    close(fd);
-
-    ALOGD("Read %zu bytes from %s", cachedValueSize, filename.c_str());
-    return cachedValueSize;
-}
-
-// Walk through the files in our flat directory, checking the size of each one.
-// Return the total size of normal files in the directory.
-// This will need to be updated if we move to a multilevel cache dir.
-size_t getMultifileCacheSize() {
-    if (multifileDirName.empty()) {
-        return 0;
-    }
-
-    DIR* dir;
-    struct dirent* entry;
-    size_t size = 0;
-
-    ALOGD("Using %s as the multifile cache dir ", multifileDirName.c_str());
-
-    if ((dir = opendir(multifileDirName.c_str())) != nullptr) {
-        while ((entry = readdir(dir)) != nullptr) {
-            if (entry->d_name == "."s || entry->d_name == ".."s) {
-                continue;
-            }
-
-            // Add up the size of all files in the dir
-            std::string fullPath = multifileDirName + "/" + entry->d_name;
-            size += getFileSize(fullPath);
-        }
-        closedir(dir);
-    } else {
-        ALOGW("Unable to open filename: %s", multifileDirName.c_str());
-        return 0;
-    }
-
-    return size;
-}
-
-// When removing files, what fraction of the overall limit should be reached when removing files
-// A divisor of two will decrease the cache to 50%, four to 25% and so on
-constexpr uint32_t kCacheLimitDivisor = 2;
-
-// Calculate the cache size and remove old entries until under the limit
-void checkMultifileCacheSize(size_t cacheByteLimit) {
-    // Start with the value provided by egl_cache
-    size_t limit = cacheByteLimit;
-
-    // Check for a debug value
-    int debugCacheSize = base::GetIntProperty("debug.egl.blobcache.bytelimit", -1);
-    if (debugCacheSize >= 0) {
-        ALOGV("Overriding cache limit %zu with %i from debug.egl.blobcache.bytelimit", limit,
-              debugCacheSize);
-        limit = debugCacheSize;
-    }
-
-    // Tally up the initial amount of cache in use
-    size_t size = getMultifileCacheSize();
-    ALOGD("Multifile cache dir size: %zu", size);
-
-    // If size is larger than the threshold, remove files using LRU
-    if (size > limit) {
-        ALOGV("Multifile cache size is larger than %zu, removing old entries", cacheByteLimit);
-        if (!applyLRU(limit / kCacheLimitDivisor)) {
-            ALOGE("Error when clearing multifile shader cache");
-            return;
-        }
-    }
-    ALOGD("Multifile cache size after reduction: %zu", getMultifileCacheSize());
-}
-
-}; // namespace android
\ No newline at end of file
diff --git a/opengl/libs/EGL/egl_cache_multifile.h b/opengl/libs/EGL/egl_cache_multifile.h
deleted file mode 100644
index ee5fe81..0000000
--- a/opengl/libs/EGL/egl_cache_multifile.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- ** Copyright 2022, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- **     http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-#ifndef ANDROID_EGL_CACHE_MULTIFILE_H
-#define ANDROID_EGL_CACHE_MULTIFILE_H
-
-#include <EGL/egl.h>
-#include <EGL/eglext.h>
-
-#include <string>
-
-namespace android {
-
-void setBlobMultifile(const void* key, EGLsizeiANDROID keySize, const void* value,
-                      EGLsizeiANDROID valueSize, const std::string& baseDir);
-EGLsizeiANDROID getBlobMultifile(const void* key, EGLsizeiANDROID keySize, void* value,
-                                 EGLsizeiANDROID valueSize, const std::string& baseDir);
-size_t getMultifileCacheSize();
-void checkMultifileCacheSize(size_t cacheByteLimit);
-
-}; // namespace android
-
-#endif // ANDROID_EGL_CACHE_MULTIFILE_H
diff --git a/opengl/tests/EGLTest/egl_cache_test.cpp b/opengl/tests/EGLTest/egl_cache_test.cpp
index 265bec4..32e408c 100644
--- a/opengl/tests/EGLTest/egl_cache_test.cpp
+++ b/opengl/tests/EGLTest/egl_cache_test.cpp
@@ -24,7 +24,7 @@
 #include <android-base/test_utils.h>
 
 #include "egl_cache.h"
-#include "egl_cache_multifile.h"
+#include "MultifileBlobCache.h"
 #include "egl_display.h"
 
 #include <memory>
@@ -33,12 +33,16 @@
 
 namespace android {
 
-class EGLCacheTest : public ::testing::Test {
+class EGLCacheTest : public ::testing::TestWithParam<egl_cache_t::EGLCacheMode> {
 protected:
     virtual void SetUp() {
-        mCache = egl_cache_t::get();
+        // Terminate to clean up any previous cache in this process
+        mCache->terminate();
+
         mTempFile.reset(new TemporaryFile());
         mCache->setCacheFilename(&mTempFile->path[0]);
+        mCache->setCacheLimit(1024);
+        mCache->setCacheMode(mCacheMode);
     }
 
     virtual void TearDown() {
@@ -49,11 +53,12 @@
 
     std::string getCachefileName();
 
-    egl_cache_t* mCache;
+    egl_cache_t* mCache = egl_cache_t::get();
     std::unique_ptr<TemporaryFile> mTempFile;
+    egl_cache_t::EGLCacheMode mCacheMode = GetParam();
 };
 
-TEST_F(EGLCacheTest, UninitializedCacheAlwaysMisses) {
+TEST_P(EGLCacheTest, UninitializedCacheAlwaysMisses) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->setBlob("abcd", 4, "efgh", 4);
     ASSERT_EQ(0, mCache->getBlob("abcd", 4, buf, 4));
@@ -63,7 +68,7 @@
     ASSERT_EQ(0xee, buf[3]);
 }
 
-TEST_F(EGLCacheTest, InitializedCacheAlwaysHits) {
+TEST_P(EGLCacheTest, InitializedCacheAlwaysHits) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
     mCache->setBlob("abcd", 4, "efgh", 4);
@@ -74,7 +79,7 @@
     ASSERT_EQ('h', buf[3]);
 }
 
-TEST_F(EGLCacheTest, TerminatedCacheAlwaysMisses) {
+TEST_P(EGLCacheTest, TerminatedCacheAlwaysMisses) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
     mCache->setBlob("abcd", 4, "efgh", 4);
@@ -86,7 +91,7 @@
     ASSERT_EQ(0xee, buf[3]);
 }
 
-TEST_F(EGLCacheTest, ReinitializedCacheContainsValues) {
+TEST_P(EGLCacheTest, ReinitializedCacheContainsValues) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
     mCache->setBlob("abcd", 4, "efgh", 4);
@@ -101,12 +106,12 @@
 
 std::string EGLCacheTest::getCachefileName() {
     // Return the monolithic filename unless we find the multifile dir
-    std::string cachefileName = &mTempFile->path[0];
-    std::string multifileDirName = cachefileName + ".multifile";
+    std::string cachePath = &mTempFile->path[0];
+    std::string multifileDirName = cachePath + ".multifile";
+    std::string cachefileName = "";
 
     struct stat info;
     if (stat(multifileDirName.c_str(), &info) == 0) {
-
         // Ensure we only have one file to manage
         int realFileCount = 0;
 
@@ -121,6 +126,8 @@
                 cachefileName = multifileDirName + "/" + entry->d_name;
                 realFileCount++;
             }
+        } else {
+            printf("Unable to open %s, error: %s\n", multifileDirName.c_str(), std::strerror(errno));
         }
 
         if (realFileCount != 1) {
@@ -128,14 +135,18 @@
             // violates test assumptions
             cachefileName = "";
         }
+    } else {
+        printf("Unable to stat %s, error: %s\n", multifileDirName.c_str(), std::strerror(errno));
     }
 
     return cachefileName;
 }
 
-TEST_F(EGLCacheTest, ModifiedCacheMisses) {
-    // Turn this back on if multifile becomes the default
-    GTEST_SKIP() << "Skipping test designed for multifile, see b/263574392 and b/246966894";
+TEST_P(EGLCacheTest, ModifiedCacheMisses) {
+    // Skip if not in multifile mode
+    if (mCacheMode == egl_cache_t::EGLCacheMode::Monolithic) {
+        GTEST_SKIP() << "Skipping test designed for multifile";
+    }
 
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
@@ -147,13 +158,13 @@
     ASSERT_EQ('g', buf[2]);
     ASSERT_EQ('h', buf[3]);
 
+    // Ensure the cache file is written to disk
+    mCache->terminate();
+
     // Depending on the cache mode, the file will be in different locations
     std::string cachefileName = getCachefileName();
     ASSERT_TRUE(cachefileName.length() > 0);
 
-    // Ensure the cache file is written to disk
-    mCache->terminate();
-
     // Stomp on the beginning of the cache file, breaking the key match
     const long stomp = 0xbadf00d;
     FILE *file = fopen(cachefileName.c_str(), "w");
@@ -164,14 +175,15 @@
     // Ensure no cache hit
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
     uint8_t buf2[4] = { 0xee, 0xee, 0xee, 0xee };
-    ASSERT_EQ(0, mCache->getBlob("abcd", 4, buf2, 4));
+    // getBlob may return junk for required size, but should not return a cache hit
+    mCache->getBlob("abcd", 4, buf2, 4);
     ASSERT_EQ(0xee, buf2[0]);
     ASSERT_EQ(0xee, buf2[1]);
     ASSERT_EQ(0xee, buf2[2]);
     ASSERT_EQ(0xee, buf2[3]);
 }
 
-TEST_F(EGLCacheTest, TerminatedCacheBelowCacheLimit) {
+TEST_P(EGLCacheTest, TerminatedCacheBelowCacheLimit) {
     uint8_t buf[4] = { 0xee, 0xee, 0xee, 0xee };
     mCache->initialize(egl_display_t::get(EGL_DEFAULT_DISPLAY));
 
@@ -204,4 +216,6 @@
     ASSERT_LE(mCache->getCacheSize(), 4);
 }
 
+INSTANTIATE_TEST_CASE_P(MonolithicCacheTests, EGLCacheTest, ::testing::Values(egl_cache_t::EGLCacheMode::Monolithic));
+INSTANTIATE_TEST_CASE_P(MultifileCacheTests, EGLCacheTest, ::testing::Values(egl_cache_t::EGLCacheMode::Multifile));
 }
diff --git a/services/inputflinger/TEST_MAPPING b/services/inputflinger/TEST_MAPPING
index cacf30b..2a3dba7 100644
--- a/services/inputflinger/TEST_MAPPING
+++ b/services/inputflinger/TEST_MAPPING
@@ -42,9 +42,11 @@
       "options": [
         {
           "include-filter": "android.view.cts.input",
+          "include-filter": "android.view.cts.HoverTest",
           "include-filter": "android.view.cts.MotionEventTest",
           "include-filter": "android.view.cts.PointerCaptureTest",
           "include-filter": "android.view.cts.TooltipTest",
+          "include-filter": "android.view.cts.VelocityTrackerTest",
           "include-filter": "android.view.cts.VerifyInputEventTest"
         }
       ]
@@ -131,9 +133,12 @@
       "name": "CtsViewTestCases",
       "options": [
         {
+          "include-filter": "android.view.cts.input",
+          "include-filter": "android.view.cts.HoverTest",
           "include-filter": "android.view.cts.MotionEventTest",
           "include-filter": "android.view.cts.PointerCaptureTest",
           "include-filter": "android.view.cts.TooltipTest",
+          "include-filter": "android.view.cts.VelocityTrackerTest",
           "include-filter": "android.view.cts.VerifyInputEventTest"
         }
       ]
diff --git a/services/inputflinger/include/InputReaderBase.h b/services/inputflinger/include/InputReaderBase.h
index 2173117..841c914 100644
--- a/services/inputflinger/include/InputReaderBase.h
+++ b/services/inputflinger/include/InputReaderBase.h
@@ -333,6 +333,9 @@
     // stylus button state changes are reported through motion events.
     bool stylusButtonMotionEventsEnabled;
 
+    // True if a pointer icon should be shown for direct stylus pointers.
+    bool stylusPointerIconEnabled;
+
     InputReaderConfiguration()
           : virtualKeyQuietTime(0),
             pointerVelocityControlParameters(1.0f, 500.0f, 3000.0f,
@@ -358,7 +361,8 @@
             touchpadNaturalScrollingEnabled(true),
             touchpadTapToClickEnabled(true),
             touchpadRightClickZoneEnabled(false),
-            stylusButtonMotionEventsEnabled(true) {}
+            stylusButtonMotionEventsEnabled(true),
+            stylusPointerIconEnabled(false) {}
 
     static std::string changesToString(uint32_t changes);
 
diff --git a/services/inputflinger/include/PointerControllerInterface.h b/services/inputflinger/include/PointerControllerInterface.h
index 7e0c1c7..9dbdd5a 100644
--- a/services/inputflinger/include/PointerControllerInterface.h
+++ b/services/inputflinger/include/PointerControllerInterface.h
@@ -79,8 +79,10 @@
         POINTER,
         // Show spots and a spot anchor in place of the mouse pointer.
         SPOT,
+        // Show the stylus hover pointer.
+        STYLUS_HOVER,
 
-        ftl_last = SPOT,
+        ftl_last = STYLUS_HOVER,
     };
 
     /* Sets the mode of the pointer controller. */
diff --git a/services/inputflinger/reader/mapper/TouchInputMapper.cpp b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
index d415854..b789156 100644
--- a/services/inputflinger/reader/mapper/TouchInputMapper.cpp
+++ b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
@@ -977,12 +977,18 @@
         mOrientedRanges.clear();
     }
 
-    // Create pointer controller if needed, and keep it around if Pointer Capture is enabled to
-    // preserve the cursor position.
-    if (mDeviceMode == DeviceMode::POINTER ||
-        (mDeviceMode == DeviceMode::DIRECT && mConfig.showTouches) ||
-        (mParameters.deviceType == Parameters::DeviceType::POINTER &&
-         mConfig.pointerCaptureRequest.enable)) {
+    // Create and preserve the pointer controller in the following cases:
+    const bool isPointerControllerNeeded =
+            // - when the device is in pointer mode, to show the mouse cursor;
+            (mDeviceMode == DeviceMode::POINTER) ||
+            // - when pointer capture is enabled, to preserve the mouse cursor position;
+            (mParameters.deviceType == Parameters::DeviceType::POINTER &&
+             mConfig.pointerCaptureRequest.enable) ||
+            // - when we should be showing touches;
+            (mDeviceMode == DeviceMode::DIRECT && mConfig.showTouches) ||
+            // - when we should be showing a pointer icon for direct styluses.
+            (mDeviceMode == DeviceMode::DIRECT && mConfig.stylusPointerIconEnabled && hasStylus());
+    if (isPointerControllerNeeded) {
         if (mPointerController == nullptr) {
             mPointerController = getContext()->getPointerController(getDeviceId());
         }
@@ -3650,6 +3656,14 @@
     return out;
 }
 
+static bool isStylusEvent(uint32_t source, int32_t action, const PointerProperties* properties) {
+    if (!isFromSource(source, AINPUT_SOURCE_STYLUS)) {
+        return false;
+    }
+    const auto actionIndex = action >> AMOTION_EVENT_ACTION_POINTER_INDEX_SHIFT;
+    return isStylusToolType(properties[actionIndex].toolType);
+}
+
 NotifyMotionArgs TouchInputMapper::dispatchMotion(
         nsecs_t when, nsecs_t readTime, uint32_t policyFlags, uint32_t source, int32_t action,
         int32_t actionButton, int32_t flags, int32_t metaState, int32_t buttonState,
@@ -3691,12 +3705,35 @@
             ALOG_ASSERT(false);
         }
     }
+
+    const int32_t displayId = getAssociatedDisplayId().value_or(ADISPLAY_ID_NONE);
+    const bool showDirectStylusPointer = mConfig.stylusPointerIconEnabled &&
+            mDeviceMode == DeviceMode::DIRECT && isStylusEvent(source, action, pointerProperties) &&
+            mPointerController && displayId != ADISPLAY_ID_NONE &&
+            displayId == mPointerController->getDisplayId();
+    if (showDirectStylusPointer) {
+        switch (action & AMOTION_EVENT_ACTION_MASK) {
+            case AMOTION_EVENT_ACTION_HOVER_ENTER:
+            case AMOTION_EVENT_ACTION_HOVER_MOVE:
+                mPointerController->setPresentation(
+                        PointerControllerInterface::Presentation::STYLUS_HOVER);
+                mPointerController
+                        ->setPosition(mCurrentCookedState.cookedPointerData.pointerCoords[0].getX(),
+                                      mCurrentCookedState.cookedPointerData.pointerCoords[0]
+                                              .getY());
+                mPointerController->unfade(PointerControllerInterface::Transition::IMMEDIATE);
+                break;
+            case AMOTION_EVENT_ACTION_HOVER_EXIT:
+                mPointerController->fade(PointerControllerInterface::Transition::IMMEDIATE);
+                break;
+        }
+    }
+
     float xCursorPosition = AMOTION_EVENT_INVALID_CURSOR_POSITION;
     float yCursorPosition = AMOTION_EVENT_INVALID_CURSOR_POSITION;
     if (mDeviceMode == DeviceMode::POINTER) {
         mPointerController->getPosition(&xCursorPosition, &yCursorPosition);
     }
-    const int32_t displayId = getAssociatedDisplayId().value_or(ADISPLAY_ID_NONE);
     const int32_t deviceId = getDeviceId();
     std::vector<TouchVideoFrame> frames = getDeviceContext().getVideoFrames();
     std::for_each(frames.begin(), frames.end(),
diff --git a/services/inputflinger/tests/FakeInputReaderPolicy.cpp b/services/inputflinger/tests/FakeInputReaderPolicy.cpp
index bb8a30e..30c1719 100644
--- a/services/inputflinger/tests/FakeInputReaderPolicy.cpp
+++ b/services/inputflinger/tests/FakeInputReaderPolicy.cpp
@@ -205,6 +205,10 @@
     mConfig.stylusButtonMotionEventsEnabled = enabled;
 }
 
+void FakeInputReaderPolicy::setStylusPointerIconEnabled(bool enabled) {
+    mConfig.stylusPointerIconEnabled = enabled;
+}
+
 void FakeInputReaderPolicy::getReaderConfiguration(InputReaderConfiguration* outConfig) {
     *outConfig = mConfig;
 }
diff --git a/services/inputflinger/tests/FakeInputReaderPolicy.h b/services/inputflinger/tests/FakeInputReaderPolicy.h
index 9ec3217..28ac505 100644
--- a/services/inputflinger/tests/FakeInputReaderPolicy.h
+++ b/services/inputflinger/tests/FakeInputReaderPolicy.h
@@ -76,6 +76,7 @@
     float getPointerGestureZoomSpeedRatio();
     void setVelocityControlParams(const VelocityControlParameters& params);
     void setStylusButtonMotionEventsEnabled(bool enabled);
+    void setStylusPointerIconEnabled(bool enabled);
 
 private:
     void getReaderConfiguration(InputReaderConfiguration* outConfig) override;
diff --git a/services/inputflinger/tests/FakePointerController.cpp b/services/inputflinger/tests/FakePointerController.cpp
index ab7879f..28dad95 100644
--- a/services/inputflinger/tests/FakePointerController.cpp
+++ b/services/inputflinger/tests/FakePointerController.cpp
@@ -65,6 +65,10 @@
     ASSERT_NEAR(y, actualY, 1);
 }
 
+bool FakePointerController::isPointerShown() {
+    return mIsPointerShown;
+}
+
 bool FakePointerController::getBounds(float* outMinX, float* outMinY, float* outMaxX,
                                       float* outMaxY) const {
     *outMinX = mMinX;
@@ -83,6 +87,13 @@
     if (mY > mMaxY) mY = mMaxY;
 }
 
+void FakePointerController::fade(Transition) {
+    mIsPointerShown = false;
+}
+void FakePointerController::unfade(Transition) {
+    mIsPointerShown = true;
+}
+
 void FakePointerController::setSpots(const PointerCoords*, const uint32_t*, BitSet32 spotIdBits,
                                      int32_t displayId) {
     std::vector<int32_t> newSpots;
diff --git a/services/inputflinger/tests/FakePointerController.h b/services/inputflinger/tests/FakePointerController.h
index d10cbcd..dd56e65 100644
--- a/services/inputflinger/tests/FakePointerController.h
+++ b/services/inputflinger/tests/FakePointerController.h
@@ -39,12 +39,13 @@
     void setDisplayViewport(const DisplayViewport& viewport) override;
 
     void assertPosition(float x, float y);
+    bool isPointerShown();
 
 private:
     bool getBounds(float* outMinX, float* outMinY, float* outMaxX, float* outMaxY) const override;
     void move(float deltaX, float deltaY) override;
-    void fade(Transition) override {}
-    void unfade(Transition) override {}
+    void fade(Transition) override;
+    void unfade(Transition) override;
     void setPresentation(Presentation) override {}
     void setSpots(const PointerCoords*, const uint32_t*, BitSet32 spotIdBits,
                   int32_t displayId) override;
@@ -55,6 +56,7 @@
     float mX{0}, mY{0};
     int32_t mButtonState{0};
     int32_t mDisplayId{ADISPLAY_ID_DEFAULT};
+    bool mIsPointerShown{false};
 
     std::map<int32_t, std::vector<int32_t>> mSpotsByDisplay;
 };
diff --git a/services/inputflinger/tests/InputReader_test.cpp b/services/inputflinger/tests/InputReader_test.cpp
index e1c54e9..1b04375 100644
--- a/services/inputflinger/tests/InputReader_test.cpp
+++ b/services/inputflinger/tests/InputReader_test.cpp
@@ -6678,6 +6678,52 @@
     ASSERT_EQ(AINPUT_SOURCE_TOUCH_NAVIGATION, mapper.getSources());
 }
 
+TEST_F(SingleTouchInputMapperTest, Process_WhenConfigEnabled_ShouldShowDirectStylusPointer) {
+    std::shared_ptr<FakePointerController> fakePointerController =
+            std::make_shared<FakePointerController>();
+    addConfigurationProperty("touch.deviceType", "touchScreen");
+    prepareDisplay(ui::ROTATION_0);
+    prepareButtons();
+    prepareAxes(POSITION);
+    mFakeEventHub->addKey(EVENTHUB_ID, BTN_TOOL_PEN, 0, AKEYCODE_UNKNOWN, 0);
+    mFakePolicy->setPointerController(fakePointerController);
+    mFakePolicy->setStylusPointerIconEnabled(true);
+    SingleTouchInputMapper& mapper = addMapperAndConfigure<SingleTouchInputMapper>();
+
+    processKey(mapper, BTN_TOOL_PEN, 1);
+    processMove(mapper, 100, 200);
+    processSync(mapper);
+    ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(
+            AllOf(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_ENTER),
+                  WithToolType(AMOTION_EVENT_TOOL_TYPE_STYLUS),
+                  WithPointerCoords(0, toDisplayX(100), toDisplayY(200)))));
+    ASSERT_TRUE(fakePointerController->isPointerShown());
+    ASSERT_NO_FATAL_FAILURE(
+            fakePointerController->assertPosition(toDisplayX(100), toDisplayY(200)));
+}
+
+TEST_F(SingleTouchInputMapperTest, Process_WhenConfigDisabled_ShouldNotShowDirectStylusPointer) {
+    std::shared_ptr<FakePointerController> fakePointerController =
+            std::make_shared<FakePointerController>();
+    addConfigurationProperty("touch.deviceType", "touchScreen");
+    prepareDisplay(ui::ROTATION_0);
+    prepareButtons();
+    prepareAxes(POSITION);
+    mFakeEventHub->addKey(EVENTHUB_ID, BTN_TOOL_PEN, 0, AKEYCODE_UNKNOWN, 0);
+    mFakePolicy->setPointerController(fakePointerController);
+    mFakePolicy->setStylusPointerIconEnabled(false);
+    SingleTouchInputMapper& mapper = addMapperAndConfigure<SingleTouchInputMapper>();
+
+    processKey(mapper, BTN_TOOL_PEN, 1);
+    processMove(mapper, 100, 200);
+    processSync(mapper);
+    ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(
+            AllOf(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_ENTER),
+                  WithToolType(AMOTION_EVENT_TOOL_TYPE_STYLUS),
+                  WithPointerCoords(0, toDisplayX(100), toDisplayY(200)))));
+    ASSERT_FALSE(fakePointerController->isPointerShown());
+}
+
 // --- TouchDisplayProjectionTest ---
 
 class TouchDisplayProjectionTest : public SingleTouchInputMapperTest {
@@ -9757,6 +9803,58 @@
                   WithToolType(AMOTION_EVENT_TOOL_TYPE_STYLUS))));
 }
 
+TEST_F(MultiTouchInputMapperTest, Process_WhenConfigEnabled_ShouldShowDirectStylusPointer) {
+    addConfigurationProperty("touch.deviceType", "touchScreen");
+    prepareDisplay(ui::ROTATION_0);
+    prepareAxes(POSITION | ID | SLOT | TOOL_TYPE | PRESSURE);
+    // Add BTN_TOOL_PEN to statically show stylus support, since using ABS_MT_TOOL_TYPE can only
+    // indicate stylus presence dynamically.
+    mFakeEventHub->addKey(EVENTHUB_ID, BTN_TOOL_PEN, 0, AKEYCODE_UNKNOWN, 0);
+    std::shared_ptr<FakePointerController> fakePointerController =
+            std::make_shared<FakePointerController>();
+    mFakePolicy->setPointerController(fakePointerController);
+    mFakePolicy->setStylusPointerIconEnabled(true);
+    MultiTouchInputMapper& mapper = addMapperAndConfigure<MultiTouchInputMapper>();
+
+    processId(mapper, FIRST_TRACKING_ID);
+    processPressure(mapper, RAW_PRESSURE_MIN);
+    processPosition(mapper, 100, 200);
+    processToolType(mapper, MT_TOOL_PEN);
+    processSync(mapper);
+    ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(
+            AllOf(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_ENTER),
+                  WithToolType(AMOTION_EVENT_TOOL_TYPE_STYLUS),
+                  WithPointerCoords(0, toDisplayX(100), toDisplayY(200)))));
+    ASSERT_TRUE(fakePointerController->isPointerShown());
+    ASSERT_NO_FATAL_FAILURE(
+            fakePointerController->assertPosition(toDisplayX(100), toDisplayY(200)));
+}
+
+TEST_F(MultiTouchInputMapperTest, Process_WhenConfigDisabled_ShouldNotShowDirectStylusPointer) {
+    addConfigurationProperty("touch.deviceType", "touchScreen");
+    prepareDisplay(ui::ROTATION_0);
+    prepareAxes(POSITION | ID | SLOT | TOOL_TYPE | PRESSURE);
+    // Add BTN_TOOL_PEN to statically show stylus support, since using ABS_MT_TOOL_TYPE can only
+    // indicate stylus presence dynamically.
+    mFakeEventHub->addKey(EVENTHUB_ID, BTN_TOOL_PEN, 0, AKEYCODE_UNKNOWN, 0);
+    std::shared_ptr<FakePointerController> fakePointerController =
+            std::make_shared<FakePointerController>();
+    mFakePolicy->setPointerController(fakePointerController);
+    mFakePolicy->setStylusPointerIconEnabled(false);
+    MultiTouchInputMapper& mapper = addMapperAndConfigure<MultiTouchInputMapper>();
+
+    processId(mapper, FIRST_TRACKING_ID);
+    processPressure(mapper, RAW_PRESSURE_MIN);
+    processPosition(mapper, 100, 200);
+    processToolType(mapper, MT_TOOL_PEN);
+    processSync(mapper);
+    ASSERT_NO_FATAL_FAILURE(mFakeListener->assertNotifyMotionWasCalled(
+            AllOf(WithMotionAction(AMOTION_EVENT_ACTION_HOVER_ENTER),
+                  WithToolType(AMOTION_EVENT_TOOL_TYPE_STYLUS),
+                  WithPointerCoords(0, toDisplayX(100), toDisplayY(200)))));
+    ASSERT_FALSE(fakePointerController->isPointerShown());
+}
+
 // --- MultiTouchInputMapperTest_ExternalDevice ---
 
 class MultiTouchInputMapperTest_ExternalDevice : public MultiTouchInputMapperTest {
diff --git a/services/stats/.clang-format b/services/stats/.clang-format
new file mode 100644
index 0000000..cead3a0
--- /dev/null
+++ b/services/stats/.clang-format
@@ -0,0 +1,17 @@
+BasedOnStyle: Google
+AllowShortIfStatementsOnASingleLine: true
+AllowShortFunctionsOnASingleLine: false
+AllowShortLoopsOnASingleLine: true
+BinPackArguments: true
+BinPackParameters: true
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+ContinuationIndentWidth: 8
+DerivePointerAlignment: false
+IndentWidth: 4
+PointerAlignment: Left
+TabWidth: 4
+AccessModifierOffset: -4
+IncludeCategories:
+  - Regex:    '^"Log\.h"'
+    Priority:    -1
diff --git a/services/stats/StatsAidl.cpp b/services/stats/StatsAidl.cpp
index 410a5af..1348548 100644
--- a/services/stats/StatsAidl.cpp
+++ b/services/stats/StatsAidl.cpp
@@ -14,32 +14,33 @@
  * limitations under the License.
  */
 
-#define DEBUG false // STOPSHIP if true
+#define DEBUG false  // STOPSHIP if true
 #define LOG_TAG "StatsAidl"
 
+#include "StatsAidl.h"
+
 #include <log/log.h>
 #include <statslog.h>
 
-#include "StatsAidl.h"
-
 namespace aidl {
 namespace android {
 namespace frameworks {
 namespace stats {
 
-StatsHal::StatsHal() {}
+StatsHal::StatsHal() {
+}
 
 ndk::ScopedAStatus StatsHal::reportVendorAtom(const VendorAtom& vendorAtom) {
     if (vendorAtom.atomId < 100000 || vendorAtom.atomId >= 200000) {
-        ALOGE("Atom ID %ld is not a valid vendor atom ID", (long) vendorAtom.atomId);
+        ALOGE("Atom ID %ld is not a valid vendor atom ID", (long)vendorAtom.atomId);
         return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
-            -1, "Not a valid vendor atom ID");
+                -1, "Not a valid vendor atom ID");
     }
     if (vendorAtom.reverseDomainName.length() > 50) {
         ALOGE("Vendor atom reverse domain name %s is too long.",
-            vendorAtom.reverseDomainName.c_str());
+              vendorAtom.reverseDomainName.c_str());
         return ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
-            -1, "Vendor atom reverse domain name is too long");
+                -1, "Vendor atom reverse domain name is too long");
     }
     AStatsEvent* event = AStatsEvent_obtain();
     AStatsEvent_setAtomId(event, vendorAtom.atomId);
@@ -47,24 +48,20 @@
     for (const auto& atomValue : vendorAtom.values) {
         switch (atomValue.getTag()) {
             case VendorAtomValue::intValue:
-                AStatsEvent_writeInt32(event,
-                    atomValue.get<VendorAtomValue::intValue>());
+                AStatsEvent_writeInt32(event, atomValue.get<VendorAtomValue::intValue>());
                 break;
             case VendorAtomValue::longValue:
-                AStatsEvent_writeInt64(event,
-                    atomValue.get<VendorAtomValue::longValue>());
+                AStatsEvent_writeInt64(event, atomValue.get<VendorAtomValue::longValue>());
                 break;
             case VendorAtomValue::floatValue:
-                AStatsEvent_writeFloat(event,
-                    atomValue.get<VendorAtomValue::floatValue>());
+                AStatsEvent_writeFloat(event, atomValue.get<VendorAtomValue::floatValue>());
                 break;
             case VendorAtomValue::stringValue:
                 AStatsEvent_writeString(event,
-                    atomValue.get<VendorAtomValue::stringValue>().c_str());
+                                        atomValue.get<VendorAtomValue::stringValue>().c_str());
                 break;
             case VendorAtomValue::boolValue:
-                AStatsEvent_writeBool(event,
-                    atomValue.get<VendorAtomValue::boolValue>());
+                AStatsEvent_writeBool(event, atomValue.get<VendorAtomValue::boolValue>());
                 break;
             case VendorAtomValue::repeatedIntValue: {
                 const std::optional<std::vector<int>>& repeatedIntValue =
@@ -112,8 +109,8 @@
 
                 for (int i = 0; i < repeatedStringVector.size(); ++i) {
                     cStringArray[i] = repeatedStringVector[i].has_value()
-                            ? repeatedStringVector[i]->c_str()
-                            : "";
+                                              ? repeatedStringVector[i]->c_str()
+                                              : "";
                 }
 
                 AStatsEvent_writeStringArray(event, cStringArray, repeatedStringVector.size());
@@ -152,9 +149,9 @@
     const int ret = AStatsEvent_write(event);
     AStatsEvent_release(event);
 
-    return ret <= 0 ?
-            ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(ret, "report atom failed") :
-            ndk::ScopedAStatus::ok();
+    return ret <= 0 ? ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(ret,
+                                                                              "report atom failed")
+                    : ndk::ScopedAStatus::ok();
 }
 
 }  // namespace stats
diff --git a/services/stats/StatsHal.cpp b/services/stats/StatsHal.cpp
index d27d989..19176d9 100644
--- a/services/stats/StatsHal.cpp
+++ b/services/stats/StatsHal.cpp
@@ -14,42 +14,42 @@
  * limitations under the License.
  */
 
-#define DEBUG false // STOPSHIP if true
+#define DEBUG false  // STOPSHIP if true
 #define LOG_TAG "StatsHal"
 
+#include "StatsHal.h"
+
 #include <log/log.h>
 #include <statslog.h>
 
-#include "StatsHal.h"
-
 namespace android {
 namespace frameworks {
 namespace stats {
 namespace V1_0 {
 namespace implementation {
 
-StatsHal::StatsHal() {}
+StatsHal::StatsHal() {
+}
 
-hardware::Return<void> StatsHal::reportSpeakerImpedance(
-        const SpeakerImpedance& speakerImpedance) {
+hardware::Return<void> StatsHal::reportSpeakerImpedance(const SpeakerImpedance& speakerImpedance) {
     android::util::stats_write(android::util::SPEAKER_IMPEDANCE_REPORTED,
-            speakerImpedance.speakerLocation, speakerImpedance.milliOhms);
+                               speakerImpedance.speakerLocation, speakerImpedance.milliOhms);
 
     return hardware::Void();
 }
 
 hardware::Return<void> StatsHal::reportHardwareFailed(const HardwareFailed& hardwareFailed) {
     android::util::stats_write(android::util::HARDWARE_FAILED, int32_t(hardwareFailed.hardwareType),
-            hardwareFailed.hardwareLocation, int32_t(hardwareFailed.errorCode));
+                               hardwareFailed.hardwareLocation, int32_t(hardwareFailed.errorCode));
 
     return hardware::Void();
 }
 
 hardware::Return<void> StatsHal::reportPhysicalDropDetected(
         const PhysicalDropDetected& physicalDropDetected) {
-    android::util::stats_write(android::util::PHYSICAL_DROP_DETECTED,
-            int32_t(physicalDropDetected.confidencePctg), physicalDropDetected.accelPeak,
-            physicalDropDetected.freefallDuration);
+    android::util::stats_write(
+            android::util::PHYSICAL_DROP_DETECTED, int32_t(physicalDropDetected.confidencePctg),
+            physicalDropDetected.accelPeak, physicalDropDetected.freefallDuration);
 
     return hardware::Void();
 }
@@ -58,20 +58,21 @@
     std::vector<int32_t> buckets = chargeCycles.cycleBucket;
     int initialSize = buckets.size();
     for (int i = 0; i < 10 - initialSize; i++) {
-        buckets.push_back(0); // Push 0 for buckets that do not exist.
+        buckets.push_back(0);  // Push 0 for buckets that do not exist.
     }
     android::util::stats_write(android::util::CHARGE_CYCLES_REPORTED, buckets[0], buckets[1],
-            buckets[2], buckets[3], buckets[4], buckets[5], buckets[6], buckets[7], buckets[8],
-            buckets[9]);
+                               buckets[2], buckets[3], buckets[4], buckets[5], buckets[6],
+                               buckets[7], buckets[8], buckets[9]);
 
     return hardware::Void();
 }
 
 hardware::Return<void> StatsHal::reportBatteryHealthSnapshot(
         const BatteryHealthSnapshotArgs& batteryHealthSnapshotArgs) {
-    android::util::stats_write(android::util::BATTERY_HEALTH_SNAPSHOT,
-            int32_t(batteryHealthSnapshotArgs.type), batteryHealthSnapshotArgs.temperatureDeciC,
-            batteryHealthSnapshotArgs.voltageMicroV, batteryHealthSnapshotArgs.currentMicroA,
+    android::util::stats_write(
+            android::util::BATTERY_HEALTH_SNAPSHOT, int32_t(batteryHealthSnapshotArgs.type),
+            batteryHealthSnapshotArgs.temperatureDeciC, batteryHealthSnapshotArgs.voltageMicroV,
+            batteryHealthSnapshotArgs.currentMicroA,
             batteryHealthSnapshotArgs.openCircuitVoltageMicroV,
             batteryHealthSnapshotArgs.resistanceMicroOhm, batteryHealthSnapshotArgs.levelPercent);
 
@@ -87,14 +88,15 @@
 hardware::Return<void> StatsHal::reportBatteryCausedShutdown(
         const BatteryCausedShutdown& batteryCausedShutdown) {
     android::util::stats_write(android::util::BATTERY_CAUSED_SHUTDOWN,
-            batteryCausedShutdown.voltageMicroV);
+                               batteryCausedShutdown.voltageMicroV);
 
     return hardware::Void();
 }
 
 hardware::Return<void> StatsHal::reportUsbPortOverheatEvent(
         const UsbPortOverheatEvent& usbPortOverheatEvent) {
-    android::util::stats_write(android::util::USB_PORT_OVERHEAT_EVENT_REPORTED,
+    android::util::stats_write(
+            android::util::USB_PORT_OVERHEAT_EVENT_REPORTED,
             usbPortOverheatEvent.plugTemperatureDeciC, usbPortOverheatEvent.maxTemperatureDeciC,
             usbPortOverheatEvent.timeToOverheat, usbPortOverheatEvent.timeToHysteresis,
             usbPortOverheatEvent.timeToInactive);
@@ -102,18 +104,17 @@
     return hardware::Void();
 }
 
-hardware::Return<void> StatsHal::reportSpeechDspStat(
-        const SpeechDspStat& speechDspStat) {
+hardware::Return<void> StatsHal::reportSpeechDspStat(const SpeechDspStat& speechDspStat) {
     android::util::stats_write(android::util::SPEECH_DSP_STAT_REPORTED,
-            speechDspStat.totalUptimeMillis, speechDspStat.totalDowntimeMillis,
-            speechDspStat.totalCrashCount, speechDspStat.totalRecoverCount);
+                               speechDspStat.totalUptimeMillis, speechDspStat.totalDowntimeMillis,
+                               speechDspStat.totalCrashCount, speechDspStat.totalRecoverCount);
 
     return hardware::Void();
 }
 
 hardware::Return<void> StatsHal::reportVendorAtom(const VendorAtom& vendorAtom) {
     if (vendorAtom.atomId < 100000 || vendorAtom.atomId >= 200000) {
-        ALOGE("Atom ID %ld is not a valid vendor atom ID", (long) vendorAtom.atomId);
+        ALOGE("Atom ID %ld is not a valid vendor atom ID", (long)vendorAtom.atomId);
         return hardware::Void();
     }
     if (vendorAtom.reverseDomainName.size() > 50) {
diff --git a/services/stats/include/stats/StatsAidl.h b/services/stats/include/stats/StatsAidl.h
index 219e71e..340b539 100644
--- a/services/stats/include/stats/StatsAidl.h
+++ b/services/stats/include/stats/StatsAidl.h
@@ -28,8 +28,7 @@
     /**
      * Binder call to get vendor atom.
      */
-    virtual ndk::ScopedAStatus reportVendorAtom(
-        const VendorAtom& in_vendorAtom) override;
+    virtual ndk::ScopedAStatus reportVendorAtom(const VendorAtom& in_vendorAtom) override;
 };
 
 }  // namespace stats
diff --git a/services/stats/include/stats/StatsHal.h b/services/stats/include/stats/StatsHal.h
index 071e54f..864ad14 100644
--- a/services/stats/include/stats/StatsHal.h
+++ b/services/stats/include/stats/StatsHal.h
@@ -16,7 +16,6 @@
 
 #include <android/frameworks/stats/1.0/IStats.h>
 #include <android/frameworks/stats/1.0/types.h>
-
 #include <stats_event.h>
 
 using namespace android::frameworks::stats::V1_0;
@@ -30,8 +29,8 @@
 using android::hardware::Return;
 
 /**
-* Implements the Stats HAL
-*/
+ * Implements the Stats HAL
+ */
 class StatsHal : public IStats {
 public:
     StatsHal();
@@ -50,12 +49,12 @@
      * Binder call to get PhysicalDropDetected atom.
      */
     virtual Return<void> reportPhysicalDropDetected(
-             const PhysicalDropDetected& physicalDropDetected) override;
+            const PhysicalDropDetected& physicalDropDetected) override;
 
     /**
      * Binder call to get ChargeCyclesReported atom.
      */
-     virtual Return<void> reportChargeCycles(const ChargeCycles& chargeCycles) override;
+    virtual Return<void> reportChargeCycles(const ChargeCycles& chargeCycles) override;
 
     /**
      * Binder call to get BatteryHealthSnapshot atom.
@@ -83,8 +82,7 @@
     /**
      * Binder call to get Speech DSP state atom.
      */
-    virtual Return<void> reportSpeechDspStat(
-            const SpeechDspStat& speechDspStat) override;
+    virtual Return<void> reportSpeechDspStat(const SpeechDspStat& speechDspStat) override;
 
     /**
      * Binder call to get vendor atom.
diff --git a/services/surfaceflinger/CompositionEngine/tests/MockHWComposer.h b/services/surfaceflinger/CompositionEngine/tests/MockHWComposer.h
index 6199a5a..933f616 100644
--- a/services/surfaceflinger/CompositionEngine/tests/MockHWComposer.h
+++ b/services/surfaceflinger/CompositionEngine/tests/MockHWComposer.h
@@ -93,7 +93,7 @@
     MOCK_METHOD2(onHotplug,
                  std::optional<DisplayIdentificationInfo>(hal::HWDisplayId, hal::Connection));
     MOCK_CONST_METHOD0(updatesDeviceProductInfoOnHotplugReconnect, bool());
-    MOCK_METHOD2(onVsync, bool(hal::HWDisplayId, int64_t));
+    MOCK_METHOD(std::optional<PhysicalDisplayId>, onVsync, (hal::HWDisplayId, int64_t));
     MOCK_METHOD2(setVsyncEnabled, void(PhysicalDisplayId, hal::Vsync));
     MOCK_CONST_METHOD1(isConnected, bool(PhysicalDisplayId));
     MOCK_CONST_METHOD1(getModes, std::vector<HWComposer::HWCDisplayMode>(PhysicalDisplayId));
diff --git a/services/surfaceflinger/DisplayHardware/HWComposer.cpp b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
index 7dde6b4..8e74716 100644
--- a/services/surfaceflinger/DisplayHardware/HWComposer.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
@@ -30,6 +30,7 @@
 #include <compositionengine/Output.h>
 #include <compositionengine/OutputLayer.h>
 #include <compositionengine/impl/OutputLayerCompositionState.h>
+#include <ftl/concat.h>
 #include <log/log.h>
 #include <ui/DebugUtils.h>
 #include <ui/GraphicBuffer.h>
@@ -148,16 +149,17 @@
     return mUpdateDeviceProductInfoOnHotplugReconnect;
 }
 
-bool HWComposer::onVsync(hal::HWDisplayId hwcDisplayId, nsecs_t timestamp) {
-    const auto displayId = toPhysicalDisplayId(hwcDisplayId);
-    if (!displayId) {
+std::optional<PhysicalDisplayId> HWComposer::onVsync(hal::HWDisplayId hwcDisplayId,
+                                                     nsecs_t timestamp) {
+    const auto displayIdOpt = toPhysicalDisplayId(hwcDisplayId);
+    if (!displayIdOpt) {
         LOG_HWC_DISPLAY_ERROR(hwcDisplayId, "Invalid HWC display");
-        return false;
+        return {};
     }
 
-    RETURN_IF_INVALID_DISPLAY(*displayId, false);
+    RETURN_IF_INVALID_DISPLAY(*displayIdOpt, {});
 
-    auto& displayData = mDisplayData[*displayId];
+    auto& displayData = mDisplayData[*displayIdOpt];
 
     {
         // There have been reports of HWCs that signal several vsync events
@@ -166,18 +168,18 @@
         // out here so they don't cause havoc downstream.
         if (timestamp == displayData.lastPresentTimestamp) {
             ALOGW("Ignoring duplicate VSYNC event from HWC for display %s (t=%" PRId64 ")",
-                  to_string(*displayId).c_str(), timestamp);
-            return false;
+                  to_string(*displayIdOpt).c_str(), timestamp);
+            return {};
         }
 
         displayData.lastPresentTimestamp = timestamp;
     }
 
-    const auto tag = "HW_VSYNC_" + to_string(*displayId);
+    const ftl::Concat tag("HW_VSYNC_", displayIdOpt->value);
     ATRACE_INT(tag.c_str(), displayData.vsyncTraceToggle);
     displayData.vsyncTraceToggle = !displayData.vsyncTraceToggle;
 
-    return true;
+    return displayIdOpt;
 }
 
 size_t HWComposer::getMaxVirtualDisplayCount() const {
diff --git a/services/surfaceflinger/DisplayHardware/HWComposer.h b/services/surfaceflinger/DisplayHardware/HWComposer.h
index f6155d2..acebfb2 100644
--- a/services/surfaceflinger/DisplayHardware/HWComposer.h
+++ b/services/surfaceflinger/DisplayHardware/HWComposer.h
@@ -221,7 +221,10 @@
     // TODO(b/157555476): Remove when the framework has proper support for headless mode
     virtual bool updatesDeviceProductInfoOnHotplugReconnect() const = 0;
 
-    virtual bool onVsync(hal::HWDisplayId, nsecs_t timestamp) = 0;
+    // Called when a vsync happens. If the vsync is valid, returns the
+    // corresponding PhysicalDisplayId. Otherwise returns nullopt.
+    virtual std::optional<PhysicalDisplayId> onVsync(hal::HWDisplayId, nsecs_t timestamp) = 0;
+
     virtual void setVsyncEnabled(PhysicalDisplayId, hal::Vsync enabled) = 0;
 
     virtual bool isConnected(PhysicalDisplayId) const = 0;
@@ -402,7 +405,7 @@
 
     bool updatesDeviceProductInfoOnHotplugReconnect() const override;
 
-    bool onVsync(hal::HWDisplayId, nsecs_t timestamp) override;
+    std::optional<PhysicalDisplayId> onVsync(hal::HWDisplayId, nsecs_t timestamp) override;
     void setVsyncEnabled(PhysicalDisplayId, hal::Vsync enabled) override;
 
     bool isConnected(PhysicalDisplayId) const override;
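
HWComposer::onVsync now reports the resolved display instead of a bare success flag, so the caller can act on the identified display directly. Below is a minimal, standalone sketch of consuming that optional return, using hypothetical stand-in types rather than the real SurfaceFlinger declarations:

#include <cstdint>
#include <iostream>
#include <optional>

// Stand-ins for the real aliases (hal::HWDisplayId, PhysicalDisplayId, nsecs_t).
using HWDisplayId = uint64_t;
struct PhysicalDisplayId { uint64_t value; };
using nsecs_t = int64_t;

// Hypothetical function mirroring the new contract: return the resolved display
// for a valid vsync, or std::nullopt when the event should be dropped.
std::optional<PhysicalDisplayId> onVsync(HWDisplayId hwcDisplayId, nsecs_t /*timestamp*/) {
    if (hwcDisplayId == 0) return std::nullopt;  // unknown HWC display
    return PhysicalDisplayId{hwcDisplayId};
}

int main() {
    // The caller gets the physical display directly instead of a bare bool.
    if (const auto displayIdOpt = onVsync(/*hwcDisplayId=*/1, /*timestamp=*/123456789)) {
        std::cout << "vsync for display " << displayIdOpt->value << '\n';
    } else {
        std::cout << "vsync ignored\n";
    }
    return 0;
}
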
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
index 6490476..3ed24b2 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
@@ -391,7 +391,9 @@
 
 void LayerSnapshotBuilder::updateSnapshots(const Args& args) {
     ATRACE_NAME("UpdateSnapshots");
-    if (args.forceUpdate || args.displayChanges) {
+    if (args.parentCrop) {
+        mRootSnapshot.geomLayerBounds = *args.parentCrop;
+    } else if (args.forceUpdate || args.displayChanges) {
         mRootSnapshot.geomLayerBounds = getMaxDisplayBounds(args.displays);
     }
     if (args.displayChanges) {
@@ -618,7 +620,8 @@
              RequestedLayerState::Changes::AffectsChildren);
     snapshot.changes = parentChanges | requested.changes;
     snapshot.isHiddenByPolicyFromParent = parentSnapshot.isHiddenByPolicyFromParent ||
-            parentSnapshot.invalidTransform || requested.isHiddenByPolicy();
+            parentSnapshot.invalidTransform || requested.isHiddenByPolicy() ||
+            (args.excludeLayerIds.find(path.id) != args.excludeLayerIds.end());
     snapshot.contentDirty = requested.what & layer_state_t::CONTENT_DIRTY;
     // TODO(b/238781169) scope down the changes to only buffer updates.
     snapshot.hasReadyFrame =
@@ -983,6 +986,20 @@
     }
 }
 
+// Visit each visible snapshot in z-order
+void LayerSnapshotBuilder::forEachVisibleSnapshot(const ConstVisitor& visitor,
+                                                  const LayerHierarchy& root) const {
+    root.traverseInZOrder(
+            [this, visitor](const LayerHierarchy&,
+                            const LayerHierarchy::TraversalPath& traversalPath) -> bool {
+                LayerSnapshot* snapshot = getSnapshot(traversalPath);
+                if (snapshot && snapshot->isVisible) {
+                    visitor(*snapshot);
+                }
+                return true;
+            });
+}
+
 void LayerSnapshotBuilder::forEachVisibleSnapshot(const Visitor& visitor) {
     for (int i = 0; i < mNumInterestingSnapshots; i++) {
         std::unique_ptr<LayerSnapshot>& snapshot = mSnapshots.at((size_t)i);
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
index abb7e66..f4544fd 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
@@ -36,7 +36,7 @@
 class LayerSnapshotBuilder {
 public:
     struct Args {
-        const LayerHierarchy& root;
+        LayerHierarchy root;
         const LayerLifecycleManager& layerLifecycleManager;
         bool forceUpdate = false;
         bool includeMetadata = false;
@@ -46,6 +46,8 @@
         const renderengine::ShadowSettings& globalShadowSettings;
         bool supportsBlur = true;
         bool forceFullDamage = false;
+        std::optional<FloatRect> parentCrop = std::nullopt;
+        std::unordered_set<uint32_t> excludeLayerIds;
     };
     LayerSnapshotBuilder();
 
@@ -65,6 +67,9 @@
     // Visit each visible snapshot in z-order
     void forEachVisibleSnapshot(const ConstVisitor& visitor) const;
 
+    // Visit each visible snapshot in z-order
+    void forEachVisibleSnapshot(const ConstVisitor& visitor, const LayerHierarchy& root) const;
+
     typedef std::function<void(std::unique_ptr<LayerSnapshot>& snapshot)> Visitor;
     // Visit each visible snapshot in z-order and move the snapshot if needed
     void forEachVisibleSnapshot(const Visitor& visitor);
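
The new Args fields let a snapshot pass be constrained to a cropped subtree and hide selected layers. A minimal sketch of the precedence and exclusion logic those fields drive in updateSnapshots(), with simplified stand-in types (Rect here stands in for FloatRect and the per-display bounds):

#include <cstdint>
#include <iostream>
#include <optional>
#include <unordered_set>

// Simplified stand-ins; the real code uses FloatRect and per-display bounds.
struct Rect { float l, t, r, b; };

struct Args {
    std::optional<Rect> parentCrop;                // crop the pass to a subtree
    std::unordered_set<uint32_t> excludeLayerIds;  // layers to hide from this pass
};

Rect maxDisplayBounds() { return {0, 0, 1080, 2400}; }

// Mirrors the precedence in updateSnapshots(): an explicit parentCrop wins,
// otherwise the root bounds fall back to the union of display bounds.
Rect rootBounds(const Args& args) {
    return args.parentCrop ? *args.parentCrop : maxDisplayBounds();
}

// Mirrors the isHiddenByPolicyFromParent check: membership in excludeLayerIds hides a layer.
bool hiddenByArgs(const Args& args, uint32_t layerId) {
    return args.excludeLayerIds.find(layerId) != args.excludeLayerIds.end();
}

int main() {
    const Args args{Rect{0, 0, 500, 500}, {42}};
    const Rect bounds = rootBounds(args);
    std::cout << "root bounds " << bounds.r << "x" << bounds.b
              << ", layer 42 hidden: " << std::boolalpha << hiddenByArgs(args, 42) << '\n';
    return 0;
}
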
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index b6b9965..31ee91e 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -146,7 +146,7 @@
         mLayerCreationFlags(args.flags),
         mBorderEnabled(false),
         mTextureName(args.textureName),
-        mLayerFE(args.flinger->getFactory().createLayerFE(mName)) {
+        mLegacyLayerFE(args.flinger->getFactory().createLayerFE(mName)) {
     ALOGV("Creating Layer %s", getDebugName());
 
     uint32_t layerFlags = 0;
@@ -3098,15 +3098,14 @@
     return true;
 }
 
-bool Layer::setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& handles) {
+bool Layer::setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& handles,
+                                             bool willPresent) {
     // If there is no handle, we will not send a callback so reset mReleasePreviousBuffer and return
     if (handles.empty()) {
         mReleasePreviousBuffer = false;
         return false;
     }
 
-    const bool willPresent = willPresentCurrentTransaction();
-
     for (const auto& handle : handles) {
         // If this transaction set a buffer on this layer, release its previous buffer
         handle->releasePreviousBuffer = mReleasePreviousBuffer;
@@ -3180,11 +3179,10 @@
     return fenceSignaled;
 }
 
-bool Layer::onPreComposition(nsecs_t refreshStartTime) {
+void Layer::onPreComposition(nsecs_t refreshStartTime) {
     for (const auto& handle : mDrawingState.callbackHandles) {
         handle->refreshStartTime = refreshStartTime;
     }
-    return hasReadyFrame();
 }
 
 void Layer::setAutoRefresh(bool autoRefresh) {
@@ -3570,7 +3568,7 @@
 
 sp<LayerFE> Layer::getCompositionEngineLayerFE() const {
     // There's no need to get a CE Layer if the layer isn't going to draw anything.
-    return hasSomethingToDraw() ? mLayerFE : nullptr;
+    return hasSomethingToDraw() ? mLegacyLayerFE : nullptr;
 }
 
 const LayerSnapshot* Layer::getLayerSnapshot() const {
@@ -3581,16 +3579,36 @@
     return mSnapshot.get();
 }
 
+std::unique_ptr<frontend::LayerSnapshot> Layer::stealLayerSnapshot() {
+    return std::move(mSnapshot);
+}
+
+void Layer::updateLayerSnapshot(std::unique_ptr<frontend::LayerSnapshot> snapshot) {
+    mSnapshot = std::move(snapshot);
+}
+
 const compositionengine::LayerFECompositionState* Layer::getCompositionState() const {
     return mSnapshot.get();
 }
 
 sp<LayerFE> Layer::copyCompositionEngineLayerFE() const {
-    auto result = mFlinger->getFactory().createLayerFE(mLayerFE->getDebugName());
+    auto result = mFlinger->getFactory().createLayerFE(mName);
     result->mSnapshot = std::make_unique<LayerSnapshot>(*mSnapshot);
     return result;
 }
 
+sp<LayerFE> Layer::getCompositionEngineLayerFE(
+        const frontend::LayerHierarchy::TraversalPath& path) {
+    for (auto& [p, layerFE] : mLayerFEs) {
+        if (p == path) {
+            return layerFE;
+        }
+    }
+    auto layerFE = mFlinger->getFactory().createLayerFE(mName);
+    mLayerFEs.emplace_back(path, layerFE);
+    return layerFE;
+}
+
 void Layer::useSurfaceDamage() {
     if (mFlinger->mForceFullDamage) {
         surfaceDamageRegion = Region::INVALID_REGION;
@@ -3986,28 +4004,6 @@
     }
 }
 
-LayerSnapshotGuard::LayerSnapshotGuard(Layer* layer) : mLayer(layer) {
-    if (mLayer) {
-        mLayer->mLayerFE->mSnapshot = std::move(mLayer->mSnapshot);
-    }
-}
-
-LayerSnapshotGuard::~LayerSnapshotGuard() {
-    if (mLayer) {
-        mLayer->mSnapshot = std::move(mLayer->mLayerFE->mSnapshot);
-    }
-}
-
-LayerSnapshotGuard::LayerSnapshotGuard(LayerSnapshotGuard&& other) : mLayer(other.mLayer) {
-    other.mLayer = nullptr;
-}
-
-LayerSnapshotGuard& LayerSnapshotGuard::operator=(LayerSnapshotGuard&& other) {
-    mLayer = other.mLayer;
-    other.mLayer = nullptr;
-    return *this;
-}
-
 void Layer::setTrustedPresentationInfo(TrustedPresentationThresholds const& thresholds,
                                        TrustedPresentationListener const& listener) {
     bool hadTrustedPresentationListener = hasTrustedPresentationListener();
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index f858224..3d4f03f 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -307,7 +307,8 @@
     bool setSurfaceDamageRegion(const Region& /*surfaceDamage*/);
     bool setApi(int32_t /*api*/);
     bool setSidebandStream(const sp<NativeHandle>& /*sidebandStream*/);
-    bool setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& /*handles*/);
+    bool setTransactionCompletedListeners(const std::vector<sp<CallbackHandle>>& /*handles*/,
+                                          bool willPresent);
     virtual bool setBackgroundColor(const half3& color, float alpha, ui::Dataspace dataspace);
     virtual bool setColorSpaceAgnostic(const bool agnostic);
     virtual bool setDimmingEnabled(const bool dimmingEnabled);
@@ -328,9 +329,12 @@
 
     virtual sp<LayerFE> getCompositionEngineLayerFE() const;
     virtual sp<LayerFE> copyCompositionEngineLayerFE() const;
+    sp<LayerFE> getCompositionEngineLayerFE(const frontend::LayerHierarchy::TraversalPath&);
 
     const frontend::LayerSnapshot* getLayerSnapshot() const;
     frontend::LayerSnapshot* editLayerSnapshot();
+    std::unique_ptr<frontend::LayerSnapshot> stealLayerSnapshot();
+    void updateLayerSnapshot(std::unique_ptr<frontend::LayerSnapshot> snapshot);
 
     // If we have received a new buffer this frame, we will pass its surface
     // damage down to hardware composer. Otherwise, we must send a region with
@@ -512,7 +516,7 @@
     // implements compositionengine::LayerFE
     const compositionengine::LayerFECompositionState* getCompositionState() const;
     bool fenceHasSignaled() const;
-    bool onPreComposition(nsecs_t refreshStartTime);
+    void onPreComposition(nsecs_t refreshStartTime);
     void onLayerDisplayed(ftl::SharedFuture<FenceResult>);
 
     void setWasClientComposed(const sp<Fence>& fence) {
@@ -832,6 +836,7 @@
     void updateMetadataSnapshot(const LayerMetadata& parentMetadata);
     void updateRelativeMetadataSnapshot(const LayerMetadata& relativeLayerMetadata,
                                         std::unordered_set<Layer*>& visited);
+    bool willPresentCurrentTransaction() const;
 
 protected:
     // For unit tests
@@ -1037,8 +1042,6 @@
     // Crop that applies to the buffer
     Rect computeBufferCrop(const State& s);
 
-    bool willPresentCurrentTransaction() const;
-
     void callReleaseBufferCallback(const sp<ITransactionCompletedListener>& listener,
                                    const sp<GraphicBuffer>& buffer, uint64_t framenumber,
                                    const sp<Fence>& releaseFence,
@@ -1146,34 +1149,10 @@
     // not specify a destination frame.
     ui::Transform mRequestedTransform;
 
-    sp<LayerFE> mLayerFE;
+    sp<LayerFE> mLegacyLayerFE;
+    std::vector<std::pair<frontend::LayerHierarchy::TraversalPath, sp<LayerFE>>> mLayerFEs;
     std::unique_ptr<frontend::LayerSnapshot> mSnapshot =
             std::make_unique<frontend::LayerSnapshot>();
-
-    friend class LayerSnapshotGuard;
-};
-
-// LayerSnapshotGuard manages the movement of LayerSnapshot between a Layer and its corresponding
-// LayerFE. This class must be used whenever LayerFEs are passed to CompositionEngine. Instances of
-// LayerSnapshotGuard should only be constructed on the main thread and should not be moved outside
-// the main thread.
-//
-// Moving the snapshot instead of sharing common state prevents use of LayerFE outside the main
-// thread by making errors obvious (i.e. use outside the main thread results in SEGFAULTs due to
-// nullptr dereference).
-class LayerSnapshotGuard {
-public:
-    LayerSnapshotGuard(Layer* layer) REQUIRES(kMainThreadContext);
-    ~LayerSnapshotGuard() REQUIRES(kMainThreadContext);
-
-    LayerSnapshotGuard(const LayerSnapshotGuard&) = delete;
-    LayerSnapshotGuard& operator=(const LayerSnapshotGuard&) = delete;
-
-    LayerSnapshotGuard(LayerSnapshotGuard&& other) REQUIRES(kMainThreadContext);
-    LayerSnapshotGuard& operator=(LayerSnapshotGuard&& other) REQUIRES(kMainThreadContext);
-
-private:
-    Layer* mLayer;
 };
 
 std::ostream& operator<<(std::ostream& stream, const Layer::FrameRate& rate);
diff --git a/services/surfaceflinger/LayerRenderArea.cpp b/services/surfaceflinger/LayerRenderArea.cpp
index 2b4375b..03a7f22 100644
--- a/services/surfaceflinger/LayerRenderArea.cpp
+++ b/services/surfaceflinger/LayerRenderArea.cpp
@@ -69,6 +69,14 @@
 void LayerRenderArea::render(std::function<void()> drawLayers) {
     using namespace std::string_literals;
 
+    if (!mChildrenOnly) {
+        mTransform = mLayer->getTransform().inverse();
+    }
+
+    if (mFlinger.mLayerLifecycleManagerEnabled) {
+        drawLayers();
+        return;
+    }
     // If layer is offscreen, update mirroring info if it exists
     if (mLayer->isRemovedFromCurrentState()) {
         mLayer->traverse(LayerVector::StateSet::Drawing,
@@ -78,7 +86,6 @@
     }
 
     if (!mChildrenOnly) {
-        mTransform = mLayer->getTransform().inverse();
         // If the layer is offscreen, compute bounds since we don't compute bounds for offscreen
         // layers in a regular cycles.
         if (mLayer->isRemovedFromCurrentState()) {
diff --git a/services/surfaceflinger/Scheduler/EventThread.cpp b/services/surfaceflinger/Scheduler/EventThread.cpp
index a902a8e..5e79a5c 100644
--- a/services/surfaceflinger/Scheduler/EventThread.cpp
+++ b/services/surfaceflinger/Scheduler/EventThread.cpp
@@ -238,29 +238,19 @@
 
 namespace impl {
 
-EventThread::EventThread(const char* name, scheduler::VsyncSchedule& vsyncSchedule,
+EventThread::EventThread(const char* name, std::shared_ptr<scheduler::VsyncSchedule> vsyncSchedule,
+                         IEventThreadCallback& eventThreadCallback,
                          android::frametimeline::TokenManager* tokenManager,
-                         ThrottleVsyncCallback throttleVsyncCallback,
-                         GetVsyncPeriodFunction getVsyncPeriodFunction,
                          std::chrono::nanoseconds workDuration,
                          std::chrono::nanoseconds readyDuration)
       : mThreadName(name),
         mVsyncTracer(base::StringPrintf("VSYNC-%s", name), 0),
         mWorkDuration(base::StringPrintf("VsyncWorkDuration-%s", name), workDuration),
         mReadyDuration(readyDuration),
-        mVsyncSchedule(vsyncSchedule),
-        mVsyncRegistration(
-                vsyncSchedule.getDispatch(),
-                [this](nsecs_t vsyncTime, nsecs_t wakeupTime, nsecs_t readyTime) {
-                    onVsync(vsyncTime, wakeupTime, readyTime);
-                },
-                name),
+        mVsyncSchedule(std::move(vsyncSchedule)),
+        mVsyncRegistration(mVsyncSchedule->getDispatch(), createDispatchCallback(), name),
         mTokenManager(tokenManager),
-        mThrottleVsyncCallback(std::move(throttleVsyncCallback)),
-        mGetVsyncPeriodFunction(std::move(getVsyncPeriodFunction)) {
-    LOG_ALWAYS_FATAL_IF(getVsyncPeriodFunction == nullptr,
-            "getVsyncPeriodFunction must not be null");
-
+        mEventThreadCallback(eventThreadCallback) {
     mThread = std::thread([this]() NO_THREAD_SAFETY_ANALYSIS {
         std::unique_lock<std::mutex> lock(mMutex);
         threadMain(lock);
@@ -371,16 +361,16 @@
     }
 
     VsyncEventData vsyncEventData;
-    nsecs_t frameInterval = mGetVsyncPeriodFunction(connection->mOwnerUid);
-    vsyncEventData.frameInterval = frameInterval;
+    const Fps frameInterval = mEventThreadCallback.getLeaderRenderFrameRate(connection->mOwnerUid);
+    vsyncEventData.frameInterval = frameInterval.getPeriodNsecs();
     const auto [presentTime, deadline] = [&]() -> std::pair<nsecs_t, nsecs_t> {
         std::lock_guard<std::mutex> lock(mMutex);
-        const auto vsyncTime = mVsyncSchedule.getTracker().nextAnticipatedVSyncTimeFrom(
+        const auto vsyncTime = mVsyncSchedule->getTracker().nextAnticipatedVSyncTimeFrom(
                 systemTime() + mWorkDuration.get().count() + mReadyDuration.count());
         return {vsyncTime, vsyncTime - mReadyDuration.count()};
     }();
-    generateFrameTimeline(vsyncEventData, frameInterval, systemTime(SYSTEM_TIME_MONOTONIC),
-                          presentTime, deadline);
+    generateFrameTimeline(vsyncEventData, frameInterval.getPeriodNsecs(),
+                          systemTime(SYSTEM_TIME_MONOTONIC), presentTime, deadline);
     return vsyncEventData;
 }
 
@@ -541,9 +531,17 @@
 bool EventThread::shouldConsumeEvent(const DisplayEventReceiver::Event& event,
                                      const sp<EventThreadConnection>& connection) const {
     const auto throttleVsync = [&] {
-        return mThrottleVsyncCallback &&
-                mThrottleVsyncCallback(event.vsync.vsyncData.preferredExpectedPresentationTime(),
-                                       connection->mOwnerUid);
+        const auto& vsyncData = event.vsync.vsyncData;
+        if (connection->frameRate.isValid()) {
+            return !mVsyncSchedule->getTracker()
+                            .isVSyncInPhase(vsyncData.preferredExpectedPresentationTime(),
+                                            connection->frameRate);
+        }
+
+        const auto expectedPresentTime =
+                TimePoint::fromNs(vsyncData.preferredExpectedPresentationTime());
+        return !mEventThreadCallback.isVsyncTargetForUid(expectedPresentTime,
+                                                         connection->mOwnerUid);
     };
 
     switch (event.header.type) {
@@ -631,9 +629,11 @@
     for (const auto& consumer : consumers) {
         DisplayEventReceiver::Event copy = event;
         if (event.header.type == DisplayEventReceiver::DISPLAY_EVENT_VSYNC) {
-            const int64_t frameInterval = mGetVsyncPeriodFunction(consumer->mOwnerUid);
-            copy.vsync.vsyncData.frameInterval = frameInterval;
-            generateFrameTimeline(copy.vsync.vsyncData, frameInterval, copy.header.timestamp,
+            const Fps frameInterval =
+                    mEventThreadCallback.getLeaderRenderFrameRate(consumer->mOwnerUid);
+            copy.vsync.vsyncData.frameInterval = frameInterval.getPeriodNsecs();
+            generateFrameTimeline(copy.vsync.vsyncData, frameInterval.getPeriodNsecs(),
+                                  copy.header.timestamp,
                                   event.vsync.vsyncData.preferredExpectedPresentationTime(),
                                   event.vsync.vsyncData.preferredDeadlineTimestamp());
         }
@@ -699,6 +699,26 @@
     }
 }
 
+void EventThread::onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule> schedule) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    const bool reschedule = mVsyncRegistration.cancel() == scheduler::CancelResult::Cancelled;
+    mVsyncSchedule = std::move(schedule);
+    mVsyncRegistration =
+            scheduler::VSyncCallbackRegistration(mVsyncSchedule->getDispatch(),
+                                                 createDispatchCallback(), mThreadName);
+    if (reschedule) {
+        mVsyncRegistration.schedule({.workDuration = mWorkDuration.get().count(),
+                                     .readyDuration = mReadyDuration.count(),
+                                     .earliestVsync = mLastVsyncCallbackTime.ns()});
+    }
+}
+
+scheduler::VSyncDispatch::Callback EventThread::createDispatchCallback() {
+    return [this](nsecs_t vsyncTime, nsecs_t wakeupTime, nsecs_t readyTime) {
+        onVsync(vsyncTime, wakeupTime, readyTime);
+    };
+}
+
 } // namespace impl
 } // namespace android
 
diff --git a/services/surfaceflinger/Scheduler/EventThread.h b/services/surfaceflinger/Scheduler/EventThread.h
index ab9085e..aa27091 100644
--- a/services/surfaceflinger/Scheduler/EventThread.h
+++ b/services/surfaceflinger/Scheduler/EventThread.h
@@ -23,6 +23,7 @@
 #include <sys/types.h>
 #include <utils/Errors.h>
 
+#include <scheduler/Fps.h>
 #include <scheduler/FrameRateMode.h>
 #include <condition_variable>
 #include <cstdint>
@@ -67,6 +68,15 @@
     // Subsequent values are periods.
 };
 
+class IEventThreadCallback {
+public:
+    virtual ~IEventThreadCallback() = default;
+
+    virtual bool isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const = 0;
+
+    virtual Fps getLeaderRenderFrameRate(uid_t uid) const = 0;
+};
+
 class EventThreadConnection : public gui::BnDisplayEventConnection {
 public:
     EventThreadConnection(EventThread*, uid_t callingUid, ResyncCallback,
@@ -87,6 +97,9 @@
     const uid_t mOwnerUid;
     const EventRegistrationFlags mEventRegistration;
 
+    /** The frame rate set for the attached choreographer. */
+    Fps frameRate;
+
 private:
     virtual void onFirstRef();
     EventThread* const mEventThread;
@@ -133,18 +146,17 @@
 
     // Retrieves the number of event connections tracked by this EventThread.
     virtual size_t getEventThreadConnectionCount() = 0;
+
+    virtual void onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule>) = 0;
 };
 
 namespace impl {
 
 class EventThread : public android::EventThread {
 public:
-    using ThrottleVsyncCallback = std::function<bool(nsecs_t, uid_t)>;
-    using GetVsyncPeriodFunction = std::function<nsecs_t(uid_t)>;
-
-    EventThread(const char* name, scheduler::VsyncSchedule&, frametimeline::TokenManager*,
-                ThrottleVsyncCallback, GetVsyncPeriodFunction,
-                std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration);
+    EventThread(const char* name, std::shared_ptr<scheduler::VsyncSchedule>, IEventThreadCallback&,
+                frametimeline::TokenManager*, std::chrono::nanoseconds workDuration,
+                std::chrono::nanoseconds readyDuration);
     ~EventThread();
 
     sp<EventThreadConnection> createEventConnection(
@@ -176,6 +188,8 @@
 
     size_t getEventThreadConnectionCount() override;
 
+    void onNewVsyncSchedule(std::shared_ptr<scheduler::VsyncSchedule>) override;
+
 private:
     friend EventThreadTest;
 
@@ -199,17 +213,19 @@
                                nsecs_t timestamp, nsecs_t preferredExpectedPresentationTime,
                                nsecs_t preferredDeadlineTimestamp) const;
 
+    scheduler::VSyncDispatch::Callback createDispatchCallback();
+
     const char* const mThreadName;
     TracedOrdinal<int> mVsyncTracer;
     TracedOrdinal<std::chrono::nanoseconds> mWorkDuration GUARDED_BY(mMutex);
     std::chrono::nanoseconds mReadyDuration GUARDED_BY(mMutex);
-    scheduler::VsyncSchedule& mVsyncSchedule;
+    std::shared_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
     TimePoint mLastVsyncCallbackTime GUARDED_BY(mMutex) = TimePoint::now();
     scheduler::VSyncCallbackRegistration mVsyncRegistration GUARDED_BY(mMutex);
     frametimeline::TokenManager* const mTokenManager;
 
-    const ThrottleVsyncCallback mThrottleVsyncCallback;
-    const GetVsyncPeriodFunction mGetVsyncPeriodFunction;
+    // mEventThreadCallback will outlive the EventThread.
+    IEventThreadCallback& mEventThreadCallback;
 
     std::thread mThread;
     mutable std::mutex mMutex;
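
The former ThrottleVsyncCallback and GetVsyncPeriodFunction hooks are folded into one IEventThreadCallback interface that the scheduler implements and that must outlive the event thread. A standalone sketch of that pattern; TimePoint, Fps, FakeScheduler, and FakeEventThread are simplified stand-ins, not the real classes:

#include <cstdint>
#include <iostream>

using Uid = uint32_t;
struct TimePoint { int64_t ns; };
struct Fps { double hz; };

// The former callback pair collapsed into a single interface.
class IEventThreadCallback {
public:
    virtual ~IEventThreadCallback() = default;
    virtual bool isVsyncTargetForUid(TimePoint expectedVsyncTime, Uid uid) const = 0;
    virtual Fps getLeaderRenderFrameRate(Uid uid) const = 0;
};

// Illustrative implementer standing in for the scheduler.
class FakeScheduler final : public IEventThreadCallback {
public:
    bool isVsyncTargetForUid(TimePoint, Uid uid) const override { return uid != 0; }
    Fps getLeaderRenderFrameRate(Uid) const override { return Fps{60.0}; }
};

// The event thread holds only a reference; the callback must outlive it.
class FakeEventThread {
public:
    explicit FakeEventThread(IEventThreadCallback& callback) : mCallback(callback) {}
    void onVsync(TimePoint t, Uid uid) const {
        if (mCallback.isVsyncTargetForUid(t, uid)) {
            std::cout << "dispatch at " << mCallback.getLeaderRenderFrameRate(uid).hz << " Hz\n";
        }
    }
private:
    IEventThreadCallback& mCallback;
};

int main() {
    FakeScheduler scheduler;
    FakeEventThread thread(scheduler);
    thread.onVsync(TimePoint{0}, /*uid=*/1000);
    return 0;
}
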
diff --git a/services/surfaceflinger/Scheduler/ISchedulerCallback.h b/services/surfaceflinger/Scheduler/ISchedulerCallback.h
new file mode 100644
index 0000000..92c2189
--- /dev/null
+++ b/services/surfaceflinger/Scheduler/ISchedulerCallback.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <vector>
+
+#include <ui/DisplayId.h>
+
+#include "Display/DisplayModeRequest.h"
+
+namespace android::scheduler {
+
+struct ISchedulerCallback {
+    virtual void setVsyncEnabled(PhysicalDisplayId, bool) = 0;
+    virtual void requestDisplayModes(std::vector<display::DisplayModeRequest>) = 0;
+    virtual void kernelTimerChanged(bool expired) = 0;
+    virtual void triggerOnFrameRateOverridesChanged() = 0;
+
+protected:
+    ~ISchedulerCallback() = default;
+};
+
+} // namespace android::scheduler
diff --git a/services/surfaceflinger/Scheduler/LayerHistory.cpp b/services/surfaceflinger/Scheduler/LayerHistory.cpp
index 55fa402..e853833 100644
--- a/services/surfaceflinger/Scheduler/LayerHistory.cpp
+++ b/services/surfaceflinger/Scheduler/LayerHistory.cpp
@@ -32,6 +32,7 @@
 #include <utility>
 
 #include "../Layer.h"
+#include "EventThread.h"
 #include "LayerInfo.h"
 
 namespace android::scheduler {
@@ -140,6 +141,22 @@
 
     info->setLastPresentTime(presentTime, now, updateType, mModeChangePending, layerProps);
 
+    // Propagate the layer's frame rate to any attached choreographer connections.
+    // TODO(b/260898223): Change to use layer hierarchy and handle frame rate vote.
+    if (updateType == LayerUpdateType::SetFrameRate) {
+        auto range = mAttachedChoreographers.equal_range(id);
+        auto it = range.first;
+        while (it != range.second) {
+            sp<EventThreadConnection> choreographerConnection = it->second.promote();
+            if (choreographerConnection) {
+                choreographerConnection->frameRate = layer->getFrameRateForLayerTree().rate;
+                it++;
+            } else {
+                it = mAttachedChoreographers.erase(it);
+            }
+        }
+    }
+
     // Activate layer if inactive.
     if (found == LayerStatus::LayerInInactiveMap) {
         mActiveLayerInfos.insert(
@@ -294,6 +311,12 @@
     return 0.f;
 }
 
+void LayerHistory::attachChoreographer(int32_t layerId,
+                                       const sp<EventThreadConnection>& choreographerConnection) {
+    std::lock_guard lock(mLock);
+    mAttachedChoreographers.insert({layerId, wp<EventThreadConnection>(choreographerConnection)});
+}
+
 auto LayerHistory::findLayer(int32_t id) -> std::pair<LayerStatus, LayerPair*> {
     // the layer could be in either the active or inactive map, try both
     auto it = mActiveLayerInfos.find(id);
diff --git a/services/surfaceflinger/Scheduler/LayerHistory.h b/services/surfaceflinger/Scheduler/LayerHistory.h
index 5022906..68e7030 100644
--- a/services/surfaceflinger/Scheduler/LayerHistory.h
+++ b/services/surfaceflinger/Scheduler/LayerHistory.h
@@ -27,6 +27,8 @@
 #include <utility>
 #include <vector>
 
+#include "EventThread.h"
+
 #include "RefreshRateSelector.h"
 
 namespace android {
@@ -80,6 +82,9 @@
     // return the frames per second of the layer with the given sequence id.
     float getLayerFramerate(nsecs_t now, int32_t id) const;
 
+    void attachChoreographer(int32_t layerId,
+                             const sp<EventThreadConnection>& choreographerConnection);
+
 private:
     friend class LayerHistoryTest;
     friend class TestableScheduler;
@@ -117,6 +122,10 @@
     LayerInfos mActiveLayerInfos GUARDED_BY(mLock);
     LayerInfos mInactiveLayerInfos GUARDED_BY(mLock);
 
+    // Map keyed by layer ID (sequence) to choreographer connections.
+    std::unordered_multimap<int32_t, wp<EventThreadConnection>> mAttachedChoreographers
+            GUARDED_BY(mLock);
+
     uint32_t mDisplayArea = 0;
 
     // Whether to emit systrace output and debug logs.
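
attachChoreographer records weak references keyed by layer id, and the layer-update path promotes each entry and lazily erases dead connections. A minimal sketch of that bookkeeping with std::weak_ptr in place of Android's sp<>/wp<>; ChoreographerRegistry and Connection are illustrative names:

#include <cstdint>
#include <iostream>
#include <memory>
#include <unordered_map>

// Stand-in for EventThreadConnection; the real code uses Android sp<>/wp<>.
struct Connection {
    double frameRate = 0.0;
};

class ChoreographerRegistry {
public:
    void attach(int32_t layerId, const std::shared_ptr<Connection>& connection) {
        mAttached.insert({layerId, connection});
    }

    // Mirrors the layer-update loop: promote each weak reference, push the new
    // frame rate to live connections, and lazily erase entries whose connection died.
    void setFrameRate(int32_t layerId, double fps) {
        auto range = mAttached.equal_range(layerId);
        for (auto it = range.first; it != range.second;) {
            if (auto connection = it->second.lock()) {
                connection->frameRate = fps;
                ++it;
            } else {
                it = mAttached.erase(it);
            }
        }
    }

private:
    std::unordered_multimap<int32_t, std::weak_ptr<Connection>> mAttached;
};

int main() {
    ChoreographerRegistry registry;
    auto connection = std::make_shared<Connection>();
    registry.attach(/*layerId=*/7, connection);
    registry.setFrameRate(/*layerId=*/7, /*fps=*/90.0);
    std::cout << "frame rate: " << connection->frameRate << '\n';
    return 0;
}
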
diff --git a/services/surfaceflinger/Scheduler/MessageQueue.cpp b/services/surfaceflinger/Scheduler/MessageQueue.cpp
index dec8f59..925f739 100644
--- a/services/surfaceflinger/Scheduler/MessageQueue.cpp
+++ b/services/surfaceflinger/Scheduler/MessageQueue.cpp
@@ -75,19 +75,37 @@
     mHandler->dispatchFrame(vsyncId, expectedVsyncTime);
 }
 
-void MessageQueue::initVsync(scheduler::VSyncDispatch& dispatch,
+void MessageQueue::initVsync(std::shared_ptr<scheduler::VSyncDispatch> dispatch,
                              frametimeline::TokenManager& tokenManager,
                              std::chrono::nanoseconds workDuration) {
     std::lock_guard lock(mVsync.mutex);
     mVsync.workDuration = workDuration;
     mVsync.tokenManager = &tokenManager;
+    updateVsyncRegistrationLocked(std::move(dispatch));
+}
+
+void MessageQueue::updateVsyncRegistration(std::shared_ptr<scheduler::VSyncDispatch> dispatch) {
+    std::lock_guard lock(mVsync.mutex);
+    updateVsyncRegistrationLocked(std::move(dispatch));
+}
+
+void MessageQueue::updateVsyncRegistrationLocked(
+        std::shared_ptr<scheduler::VSyncDispatch> dispatch) {
+    const bool reschedule = mVsync.registration &&
+            mVsync.registration->cancel() == scheduler::CancelResult::Cancelled;
     mVsync.registration = std::make_unique<
-            scheduler::VSyncCallbackRegistration>(dispatch,
+            scheduler::VSyncCallbackRegistration>(std::move(dispatch),
                                                   std::bind(&MessageQueue::vsyncCallback, this,
                                                             std::placeholders::_1,
                                                             std::placeholders::_2,
                                                             std::placeholders::_3),
                                                   "sf");
+    if (reschedule) {
+        mVsync.scheduledFrameTime =
+                mVsync.registration->schedule({.workDuration = mVsync.workDuration.get().count(),
+                                               .readyDuration = 0,
+                                               .earliestVsync = mVsync.lastCallbackTime.ns()});
+    }
 }
 
 void MessageQueue::destroyVsync() {
diff --git a/services/surfaceflinger/Scheduler/MessageQueue.h b/services/surfaceflinger/Scheduler/MessageQueue.h
index 0d59337..ecb237d 100644
--- a/services/surfaceflinger/Scheduler/MessageQueue.h
+++ b/services/surfaceflinger/Scheduler/MessageQueue.h
@@ -65,7 +65,7 @@
 public:
     virtual ~MessageQueue() = default;
 
-    virtual void initVsync(scheduler::VSyncDispatch&, frametimeline::TokenManager&,
+    virtual void initVsync(std::shared_ptr<scheduler::VSyncDispatch>, frametimeline::TokenManager&,
                            std::chrono::nanoseconds workDuration) = 0;
     virtual void destroyVsync() = 0;
     virtual void setDuration(std::chrono::nanoseconds workDuration) = 0;
@@ -106,6 +106,8 @@
 
     void vsyncCallback(nsecs_t vsyncTime, nsecs_t targetWakeupTime, nsecs_t readyTime);
 
+    void updateVsyncRegistration(std::shared_ptr<scheduler::VSyncDispatch>) EXCLUDES(mVsync.mutex);
+
 private:
     virtual void onFrameSignal(ICompositor&, VsyncId, TimePoint expectedVsyncTime) = 0;
 
@@ -127,10 +129,13 @@
 
     Vsync mVsync;
 
+    void updateVsyncRegistrationLocked(std::shared_ptr<scheduler::VSyncDispatch>)
+            REQUIRES(mVsync.mutex);
+
 public:
     explicit MessageQueue(ICompositor&);
 
-    void initVsync(scheduler::VSyncDispatch&, frametimeline::TokenManager&,
+    void initVsync(std::shared_ptr<scheduler::VSyncDispatch>, frametimeline::TokenManager&,
                    std::chrono::nanoseconds workDuration) override;
     void destroyVsync() override;
     void setDuration(std::chrono::nanoseconds workDuration) override;
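
Both EventThread::onNewVsyncSchedule and MessageQueue::updateVsyncRegistrationLocked follow the same cancel, rebuild, and conditionally reschedule pattern when the vsync dispatch is swapped. A simplified standalone sketch of that pattern; Dispatch, Registration, and swapDispatch are stand-ins rather than the real VSyncDispatch/VSyncCallbackRegistration APIs:

#include <cstdint>
#include <iostream>
#include <memory>
#include <optional>

// Stand-ins for VSyncDispatch / VSyncCallbackRegistration.
struct Dispatch {
    const char* name;
};

enum class CancelResult { Cancelled, TooLate };

class Registration {
public:
    explicit Registration(std::shared_ptr<Dispatch> dispatch) : mDispatch(std::move(dispatch)) {}

    void schedule(int64_t earliestVsyncNs) {
        mScheduledNs = earliestVsyncNs;
        std::cout << "scheduled on " << mDispatch->name << '\n';
    }

    // Returns Cancelled only if a callback was actually pending.
    CancelResult cancel() {
        if (!mScheduledNs) return CancelResult::TooLate;
        mScheduledNs.reset();
        return CancelResult::Cancelled;
    }

private:
    std::shared_ptr<Dispatch> mDispatch;
    std::optional<int64_t> mScheduledNs;
};

// Cancel against the old dispatch, rebuild the registration against the new one,
// and reschedule only if a callback had been pending.
void swapDispatch(std::unique_ptr<Registration>& registration,
                  std::shared_ptr<Dispatch> newDispatch, int64_t lastVsyncNs) {
    const bool reschedule = registration && registration->cancel() == CancelResult::Cancelled;
    registration = std::make_unique<Registration>(std::move(newDispatch));
    if (reschedule) {
        registration->schedule(lastVsyncNs);
    }
}

int main() {
    auto registration = std::make_unique<Registration>(std::make_shared<Dispatch>(Dispatch{"old"}));
    registration->schedule(/*earliestVsyncNs=*/1000000);
    swapDispatch(registration, std::make_shared<Dispatch>(Dispatch{"new"}), /*lastVsyncNs=*/2000000);
    return 0;
}
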
diff --git a/services/surfaceflinger/Scheduler/OneShotTimer.h b/services/surfaceflinger/Scheduler/OneShotTimer.h
index f95646c..02e8719 100644
--- a/services/surfaceflinger/Scheduler/OneShotTimer.h
+++ b/services/surfaceflinger/Scheduler/OneShotTimer.h
@@ -40,7 +40,7 @@
 
     OneShotTimer(std::string name, const Interval& interval, const ResetCallback& resetCallback,
                  const TimeoutCallback& timeoutCallback,
-                 std::unique_ptr<Clock> clock = std::make_unique<SteadyClock>());
+                 std::unique_ptr<android::Clock> clock = std::make_unique<SteadyClock>());
     ~OneShotTimer();
 
     Duration interval() const { return mInterval; }
@@ -82,7 +82,7 @@
     std::thread mThread;
 
     // Clock object for the timer. Mocked in unit tests.
-    std::unique_ptr<Clock> mClock;
+    std::unique_ptr<android::Clock> mClock;
 
     // Semaphore to keep mThread synchronized.
     sem_t mSemaphore;
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index 33c98ff..eed57ef 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -47,6 +47,7 @@
 #include "Display/DisplayMap.h"
 #include "EventThread.h"
 #include "FrameRateOverrideMappings.h"
+#include "FrontEnd/LayerHandle.h"
 #include "OneShotTimer.h"
 #include "SurfaceFlingerProperties.h"
 #include "VSyncPredictor.h"
@@ -113,10 +114,18 @@
 }
 
 void Scheduler::registerDisplay(PhysicalDisplayId displayId, RefreshRateSelectorPtr selectorPtr) {
+    registerDisplayInternal(displayId, std::move(selectorPtr),
+                            std::make_shared<VsyncSchedule>(displayId, mFeatures));
+}
+
+void Scheduler::registerDisplayInternal(PhysicalDisplayId displayId,
+                                        RefreshRateSelectorPtr selectorPtr,
+                                        std::shared_ptr<VsyncSchedule> vsyncSchedule) {
     demoteLeaderDisplay();
 
     std::scoped_lock lock(mDisplayLock);
     mRefreshRateSelectors.emplace_or_replace(displayId, std::move(selectorPtr));
+    mVsyncSchedules.emplace_or_replace(displayId, std::move(vsyncSchedule));
 
     promoteLeaderDisplay();
 }
@@ -126,6 +135,7 @@
 
     std::scoped_lock lock(mDisplayLock);
     mRefreshRateSelectors.erase(displayId);
+    mVsyncSchedules.erase(displayId);
 
     // Do not allow removing the final display. Code in the scheduler expects
     // there to be at least one display. (This may be relaxed in the future with
@@ -153,52 +163,49 @@
     compositor.sample();
 }
 
-void Scheduler::createVsyncSchedule(FeatureFlags features) {
-    mVsyncSchedule.emplace(features);
+std::optional<Fps> Scheduler::getFrameRateOverride(uid_t uid) const {
+    std::scoped_lock lock(mDisplayLock);
+    return getFrameRateOverrideLocked(uid);
 }
 
-std::optional<Fps> Scheduler::getFrameRateOverride(uid_t uid) const {
+std::optional<Fps> Scheduler::getFrameRateOverrideLocked(uid_t uid) const {
     const bool supportsFrameRateOverrideByContent =
-            leaderSelectorPtr()->supportsAppFrameRateOverrideByContent();
+            leaderSelectorPtrLocked()->supportsAppFrameRateOverrideByContent();
     return mFrameRateOverrideMappings
             .getFrameRateOverrideForUid(uid, supportsFrameRateOverrideByContent);
 }
 
-bool Scheduler::isVsyncValid(TimePoint expectedVsyncTimestamp, uid_t uid) const {
+bool Scheduler::isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const {
     const auto frameRate = getFrameRateOverride(uid);
     if (!frameRate.has_value()) {
         return true;
     }
 
-    return mVsyncSchedule->getTracker().isVSyncInPhase(expectedVsyncTimestamp.ns(), *frameRate);
+    return isVsyncInPhase(expectedVsyncTime, *frameRate);
 }
 
-bool Scheduler::isVsyncInPhase(TimePoint timePoint, const Fps frameRate) const {
-    return mVsyncSchedule->getTracker().isVSyncInPhase(timePoint.ns(), frameRate);
+bool Scheduler::isVsyncInPhase(TimePoint expectedVsyncTime, Fps frameRate) const {
+    return getVsyncSchedule()->getTracker().isVSyncInPhase(expectedVsyncTime.ns(), frameRate);
 }
 
-impl::EventThread::ThrottleVsyncCallback Scheduler::makeThrottleVsyncCallback() const {
-    return [this](nsecs_t expectedVsyncTimestamp, uid_t uid) {
-        return !isVsyncValid(TimePoint::fromNs(expectedVsyncTimestamp), uid);
-    };
-}
+Fps Scheduler::getLeaderRenderFrameRate(uid_t uid) const {
+    std::scoped_lock lock(mDisplayLock);
+    ftl::FakeGuard guard(kMainThreadContext);
+    auto vsyncSchedule = getVsyncScheduleLocked();
 
-impl::EventThread::GetVsyncPeriodFunction Scheduler::makeGetVsyncPeriodFunction() const {
-    return [this](uid_t uid) {
-        const Fps refreshRate = leaderSelectorPtr()->getActiveMode().fps;
-        const nsecs_t currentPeriod = mVsyncSchedule->period().ns() ?: refreshRate.getPeriodNsecs();
+    const Fps refreshRate = leaderSelectorPtrLocked()->getActiveMode().fps;
+    const nsecs_t currentPeriod = vsyncSchedule->period().ns() ?: refreshRate.getPeriodNsecs();
 
-        const auto frameRate = getFrameRateOverride(uid);
-        if (!frameRate.has_value()) {
-            return currentPeriod;
-        }
+    const auto frameRate = getFrameRateOverrideLocked(uid);
+    if (!frameRate.has_value()) {
+        return Fps::fromPeriodNsecs(currentPeriod);
+    }
 
-        const auto divisor = RefreshRateSelector::getFrameRateDivisor(refreshRate, *frameRate);
-        if (divisor <= 1) {
-            return currentPeriod;
-        }
-        return currentPeriod * divisor;
-    };
+    const auto divisor = RefreshRateSelector::getFrameRateDivisor(refreshRate, *frameRate);
+    if (divisor <= 1) {
+        return Fps::fromPeriodNsecs(currentPeriod);
+    }
+    return Fps::fromPeriodNsecs(currentPeriod * divisor);
 }
 
 ConnectionHandle Scheduler::createEventThread(Cycle cycle,
@@ -206,9 +213,7 @@
                                               std::chrono::nanoseconds workDuration,
                                               std::chrono::nanoseconds readyDuration) {
     auto eventThread = std::make_unique<impl::EventThread>(cycle == Cycle::Render ? "app" : "appSf",
-                                                           *mVsyncSchedule, tokenManager,
-                                                           makeThrottleVsyncCallback(),
-                                                           makeGetVsyncPeriodFunction(),
+                                                           getVsyncSchedule(), *this, tokenManager,
                                                            workDuration, readyDuration);
 
     auto& handle = cycle == Cycle::Render ? mAppConnectionHandle : mSfConnectionHandle;
@@ -228,15 +233,21 @@
 }
 
 sp<EventThreadConnection> Scheduler::createConnectionInternal(
-        EventThread* eventThread, EventRegistrationFlags eventRegistration) {
-    return eventThread->createEventConnection([&] { resync(); }, eventRegistration);
+        EventThread* eventThread, EventRegistrationFlags eventRegistration,
+        const sp<IBinder>& layerHandle) {
+    int32_t layerId = static_cast<int32_t>(LayerHandle::getLayerId(layerHandle));
+    auto connection = eventThread->createEventConnection([&] { resync(); }, eventRegistration);
+    mLayerHistory.attachChoreographer(layerId, connection);
+    return connection;
 }
 
 sp<IDisplayEventConnection> Scheduler::createDisplayEventConnection(
-        ConnectionHandle handle, EventRegistrationFlags eventRegistration) {
+        ConnectionHandle handle, EventRegistrationFlags eventRegistration,
+        const sp<IBinder>& layerHandle) {
     std::lock_guard<std::mutex> lock(mConnectionsLock);
     RETURN_IF_INVALID_HANDLE(handle, nullptr);
-    return createConnectionInternal(mConnections[handle].thread.get(), eventRegistration);
+    return createConnectionInternal(mConnections[handle].thread.get(), eventRegistration,
+                                    layerHandle);
 }
 
 sp<EventThreadConnection> Scheduler::getEventConnection(ConnectionHandle handle) {
@@ -265,7 +276,6 @@
         thread = mConnections[handle].thread.get();
     }
     thread->onScreenAcquired();
-    mScreenAcquired = true;
 }
 
 void Scheduler::onScreenReleased(ConnectionHandle handle) {
@@ -276,7 +286,6 @@
         thread = mConnections[handle].thread.get();
     }
     thread->onScreenReleased();
-    mScreenAcquired = false;
 }
 
 void Scheduler::onFrameRateOverridesChanged(ConnectionHandle handle, PhysicalDisplayId displayId) {
@@ -387,48 +396,57 @@
     setDuration(config.sfWorkDuration);
 }
 
-void Scheduler::enableHardwareVsync() {
-    std::lock_guard<std::mutex> lock(mHWVsyncLock);
-    if (!mPrimaryHWVsyncEnabled && mHWVsyncAvailable) {
-        mVsyncSchedule->getTracker().resetModel();
-        mSchedulerCallback.setVsyncEnabled(true);
-        mPrimaryHWVsyncEnabled = true;
+void Scheduler::enableHardwareVsync(PhysicalDisplayId id) {
+    auto schedule = getVsyncSchedule(id);
+    schedule->enableHardwareVsync(mSchedulerCallback);
+}
+
+void Scheduler::disableHardwareVsync(PhysicalDisplayId id, bool disallow) {
+    auto schedule = getVsyncSchedule(id);
+    schedule->disableHardwareVsync(mSchedulerCallback, disallow);
+}
+
+void Scheduler::resyncAllToHardwareVsync(bool allowToEnable) {
+    std::scoped_lock lock(mDisplayLock);
+    ftl::FakeGuard guard(kMainThreadContext);
+
+    for (const auto& [id, _] : mRefreshRateSelectors) {
+        resyncToHardwareVsyncLocked(id, allowToEnable);
     }
 }
 
-void Scheduler::disableHardwareVsync(bool makeUnavailable) {
-    std::lock_guard<std::mutex> lock(mHWVsyncLock);
-    if (mPrimaryHWVsyncEnabled) {
-        mSchedulerCallback.setVsyncEnabled(false);
-        mPrimaryHWVsyncEnabled = false;
+void Scheduler::resyncToHardwareVsyncLocked(PhysicalDisplayId id, bool allowToEnable,
+                                            std::optional<Fps> refreshRate) {
+    if (!refreshRate) {
+        auto selectorPtr = mRefreshRateSelectors.get(id);
+        LOG_ALWAYS_FATAL_IF(!selectorPtr);
+        refreshRate = selectorPtr->get()->getActiveMode().modePtr->getFps();
     }
-    if (makeUnavailable) {
-        mHWVsyncAvailable = false;
+    auto schedule = getVsyncScheduleLocked(id);
+    if (allowToEnable) {
+        schedule->allowHardwareVsync();
+    } else if (!schedule->isHardwareVsyncAllowed()) {
+        // Hardware vsync is not currently allowed, so abort the resync
+        // attempt for now.
+        return;
     }
+
+    setVsyncPeriod(schedule, refreshRate->getPeriodNsecs(), false /* force */);
 }
 
-void Scheduler::resyncToHardwareVsync(bool makeAvailable, Fps refreshRate) {
-    {
-        std::lock_guard<std::mutex> lock(mHWVsyncLock);
-        if (makeAvailable) {
-            mHWVsyncAvailable = makeAvailable;
-        } else if (!mHWVsyncAvailable) {
-            // Hardware vsync is not currently available, so abort the resync
-            // attempt for now
-            return;
-        }
-    }
+void Scheduler::setRenderRate(PhysicalDisplayId id, Fps renderFrameRate) {
+    std::scoped_lock lock(mDisplayLock);
+    ftl::FakeGuard guard(kMainThreadContext);
 
-    setVsyncPeriod(refreshRate.getPeriodNsecs());
-}
-
-void Scheduler::setRenderRate(Fps renderFrameRate) {
-    const auto mode = leaderSelectorPtr()->getActiveMode();
+    auto selectorPtr = mRefreshRateSelectors.get(id);
+    LOG_ALWAYS_FATAL_IF(!selectorPtr);
+    const auto mode = selectorPtr->get()->getActiveMode();
 
     using fps_approx_ops::operator!=;
     LOG_ALWAYS_FATAL_IF(renderFrameRate != mode.fps,
-                        "Mismatch in render frame rates. Selector: %s, Scheduler: %s",
-                        to_string(mode.fps).c_str(), to_string(renderFrameRate).c_str());
+                        "Mismatch in render frame rates. Selector: %s, Scheduler: %s, Display: "
+                        "%" PRIu64,
+                        to_string(mode.fps).c_str(), to_string(renderFrameRate).c_str(), id.value);
 
     ALOGV("%s %s (%s)", __func__, to_string(mode.fps).c_str(),
           to_string(mode.modePtr->getFps()).c_str());
@@ -437,7 +455,7 @@
     LOG_ALWAYS_FATAL_IF(divisor == 0, "%s <> %s -- not divisors", to_string(mode.fps).c_str(),
                         to_string(mode.fps).c_str());
 
-    mVsyncSchedule->getTracker().setDivisor(static_cast<unsigned>(divisor));
+    getVsyncScheduleLocked(id)->getTracker().setDivisor(static_cast<unsigned>(divisor));
 }
 
 void Scheduler::resync() {
@@ -447,49 +465,43 @@
     const nsecs_t last = mLastResyncTime.exchange(now);
 
     if (now - last > kIgnoreDelay) {
-        const auto refreshRate = leaderSelectorPtr()->getActiveMode().modePtr->getFps();
-        resyncToHardwareVsync(false, refreshRate);
+        resyncAllToHardwareVsync(false /* allowToEnable */);
     }
 }
 
-void Scheduler::setVsyncPeriod(nsecs_t period) {
+void Scheduler::setVsyncPeriod(const std::shared_ptr<VsyncSchedule>& schedule, nsecs_t period,
+                               bool force) {
+    ALOGD("Scheduler::setVsyncPeriod");
     if (period <= 0) return;
 
-    std::lock_guard<std::mutex> lock(mHWVsyncLock);
-    mVsyncSchedule->getController().startPeriodTransition(period);
-
-    if (!mPrimaryHWVsyncEnabled) {
-        mVsyncSchedule->getTracker().resetModel();
-        mSchedulerCallback.setVsyncEnabled(true);
-        mPrimaryHWVsyncEnabled = true;
-    }
+    // TODO (b/266712910): The old code held mHWVsyncLock before calling
+    // startPeriodTransition. Move these into a new method on VsyncSchedule that
+    // encapsulates this behavior there and allows holding the lock the whole
+    // time.
+    schedule->getController().startPeriodTransition(period, force);
+    schedule->enableHardwareVsync(mSchedulerCallback);
 }
 
-void Scheduler::addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
-                                bool* periodFlushed) {
-    bool needsHwVsync = false;
-    *periodFlushed = false;
-    { // Scope for the lock
-        std::lock_guard<std::mutex> lock(mHWVsyncLock);
-        if (mPrimaryHWVsyncEnabled) {
-            needsHwVsync =
-                    mVsyncSchedule->getController().addHwVsyncTimestamp(timestamp, hwcVsyncPeriod,
-                                                                        periodFlushed);
-        }
+bool Scheduler::addResyncSample(PhysicalDisplayId id, nsecs_t timestamp,
+                                std::optional<nsecs_t> hwcVsyncPeriod) {
+    bool periodFlushed = false;
+    auto schedule = getVsyncSchedule(id);
+    if (schedule->getController().addHwVsyncTimestamp(timestamp, hwcVsyncPeriod, &periodFlushed)) {
+        schedule->enableHardwareVsync(mSchedulerCallback);
+    } else {
+        schedule->disableHardwareVsync(mSchedulerCallback, false /* disallow */);
     }
 
-    if (needsHwVsync) {
-        enableHardwareVsync();
-    } else {
-        disableHardwareVsync(false);
-    }
+    return periodFlushed;
 }
 
-void Scheduler::addPresentFence(std::shared_ptr<FenceTime> fence) {
-    if (mVsyncSchedule->getController().addPresentFence(std::move(fence))) {
-        enableHardwareVsync();
+void Scheduler::addPresentFence(PhysicalDisplayId id, std::shared_ptr<FenceTime> fence) {
+    auto schedule = getVsyncSchedule(id);
+    const bool needMoreSignals = schedule->getController().addPresentFence(std::move(fence));
+    if (needMoreSignals) {
+        schedule->enableHardwareVsync(mSchedulerCallback);
     } else {
-        disableHardwareVsync(false);
+        schedule->disableHardwareVsync(mSchedulerCallback, false /* disallow */);
     }
 }
 
@@ -541,12 +553,22 @@
     }
 }
 
-void Scheduler::setDisplayPowerMode(hal::PowerMode powerMode) {
-    {
+void Scheduler::setDisplayPowerMode(PhysicalDisplayId id, hal::PowerMode powerMode) {
+    const bool isLeader = [this, id]() REQUIRES(kMainThreadContext) {
+        ftl::FakeGuard guard(mDisplayLock);
+        return id == mLeaderDisplayId;
+    }();
+    if (isLeader) {
+        // TODO (b/255657128): This needs to be handled per display.
         std::lock_guard<std::mutex> lock(mPolicyLock);
         mPolicy.displayPowerMode = powerMode;
     }
-    mVsyncSchedule->getController().setDisplayPowerMode(powerMode);
+    {
+        std::scoped_lock lock(mDisplayLock);
+        auto vsyncSchedule = getVsyncScheduleLocked(id);
+        vsyncSchedule->getController().setDisplayPowerMode(powerMode);
+    }
+    if (!isLeader) return;
 
     if (mDisplayPowerTimer) {
         mDisplayPowerTimer->reset();
@@ -557,6 +579,24 @@
     mLayerHistory.clear();
 }
 
+std::shared_ptr<const VsyncSchedule> Scheduler::getVsyncSchedule(
+        std::optional<PhysicalDisplayId> idOpt) const {
+    std::scoped_lock lock(mDisplayLock);
+    return getVsyncScheduleLocked(idOpt);
+}
+
+std::shared_ptr<const VsyncSchedule> Scheduler::getVsyncScheduleLocked(
+        std::optional<PhysicalDisplayId> idOpt) const {
+    ftl::FakeGuard guard(kMainThreadContext);
+    if (!idOpt) {
+        LOG_ALWAYS_FATAL_IF(!mLeaderDisplayId, "Missing a leader!");
+        idOpt = mLeaderDisplayId;
+    }
+    auto scheduleOpt = mVsyncSchedules.get(*idOpt);
+    LOG_ALWAYS_FATAL_IF(!scheduleOpt);
+    return std::const_pointer_cast<const VsyncSchedule>(scheduleOpt->get());
+}
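The getVsyncSchedule/getVsyncScheduleLocked accessors added above use the usual const-delegation idiom: the mutable overload calls the const one and then strips constness off the returned shared_ptr. A small self-contained sketch of the pattern, using a hypothetical Registry/Schedule pair rather than the real types:

#include <map>
#include <memory>

struct Schedule { int periodNs = 0; };

// Hypothetical registry: the non-const getter reuses the const lookup and
// casts constness back off the shared_ptr, so the lookup logic lives in one place.
class Registry {
public:
    void add(int id, std::shared_ptr<Schedule> s) { mSchedules[id] = std::move(s); }

    std::shared_ptr<const Schedule> get(int id) const {
        const auto it = mSchedules.find(id);
        if (it == mSchedules.end()) return nullptr;
        return it->second;
    }

    std::shared_ptr<Schedule> get(int id) {
        return std::const_pointer_cast<Schedule>(
                static_cast<const Registry*>(this)->get(id));
    }

private:
    std::map<int, std::shared_ptr<Schedule>> mSchedules;
};

int main() {
    Registry registry;
    registry.add(1, std::make_shared<Schedule>());
    registry.get(1)->periodNs = 8'333'333;  // mutable access via the non-const overload
    return 0;
}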
+
 void Scheduler::kernelIdleTimerCallback(TimerState state) {
     ATRACE_INT("ExpiredKernelIdleTimer", static_cast<int>(state));
 
@@ -571,12 +611,17 @@
         // If we're not in performance mode then the kernel timer shouldn't do
         // anything, as the refresh rate during DPU power collapse will be the
         // same.
-        resyncToHardwareVsync(true /* makeAvailable */, refreshRate);
+        resyncAllToHardwareVsync(true /* allowToEnable */);
     } else if (state == TimerState::Expired && refreshRate <= FPS_THRESHOLD_FOR_KERNEL_TIMER) {
         // Disable HW VSYNC if the timer expired, as we don't need it enabled if
         // we're not pushing frames, and if we're in PERFORMANCE mode then we'll
         // need to update the VsyncController model anyway.
-        disableHardwareVsync(false /* makeUnavailable */);
+        std::scoped_lock lock(mDisplayLock);
+        ftl::FakeGuard guard(kMainThreadContext);
+        constexpr bool disallow = false;
+        for (auto& [_, schedule] : mVsyncSchedules) {
+            schedule->disableHardwareVsync(mSchedulerCallback, disallow);
+        }
     }
 
     mSchedulerCallback.kernelTimerChanged(state == TimerState::Expired);
@@ -630,19 +675,23 @@
 
     mFrameRateOverrideMappings.dump(dumper);
     dumper.eol();
-
-    {
-        utils::Dumper::Section section(dumper, "Hardware VSYNC"sv);
-
-        std::lock_guard lock(mHWVsyncLock);
-        dumper.dump("screenAcquired"sv, mScreenAcquired.load());
-        dumper.dump("hwVsyncAvailable"sv, mHWVsyncAvailable);
-        dumper.dump("hwVsyncEnabled"sv, mPrimaryHWVsyncEnabled);
-    }
 }
 
 void Scheduler::dumpVsync(std::string& out) const {
-    mVsyncSchedule->dump(out);
+    std::scoped_lock lock(mDisplayLock);
+    ftl::FakeGuard guard(kMainThreadContext);
+    if (mLeaderDisplayId) {
+        base::StringAppendF(&out, "VsyncSchedule for leader %s:\n",
+                            to_string(*mLeaderDisplayId).c_str());
+        getVsyncScheduleLocked()->dump(out);
+    }
+    for (auto& [id, vsyncSchedule] : mVsyncSchedules) {
+        if (id == mLeaderDisplayId) {
+            continue;
+        }
+        base::StringAppendF(&out, "VsyncSchedule for follower %s:\n", to_string(id).c_str());
+        vsyncSchedule->dump(out);
+    }
 }
 
 bool Scheduler::updateFrameRateOverrides(GlobalSignals consideredSignals, Fps displayRefreshRate) {
@@ -662,6 +711,7 @@
     mLeaderDisplayId = leaderIdOpt.value_or(mRefreshRateSelectors.begin()->first);
     ALOGI("Display %s is the leader", to_string(*mLeaderDisplayId).c_str());
 
+    auto vsyncSchedule = getVsyncScheduleLocked(*mLeaderDisplayId);
     if (const auto leaderPtr = leaderSelectorPtrLocked()) {
         leaderPtr->setIdleTimerCallbacks(
                 {.platform = {.onReset = [this] { idleTimerCallback(TimerState::Reset); },
@@ -671,6 +721,17 @@
                                     [this] { kernelIdleTimerCallback(TimerState::Expired); }}});
 
         leaderPtr->startIdleTimer();
+
+        const Fps refreshRate = leaderPtr->getActiveMode().modePtr->getFps();
+        setVsyncPeriod(vsyncSchedule, refreshRate.getPeriodNsecs(), true /* force */);
+    }
+
+    updateVsyncRegistration(vsyncSchedule->getDispatch());
+    {
+        std::lock_guard<std::mutex> lock(mConnectionsLock);
+        for (auto& [_, connection] : mConnections) {
+            connection.thread->onNewVsyncSchedule(vsyncSchedule);
+        }
     }
 }
 
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index e822448..8c8fc21 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -43,6 +43,7 @@
 #include "Display/DisplayModeRequest.h"
 #include "EventThread.h"
 #include "FrameRateOverrideMappings.h"
+#include "ISchedulerCallback.h"
 #include "LayerHistory.h"
 #include "MessageQueue.h"
 #include "OneShotTimer.h"
@@ -92,17 +93,7 @@
 
 using GlobalSignals = RefreshRateSelector::GlobalSignals;
 
-struct ISchedulerCallback {
-    virtual void setVsyncEnabled(bool) = 0;
-    virtual void requestDisplayModes(std::vector<display::DisplayModeRequest>) = 0;
-    virtual void kernelTimerChanged(bool expired) = 0;
-    virtual void triggerOnFrameRateOverridesChanged() = 0;
-
-protected:
-    ~ISchedulerCallback() = default;
-};
-
-class Scheduler : android::impl::MessageQueue {
+class Scheduler : android::impl::MessageQueue, public IEventThreadCallback {
     using Impl = android::impl::MessageQueue;
 
 public:
@@ -123,8 +114,6 @@
 
     void run();
 
-    void createVsyncSchedule(FeatureFlags);
-
     using Impl::initVsync;
 
     using Impl::getScheduledFrameTime;
@@ -158,7 +147,8 @@
                                        std::chrono::nanoseconds readyDuration);
 
     sp<IDisplayEventConnection> createDisplayEventConnection(
-            ConnectionHandle, EventRegistrationFlags eventRegistration = {});
+            ConnectionHandle, EventRegistrationFlags eventRegistration = {},
+            const sp<IBinder>& layerHandle = nullptr);
 
     sp<EventThreadConnection> getEventConnection(ConnectionHandle);
 
@@ -177,9 +167,21 @@
 
     const VsyncModulator& vsyncModulator() const { return *mVsyncModulator; }
 
+    // In some cases, we should only modulate for the leader display. In those
+    // cases, the caller should pass in the relevant display, and the method
+    // will no-op if it's not the leader. Other cases are not specific to a
+    // display.
     template <typename... Args,
               typename Handler = std::optional<VsyncConfig> (VsyncModulator::*)(Args...)>
-    void modulateVsync(Handler handler, Args... args) {
+    void modulateVsync(std::optional<PhysicalDisplayId> id, Handler handler, Args... args) {
+        if (id) {
+            std::scoped_lock lock(mDisplayLock);
+            ftl::FakeGuard guard(kMainThreadContext);
+            if (id != mLeaderDisplayId) {
+                return;
+            }
+        }
+
         if (const auto config = (*mVsyncModulator.*handler)(args...)) {
             setVsyncConfig(*config, getLeaderVsyncPeriod());
         }
@@ -188,24 +190,32 @@
     void setVsyncConfigSet(const VsyncConfigSet&, Period vsyncPeriod);
 
     // Sets the render rate for the scheduler to run at.
-    void setRenderRate(Fps);
+    void setRenderRate(PhysicalDisplayId, Fps);
 
-    void enableHardwareVsync();
-    void disableHardwareVsync(bool makeUnavailable);
+    void enableHardwareVsync(PhysicalDisplayId);
+    void disableHardwareVsync(PhysicalDisplayId, bool disallow);
 
     // Resyncs the scheduler to hardware vsync.
-    // If makeAvailable is true, then hardware vsync will be turned on.
+    // If allowToEnable is true, then hardware vsync will be turned on.
     // Otherwise, if hardware vsync is not already enabled then this method will
     // no-op.
-    void resyncToHardwareVsync(bool makeAvailable, Fps refreshRate);
+    // If refreshRate is nullopt, use the existing refresh rate of the display.
+    void resyncToHardwareVsync(PhysicalDisplayId id, bool allowToEnable,
+                               std::optional<Fps> refreshRate = std::nullopt)
+            EXCLUDES(mDisplayLock) {
+        std::scoped_lock lock(mDisplayLock);
+        ftl::FakeGuard guard(kMainThreadContext);
+        resyncToHardwareVsyncLocked(id, allowToEnable, refreshRate);
+    }
     void resync() EXCLUDES(mDisplayLock);
     void forceNextResync() { mLastResyncTime = 0; }
 
-    // Passes a vsync sample to VsyncController. periodFlushed will be true if
-    // VsyncController detected that the vsync period changed, and false otherwise.
-    void addResyncSample(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
-                         bool* periodFlushed);
-    void addPresentFence(std::shared_ptr<FenceTime>);
+    // Passes a vsync sample to VsyncController. Returns true if
+    // VsyncController detected that the vsync period changed and false
+    // otherwise.
+    bool addResyncSample(PhysicalDisplayId, nsecs_t timestamp,
+                         std::optional<nsecs_t> hwcVsyncPeriod);
+    void addPresentFence(PhysicalDisplayId, std::shared_ptr<FenceTime>) EXCLUDES(mDisplayLock);
 
     // Layers are registered on creation, and unregistered when the weak reference expires.
     void registerLayer(Layer*);
@@ -223,20 +233,22 @@
     // Indicates that touch interaction is taking place.
     void onTouchHint();
 
-    void setDisplayPowerMode(hal::PowerMode powerMode);
+    void setDisplayPowerMode(PhysicalDisplayId, hal::PowerMode powerMode)
+            REQUIRES(kMainThreadContext);
 
-    VsyncSchedule& getVsyncSchedule() { return *mVsyncSchedule; }
+    std::shared_ptr<const VsyncSchedule> getVsyncSchedule(
+            std::optional<PhysicalDisplayId> idOpt = std::nullopt) const EXCLUDES(mDisplayLock);
+    std::shared_ptr<VsyncSchedule> getVsyncSchedule(
+            std::optional<PhysicalDisplayId> idOpt = std::nullopt) EXCLUDES(mDisplayLock) {
+        return std::const_pointer_cast<VsyncSchedule>(
+                static_cast<const Scheduler*>(this)->getVsyncSchedule(idOpt));
+    }
 
-    // Returns true if a given vsync timestamp is considered valid vsync
-    // for a given uid
-    bool isVsyncValid(TimePoint expectedVsyncTimestamp, uid_t uid) const;
-
-    // Checks if a vsync timestamp is in phase for a frame rate
-    bool isVsyncInPhase(TimePoint timePoint, const Fps frameRate) const;
+    bool isVsyncInPhase(TimePoint expectedVsyncTime, Fps frameRate) const;
 
     void dump(utils::Dumper&) const;
     void dump(ConnectionHandle, std::string&) const;
-    void dumpVsync(std::string&) const;
+    void dumpVsync(std::string&) const EXCLUDES(mDisplayLock);
 
     // Returns the preferred refresh rate and frame rate for the leader display.
     FrameRateMode getPreferredDisplayMode();
@@ -274,6 +286,10 @@
         return mLayerHistory.getLayerFramerate(now, id);
     }
 
+    // IEventThreadCallback overrides:
+    bool isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const override;
+    Fps getLeaderRenderFrameRate(uid_t uid) const override;
+
 private:
     friend class TestableScheduler;
 
@@ -287,7 +303,8 @@
     // Create a connection on the given EventThread.
     ConnectionHandle createConnection(std::unique_ptr<EventThread>);
     sp<EventThreadConnection> createConnectionInternal(
-            EventThread*, EventRegistrationFlags eventRegistration = {});
+            EventThread*, EventRegistrationFlags eventRegistration = {},
+            const sp<IBinder>& layerHandle = nullptr);
 
     // Update feature state machine to given state when corresponding timer resets or expires.
     void kernelIdleTimerCallback(TimerState) EXCLUDES(mDisplayLock);
@@ -295,7 +312,12 @@
     void touchTimerCallback(TimerState);
     void displayPowerTimerCallback(TimerState);
 
-    void setVsyncPeriod(nsecs_t period);
+    void resyncToHardwareVsyncLocked(PhysicalDisplayId, bool allowToEnable,
+                                     std::optional<Fps> refreshRate = std::nullopt)
+            REQUIRES(kMainThreadContext, mDisplayLock);
+    void resyncAllToHardwareVsync(bool allowToEnable) EXCLUDES(mDisplayLock);
+    void setVsyncPeriod(const std::shared_ptr<VsyncSchedule>&, nsecs_t period, bool force)
+            REQUIRES(mDisplayLock);
     void setVsyncConfig(const VsyncConfig&, Period vsyncPeriod);
 
     // Chooses a leader among the registered displays, unless `leaderIdOpt` is specified. The new
@@ -307,6 +329,12 @@
     // caller on the main thread to avoid deadlock, since the timer thread locks it before exit.
     void demoteLeaderDisplay() REQUIRES(kMainThreadContext) EXCLUDES(mDisplayLock, mPolicyLock);
 
+    void registerDisplayInternal(PhysicalDisplayId, RefreshRateSelectorPtr,
+                                 std::shared_ptr<VsyncSchedule>) REQUIRES(kMainThreadContext)
+            EXCLUDES(mDisplayLock);
+
+    std::optional<Fps> getFrameRateOverrideLocked(uid_t) const REQUIRES(mDisplayLock);
+
     struct Policy;
 
     // Sets the S state of the policy to the T value under mPolicyLock, and chooses a display mode
@@ -344,9 +372,6 @@
 
     void dispatchCachedReportedMode() REQUIRES(mPolicyLock) EXCLUDES(mDisplayLock);
 
-    android::impl::EventThread::ThrottleVsyncCallback makeThrottleVsyncCallback() const;
-    android::impl::EventThread::GetVsyncPeriodFunction makeGetVsyncPeriodFunction() const;
-
     // Stores EventThread associated with a given VSyncSource, and an initial EventThreadConnection.
     struct Connection {
         sp<EventThreadConnection> connection;
@@ -360,14 +385,9 @@
     ConnectionHandle mAppConnectionHandle;
     ConnectionHandle mSfConnectionHandle;
 
-    mutable std::mutex mHWVsyncLock;
-    bool mPrimaryHWVsyncEnabled GUARDED_BY(mHWVsyncLock) = false;
-    bool mHWVsyncAvailable GUARDED_BY(mHWVsyncLock) = false;
-
     std::atomic<nsecs_t> mLastResyncTime = 0;
 
     const FeatureFlags mFeatures;
-    std::optional<VsyncSchedule> mVsyncSchedule;
 
     // Shifts the VSYNC phase during certain transactions and refresh rate changes.
     const sp<VsyncModulator> mVsyncModulator;
@@ -392,6 +412,10 @@
     display::PhysicalDisplayMap<PhysicalDisplayId, RefreshRateSelectorPtr> mRefreshRateSelectors
             GUARDED_BY(mDisplayLock) GUARDED_BY(kMainThreadContext);
 
+    // TODO (b/266715559): Store in the same map as mRefreshRateSelectors.
+    display::PhysicalDisplayMap<PhysicalDisplayId, std::shared_ptr<VsyncSchedule>> mVsyncSchedules
+            GUARDED_BY(mDisplayLock) GUARDED_BY(kMainThreadContext);
+
     ftl::Optional<PhysicalDisplayId> mLeaderDisplayId GUARDED_BY(mDisplayLock)
             GUARDED_BY(kMainThreadContext);
 
@@ -411,6 +435,14 @@
                 .value_or(std::cref(noLeader));
     }
 
+    std::shared_ptr<const VsyncSchedule> getVsyncScheduleLocked(
+            std::optional<PhysicalDisplayId> idOpt = std::nullopt) const REQUIRES(mDisplayLock);
+    std::shared_ptr<VsyncSchedule> getVsyncScheduleLocked(
+            std::optional<PhysicalDisplayId> idOpt = std::nullopt) REQUIRES(mDisplayLock) {
+        return std::const_pointer_cast<VsyncSchedule>(
+                static_cast<const Scheduler*>(this)->getVsyncScheduleLocked(idOpt));
+    }
+
     struct Policy {
         // Policy for choosing the display mode.
         LayerHistory::Summary contentRequirements;
@@ -437,9 +469,6 @@
     static constexpr std::chrono::nanoseconds MAX_VSYNC_APPLIED_TIME = 200ms;
 
     FrameRateOverrideMappings mFrameRateOverrideMappings;
-
-    // Keeps track of whether the screen is acquired for debug
-    std::atomic<bool> mScreenAcquired = false;
 };
 
 } // namespace scheduler
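modulateVsync in Scheduler.h now takes an optional display id and returns early for non-leader displays before invoking the VsyncModulator handler. A standalone sketch of that calling pattern with toy types (Modulator and MiniScheduler are stand-ins, not the real classes):

#include <cstdio>
#include <optional>

struct Config { int offsetNs; };

struct Modulator {
    std::optional<Config> onRefreshRateChangeInitiated() { return Config{1000}; }
    std::optional<Config> setTransactionSchedule(int schedule) { return Config{schedule * 10}; }
};

struct MiniScheduler {
    int leaderId = 1;
    Modulator modulator;

    template <typename... Args,
              typename Handler = std::optional<Config> (Modulator::*)(Args...)>
    void modulateVsync(std::optional<int> id, Handler handler, Args... args) {
        if (id && *id != leaderId) return;  // only the leader display modulates
        if (const auto config = (modulator.*handler)(args...)) {
            std::printf("applying offset %d\n", config->offsetNs);
        }
    }
};

int main() {
    MiniScheduler s;
    s.modulateVsync(1, &Modulator::onRefreshRateChangeInitiated);          // leader: applies
    s.modulateVsync(2, &Modulator::onRefreshRateChangeInitiated);          // not leader: no-op
    s.modulateVsync(std::nullopt, &Modulator::setTransactionSchedule, 3);  // not display-specific
    return 0;
}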
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatch.h b/services/surfaceflinger/Scheduler/VSyncDispatch.h
index 9520131..77875e3 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatch.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatch.h
@@ -161,7 +161,8 @@
  */
 class VSyncCallbackRegistration {
 public:
-    VSyncCallbackRegistration(VSyncDispatch&, VSyncDispatch::Callback, std::string callbackName);
+    VSyncCallbackRegistration(std::shared_ptr<VSyncDispatch>, VSyncDispatch::Callback,
+                              std::string callbackName);
     ~VSyncCallbackRegistration();
 
     VSyncCallbackRegistration(VSyncCallbackRegistration&&);
@@ -177,7 +178,7 @@
     CancelResult cancel();
 
 private:
-    std::reference_wrapper<VSyncDispatch> mDispatch;
+    std::shared_ptr<VSyncDispatch> mDispatch;
     VSyncDispatch::CallbackToken mToken;
     bool mValidToken;
 };
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
index 73d52cf..26389eb 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
@@ -215,10 +215,10 @@
 }
 
 VSyncDispatchTimerQueue::VSyncDispatchTimerQueue(std::unique_ptr<TimeKeeper> tk,
-                                                 VSyncTracker& tracker, nsecs_t timerSlack,
-                                                 nsecs_t minVsyncDistance)
+                                                 VsyncSchedule::TrackerPtr tracker,
+                                                 nsecs_t timerSlack, nsecs_t minVsyncDistance)
       : mTimeKeeper(std::move(tk)),
-        mTracker(tracker),
+        mTracker(std::move(tracker)),
         mTimerSlack(timerSlack),
         mMinVsyncDistance(minVsyncDistance) {}
 
@@ -255,7 +255,7 @@
         }
 
         if (it != skipUpdateIt) {
-            callback->update(mTracker, now);
+            callback->update(*mTracker, now);
         }
         auto const wakeupTime = *callback->wakeupTime();
         if (!min || *min > wakeupTime) {
@@ -365,10 +365,10 @@
     auto const rearmImminent = now > mIntendedWakeupTime;
     if (CC_UNLIKELY(rearmImminent)) {
         callback->addPendingWorkloadUpdate(scheduleTiming);
-        return getExpectedCallbackTime(mTracker, now, scheduleTiming);
+        return getExpectedCallbackTime(*mTracker, now, scheduleTiming);
     }
 
-    const ScheduleResult result = callback->schedule(scheduleTiming, mTracker, now);
+    const ScheduleResult result = callback->schedule(scheduleTiming, *mTracker, now);
     if (!result.has_value()) {
         return {};
     }
@@ -434,15 +434,15 @@
     }
 }
 
-VSyncCallbackRegistration::VSyncCallbackRegistration(VSyncDispatch& dispatch,
+VSyncCallbackRegistration::VSyncCallbackRegistration(std::shared_ptr<VSyncDispatch> dispatch,
                                                      VSyncDispatch::Callback callback,
                                                      std::string callbackName)
-      : mDispatch(dispatch),
-        mToken(dispatch.registerCallback(std::move(callback), std::move(callbackName))),
+      : mDispatch(std::move(dispatch)),
+        mToken(mDispatch->registerCallback(std::move(callback), std::move(callbackName))),
         mValidToken(true) {}
 
 VSyncCallbackRegistration::VSyncCallbackRegistration(VSyncCallbackRegistration&& other)
-      : mDispatch(other.mDispatch),
+      : mDispatch(std::move(other.mDispatch)),
         mToken(std::move(other.mToken)),
         mValidToken(std::move(other.mValidToken)) {
     other.mValidToken = false;
@@ -457,28 +457,28 @@
 }
 
 VSyncCallbackRegistration::~VSyncCallbackRegistration() {
-    if (mValidToken) mDispatch.get().unregisterCallback(mToken);
+    if (mValidToken) mDispatch->unregisterCallback(mToken);
 }
 
 ScheduleResult VSyncCallbackRegistration::schedule(VSyncDispatch::ScheduleTiming scheduleTiming) {
     if (!mValidToken) {
         return std::nullopt;
     }
-    return mDispatch.get().schedule(mToken, scheduleTiming);
+    return mDispatch->schedule(mToken, scheduleTiming);
 }
 
 ScheduleResult VSyncCallbackRegistration::update(VSyncDispatch::ScheduleTiming scheduleTiming) {
     if (!mValidToken) {
         return std::nullopt;
     }
-    return mDispatch.get().update(mToken, scheduleTiming);
+    return mDispatch->update(mToken, scheduleTiming);
 }
 
 CancelResult VSyncCallbackRegistration::cancel() {
     if (!mValidToken) {
         return CancelResult::Error;
     }
-    return mDispatch.get().cancel(mToken);
+    return mDispatch->cancel(mToken);
 }
 
 } // namespace android::scheduler
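VSyncCallbackRegistration now stores a shared_ptr<VSyncDispatch> instead of a reference_wrapper, which suggests a registration must stay valid even after the schedule that created the dispatch is replaced or torn down. A toy illustration of that ownership model; Dispatch and Registration here are simplified stand-ins, not the real classes:

#include <functional>
#include <memory>
#include <vector>

// Toy dispatcher and RAII registration: the registration co-owns the dispatcher,
// so it remains safe to unregister even after the original owner lets go.
class Dispatch {
public:
    size_t registerCallback(std::function<void()> cb) {
        mCallbacks.push_back(std::move(cb));
        return mCallbacks.size() - 1;
    }
    void unregisterCallback(size_t token) { mCallbacks[token] = nullptr; }

private:
    std::vector<std::function<void()>> mCallbacks;
};

class Registration {
public:
    Registration(std::shared_ptr<Dispatch> dispatch, std::function<void()> cb)
          : mDispatch(std::move(dispatch)),
            mToken(mDispatch->registerCallback(std::move(cb))) {}
    ~Registration() { mDispatch->unregisterCallback(mToken); }
    Registration(const Registration&) = delete;
    Registration& operator=(const Registration&) = delete;

private:
    std::shared_ptr<Dispatch> mDispatch;
    size_t mToken;
};

int main() {
    auto owner = std::make_shared<Dispatch>();
    Registration reg(owner, [] { /* vsync callback body */ });
    owner.reset();  // the original owner goes away, e.g. a display is unregistered
    // reg still holds a strong reference, so its destructor can unregister safely.
    return 0;
}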
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
index c3af136..6499d69 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.h
@@ -26,11 +26,11 @@
 #include <android-base/thread_annotations.h>
 
 #include "VSyncDispatch.h"
+#include "VsyncSchedule.h"
 
 namespace android::scheduler {
 
 class TimeKeeper;
-class VSyncTracker;
 
 // VSyncDispatchTimerQueueEntry is a helper class representing internal state for each entry in
 // VSyncDispatchTimerQueue hoisted to public for unit testing.
@@ -120,8 +120,8 @@
     //                                  should be grouped into one wakeup.
     // \param[in] minVsyncDistance      The minimum distance between two vsync estimates before the
     //                                  vsyncs are considered the same vsync event.
-    VSyncDispatchTimerQueue(std::unique_ptr<TimeKeeper>, VSyncTracker&, nsecs_t timerSlack,
-                            nsecs_t minVsyncDistance);
+    VSyncDispatchTimerQueue(std::unique_ptr<TimeKeeper>, VsyncSchedule::TrackerPtr,
+                            nsecs_t timerSlack, nsecs_t minVsyncDistance);
     ~VSyncDispatchTimerQueue();
 
     CallbackToken registerCallback(Callback, std::string callbackName) final;
@@ -148,7 +148,7 @@
 
     static constexpr nsecs_t kInvalidTime = std::numeric_limits<int64_t>::max();
     std::unique_ptr<TimeKeeper> const mTimeKeeper;
-    VSyncTracker& mTracker;
+    VsyncSchedule::TrackerPtr mTracker;
     nsecs_t const mTimerSlack;
     nsecs_t const mMinVsyncDistance;
 
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
index 02e12fd..a3b8a56 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -31,6 +31,7 @@
 #include <android-base/stringprintf.h>
 #include <cutils/compiler.h>
 #include <cutils/properties.h>
+#include <gui/TraceUtils.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
@@ -40,14 +41,16 @@
 namespace android::scheduler {
 
 using base::StringAppendF;
+using base::StringPrintf;
 
 static auto constexpr kMaxPercent = 100u;
 
 VSyncPredictor::~VSyncPredictor() = default;
 
-VSyncPredictor::VSyncPredictor(nsecs_t idealPeriod, size_t historySize,
+VSyncPredictor::VSyncPredictor(std::string name, nsecs_t idealPeriod, size_t historySize,
                                size_t minimumSamplesForPrediction, uint32_t outlierTolerancePercent)
-      : mTraceOn(property_get_bool("debug.sf.vsp_trace", false)),
+      : mName(name),
+        mTraceOn(property_get_bool("debug.sf.vsp_trace", false)),
         kHistorySize(historySize),
         kMinimumSamplesForPrediction(minimumSamplesForPrediction),
         kOutlierTolerancePercent(std::min(outlierTolerancePercent, kMaxPercent)),
@@ -57,12 +60,14 @@
 
 inline void VSyncPredictor::traceInt64If(const char* name, int64_t value) const {
     if (CC_UNLIKELY(mTraceOn)) {
-        ATRACE_INT64(name, value);
+        traceInt64(name, value);
     }
 }
 
 inline void VSyncPredictor::traceInt64(const char* name, int64_t value) const {
-    ATRACE_INT64(name, value);
+    // TODO (b/266817103): Pass in PhysicalDisplayId and use ftl::Concat to
+    // avoid unnecessary allocations.
+    ATRACE_INT64(StringPrintf("%s %s", name, mName.c_str()).c_str(), value);
 }
 
 inline size_t VSyncPredictor::next(size_t i) const {
@@ -214,8 +219,8 @@
 
     it->second = {anticipatedPeriod, intercept};
 
-    ALOGV("model update ts: %" PRId64 " slope: %" PRId64 " intercept: %" PRId64, timestamp,
-          anticipatedPeriod, intercept);
+    ALOGV("model update ts %s: %" PRId64 " slope: %" PRId64 " intercept: %" PRId64, mName.c_str(),
+          timestamp, anticipatedPeriod, intercept);
     return true;
 }
 
@@ -327,7 +332,7 @@
 }
 
 void VSyncPredictor::setDivisor(unsigned divisor) {
-    ALOGV("%s: %d", __func__, divisor);
+    ALOGV("%s %s: %d", __func__, mName.c_str(), divisor);
     std::lock_guard lock(mMutex);
     mDivisor = divisor;
 }
@@ -343,7 +348,7 @@
 }
 
 void VSyncPredictor::setPeriod(nsecs_t period) {
-    ATRACE_CALL();
+    ATRACE_FORMAT("%s %s", __func__, mName.c_str());
     traceInt64("VSP-setPeriod", period);
 
     std::lock_guard lock(mMutex);
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
index 305cdb0..1ded54f 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.h
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -29,14 +29,15 @@
 class VSyncPredictor : public VSyncTracker {
 public:
     /*
+     * \param [in] name The name of the display this corresponds to.
      * \param [in] idealPeriod  The initial ideal period to use.
      * \param [in] historySize  The internal amount of entries to store in the model.
      * \param [in] minimumSamplesForPrediction The minimum number of samples to collect before
      * predicting. \param [in] outlierTolerancePercent a number 0 to 100 that will be used to filter
      * samples that fall outlierTolerancePercent from an anticipated vsync event.
      */
-    VSyncPredictor(nsecs_t idealPeriod, size_t historySize, size_t minimumSamplesForPrediction,
-                   uint32_t outlierTolerancePercent);
+    VSyncPredictor(std::string name, nsecs_t idealPeriod, size_t historySize,
+                   size_t minimumSamplesForPrediction, uint32_t outlierTolerancePercent);
     ~VSyncPredictor();
 
     bool addVsyncTimestamp(nsecs_t timestamp) final EXCLUDES(mMutex);
@@ -76,6 +77,8 @@
     VSyncPredictor& operator=(VSyncPredictor const&) = delete;
     void clearTimestamps() REQUIRES(mMutex);
 
+    const std::string mName;
+
     inline void traceInt64If(const char* name, int64_t value) const;
     inline void traceInt64(const char* name, int64_t value) const;
     bool const mTraceOn;
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.cpp b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
index b5f212e..a831f66 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
@@ -21,6 +21,7 @@
 
 #include <assert.h>
 #include <cutils/properties.h>
+#include <gui/TraceUtils.h>
 #include <log/log.h>
 #include <utils/Trace.h>
 
@@ -32,6 +33,7 @@
 namespace android::scheduler {
 
 using base::StringAppendF;
+using base::StringPrintf;
 
 VsyncController::~VsyncController() = default;
 
@@ -39,12 +41,12 @@
     return systemTime(SYSTEM_TIME_MONOTONIC);
 }
 
-VSyncReactor::VSyncReactor(std::unique_ptr<Clock> clock, VSyncTracker& tracker,
+VSyncReactor::VSyncReactor(std::string name, std::unique_ptr<Clock> clock, VSyncTracker& tracker,
                            size_t pendingFenceLimit, bool supportKernelIdleTimer)
-      : mClock(std::move(clock)),
+      : mName(name),
+        mClock(std::move(clock)),
         mTracker(tracker),
         mPendingLimit(pendingFenceLimit),
-        // TODO(adyabr): change mSupportKernelIdleTimer when the active display changes
         mSupportKernelIdleTimer(supportKernelIdleTimer) {}
 
 VSyncReactor::~VSyncReactor() = default;
@@ -114,7 +116,7 @@
 }
 
 void VSyncReactor::startPeriodTransitionInternal(nsecs_t newPeriod) {
-    ATRACE_CALL();
+    ATRACE_FORMAT("%s %s", __func__, mName.c_str());
     mPeriodConfirmationInProgress = true;
     mPeriodTransitioningTo = newPeriod;
     mMoreSamplesNeeded = true;
@@ -122,18 +124,20 @@
 }
 
 void VSyncReactor::endPeriodTransition() {
-    ATRACE_CALL();
+    ATRACE_FORMAT("%s %s", __func__, mName.c_str());
     mPeriodTransitioningTo.reset();
     mPeriodConfirmationInProgress = false;
     mLastHwVsync.reset();
 }
 
-void VSyncReactor::startPeriodTransition(nsecs_t period) {
-    ATRACE_INT64("VSR-startPeriodTransition", period);
+void VSyncReactor::startPeriodTransition(nsecs_t period, bool force) {
+    // TODO (b/266817103): Pass in PhysicalDisplayId and use ftl::Concat to
+    // avoid unnecessary allocations.
+    ATRACE_INT64(StringPrintf("VSR-startPeriodTransition %s", mName.c_str()).c_str(), period);
     std::lock_guard lock(mMutex);
     mLastHwVsync.reset();
 
-    if (!mSupportKernelIdleTimer && period == mTracker.currentPeriod()) {
+    if (!mSupportKernelIdleTimer && period == mTracker.currentPeriod() && !force) {
         endPeriodTransition();
         setIgnorePresentFencesInternal(false);
         mMoreSamplesNeeded = false;
@@ -181,7 +185,7 @@
 
     std::lock_guard lock(mMutex);
     if (periodConfirmed(timestamp, hwcVsyncPeriod)) {
-        ATRACE_NAME("VSR: period confirmed");
+        ATRACE_FORMAT("VSR %s: period confirmed", mName.c_str());
         if (mPeriodTransitioningTo) {
             mTracker.setPeriod(*mPeriodTransitioningTo);
             *periodFlushed = true;
@@ -195,12 +199,12 @@
         endPeriodTransition();
         mMoreSamplesNeeded = mTracker.needsMoreSamples();
     } else if (mPeriodConfirmationInProgress) {
-        ATRACE_NAME("VSR: still confirming period");
+        ATRACE_FORMAT("VSR %s: still confirming period", mName.c_str());
         mLastHwVsync = timestamp;
         mMoreSamplesNeeded = true;
         *periodFlushed = false;
     } else {
-        ATRACE_NAME("VSR: adding sample");
+        ATRACE_FORMAT("VSR %s: adding sample", mName.c_str());
         *periodFlushed = false;
         mTracker.addVsyncTimestamp(timestamp);
         mMoreSamplesNeeded = mTracker.needsMoreSamples();
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.h b/services/surfaceflinger/Scheduler/VSyncReactor.h
index 4501487..fd9ca42 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.h
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.h
@@ -22,6 +22,7 @@
 #include <vector>
 
 #include <android-base/thread_annotations.h>
+#include <ui/DisplayId.h>
 #include <ui/FenceTime.h>
 
 #include <scheduler/TimeKeeper.h>
@@ -37,14 +38,14 @@
 // TODO (b/145217110): consider renaming.
 class VSyncReactor : public VsyncController {
 public:
-    VSyncReactor(std::unique_ptr<Clock> clock, VSyncTracker& tracker, size_t pendingFenceLimit,
-                 bool supportKernelIdleTimer);
+    VSyncReactor(std::string name, std::unique_ptr<Clock> clock, VSyncTracker& tracker,
+                 size_t pendingFenceLimit, bool supportKernelIdleTimer);
     ~VSyncReactor();
 
     bool addPresentFence(std::shared_ptr<FenceTime>) final;
     void setIgnorePresentFences(bool ignore) final;
 
-    void startPeriodTransition(nsecs_t period) final;
+    void startPeriodTransition(nsecs_t period, bool force) final;
 
     bool addHwVsyncTimestamp(nsecs_t timestamp, std::optional<nsecs_t> hwcVsyncPeriod,
                              bool* periodFlushed) final;
@@ -61,6 +62,7 @@
     bool periodConfirmed(nsecs_t vsync_timestamp, std::optional<nsecs_t> hwcVsyncPeriod)
             REQUIRES(mMutex);
 
+    const std::string mName;
     std::unique_ptr<Clock> const mClock;
     VSyncTracker& mTracker;
     size_t const mPendingLimit;
diff --git a/services/surfaceflinger/Scheduler/VsyncController.h b/services/surfaceflinger/Scheduler/VsyncController.h
index 726a420..9177899 100644
--- a/services/surfaceflinger/Scheduler/VsyncController.h
+++ b/services/surfaceflinger/Scheduler/VsyncController.h
@@ -63,8 +63,9 @@
      * itself. The controller will end the period transition internally.
      *
      * \param [in] period   The period that the system is changing into.
+     * \param [in] force    True to recalibrate even if period matches the existing period.
      */
-    virtual void startPeriodTransition(nsecs_t period) = 0;
+    virtual void startPeriodTransition(nsecs_t period, bool force) = 0;
 
     /*
      * Tells the tracker to stop using present fences to get a vsync signal.
diff --git a/services/surfaceflinger/Scheduler/VsyncSchedule.cpp b/services/surfaceflinger/Scheduler/VsyncSchedule.cpp
index 95bc31f..951c1ec 100644
--- a/services/surfaceflinger/Scheduler/VsyncSchedule.cpp
+++ b/services/surfaceflinger/Scheduler/VsyncSchedule.cpp
@@ -21,6 +21,9 @@
 
 #include "VsyncSchedule.h"
 
+#include "ISchedulerCallback.h"
+#include "Scheduler.h"
+#include "Utils/Dumper.h"
 #include "VSyncDispatchTimerQueue.h"
 #include "VSyncPredictor.h"
 #include "VSyncReactor.h"
@@ -39,8 +42,8 @@
     }
 
 public:
-    explicit PredictedVsyncTracer(VsyncDispatch& dispatch)
-          : mRegistration(dispatch, makeVsyncCallback(), __func__) {
+    explicit PredictedVsyncTracer(std::shared_ptr<VsyncDispatch> dispatch)
+          : mRegistration(std::move(dispatch), makeVsyncCallback(), __func__) {
         schedule();
     }
 
@@ -51,21 +54,23 @@
     VSyncCallbackRegistration mRegistration;
 };
 
-VsyncSchedule::VsyncSchedule(FeatureFlags features)
-      : mTracker(createTracker()),
-        mDispatch(createDispatch(*mTracker)),
-        mController(createController(*mTracker, features)) {
+VsyncSchedule::VsyncSchedule(PhysicalDisplayId id, FeatureFlags features)
+      : mId(id),
+        mTracker(createTracker(id)),
+        mDispatch(createDispatch(mTracker)),
+        mController(createController(id, *mTracker, features)) {
     if (features.test(Feature::kTracePredictedVsync)) {
-        mTracer = std::make_unique<PredictedVsyncTracer>(*mDispatch);
+        mTracer = std::make_unique<PredictedVsyncTracer>(mDispatch);
     }
 }
 
-VsyncSchedule::VsyncSchedule(TrackerPtr tracker, DispatchPtr dispatch, ControllerPtr controller)
-      : mTracker(std::move(tracker)),
+VsyncSchedule::VsyncSchedule(PhysicalDisplayId id, TrackerPtr tracker, DispatchPtr dispatch,
+                             ControllerPtr controller)
+      : mId(id),
+        mTracker(std::move(tracker)),
         mDispatch(std::move(dispatch)),
         mController(std::move(controller)) {}
 
-VsyncSchedule::VsyncSchedule(VsyncSchedule&&) = default;
 VsyncSchedule::~VsyncSchedule() = default;
 
 Period VsyncSchedule::period() const {
@@ -77,6 +82,13 @@
 }
 
 void VsyncSchedule::dump(std::string& out) const {
+    utils::Dumper dumper(out);
+    {
+        std::lock_guard<std::mutex> lock(mHwVsyncLock);
+        dumper.dump("hwVsyncState", ftl::enum_string(mHwVsyncState));
+        dumper.dump("lastHwVsyncState", ftl::enum_string(mLastHwVsyncState));
+    }
+
     out.append("VsyncController:\n");
     mController->dump(out);
 
@@ -84,40 +96,72 @@
     mDispatch->dump(out);
 }
 
-VsyncSchedule::TrackerPtr VsyncSchedule::createTracker() {
+VsyncSchedule::TrackerPtr VsyncSchedule::createTracker(PhysicalDisplayId id) {
     // TODO(b/144707443): Tune constants.
     constexpr nsecs_t kInitialPeriod = (60_Hz).getPeriodNsecs();
     constexpr size_t kHistorySize = 20;
     constexpr size_t kMinSamplesForPrediction = 6;
     constexpr uint32_t kDiscardOutlierPercent = 20;
 
-    return std::make_unique<VSyncPredictor>(kInitialPeriod, kHistorySize, kMinSamplesForPrediction,
-                                            kDiscardOutlierPercent);
+    return std::make_unique<VSyncPredictor>(to_string(id), kInitialPeriod, kHistorySize,
+                                            kMinSamplesForPrediction, kDiscardOutlierPercent);
 }
 
-VsyncSchedule::DispatchPtr VsyncSchedule::createDispatch(VsyncTracker& tracker) {
+VsyncSchedule::DispatchPtr VsyncSchedule::createDispatch(TrackerPtr tracker) {
     using namespace std::chrono_literals;
 
     // TODO(b/144707443): Tune constants.
     constexpr std::chrono::nanoseconds kGroupDispatchWithin = 500us;
     constexpr std::chrono::nanoseconds kSnapToSameVsyncWithin = 3ms;
 
-    return std::make_unique<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
+    return std::make_unique<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), std::move(tracker),
                                                      kGroupDispatchWithin.count(),
                                                      kSnapToSameVsyncWithin.count());
 }
 
-VsyncSchedule::ControllerPtr VsyncSchedule::createController(VsyncTracker& tracker,
+VsyncSchedule::ControllerPtr VsyncSchedule::createController(PhysicalDisplayId id,
+                                                             VsyncTracker& tracker,
                                                              FeatureFlags features) {
     // TODO(b/144707443): Tune constants.
     constexpr size_t kMaxPendingFences = 20;
     const bool hasKernelIdleTimer = features.test(Feature::kKernelIdleTimer);
 
-    auto reactor = std::make_unique<VSyncReactor>(std::make_unique<SystemClock>(), tracker,
-                                                  kMaxPendingFences, hasKernelIdleTimer);
+    auto reactor = std::make_unique<VSyncReactor>(to_string(id), std::make_unique<SystemClock>(),
+                                                  tracker, kMaxPendingFences, hasKernelIdleTimer);
 
     reactor->setIgnorePresentFences(!features.test(Feature::kPresentFences));
     return reactor;
 }
 
+void VsyncSchedule::enableHardwareVsync(ISchedulerCallback& callback) {
+    std::lock_guard<std::mutex> lock(mHwVsyncLock);
+    if (mHwVsyncState == HwVsyncState::Disabled) {
+        getTracker().resetModel();
+        callback.setVsyncEnabled(mId, true);
+        mHwVsyncState = HwVsyncState::Enabled;
+        mLastHwVsyncState = HwVsyncState::Enabled;
+    }
+}
+
+void VsyncSchedule::disableHardwareVsync(ISchedulerCallback& callback, bool disallow) {
+    std::lock_guard<std::mutex> lock(mHwVsyncLock);
+    if (mHwVsyncState == HwVsyncState::Enabled) {
+        callback.setVsyncEnabled(mId, false);
+        mLastHwVsyncState = HwVsyncState::Disabled;
+    }
+    mHwVsyncState = disallow ? HwVsyncState::Disallowed : HwVsyncState::Disabled;
+}
+
+bool VsyncSchedule::isHardwareVsyncAllowed() const {
+    std::lock_guard<std::mutex> lock(mHwVsyncLock);
+    return mHwVsyncState != HwVsyncState::Disallowed;
+}
+
+void VsyncSchedule::allowHardwareVsync() {
+    std::lock_guard<std::mutex> lock(mHwVsyncLock);
+    if (mHwVsyncState == HwVsyncState::Disallowed) {
+        mHwVsyncState = HwVsyncState::Disabled;
+    }
+}
+
 } // namespace android::scheduler
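The enable/disable/allow methods added above implement a three-state machine (Enabled, Disabled, Disallowed) where Disallowed blocks enablement until allowHardwareVsync is called. A standalone sketch of those transitions, using a toy gate class rather than VsyncSchedule and omitting the callback and mLastHwVsyncState bookkeeping:

#include <cstdio>

enum class HwVsyncState { Enabled, Disabled, Disallowed };

class HwVsyncGate {
public:
    // Returns true when the caller should actually turn hardware vsync on.
    bool enable() {
        if (state_ != HwVsyncState::Disabled) return false;  // Disallowed or already Enabled
        state_ = HwVsyncState::Enabled;
        return true;
    }
    // Returns true when the caller should actually turn hardware vsync off.
    bool disable(bool disallow) {
        const bool wasEnabled = (state_ == HwVsyncState::Enabled);
        state_ = disallow ? HwVsyncState::Disallowed : HwVsyncState::Disabled;
        return wasEnabled;
    }
    void allow() {
        if (state_ == HwVsyncState::Disallowed) state_ = HwVsyncState::Disabled;
    }
    bool allowed() const { return state_ != HwVsyncState::Disallowed; }

private:
    HwVsyncState state_ = HwVsyncState::Disallowed;  // matches the default in VsyncSchedule
};

int main() {
    HwVsyncGate gate;
    std::printf("enable while disallowed -> %d\n", gate.enable());  // 0: ignored, display is off
    gate.allow();                                                   // display powered on
    std::printf("enable after allow      -> %d\n", gate.enable());  // 1: takes effect
    gate.disable(/*disallow=*/true);                                // display turned off again
    std::printf("enable while disallowed -> %d\n", gate.enable());  // 0: ignored until allow()
    return 0;
}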
diff --git a/services/surfaceflinger/Scheduler/VsyncSchedule.h b/services/surfaceflinger/Scheduler/VsyncSchedule.h
index 173b1d0..ffb7ad5 100644
--- a/services/surfaceflinger/Scheduler/VsyncSchedule.h
+++ b/services/surfaceflinger/Scheduler/VsyncSchedule.h
@@ -19,8 +19,10 @@
 #include <memory>
 #include <string>
 
+#include <ftl/enum.h>
 #include <scheduler/Features.h>
 #include <scheduler/Time.h>
+#include <ui/DisplayId.h>
 
 namespace android {
 class EventThreadTest;
@@ -32,6 +34,8 @@
 
 namespace android::scheduler {
 
+struct ISchedulerCallback;
+
 // TODO(b/185535769): Rename classes, and remove aliases.
 class VSyncDispatch;
 class VSyncTracker;
@@ -43,8 +47,7 @@
 // Schedule that synchronizes to hardware VSYNC of a physical display.
 class VsyncSchedule {
 public:
-    explicit VsyncSchedule(FeatureFlags);
-    VsyncSchedule(VsyncSchedule&&);
+    VsyncSchedule(PhysicalDisplayId, FeatureFlags);
     ~VsyncSchedule();
 
     Period period() const;
@@ -55,30 +58,71 @@
     VsyncTracker& getTracker() { return *mTracker; }
     VsyncController& getController() { return *mController; }
 
+    // TODO(b/185535769): Once these are hidden behind the API, they may no
+    // longer need to be shared_ptrs.
+    using DispatchPtr = std::shared_ptr<VsyncDispatch>;
+    using TrackerPtr = std::shared_ptr<VsyncTracker>;
+
     // TODO(b/185535769): Remove once VsyncSchedule owns all registrations.
-    VsyncDispatch& getDispatch() { return *mDispatch; }
+    DispatchPtr getDispatch() { return mDispatch; }
 
     void dump(std::string&) const;
 
+    // Turn on hardware vsyncs, unless mHwVsyncState is Disallowed, in which
+    // case this call is ignored.
+    void enableHardwareVsync(ISchedulerCallback&) EXCLUDES(mHwVsyncLock);
+
+    // Disable hardware vsyncs. If `disallow` is true, future calls to
+    // enableHardwareVsync are ineffective until allowHardwareVsync is called.
+    void disableHardwareVsync(ISchedulerCallback&, bool disallow) EXCLUDES(mHwVsyncLock);
+
+    // Restore the ability to enable hardware vsync.
+    void allowHardwareVsync() EXCLUDES(mHwVsyncLock);
+
+    // If true, enableHardwareVsync can enable hardware vsync (if not already
+    // enabled). If false, enableHardwareVsync does nothing.
+    bool isHardwareVsyncAllowed() const EXCLUDES(mHwVsyncLock);
+
+protected:
+    using ControllerPtr = std::unique_ptr<VsyncController>;
+
+    // For tests.
+    VsyncSchedule(PhysicalDisplayId, TrackerPtr, DispatchPtr, ControllerPtr);
+
 private:
     friend class TestableScheduler;
     friend class android::EventThreadTest;
     friend class android::fuzz::SchedulerFuzzer;
 
-    using TrackerPtr = std::unique_ptr<VsyncTracker>;
-    using DispatchPtr = std::unique_ptr<VsyncDispatch>;
-    using ControllerPtr = std::unique_ptr<VsyncController>;
+    static TrackerPtr createTracker(PhysicalDisplayId);
+    static DispatchPtr createDispatch(TrackerPtr);
+    static ControllerPtr createController(PhysicalDisplayId, VsyncTracker&, FeatureFlags);
 
-    // For tests.
-    VsyncSchedule(TrackerPtr, DispatchPtr, ControllerPtr);
+    mutable std::mutex mHwVsyncLock;
+    enum class HwVsyncState {
+        // Hardware vsyncs are currently enabled.
+        Enabled,
 
-    static TrackerPtr createTracker();
-    static DispatchPtr createDispatch(VsyncTracker&);
-    static ControllerPtr createController(VsyncTracker&, FeatureFlags);
+        // Hardware vsyncs are currently disabled. They can be enabled by a call
+        // to `enableHardwareVsync`.
+        Disabled,
+
+        // Hardware vsyncs are not currently allowed (e.g. because the display
+        // is off).
+        Disallowed,
+
+        ftl_last = Disallowed,
+    };
+    HwVsyncState mHwVsyncState GUARDED_BY(mHwVsyncLock) = HwVsyncState::Disallowed;
+
+    // The last state, which may be the current state, or the state prior to setting to Disallowed.
+    HwVsyncState mLastHwVsyncState GUARDED_BY(mHwVsyncLock) = HwVsyncState::Disabled;
 
     class PredictedVsyncTracer;
     using TracerPtr = std::unique_ptr<PredictedVsyncTracer>;
 
+    const PhysicalDisplayId mId;
+
     // Effectively const except in move constructor.
     TrackerPtr mTracker;
     DispatchPtr mDispatch;
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 43483e4..a0c3eb0 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -173,6 +173,8 @@
 using namespace hardware::configstore;
 using namespace hardware::configstore::V1_0;
 using namespace sysprop;
+using ftl::Flags;
+using namespace ftl::flag_operators;
 
 using aidl::android::hardware::graphics::common::DisplayDecorationSupport;
 using aidl::android::hardware::graphics::composer3::Capability;
@@ -470,6 +472,10 @@
     mPowerHintSessionMode =
             {.late = base::GetBoolProperty("debug.sf.send_late_power_session_hint"s, true),
              .early = base::GetBoolProperty("debug.sf.send_early_power_session_hint"s, false)};
+    mLayerLifecycleManagerEnabled =
+            base::GetBoolProperty("debug.sf.enable_layer_lifecycle_manager"s, false);
+    mLegacyFrontEndEnabled = !mLayerLifecycleManagerEnabled ||
+            base::GetBoolProperty("debug.sf.enable_legacy_frontend"s, true);
 }
 
 LatchUnsignaledConfig SurfaceFlinger::getLatchUnsignaledConfig() {
@@ -1124,21 +1130,33 @@
     return NO_ERROR;
 }
 
-status_t SurfaceFlinger::getDisplayStats(const sp<IBinder>&, DisplayStatInfo* outStats) {
+status_t SurfaceFlinger::getDisplayStats(const sp<IBinder>& displayToken,
+                                         DisplayStatInfo* outStats) {
     if (!outStats) {
         return BAD_VALUE;
     }
 
-    const auto& schedule = mScheduler->getVsyncSchedule();
-    outStats->vsyncTime = schedule.vsyncDeadlineAfter(TimePoint::now()).ns();
-    outStats->vsyncPeriod = schedule.period().ns();
+    std::optional<PhysicalDisplayId> displayIdOpt;
+    {
+        Mutex::Autolock lock(mStateLock);
+        displayIdOpt = getPhysicalDisplayIdLocked(displayToken);
+    }
+
+    if (!displayIdOpt) {
+        ALOGE("%s: Invalid physical display token %p", __func__, displayToken.get());
+        return NAME_NOT_FOUND;
+    }
+    const auto schedule = mScheduler->getVsyncSchedule(displayIdOpt);
+    outStats->vsyncTime = schedule->vsyncDeadlineAfter(TimePoint::now()).ns();
+    outStats->vsyncPeriod = schedule->period().ns();
     return NO_ERROR;
 }
 
 void SurfaceFlinger::setDesiredActiveMode(display::DisplayModeRequest&& request, bool force) {
     ATRACE_CALL();
 
-    auto display = getDisplayDeviceLocked(request.mode.modePtr->getPhysicalDisplayId());
+    const auto displayId = request.mode.modePtr->getPhysicalDisplayId();
+    const auto display = getDisplayDeviceLocked(displayId);
     if (!display) {
         ALOGW("%s: display is no longer valid", __func__);
         return;
@@ -1151,23 +1169,25 @@
                                           force)) {
         case DisplayDevice::DesiredActiveModeAction::InitiateDisplayModeSwitch:
             // Set the render rate as setDesiredActiveMode updated it.
-            mScheduler->setRenderRate(display->refreshRateSelector().getActiveMode().fps);
+            mScheduler->setRenderRate(displayId,
+                                      display->refreshRateSelector().getActiveMode().fps);
 
             // Schedule a new frame to initiate the display mode switch.
             scheduleComposite(FrameHint::kNone);
 
             // Start receiving vsync samples now, so that we can detect a period
             // switch.
-            mScheduler->resyncToHardwareVsync(true, mode.modePtr->getFps());
+            mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */,
+                                              mode.modePtr->getFps());
+
             // As we called to set period, we will call to onRefreshRateChangeCompleted once
             // VsyncController model is locked.
-            mScheduler->modulateVsync(&VsyncModulator::onRefreshRateChangeInitiated);
-
+            mScheduler->modulateVsync(displayId, &VsyncModulator::onRefreshRateChangeInitiated);
             updatePhaseConfiguration(mode.fps);
             mScheduler->setModeChangePending(true);
             break;
         case DisplayDevice::DesiredActiveModeAction::InitiateRenderRateSwitch:
-            mScheduler->setRenderRate(mode.fps);
+            mScheduler->setRenderRate(displayId, mode.fps);
             updatePhaseConfiguration(mode.fps);
             mRefreshRateStats->setRefreshRate(mode.fps);
             if (display->getPhysicalId() == mActiveDisplayId && emitEvent) {
@@ -1283,11 +1303,14 @@
 }
 
 void SurfaceFlinger::desiredActiveModeChangeDone(const sp<DisplayDevice>& display) {
-    const auto displayFps = display->getDesiredActiveMode()->modeOpt->modePtr->getFps();
-    const auto renderFps = display->getDesiredActiveMode()->modeOpt->fps;
+    const auto desiredActiveMode = display->getDesiredActiveMode();
+    const auto& modeOpt = desiredActiveMode->modeOpt;
+    const auto displayId = modeOpt->modePtr->getPhysicalDisplayId();
+    const auto displayFps = modeOpt->modePtr->getFps();
+    const auto renderFps = modeOpt->fps;
     clearDesiredActiveModeState(display);
-    mScheduler->resyncToHardwareVsync(true, displayFps);
-    mScheduler->setRenderRate(renderFps);
+    mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, displayFps);
+    mScheduler->setRenderRate(displayId, renderFps);
     updatePhaseConfiguration(renderFps);
 }
 
@@ -1971,13 +1994,14 @@
 // ----------------------------------------------------------------------------
 
 sp<IDisplayEventConnection> SurfaceFlinger::createDisplayEventConnection(
-        gui::ISurfaceComposer::VsyncSource vsyncSource, EventRegistrationFlags eventRegistration) {
+        gui::ISurfaceComposer::VsyncSource vsyncSource, EventRegistrationFlags eventRegistration,
+        const sp<IBinder>& layerHandle) {
     const auto& handle =
             vsyncSource == gui::ISurfaceComposer::VsyncSource::eVsyncSourceSurfaceFlinger
             ? mSfConnectionHandle
             : mAppConnectionHandle;
 
-    return mScheduler->createDisplayEventConnection(handle, eventRegistration);
+    return mScheduler->createDisplayEventConnection(handle, eventRegistration, layerHandle);
 }
 
 void SurfaceFlinger::scheduleCommit(FrameHint hint) {
@@ -2023,21 +2047,11 @@
     ATRACE_FORMAT("onComposerHalVsync%s", tracePeriod.c_str());
 
     Mutex::Autolock lock(mStateLock);
-
-    if (!getHwComposer().onVsync(hwcDisplayId, timestamp)) {
-        return;
-    }
-
-    if (const auto displayId = getHwComposer().toPhysicalDisplayId(hwcDisplayId);
-        displayId != mActiveDisplayId) {
-        // For now, we don't do anything with non active display vsyncs.
-        return;
-    }
-
-    bool periodFlushed = false;
-    mScheduler->addResyncSample(timestamp, vsyncPeriod, &periodFlushed);
-    if (periodFlushed) {
-        mScheduler->modulateVsync(&VsyncModulator::onRefreshRateChangeCompleted);
+    if (const auto displayIdOpt = getHwComposer().onVsync(hwcDisplayId, timestamp)) {
+        if (mScheduler->addResyncSample(*displayIdOpt, timestamp, vsyncPeriod)) {
+            // The period was flushed, so the VsyncController model has locked onto the new rate.
+            mScheduler->modulateVsync(displayIdOpt, &VsyncModulator::onRefreshRateChangeCompleted);
+        }
     }
 }
 
@@ -2078,16 +2092,15 @@
     mScheduler->forceNextResync();
 }
 
-void SurfaceFlinger::setVsyncEnabled(bool enabled) {
-    ATRACE_CALL();
+void SurfaceFlinger::setVsyncEnabled(PhysicalDisplayId id, bool enabled) {
+    const char* const whence = __func__;
+    ATRACE_FORMAT("%s (%d) for %" PRIu64, whence, enabled, id.value);
 
     // On main thread to avoid race conditions with display power state.
     static_cast<void>(mScheduler->schedule([=]() FTL_FAKE_GUARD(mStateLock) {
-        mHWCVsyncPendingState = enabled ? hal::Vsync::ENABLE : hal::Vsync::DISABLE;
-
-        if (const auto display = getDefaultDisplayDeviceLocked();
-            display && display->isPoweredOn()) {
-            setHWCVsyncEnabled(display->getPhysicalId(), mHWCVsyncPendingState);
+        ATRACE_FORMAT("%s (%d) for %" PRIu64 " (main thread)", whence, enabled, id.value);
+        if (const auto display = getDisplayDeviceLocked(id); display && display->isPoweredOn()) {
+            setHWCVsyncEnabled(id, enabled);
         }
     }));
 }
@@ -2114,13 +2127,13 @@
 TimePoint SurfaceFlinger::calculateExpectedPresentTime(TimePoint frameTime) const {
     const auto& schedule = mScheduler->getVsyncSchedule();
 
-    const TimePoint vsyncDeadline = schedule.vsyncDeadlineAfter(frameTime);
+    const TimePoint vsyncDeadline = schedule->vsyncDeadlineAfter(frameTime);
     if (mScheduler->vsyncModulator().getVsyncConfig().sfOffset > 0) {
         return vsyncDeadline;
     }
 
     // Inflate the expected present time if we're targeting the next vsync.
-    return vsyncDeadline + schedule.period();
+    return vsyncDeadline + schedule->period();
 }
 
 void SurfaceFlinger::configure() FTL_FAKE_GUARD(kMainThreadContext) {
@@ -2130,6 +2143,110 @@
     }
 }
 
+bool SurfaceFlinger::updateLayerSnapshotsLegacy(VsyncId vsyncId, LifecycleUpdate& update,
+                                                bool transactionsFlushed,
+                                                bool& outTransactionsAreEmpty) {
+    bool needsTraversal = false;
+    if (transactionsFlushed) {
+        needsTraversal |= commitMirrorDisplays(vsyncId);
+        needsTraversal |= commitCreatedLayers(vsyncId, update.layerCreatedStates);
+        needsTraversal |= applyTransactions(update.transactions, vsyncId);
+    }
+    outTransactionsAreEmpty = !needsTraversal;
+    const bool shouldCommit = (getTransactionFlags() & ~eTransactionFlushNeeded) || needsTraversal;
+    if (shouldCommit) {
+        commitTransactions();
+    }
+
+    bool mustComposite = latchBuffers() || shouldCommit;
+    updateLayerGeometry();
+    return mustComposite;
+}
+
+bool SurfaceFlinger::updateLayerSnapshots(VsyncId vsyncId, LifecycleUpdate& update,
+                                          bool transactionsFlushed, bool& outTransactionsAreEmpty) {
+    using Changes = frontend::RequestedLayerState::Changes;
+    ATRACE_NAME("updateLayerSnapshots");
+    {
+        mLayerLifecycleManager.addLayers(std::move(update.newLayers));
+        mLayerLifecycleManager.applyTransactions(update.transactions);
+        mLayerLifecycleManager.onHandlesDestroyed(update.destroyedHandles);
+        for (auto& legacyLayer : update.layerCreatedStates) {
+            sp<Layer> layer = legacyLayer.layer.promote();
+            if (layer) {
+                mLegacyLayers[layer->sequence] = layer;
+            }
+        }
+    }
+    if (mLayerLifecycleManager.getGlobalChanges().test(Changes::Hierarchy)) {
+        ATRACE_NAME("LayerHierarchyBuilder:update");
+        mLayerHierarchyBuilder.update(mLayerLifecycleManager.getLayers(),
+                                      mLayerLifecycleManager.getDestroyedLayers());
+    }
+
+    applyAndCommitDisplayTransactionStates(update.transactions);
+
+    {
+        ATRACE_NAME("LayerSnapshotBuilder:update");
+        frontend::LayerSnapshotBuilder::Args args{.root = mLayerHierarchyBuilder.getHierarchy(),
+                                                  .layerLifecycleManager = mLayerLifecycleManager,
+                                                  .displays = mFrontEndDisplayInfos,
+                                                  .displayChanges = mFrontEndDisplayInfosChanged,
+                                                  .globalShadowSettings =
+                                                          mDrawingState.globalShadowSettings,
+                                                  .supportsBlur = mSupportsBlur,
+                                                  .forceFullDamage = mForceFullDamage};
+        mLayerSnapshotBuilder.update(args);
+    }
+
+    if (mLayerLifecycleManager.getGlobalChanges().any(Changes::Geometry | Changes::Input |
+                                                      Changes::Hierarchy)) {
+        mUpdateInputInfo = true;
+    }
+    if (mLayerLifecycleManager.getGlobalChanges().any(Changes::VisibleRegion | Changes::Hierarchy |
+                                                      Changes::Visibility)) {
+        mVisibleRegionsDirty = true;
+    }
+    outTransactionsAreEmpty = mLayerLifecycleManager.getGlobalChanges().get() == 0;
+    const bool mustComposite = mLayerLifecycleManager.getGlobalChanges().get() != 0;
+    {
+        ATRACE_NAME("LLM:commitChanges");
+        mLayerLifecycleManager.commitChanges();
+    }
+
+    if (!mLegacyFrontEndEnabled) {
+        ATRACE_NAME("DisplayCallbackAndStatsUpdates");
+        applyTransactions(update.transactions, vsyncId);
+
+        bool newDataLatched = false;
+        for (auto& snapshot : mLayerSnapshotBuilder.getSnapshots()) {
+            if (!snapshot->changes.test(Changes::Buffer)) continue;
+            auto it = mLegacyLayers.find(snapshot->sequence);
+            LOG_ALWAYS_FATAL_IF(it == mLegacyLayers.end(), "Couldn't find layer object for %s",
+                                snapshot->getDebugString().c_str());
+            mLayersWithQueuedFrames.emplace(it->second);
+            newDataLatched = true;
+            if (!snapshot->isVisible) break;
+
+            Region visibleReg;
+            visibleReg.set(snapshot->transformedBoundsWithoutTransparentRegion);
+            invalidateLayerStack(snapshot->outputFilter, visibleReg);
+        }
+
+        for (auto& destroyedLayer : mLayerLifecycleManager.getDestroyedLayers()) {
+            mLegacyLayers.erase(destroyedLayer->id);
+        }
+
+        // enter boot animation on first buffer latch
+        if (CC_UNLIKELY(mBootStage == BootStage::BOOTLOADER && newDataLatched)) {
+            ALOGI("Enter boot animation");
+            mBootStage = BootStage::BOOTANIMATION;
+        }
+        commitTransactions();
+    }
+    return mustComposite;
+}
+
 bool SurfaceFlinger::commit(TimePoint frameTime, VsyncId vsyncId, TimePoint expectedVsyncTime)
         FTL_FAKE_GUARD(kMainThreadContext) {
     // The expectedVsyncTime, which was predicted when this frame was scheduled, is normally in the
@@ -2147,7 +2264,7 @@
                   ticks<std::milli, float>(mExpectedPresentTime - TimePoint::now()),
                   mExpectedPresentTime == expectedVsyncTime ? "" : " (adjusted)");
 
-    const Period vsyncPeriod = mScheduler->getVsyncSchedule().period();
+    const Period vsyncPeriod = mScheduler->getVsyncSchedule()->period();
     const FenceTimePtr& previousPresentFence = getPreviousPresentFence(frameTime, vsyncPeriod);
 
     // When backpressure propagation is enabled, we want to give a small grace period of 1ms
@@ -2269,45 +2386,34 @@
         mFrameTimeline->setSfWakeUp(vsyncId.value, frameTime.ns(),
                                     Fps::fromPeriodNsecs(vsyncPeriod.ns()));
 
-        bool needsTraversal = false;
-        if (clearTransactionFlags(eTransactionFlushNeeded)) {
-            // Locking:
-            // 1. to prevent onHandleDestroyed from being called while the state lock is held,
-            // we must keep a copy of the transactions (specifically the composer
-            // states) around outside the scope of the lock.
-            // 2. Transactions and created layers do not share a lock. To prevent applying
-            // transactions with layers still in the createdLayer queue, flush the transactions
-            // before committing the created layers.
-            std::vector<TransactionState> transactions = mTransactionHandler.flushTransactions();
-            needsTraversal |= commitMirrorDisplays(vsyncId);
-            needsTraversal |= commitCreatedLayers(vsyncId);
-            needsTraversal |= applyTransactions(transactions, vsyncId);
+        const bool flushTransactions = clearTransactionFlags(eTransactionFlushNeeded);
+        LifecycleUpdate updates;
+        if (flushTransactions) {
+            updates = flushLifecycleUpdates();
         }
-
-        const bool shouldCommit =
-                (getTransactionFlags() & ~eTransactionFlushNeeded) || needsTraversal;
-        if (shouldCommit) {
-            commitTransactions();
+        bool transactionsAreEmpty;
+        if (mLegacyFrontEndEnabled) {
+            mustComposite |= updateLayerSnapshotsLegacy(vsyncId, updates, flushTransactions,
+                                                        transactionsAreEmpty);
+        }
+        if (mLayerLifecycleManagerEnabled) {
+            mustComposite |=
+                    updateLayerSnapshots(vsyncId, updates, flushTransactions, transactionsAreEmpty);
         }
 
         if (transactionFlushNeeded()) {
             setTransactionFlags(eTransactionFlushNeeded);
         }
 
-        mustComposite |= shouldCommit;
-        mustComposite |= latchBuffers();
-
         // This has to be called after latchBuffers because we want to include the layers that have
         // been latched in the commit callback
-        if (!needsTraversal) {
+        if (transactionsAreEmpty) {
             // Invoke empty transaction callbacks early.
             mTransactionCallbackInvoker.sendCallbacks(false /* onCommitOnly */);
         } else {
             // Invoke OnCommit callbacks.
             mTransactionCallbackInvoker.sendCallbacks(true /* onCommitOnly */);
         }
-
-        updateLayerGeometry();
     }
 
     // Layers need to get updated (in the previous line) before we can use them for
@@ -2394,15 +2500,6 @@
 
     refreshArgs.updatingOutputGeometryThisFrame = mVisibleRegionsDirty;
     refreshArgs.updatingGeometryThisFrame = mGeometryDirty.exchange(false) || mVisibleRegionsDirty;
-    std::vector<Layer*> layers;
-
-    mDrawingState.traverseInZOrder([&refreshArgs, &layers](Layer* layer) {
-        if (auto layerFE = layer->getCompositionEngineLayerFE()) {
-            layer->updateSnapshot(refreshArgs.updatingGeometryThisFrame);
-            refreshArgs.layers.push_back(layerFE);
-            layers.push_back(layer);
-        }
-    });
     refreshArgs.internalDisplayRotationFlags = DisplayDevice::getPrimaryDisplayRotationFlags();
 
     if (CC_UNLIKELY(mDrawingState.colorMatrixChanged)) {
@@ -2417,7 +2514,7 @@
         refreshArgs.devOptFlashDirtyRegionsDelay = std::chrono::milliseconds(mDebugFlashDelay);
     }
 
-    const auto prevVsyncTime = mExpectedPresentTime - mScheduler->getVsyncSchedule().period();
+    const auto prevVsyncTime = mExpectedPresentTime - mScheduler->getVsyncSchedule()->period();
     const auto hwcMinWorkDuration = mVsyncConfiguration->getCurrentConfigs().hwcMinWorkDuration;
 
     refreshArgs.earliestPresentTime = prevVsyncTime - hwcMinWorkDuration;
@@ -2429,17 +2526,13 @@
     // the scheduler.
     const auto presentTime = systemTime();
 
-    {
-        std::vector<LayerSnapshotGuard> layerSnapshotGuards;
-        for (Layer* layer : layers) {
-            layerSnapshotGuards.emplace_back(layer);
-        }
-        mCompositionEngine->present(refreshArgs);
-    }
+    std::vector<std::pair<Layer*, LayerFE*>> layers =
+            moveSnapshotsToCompositionArgs(refreshArgs, /*cursorOnly=*/false, vsyncId.value);
+    mCompositionEngine->present(refreshArgs);
+    moveSnapshotsFromCompositionArgs(refreshArgs, layers);
 
-    for (auto& layer : layers) {
-        CompositionResult compositionResult{
-                layer->getCompositionEngineLayerFE()->stealCompositionResult()};
+    for (auto [layer, layerFE] : layers) {
+        CompositionResult compositionResult{layerFE->stealCompositionResult()};
         layer->onPreComposition(compositionResult.refreshStartTime);
         for (auto releaseFence : compositionResult.releaseFences) {
             layer->onLayerDisplayed(releaseFence);
@@ -2503,7 +2596,7 @@
     // TODO(b/160583065): Enable skip validation when SF caches all client composition layers.
     const bool hasGpuUseOrReuse =
             mCompositionCoverage.any(CompositionCoverage::Gpu | CompositionCoverage::GpuReuse);
-    mScheduler->modulateVsync(&VsyncModulator::onDisplayRefresh, hasGpuUseOrReuse);
+    mScheduler->modulateVsync({}, &VsyncModulator::onDisplayRefresh, hasGpuUseOrReuse);
 
     mLayersWithQueuedFrames.clear();
     if (mLayerTracingEnabled && mLayerTracing.flagIsSet(LayerTracing::TRACE_COMPOSITION)) {
@@ -2533,7 +2626,7 @@
     for (auto& layer : mLayersPendingRefresh) {
         Region visibleReg;
         visibleReg.set(layer->getScreenBounds());
-        invalidateLayerStack(layer, visibleReg);
+        invalidateLayerStack(layer->getOutputFilter(), visibleReg);
     }
     mLayersPendingRefresh.clear();
 }
@@ -2608,12 +2701,13 @@
     ATRACE_CALL();
     ALOGV(__func__);
 
-    const auto* display = FTL_FAKE_GUARD(mStateLock, getDefaultDisplayDeviceLocked()).get();
+    const auto* defaultDisplay = FTL_FAKE_GUARD(mStateLock, getDefaultDisplayDeviceLocked()).get();
 
     std::shared_ptr<FenceTime> glCompositionDoneFenceTime;
-    if (display && display->getCompositionDisplay()->getState().usesClientComposition) {
+    if (defaultDisplay &&
+        defaultDisplay->getCompositionDisplay()->getState().usesClientComposition) {
         glCompositionDoneFenceTime =
-                std::make_shared<FenceTime>(display->getCompositionDisplay()
+                std::make_shared<FenceTime>(defaultDisplay->getCompositionDisplay()
                                                     ->getRenderSurface()
                                                     ->getClientTargetAcquireFence());
     } else {
@@ -2622,8 +2716,9 @@
 
     mPreviousPresentFences[1] = mPreviousPresentFences[0];
 
-    auto presentFence =
-            display ? getHwComposer().getPresentFence(display->getPhysicalId()) : Fence::NO_FENCE;
+    auto presentFence = defaultDisplay
+            ? getHwComposer().getPresentFence(defaultDisplay->getPhysicalId())
+            : Fence::NO_FENCE;
 
     auto presentFenceTime = std::make_shared<FenceTime>(presentFence);
     mPreviousPresentFences[0] = {presentFence, presentFenceTime};
@@ -2645,16 +2740,16 @@
             ? mPresentLatencyTracker.trackPendingFrame(compositeTime, presentFenceTime)
             : Duration::zero();
 
-    const auto& schedule = mScheduler->getVsyncSchedule();
-    const TimePoint vsyncDeadline = schedule.vsyncDeadlineAfter(presentTime);
-    const Period vsyncPeriod = schedule.period();
+    const auto schedule = mScheduler->getVsyncSchedule();
+    const TimePoint vsyncDeadline = schedule->vsyncDeadlineAfter(presentTime);
+    const Period vsyncPeriod = schedule->period();
     const nsecs_t vsyncPhase = mVsyncConfiguration->getCurrentConfigs().late.sfOffset;
 
     const CompositorTiming compositorTiming(vsyncDeadline.ns(), vsyncPeriod.ns(), vsyncPhase,
                                             presentLatency.ns());
 
     for (const auto& layer: mLayersWithQueuedFrames) {
-        layer->onPostComposition(display, glCompositionDoneFenceTime, presentFenceTime,
+        layer->onPostComposition(defaultDisplay, glCompositionDoneFenceTime, presentFenceTime,
                                  compositorTiming);
         layer->releasePendingBuffer(presentTime.ns());
     }
@@ -2722,23 +2817,27 @@
     mTimeStats->incrementTotalFrames();
     mTimeStats->setPresentFenceGlobal(presentFenceTime);
 
-    const bool isInternalDisplay = display &&
-            FTL_FAKE_GUARD(mStateLock, mPhysicalDisplays)
-                    .get(display->getPhysicalId())
-                    .transform(&PhysicalDisplay::isInternal)
-                    .value_or(false);
-
-    if (isInternalDisplay && display && display->getPowerMode() == hal::PowerMode::ON &&
-        presentFenceTime->isValid()) {
-        mScheduler->addPresentFence(std::move(presentFenceTime));
+    {
+        ftl::FakeGuard guard(mStateLock);
+        for (const auto& [id, physicalDisplay] : mPhysicalDisplays) {
+            if (auto displayDevice = getDisplayDeviceLocked(id);
+                displayDevice && displayDevice->isPoweredOn() && physicalDisplay.isInternal()) {
+                auto presentFenceTimeI = defaultDisplay && defaultDisplay->getPhysicalId() == id
+                        ? std::move(presentFenceTime)
+                        : std::make_shared<FenceTime>(getHwComposer().getPresentFence(id));
+                if (presentFenceTimeI->isValid()) {
+                    mScheduler->addPresentFence(id, std::move(presentFenceTimeI));
+                }
+            }
+        }
     }
 
     const bool isDisplayConnected =
-            display && getHwComposer().isConnected(display->getPhysicalId());
+            defaultDisplay && getHwComposer().isConnected(defaultDisplay->getPhysicalId());
 
     if (!hasSyncFramework) {
-        if (isDisplayConnected && display->isPoweredOn()) {
-            mScheduler->enableHardwareVsync();
+        if (isDisplayConnected && defaultDisplay->isPoweredOn()) {
+            mScheduler->enableHardwareVsync(defaultDisplay->getPhysicalId());
         }
     }
 
@@ -2746,7 +2845,7 @@
     const size_t appConnections = mScheduler->getEventThreadConnectionCount(mAppConnectionHandle);
     mTimeStats->recordDisplayEventConnectionCount(sfConnections + appConnections);
 
-    if (isDisplayConnected && !display->isPoweredOn()) {
+    if (isDisplayConnected && !defaultDisplay->isPoweredOn()) {
         getRenderEngine().cleanupPostRender();
         return;
     }
@@ -2785,9 +2884,10 @@
             if (!layer->hasTrustedPresentationListener()) {
                 return;
             }
-            const auto display =
+            const std::optional<const DisplayDevice*> displayOpt =
                     layerStackToDisplay.get(layer->getLayerSnapshot()->outputFilter.layerStack);
-            layer->updateTrustedPresentationState(display->get(), layer->getLayerSnapshot(),
+            const DisplayDevice* display = displayOpt.value_or(nullptr);
+            layer->updateTrustedPresentationState(display, layer->getLayerSnapshot(),
                                                   nanoseconds_to_milliseconds(callTime), false);
         });
     }
@@ -2848,7 +2948,7 @@
     // so we can call commitTransactionsLocked unconditionally.
     // We clear the flags with mStateLock held to guarantee that
     // mCurrentState won't change until the transaction is committed.
-    mScheduler->modulateVsync(&VsyncModulator::onTransactionCommit);
+    mScheduler->modulateVsync({}, &VsyncModulator::onTransactionCommit);
     commitTransactionsLocked(clearTransactionFlags(eTransactionMask));
 
     mDebugInTransaction = 0;
@@ -3387,7 +3487,8 @@
 void SurfaceFlinger::commitTransactionsLocked(uint32_t transactionFlags) {
     // Commit display transactions.
     const bool displayTransactionNeeded = transactionFlags & eDisplayTransactionNeeded;
-    if (displayTransactionNeeded) {
+    mFrontEndDisplayInfosChanged = displayTransactionNeeded;
+    if (displayTransactionNeeded && !mLayerLifecycleManagerEnabled) {
         processDisplayChangesLocked();
         mFrontEndDisplayInfos.clear();
         for (const auto& [_, display] : mDisplays) {
@@ -3478,7 +3579,7 @@
                 // this layer is not visible anymore
                 Region visibleReg;
                 visibleReg.set(layer->getScreenBounds());
-                invalidateLayerStack(sp<Layer>::fromExisting(layer), visibleReg);
+                invalidateLayerStack(layer->getOutputFilter(), visibleReg);
             }
         });
     }
@@ -3566,16 +3667,23 @@
     outWindowInfos.reserve(sNumWindowInfos);
     sNumWindowInfos = 0;
 
-    mDrawingState.traverseInReverseZOrder([&](Layer* layer) {
-        if (!layer->needsInputInfo()) return;
+    if (mLayerLifecycleManagerEnabled) {
+        mLayerSnapshotBuilder.forEachInputSnapshot(
+                [&outWindowInfos](const frontend::LayerSnapshot& snapshot) {
+                    outWindowInfos.push_back(snapshot.inputInfo);
+                });
+    } else {
+        mDrawingState.traverseInReverseZOrder([&](Layer* layer) {
+            if (!layer->needsInputInfo()) return;
+            const auto opt =
+                    mFrontEndDisplayInfos.get(layer->getLayerStack())
+                            .transform([](const frontend::DisplayInfo& info) {
+                                return Layer::InputDisplayArgs{&info.transform, info.isSecure};
+                            });
 
-        const auto opt = mFrontEndDisplayInfos.get(layer->getLayerStack())
-                                 .transform([](const frontend::DisplayInfo& info) {
-                                     return Layer::InputDisplayArgs{&info.transform, info.isSecure};
-                                 });
-
-        outWindowInfos.push_back(layer->fillInputInfo(opt.value_or(Layer::InputDisplayArgs{})));
-    });
+            outWindowInfos.push_back(layer->fillInputInfo(opt.value_or(Layer::InputDisplayArgs{})));
+        });
+    }
 
     sNumWindowInfos = outWindowInfos.size();
 
@@ -3592,17 +3700,9 @@
             refreshArgs.outputs.push_back(display->getCompositionDisplay());
         }
     }
-
-    std::vector<LayerSnapshotGuard> layerSnapshotGuards;
-    mDrawingState.traverse([&layerSnapshotGuards](Layer* layer) {
-        if (layer->getLayerSnapshot()->compositionType ==
-            aidl::android::hardware::graphics::composer3::Composition::CURSOR) {
-            layer->updateSnapshot(false /* updateGeometry */);
-            layerSnapshotGuards.emplace_back(layer);
-        }
-    });
-
+    auto layers = moveSnapshotsToCompositionArgs(refreshArgs, /*cursorOnly=*/true, 0);
     mCompositionEngine->updateCursorAsync(refreshArgs);
+    moveSnapshotsFromCompositionArgs(refreshArgs, layers);
 }
 
 void SurfaceFlinger::requestDisplayModes(std::vector<display::DisplayModeRequest> modeRequests) {
@@ -3687,10 +3787,9 @@
     mScheduler = std::make_unique<Scheduler>(static_cast<ICompositor&>(*this),
                                              static_cast<ISchedulerCallback&>(*this), features,
                                              std::move(modulatorPtr));
-    mScheduler->createVsyncSchedule(features);
     mScheduler->registerDisplay(display->getPhysicalId(), display->holdRefreshRateSelector());
 
-    setVsyncEnabled(false);
+    setVsyncEnabled(display->getPhysicalId(), false);
     mScheduler->startTimers();
 
     const auto configs = mVsyncConfiguration->getCurrentConfigs();
@@ -3706,7 +3805,7 @@
                                           /* workDuration */ activeRefreshRate.getPeriod(),
                                           /* readyDuration */ configs.late.sfWorkDuration);
 
-    mScheduler->initVsync(mScheduler->getVsyncSchedule().getDispatch(),
+    mScheduler->initVsync(mScheduler->getVsyncSchedule()->getDispatch(),
                           *mFrameTimeline->getTokenManager(), configs.late.sfWorkDuration);
 
     mRegionSamplingThread =
@@ -3778,10 +3877,10 @@
     }
 }
 
-void SurfaceFlinger::invalidateLayerStack(const sp<const Layer>& layer, const Region& dirty) {
+void SurfaceFlinger::invalidateLayerStack(const ui::LayerFilter& layerFilter, const Region& dirty) {
     for (const auto& [token, displayDevice] : FTL_FAKE_GUARD(mStateLock, mDisplays)) {
         auto display = displayDevice->getCompositionDisplay();
-        if (display->includesLayer(layer->getOutputFilter())) {
+        if (display->includesLayer(layerFilter)) {
             display->editState().dirtyRegion.orSelf(dirty);
         }
     }
@@ -3901,6 +4000,7 @@
     {
         std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
         mCreatedLayers.emplace_back(layer, parent, args.addToRoot);
+        mNewLayers.emplace_back(std::make_unique<frontend::RequestedLayerState>(args));
     }
 
     setTransactionFlags(eTransactionNeeded);
@@ -3912,14 +4012,18 @@
 }
 
 uint32_t SurfaceFlinger::clearTransactionFlags(uint32_t mask) {
-    return mTransactionFlags.fetch_and(~mask) & mask;
+    uint32_t transactionFlags = mTransactionFlags.fetch_and(~mask);
+    ATRACE_INT("mTransactionFlags", transactionFlags);
+    return transactionFlags & mask;
 }
 
 void SurfaceFlinger::setTransactionFlags(uint32_t mask, TransactionSchedule schedule,
                                          const sp<IBinder>& applyToken, FrameHint frameHint) {
-    mScheduler->modulateVsync(&VsyncModulator::setTransactionSchedule, schedule, applyToken);
+    mScheduler->modulateVsync({}, &VsyncModulator::setTransactionSchedule, schedule, applyToken);
+    uint32_t transactionFlags = mTransactionFlags.fetch_or(mask);
+    ATRACE_INT("mTransactionFlags", transactionFlags);
 
-    if (const bool scheduled = mTransactionFlags.fetch_or(mask) & mask; !scheduled) {
+    if (const bool scheduled = transactionFlags & mask; !scheduled) {
         scheduleCommit(frameHint);
     } else if (frameHint == FrameHint::kActive) {
         // Even if the next frame is already scheduled, we should reset the idle timer
@@ -3944,7 +4048,7 @@
         return TransactionReadiness::NotReady;
     }
 
-    if (!mScheduler->isVsyncValid(mExpectedPresentTime, transaction.originUid)) {
+    if (!mScheduler->isVsyncTargetForUid(mExpectedPresentTime, transaction.originUid)) {
         ATRACE_NAME("!isVsyncValid");
         return TransactionReadiness::NotReady;
     }
@@ -4088,7 +4192,7 @@
         return false;
     }
 
-    const Duration earlyLatchVsyncThreshold = mScheduler->getVsyncSchedule().period() / 2;
+    const Duration earlyLatchVsyncThreshold = mScheduler->getVsyncSchedule()->period() / 2;
 
     return predictedPresentTime >= expectedPresentTime &&
             predictedPresentTime - expectedPresentTime >= earlyLatchVsyncThreshold;
@@ -4234,9 +4338,8 @@
     }(state.flags);
 
     const auto frameHint = state.isFrameActive() ? FrameHint::kActive : FrameHint::kNone;
-    setTransactionFlags(eTransactionFlushNeeded, schedule, state.applyToken, frameHint);
     mTransactionHandler.queueTransaction(std::move(state));
-
+    setTransactionFlags(eTransactionFlushNeeded, schedule, applyToken, frameHint);
     return NO_ERROR;
 }
 
@@ -4251,9 +4354,11 @@
                                            const std::vector<ListenerCallbacks>& listenerCallbacks,
                                            int originPid, int originUid, uint64_t transactionId) {
     uint32_t transactionFlags = 0;
-    for (DisplayState& display : displays) {
-        display.sanitize(permissions);
-        transactionFlags |= setDisplayStateLocked(display);
+    if (!mLayerLifecycleManagerEnabled) {
+        for (DisplayState& display : displays) {
+            display.sanitize(permissions);
+            transactionFlags |= setDisplayStateLocked(display);
+        }
     }
 
     // start and end registration for listeners w/ no surface so they can get their callback.  Note
@@ -4265,9 +4370,16 @@
 
     uint32_t clientStateFlags = 0;
     for (auto& resolvedState : states) {
-        clientStateFlags |=
-                setClientStateLocked(frameTimelineInfo, resolvedState, desiredPresentTime,
-                                     isAutoTimestamp, postTime, permissions, transactionId);
+        if (mLegacyFrontEndEnabled) {
+            clientStateFlags |=
+                    setClientStateLocked(frameTimelineInfo, resolvedState, desiredPresentTime,
+                                         isAutoTimestamp, postTime, permissions, transactionId);
+
+        } else /*mLayerLifecycleManagerEnabled*/ {
+            clientStateFlags |= updateLayerCallbacksAndStats(frameTimelineInfo, resolvedState,
+                                                             desiredPresentTime, isAutoTimestamp,
+                                                             postTime, permissions, transactionId);
+        }
         if ((flags & eAnimation) && resolvedState.state.surface) {
             if (const auto layer = LayerHandle::getLayer(resolvedState.state.surface)) {
                 using LayerUpdateType = scheduler::LayerHistory::LayerUpdateType;
@@ -4300,8 +4412,8 @@
 
     bool needsTraversal = false;
     if (transactionFlags) {
-        // We are on the main thread, we are about to preform a traversal. Clear the traversal bit
-        // so we don't have to wake up again next frame to preform an unnecessary traversal.
+        // We are on the main thread, we are about to perform a traversal. Clear the traversal bit
+        // so we don't have to wake up again next frame to perform an unnecessary traversal.
         if (transactionFlags & eTraversalNeeded) {
             transactionFlags = transactionFlags & (~eTraversalNeeded);
             needsTraversal = true;
@@ -4314,6 +4426,42 @@
     return needsTraversal;
 }
 
+bool SurfaceFlinger::applyAndCommitDisplayTransactionStates(
+        std::vector<TransactionState>& transactions) {
+    Mutex::Autolock _l(mStateLock);
+    bool needsTraversal = false;
+    uint32_t transactionFlags = 0;
+    for (auto& transaction : transactions) {
+        for (DisplayState& display : transaction.displays) {
+            display.sanitize(transaction.permissions);
+            transactionFlags |= setDisplayStateLocked(display);
+        }
+    }
+
+    if (transactionFlags) {
+        // We are on the main thread, we are about to perform a traversal. Clear the traversal bit
+        // so we don't have to wake up again next frame to perform an unnecessary traversal.
+        if (transactionFlags & eTraversalNeeded) {
+            transactionFlags = transactionFlags & (~eTraversalNeeded);
+            needsTraversal = true;
+        }
+        if (transactionFlags) {
+            setTransactionFlags(transactionFlags);
+        }
+    }
+
+    mFrontEndDisplayInfosChanged = mTransactionFlags & eDisplayTransactionNeeded;
+    if (mFrontEndDisplayInfosChanged && !mLegacyFrontEndEnabled) {
+        processDisplayChangesLocked();
+        mFrontEndDisplayInfos.clear();
+        for (const auto& [_, display] : mDisplays) {
+            mFrontEndDisplayInfos.try_emplace(display->getLayerStack(), display->getFrontEndInfo());
+        }
+    }
+
+    return needsTraversal;
+}
+
 uint32_t SurfaceFlinger::setDisplayStateLocked(const DisplayState& s) {
     const ssize_t index = mCurrentState.displays.indexOfKey(s.token);
     if (index < 0) return 0;
@@ -4695,7 +4843,11 @@
                                           s.trustedPresentationListener);
     }
 
-    if (layer->setTransactionCompletedListeners(callbackHandles)) flags |= eTraversalNeeded;
+    if (layer->setTransactionCompletedListeners(callbackHandles,
+                                                layer->willPresentCurrentTransaction())) {
+        flags |= eTraversalNeeded;
+    }
+
     // Do not put anything that updates layer state or modifies flags after
     // setTransactionCompletedListener
 
@@ -4708,6 +4860,94 @@
     return flags;
 }
 
+uint32_t SurfaceFlinger::updateLayerCallbacksAndStats(const FrameTimelineInfo& frameTimelineInfo,
+                                                      ResolvedComposerState& composerState,
+                                                      int64_t desiredPresentTime,
+                                                      bool isAutoTimestamp, int64_t postTime,
+                                                      uint32_t permissions,
+                                                      uint64_t transactionId) {
+    layer_state_t& s = composerState.state;
+    s.sanitize(permissions);
+    const nsecs_t latchTime = systemTime();
+    bool unused;
+
+    std::vector<ListenerCallbacks> filteredListeners;
+    for (auto& listener : s.listeners) {
+        // Starts a registration but separates the callback ids according to callback type. This
+        // allows the callback invoker to send the on-latch callbacks earlier.
+        // Note that startRegistration will not re-register if the listener has
+        // already been registered for a prior surface control
+
+        ListenerCallbacks onCommitCallbacks = listener.filter(CallbackId::Type::ON_COMMIT);
+        if (!onCommitCallbacks.callbackIds.empty()) {
+            filteredListeners.push_back(onCommitCallbacks);
+        }
+
+        ListenerCallbacks onCompleteCallbacks = listener.filter(CallbackId::Type::ON_COMPLETE);
+        if (!onCompleteCallbacks.callbackIds.empty()) {
+            filteredListeners.push_back(onCompleteCallbacks);
+        }
+    }
+
+    const uint64_t what = s.what;
+    uint32_t flags = 0;
+    sp<Layer> layer = nullptr;
+    if (s.surface) {
+        layer = LayerHandle::getLayer(s.surface);
+    } else {
+        // The client may provide us a null handle. Treat it as if the layer was removed.
+        ALOGW("Attempt to set client state with a null layer handle");
+    }
+    if (layer == nullptr) {
+        for (auto& [listener, callbackIds] : s.listeners) {
+            mTransactionCallbackInvoker.registerUnpresentedCallbackHandle(
+                    sp<CallbackHandle>::make(listener, callbackIds, s.surface));
+        }
+        return 0;
+    }
+    if (what & layer_state_t::eProducerDisconnect) {
+        layer->onDisconnect();
+    }
+    std::optional<nsecs_t> dequeueBufferTimestamp;
+    if (what & layer_state_t::eMetadataChanged) {
+        dequeueBufferTimestamp = s.metadata.getInt64(gui::METADATA_DEQUEUE_TIME);
+    }
+
+    std::vector<sp<CallbackHandle>> callbackHandles;
+    if ((what & layer_state_t::eHasListenerCallbacksChanged) && (!filteredListeners.empty())) {
+        for (auto& [listener, callbackIds] : filteredListeners) {
+            callbackHandles.emplace_back(
+                    sp<CallbackHandle>::make(listener, callbackIds, s.surface));
+        }
+    }
+    if (what & layer_state_t::eSidebandStreamChanged) {
+        if (layer->setSidebandStream(s.sidebandStream)) flags |= eTraversalNeeded;
+    }
+    if (what & layer_state_t::eBufferChanged) {
+        if (layer->setBuffer(composerState.externalTexture, *s.bufferData, postTime,
+                             desiredPresentTime, isAutoTimestamp, dequeueBufferTimestamp,
+                             frameTimelineInfo)) {
+            layer->latchBuffer(unused, latchTime);
+            flags |= eTraversalNeeded;
+        }
+        mLayersWithQueuedFrames.emplace(layer);
+    } else if (frameTimelineInfo.vsyncId != FrameTimelineInfo::INVALID_VSYNC_ID) {
+        layer->setFrameTimelineVsyncForBufferlessTransaction(frameTimelineInfo, postTime);
+    }
+
+    if (what & layer_state_t::eTrustedPresentationInfoChanged) {
+        layer->setTrustedPresentationInfo(s.trustedPresentationThresholds,
+                                          s.trustedPresentationListener);
+    }
+
+    const auto& snapshot = mLayerSnapshotBuilder.getSnapshot(layer->getSequence());
+    bool willPresentCurrentTransaction =
+            snapshot && (snapshot->hasReadyFrame || snapshot->sidebandStreamHasFrame);
+    if (layer->setTransactionCompletedListeners(callbackHandles, willPresentCurrentTransaction))
+        flags |= eTraversalNeeded;
+    return flags;
+}
+
 uint32_t SurfaceFlinger::addInputWindowCommands(const InputWindowCommands& inputWindowCommands) {
     bool hasChanges = mInputWindowCommands.merge(inputWindowCommands);
     return hasChanges ? eTraversalNeeded : 0;
@@ -4776,6 +5016,7 @@
         LayerCreationArgs mirrorArgs(args);
         mirrorArgs.flags |= ISurfaceComposerClient::eNoColorFill;
         mirrorArgs.addToRoot = true;
+        mirrorArgs.layerStackToMirror = layerStack;
         result = createEffectLayer(mirrorArgs, &outResult.handle, &rootMirrorLayer);
         outResult.layerId = rootMirrorLayer->sequence;
         outResult.layerName = String16(rootMirrorLayer->getDebugName());
@@ -4878,7 +5119,12 @@
     setTransactionFlags(eTransactionNeeded);
 }
 
-void SurfaceFlinger::onHandleDestroyed(BBinder* handle, sp<Layer>& layer, uint32_t /* layerId */) {
+void SurfaceFlinger::onHandleDestroyed(BBinder* handle, sp<Layer>& layer, uint32_t layerId) {
+    {
+        std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
+        mDestroyedHandles.emplace_back(layerId);
+    }
+
     Mutex::Autolock lock(mStateLock);
     markLayerPendingRemovalLocked(layer);
     mBufferCountTracker.remove(handle);
@@ -4886,6 +5132,8 @@
     if (mTransactionTracing) {
         mTransactionTracing->onHandleRemoved(handle);
     }
+
+    setTransactionFlags(eTransactionFlushNeeded);
 }
 
 void SurfaceFlinger::onInitializeDisplays() {
@@ -4985,10 +5233,11 @@
             ALOGW("Couldn't set SCHED_FIFO on display on: %s\n", strerror(errno));
         }
         getHwComposer().setPowerMode(displayId, mode);
-        if (isActiveDisplay && mode != hal::PowerMode::DOZE_SUSPEND) {
-            setHWCVsyncEnabled(displayId, mHWCVsyncPendingState);
-            mScheduler->onScreenAcquired(mAppConnectionHandle);
-            mScheduler->resyncToHardwareVsync(true, refreshRate);
+        if (mode != hal::PowerMode::DOZE_SUSPEND) {
+            if (isActiveDisplay) {
+                mScheduler->onScreenAcquired(mAppConnectionHandle);
+            }
+            mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, refreshRate);
         }
 
         mVisibleRegionsDirty = true;
@@ -5001,33 +5250,34 @@
         if (SurfaceFlinger::setSchedAttr(false) != NO_ERROR) {
             ALOGW("Couldn't set uclamp.min on display off: %s\n", strerror(errno));
         }
-        if (isActiveDisplay && *currentModeOpt != hal::PowerMode::DOZE_SUSPEND) {
-            mScheduler->disableHardwareVsync(true);
-            mScheduler->onScreenReleased(mAppConnectionHandle);
+        if (*currentModeOpt != hal::PowerMode::DOZE_SUSPEND) {
+            mScheduler->disableHardwareVsync(displayId, true);
+            if (isActiveDisplay) {
+                mScheduler->onScreenReleased(mAppConnectionHandle);
+            }
         }
 
-        // Make sure HWVsync is disabled before turning off the display
-        setHWCVsyncEnabled(displayId, hal::Vsync::DISABLE);
-
         getHwComposer().setPowerMode(displayId, mode);
         mVisibleRegionsDirty = true;
         // from this point on, SF will stop drawing on this display
     } else if (mode == hal::PowerMode::DOZE || mode == hal::PowerMode::ON) {
         // Update display while dozing
         getHwComposer().setPowerMode(displayId, mode);
-        if (isActiveDisplay && *currentModeOpt == hal::PowerMode::DOZE_SUSPEND) {
+        if (*currentModeOpt == hal::PowerMode::DOZE_SUSPEND) {
+            if (isActiveDisplay) {
+                mScheduler->onScreenAcquired(mAppConnectionHandle);
+            }
             ALOGI("Force repainting for DOZE_SUSPEND -> DOZE or ON.");
             mVisibleRegionsDirty = true;
             scheduleRepaint();
-            mScheduler->onScreenAcquired(mAppConnectionHandle);
-            mScheduler->resyncToHardwareVsync(true, refreshRate);
+            mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, refreshRate);
         }
     } else if (mode == hal::PowerMode::DOZE_SUSPEND) {
         // Leave display going to doze
         if (isActiveDisplay) {
-            mScheduler->disableHardwareVsync(true);
             mScheduler->onScreenReleased(mAppConnectionHandle);
         }
+        mScheduler->disableHardwareVsync(displayId, true);
         getHwComposer().setPowerMode(displayId, mode);
     } else {
         ALOGE("Attempting to set unknown power mode: %d\n", mode);
@@ -5037,8 +5287,8 @@
     if (isActiveDisplay) {
         mTimeStats->setPowerMode(mode);
         mRefreshRateStats->setPowerMode(mode);
-        mScheduler->setDisplayPowerMode(mode);
     }
+    mScheduler->setDisplayPowerMode(displayId, mode);
 
     ALOGD("Finished setting power mode %d on display %s", mode, to_string(displayId).c_str());
 }
@@ -5216,14 +5466,6 @@
 
     mScheduler->dump(dumper);
 
-    // TODO(b/241286146): Move to Scheduler.
-    {
-        utils::Dumper::Indent indent(dumper);
-        dumper.dump("lastHwcVsyncState"sv, mLastHWCVsyncState);
-        dumper.dump("pendingHwcVsyncState"sv, mHWCVsyncPendingState);
-    }
-    dumper.eol();
-
     // TODO(b/241285876): Move to DisplayModeController.
     dumper.dump("debugDisplayModeSetByBackdoor"sv, mDebugDisplayModeSetByBackdoor);
     dumper.eol();
@@ -6455,10 +6697,15 @@
                                          args.useIdentityTransform, args.captureSecureLayers);
     });
 
-    auto traverseLayers = [this, args, layerStack](const LayerVector::Visitor& visitor) {
-        traverseLayersInLayerStack(layerStack, args.uid, visitor);
-    };
-    auto getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+    GetLayerSnapshotsFunction getLayerSnapshots;
+    if (mLayerLifecycleManagerEnabled) {
+        getLayerSnapshots = getLayerSnapshotsForScreenshots(layerStack, args.uid);
+    } else {
+        auto traverseLayers = [this, args, layerStack](const LayerVector::Visitor& visitor) {
+            traverseLayersInLayerStack(layerStack, args.uid, visitor);
+        };
+        getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+    }
 
     auto future = captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, reqSize,
                                       args.pixelFormat, args.allowProtected, args.grayscale,
@@ -6492,10 +6739,15 @@
                                          false /* captureSecureLayers */);
     });
 
-    auto traverseLayers = [this, layerStack](const LayerVector::Visitor& visitor) {
-        traverseLayersInLayerStack(layerStack, CaptureArgs::UNSET_UID, visitor);
-    };
-    auto getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+    GetLayerSnapshotsFunction getLayerSnapshots;
+    if (mLayerLifecycleManagerEnabled) {
+        getLayerSnapshots = getLayerSnapshotsForScreenshots(layerStack, CaptureArgs::UNSET_UID);
+    } else {
+        auto traverseLayers = [this, layerStack](const LayerVector::Visitor& visitor) {
+            traverseLayersInLayerStack(layerStack, CaptureArgs::UNSET_UID, visitor);
+        };
+        getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+    }
 
     if (captureListener == nullptr) {
         ALOGE("capture screen must provide a capture listener callback");
@@ -6590,29 +6842,37 @@
         return std::make_unique<LayerRenderArea>(*this, parent, crop, reqSize, dataspace,
                                                  childrenOnly, args.captureSecureLayers);
     });
-
-    auto traverseLayers = [parent, args, excludeLayerIds](const LayerVector::Visitor& visitor) {
-        parent->traverseChildrenInZOrder(LayerVector::StateSet::Drawing, [&](Layer* layer) {
-            if (!layer->isVisible()) {
-                return;
-            } else if (args.childrenOnly && layer == parent.get()) {
-                return;
-            } else if (args.uid != CaptureArgs::UNSET_UID && args.uid != layer->getOwnerUid()) {
-                return;
-            }
-
-            auto p = sp<Layer>::fromExisting(layer);
-            while (p != nullptr) {
-                if (excludeLayerIds.count(p->sequence) != 0) {
+    GetLayerSnapshotsFunction getLayerSnapshots;
+    if (mLayerLifecycleManagerEnabled) {
+        FloatRect parentCrop = crop.isEmpty() ? FloatRect(0, 0, reqSize.width, reqSize.height)
+                                              : crop.toFloatRect();
+        getLayerSnapshots = getLayerSnapshotsForScreenshots(parent->sequence, args.uid,
+                                                            std::move(excludeLayerIds),
+                                                            args.childrenOnly, parentCrop);
+    } else {
+        auto traverseLayers = [parent, args, excludeLayerIds](const LayerVector::Visitor& visitor) {
+            parent->traverseChildrenInZOrder(LayerVector::StateSet::Drawing, [&](Layer* layer) {
+                if (!layer->isVisible()) {
+                    return;
+                } else if (args.childrenOnly && layer == parent.get()) {
+                    return;
+                } else if (args.uid != CaptureArgs::UNSET_UID && args.uid != layer->getOwnerUid()) {
                     return;
                 }
-                p = p->getParent();
-            }
 
-            visitor(layer);
-        });
-    };
-    auto getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+                auto p = sp<Layer>::fromExisting(layer);
+                while (p != nullptr) {
+                    if (excludeLayerIds.count(p->sequence) != 0) {
+                        return;
+                    }
+                    p = p->getParent();
+                }
+
+                visitor(layer);
+            });
+        };
+        getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
+    }
 
     if (captureListener == nullptr) {
         ALOGE("capture screen must provide a capture listener callback");
@@ -7394,24 +7654,18 @@
     return true;
 }
 
-bool SurfaceFlinger::commitCreatedLayers(VsyncId vsyncId) {
-    std::vector<LayerCreatedState> createdLayers;
-    {
-        std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
-        createdLayers = std::move(mCreatedLayers);
-        mCreatedLayers.clear();
-        if (createdLayers.size() == 0) {
-            return false;
-        }
+bool SurfaceFlinger::commitCreatedLayers(VsyncId vsyncId,
+                                         std::vector<LayerCreatedState>& createdLayers) {
+    if (createdLayers.size() == 0) {
+        return false;
     }
 
     Mutex::Autolock _l(mStateLock);
     for (const auto& createdLayer : createdLayers) {
         handleLayerCreatedLocked(createdLayer, vsyncId);
     }
-    createdLayers.clear();
     mLayersAdded = true;
-    return true;
+    return mLayersAdded;
 }
 
 void SurfaceFlinger::updateLayerMetadataSnapshot() {
@@ -7439,6 +7693,150 @@
     });
 }
 
+void SurfaceFlinger::moveSnapshotsFromCompositionArgs(
+        compositionengine::CompositionRefreshArgs& refreshArgs,
+        std::vector<std::pair<Layer*, LayerFE*>>& layers) {
+    if (mLayerLifecycleManagerEnabled) {
+        std::vector<std::unique_ptr<frontend::LayerSnapshot>>& snapshots =
+                mLayerSnapshotBuilder.getSnapshots();
+        for (auto [_, layerFE] : layers) {
+            auto i = layerFE->mSnapshot->globalZ;
+            snapshots[i] = std::move(layerFE->mSnapshot);
+        }
+    }
+    if (mLegacyFrontEndEnabled && !mLayerLifecycleManagerEnabled) {
+        for (auto [layer, layerFE] : layers) {
+            layer->updateLayerSnapshot(std::move(layerFE->mSnapshot));
+        }
+    }
+}
+
+std::vector<std::pair<Layer*, LayerFE*>> SurfaceFlinger::moveSnapshotsToCompositionArgs(
+        compositionengine::CompositionRefreshArgs& refreshArgs, bool cursorOnly, int64_t vsyncId) {
+    std::vector<std::pair<Layer*, LayerFE*>> layers;
+    if (mLayerLifecycleManagerEnabled) {
+        mLayerSnapshotBuilder.forEachVisibleSnapshot(
+                [&](std::unique_ptr<frontend::LayerSnapshot>& snapshot) {
+                    if (cursorOnly &&
+                        snapshot->compositionType !=
+                                aidl::android::hardware::graphics::composer3::Composition::CURSOR) {
+                        return;
+                    }
+
+                    if (!snapshot->hasSomethingToDraw()) {
+                        return;
+                    }
+
+                    auto it = mLegacyLayers.find(snapshot->sequence);
+                    LOG_ALWAYS_FATAL_IF(it == mLegacyLayers.end(),
+                                        "Couldnt find layer object for %s",
+                                        snapshot->getDebugString().c_str());
+                    auto& legacyLayer = it->second;
+                    sp<LayerFE> layerFE = legacyLayer->getCompositionEngineLayerFE(snapshot->path);
+                    layerFE->mSnapshot = std::move(snapshot);
+                    refreshArgs.layers.push_back(layerFE);
+                    layers.emplace_back(legacyLayer.get(), layerFE.get());
+                });
+    }
+    if (mLegacyFrontEndEnabled && !mLayerLifecycleManagerEnabled) {
+        mDrawingState.traverseInZOrder([&refreshArgs, cursorOnly, &layers](Layer* layer) {
+            if (auto layerFE = layer->getCompositionEngineLayerFE()) {
+                if (cursorOnly &&
+                    layer->getLayerSnapshot()->compositionType !=
+                            aidl::android::hardware::graphics::composer3::Composition::CURSOR)
+                    return;
+                layer->updateSnapshot(/* refreshArgs.updatingGeometryThisFrame */ true);
+                layerFE->mSnapshot = layer->stealLayerSnapshot();
+                refreshArgs.layers.push_back(layerFE);
+                layers.emplace_back(layer, layerFE.get());
+            }
+        });
+    }
+
+    return layers;
+}
+
+std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()>
+SurfaceFlinger::getLayerSnapshotsForScreenshots(std::optional<ui::LayerStack> layerStack,
+                                                uint32_t uid) {
+    return [this, layerStack, uid]() {
+        std::vector<std::pair<Layer*, sp<LayerFE>>> layers;
+        for (auto& snapshot : mLayerSnapshotBuilder.getSnapshots()) {
+            if (layerStack && snapshot->outputFilter.layerStack != *layerStack) {
+                continue;
+            }
+            if (uid != CaptureArgs::UNSET_UID && snapshot->inputInfo.ownerUid != uid) {
+                continue;
+            }
+            if (!snapshot->isVisible || !snapshot->hasSomethingToDraw()) {
+                continue;
+            }
+
+            auto it = mLegacyLayers.find(snapshot->sequence);
+            LOG_ALWAYS_FATAL_IF(it == mLegacyLayers.end(), "Couldn't find layer object for %s",
+                                snapshot->getDebugString().c_str());
+            auto& legacyLayer = it->second;
+            sp<LayerFE> layerFE = getFactory().createLayerFE(legacyLayer->getName());
+            layerFE->mSnapshot = std::make_unique<frontend::LayerSnapshot>(*snapshot);
+            layers.emplace_back(legacyLayer.get(), std::move(layerFE));
+        }
+
+        return layers;
+    };
+}
+
+std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()>
+SurfaceFlinger::getLayerSnapshotsForScreenshots(uint32_t rootLayerId, uint32_t uid,
+                                                std::unordered_set<uint32_t> excludeLayerIds,
+                                                bool childrenOnly, const FloatRect& parentCrop) {
+    return [this, excludeLayerIds = std::move(excludeLayerIds), uid, rootLayerId, childrenOnly,
+            parentCrop]() {
+        frontend::LayerSnapshotBuilder::Args
+                args{.root = mLayerHierarchyBuilder.getPartialHierarchy(rootLayerId, childrenOnly),
+                     .layerLifecycleManager = mLayerLifecycleManager,
+                     .displays = mFrontEndDisplayInfos,
+                     .displayChanges = true,
+                     .globalShadowSettings = mDrawingState.globalShadowSettings,
+                     .supportsBlur = mSupportsBlur,
+                     .forceFullDamage = mForceFullDamage,
+                     .parentCrop = {parentCrop},
+                     .excludeLayerIds = std::move(excludeLayerIds)};
+        mLayerSnapshotBuilder.update(args);
+
+        auto getLayerSnapshotsFn = getLayerSnapshotsForScreenshots({}, uid);
+        std::vector<std::pair<Layer*, sp<LayerFE>>> layers = getLayerSnapshotsFn();
+        args.root = mLayerHierarchyBuilder.getHierarchy();
+        args.parentCrop.reset();
+        args.excludeLayerIds.clear();
+        mLayerSnapshotBuilder.update(args);
+        return layers;
+    };
+}
+
+SurfaceFlinger::LifecycleUpdate SurfaceFlinger::flushLifecycleUpdates() {
+    LifecycleUpdate update;
+    ATRACE_NAME("TransactionHandler:flushTransactions");
+    // Locking:
+    // 1. to prevent onHandleDestroyed from being called while the state lock is held,
+    // we must keep a copy of the transactions (specifically the composer
+    // states) around outside the scope of the lock.
+    // 2. Transactions and created layers do not share a lock. To prevent applying
+    // transactions with layers still in the createdLayer queue, flush the transactions
+    // before committing the created layers.
+    update.transactions = mTransactionHandler.flushTransactions();
+    {
+        // TODO(b/238781169) lockless queue this and keep order.
+        std::scoped_lock<std::mutex> lock(mCreatedLayersLock);
+        update.layerCreatedStates = std::move(mCreatedLayers);
+        mCreatedLayers.clear();
+        update.newLayers = std::move(mNewLayers);
+        mNewLayers.clear();
+        update.destroyedHandles = std::move(mDestroyedHandles);
+        mDestroyedHandles.clear();
+    }
+    return update;
+}
+
 // gui::ISurfaceComposer
 
 binder::Status SurfaceComposerAIDL::bootFinished() {
@@ -7452,9 +7850,9 @@
 
 binder::Status SurfaceComposerAIDL::createDisplayEventConnection(
         VsyncSource vsyncSource, EventRegistration eventRegistration,
-        sp<IDisplayEventConnection>* outConnection) {
+        const sp<IBinder>& layerHandle, sp<IDisplayEventConnection>* outConnection) {
     sp<IDisplayEventConnection> conn =
-            mFlinger->createDisplayEventConnection(vsyncSource, eventRegistration);
+            mFlinger->createDisplayEventConnection(vsyncSource, eventRegistration, layerHandle);
     if (conn == nullptr) {
         *outConnection = nullptr;
         return binderStatusFromStatusT(BAD_VALUE);
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 5b9bfd8..0bd15dc 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -71,9 +71,12 @@
 #include "FlagManager.h"
 #include "FrontEnd/DisplayInfo.h"
 #include "FrontEnd/LayerCreationArgs.h"
+#include "FrontEnd/LayerLifecycleManager.h"
 #include "FrontEnd/LayerSnapshot.h"
+#include "FrontEnd/LayerSnapshotBuilder.h"
 #include "FrontEnd/TransactionHandler.h"
 #include "LayerVector.h"
+#include "Scheduler/ISchedulerCallback.h"
 #include "Scheduler/RefreshRateSelector.h"
 #include "Scheduler/RefreshRateStats.h"
 #include "Scheduler/Scheduler.h"
@@ -449,6 +452,26 @@
         FINISHED,
     };
 
+    struct LayerCreatedState {
+        LayerCreatedState(const wp<Layer>& layer, const wp<Layer>& parent, bool addToRoot)
+              : layer(layer), initialParent(parent), addToRoot(addToRoot) {}
+        wp<Layer> layer;
+        // The initial parent of the created layer, used only when creating a layer inside
+        // SurfaceFlinger. If nullptr, the created layer may be added to the current root layers.
+        wp<Layer> initialParent;
+        // Whether the created layer should be added to the root set when it has no parent and the
+        // caller holds ACCESS_SURFACE_FLINGER. If false and there is no parent, the layer is
+        // added offscreen.
+        bool addToRoot;
+    };
+
+    struct LifecycleUpdate {
+        std::vector<TransactionState> transactions;
+        std::vector<LayerCreatedState> layerCreatedStates;
+        std::vector<std::unique_ptr<frontend::RequestedLayerState>> newLayers;
+        std::vector<uint32_t> destroyedHandles;
+    };
+
     template <typename F, std::enable_if_t<!std::is_member_function_pointer_v<F>>* = nullptr>
     static Dumper dumper(F&& dump) {
         using namespace std::placeholders;
@@ -508,7 +531,8 @@
     sp<IDisplayEventConnection> createDisplayEventConnection(
             gui::ISurfaceComposer::VsyncSource vsyncSource =
                     gui::ISurfaceComposer::VsyncSource::eVsyncSourceApp,
-            EventRegistrationFlags eventRegistration = {});
+            EventRegistrationFlags eventRegistration = {},
+            const sp<IBinder>& layerHandle = nullptr);
 
     status_t captureDisplay(const DisplayCaptureArgs&, const sp<IScreenCaptureListener>&);
     status_t captureDisplay(DisplayId, const sp<IScreenCaptureListener>&);
@@ -618,7 +642,7 @@
 
     // Toggles hardware VSYNC by calling into HWC.
     // TODO(b/241286146): Rename for self-explanatory API.
-    void setVsyncEnabled(bool) override;
+    void setVsyncEnabled(PhysicalDisplayId, bool) override;
     void requestDisplayModes(std::vector<display::DisplayModeRequest>) override;
     void kernelTimerChanged(bool expired) override;
     void triggerOnFrameRateOverridesChanged() override;
@@ -687,6 +711,17 @@
 
     void updateLayerGeometry();
     void updateLayerMetadataSnapshot();
+    std::vector<std::pair<Layer*, LayerFE*>> moveSnapshotsToCompositionArgs(
+            compositionengine::CompositionRefreshArgs& refreshArgs, bool cursorOnly,
+            int64_t vsyncId);
+    void moveSnapshotsFromCompositionArgs(compositionengine::CompositionRefreshArgs& refreshArgs,
+                                          std::vector<std::pair<Layer*, LayerFE*>>& layers);
+    bool updateLayerSnapshotsLegacy(VsyncId vsyncId, LifecycleUpdate& update,
+                                    bool transactionsFlushed, bool& out)
+            REQUIRES(kMainThreadContext);
+    bool updateLayerSnapshots(VsyncId vsyncId, LifecycleUpdate& update, bool transactionsFlushed,
+                              bool& out) REQUIRES(kMainThreadContext);
+    LifecycleUpdate flushLifecycleUpdates() REQUIRES(kMainThreadContext);
 
     void updateInputFlinger();
     void persistDisplayBrightness(bool needsComposite) REQUIRES(kMainThreadContext);
@@ -714,6 +749,8 @@
     bool flushTransactionQueues(VsyncId) REQUIRES(kMainThreadContext);
 
     bool applyTransactions(std::vector<TransactionState>&, VsyncId) REQUIRES(kMainThreadContext);
+    bool applyAndCommitDisplayTransactionStates(std::vector<TransactionState>& transactions)
+            REQUIRES(kMainThreadContext);
 
     // Returns true if there is at least one transaction that needs to be flushed
     bool transactionFlushNeeded();
@@ -729,7 +766,10 @@
                                   int64_t desiredPresentTime, bool isAutoTimestamp,
                                   int64_t postTime, uint32_t permissions, uint64_t transactionId)
             REQUIRES(mStateLock);
-
+    uint32_t updateLayerCallbacksAndStats(const FrameTimelineInfo&, ResolvedComposerState&,
+                                          int64_t desiredPresentTime, bool isAutoTimestamp,
+                                          int64_t postTime, uint32_t permissions,
+                                          uint64_t transactionId) REQUIRES(mStateLock);
     uint32_t getTransactionFlags() const;
 
     // Sets the masked bits, and schedules a commit if needed.
@@ -887,7 +927,7 @@
 
     // mark a region of a layer stack dirty. this updates the dirty
     // region of all screens presenting this layer stack.
-    void invalidateLayerStack(const sp<const Layer>& layer, const Region& dirty);
+    void invalidateLayerStack(const ui::LayerFilter& layerFilter, const Region& dirty);
 
     ui::LayerFilter makeLayerFilterForDisplay(DisplayId displayId, ui::LayerStack layerStack)
             REQUIRES(mStateLock) {
@@ -953,9 +993,9 @@
      */
     nsecs_t getVsyncPeriodFromHWC() const REQUIRES(mStateLock);
 
-    void setHWCVsyncEnabled(PhysicalDisplayId id, hal::Vsync enabled) {
-        mLastHWCVsyncState = enabled;
-        getHwComposer().setVsyncEnabled(id, enabled);
+    void setHWCVsyncEnabled(PhysicalDisplayId id, bool enabled) {
+        hal::Vsync halState = enabled ? hal::Vsync::ENABLE : hal::Vsync::DISABLE;
+        getHwComposer().setVsyncEnabled(id, halState);
     }
 
     using FenceTimePtr = std::shared_ptr<FenceTime>;
@@ -1090,7 +1130,15 @@
     pid_t mPid;
     std::future<void> mRenderEnginePrimeCacheFuture;
 
-    // access must be protected by mStateLock
+    // Access to variables protected by mStateLock follows per-thread conventions,
+    // because only the main thread is allowed to modify them:
+    // - read access from a non-main thread must lock mStateLock, since the main
+    // thread may modify these variables.
+    // - write access from a non-main thread is not permitted.
+    // - read access from the main thread can use an ftl::FakeGuard, since other
+    // threads must not modify these variables.
+    // - write access from the main thread must lock mStateLock, since another
+    // thread may be reading these variables.
     mutable Mutex mStateLock;
     State mCurrentState{LayerVector::StateSet::Current};
     std::atomic<int32_t> mTransactionFlags = 0;
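A minimal, self-contained sketch of the access conventions described above, using a hypothetical GuardedState class rather than SurfaceFlinger's members: the owning (main) thread is the only writer, it still takes the lock when writing so concurrent readers stay safe, and other threads must lock to read and never write.

    #include <mutex>

    class GuardedState {
    public:
        // Owner (main) thread only: writes take the lock so other threads can read safely.
        void writeFromOwner(int value) {
            std::lock_guard<std::mutex> lock(mLock);
            mValue = value;
        }
        // Owner thread only: no real lock needed, since no other thread ever writes.
        int readFromOwner() const { return mValue; }
        // Any other thread: must lock, because the owner thread may be writing.
        int readFromOthers() const {
            std::lock_guard<std::mutex> lock(mLock);
            return mValue;
        }
        // There is deliberately no write path for non-owner threads.
    private:
        mutable std::mutex mLock;
        int mValue = 0;
    };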
@@ -1145,6 +1193,7 @@
     // Set if LayerMetadata has changed since the last LayerMetadata snapshot.
     bool mLayerMetadataSnapshotNeeded = false;
 
+    // TODO(b/238781169) validate these on composition
     // Tracks layers that have pending frames which are candidates for being
     // latched.
     std::unordered_set<sp<Layer>, SpHash<Layer>> mLayersWithQueuedFrames;
@@ -1281,9 +1330,6 @@
     TimePoint mScheduledPresentTime GUARDED_BY(kMainThreadContext);
     TimePoint mExpectedPresentTime GUARDED_BY(kMainThreadContext);
 
-    hal::Vsync mHWCVsyncPendingState = hal::Vsync::DISABLE;
-    hal::Vsync mLastHWCVsyncState = hal::Vsync::DISABLE;
-
     // below flags are set by main thread only
     bool mSetActiveModePending = false;
 
@@ -1319,23 +1365,11 @@
             GUARDED_BY(mStateLock);
 
     mutable std::mutex mCreatedLayersLock;
-    struct LayerCreatedState {
-        LayerCreatedState(const wp<Layer>& layer, const wp<Layer> parent, bool addToRoot)
-              : layer(layer), initialParent(parent), addToRoot(addToRoot) {}
-        wp<Layer> layer;
-        // Indicates the initial parent of the created layer, only used for creating layer in
-        // SurfaceFlinger. If nullptr, it may add the created layer into the current root layers.
-        wp<Layer> initialParent;
-        // Indicates whether the layer getting created should be added at root if there's no parent
-        // and has permission ACCESS_SURFACE_FLINGER. If set to false and no parent, the layer will
-        // be added offscreen.
-        bool addToRoot;
-    };
 
     // A temporary pool that stores the created layers until they are added to the current state
     // on the main thread.
     std::vector<LayerCreatedState> mCreatedLayers GUARDED_BY(mCreatedLayersLock);
-    bool commitCreatedLayers(VsyncId);
+    bool commitCreatedLayers(VsyncId, std::vector<LayerCreatedState>& createdLayers);
     void handleLayerCreatedLocked(const LayerCreatedState&, VsyncId) REQUIRES(mStateLock);
 
     mutable std::mutex mMirrorDisplayLock;
@@ -1357,6 +1391,11 @@
         return hasDisplay(
                 [](const auto& display) { return display.isRefreshRateOverlayEnabled(); });
     }
+    std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()> getLayerSnapshotsForScreenshots(
+            std::optional<ui::LayerStack> layerStack, uint32_t uid);
+    std::function<std::vector<std::pair<Layer*, sp<LayerFE>>>()> getLayerSnapshotsForScreenshots(
+            uint32_t rootLayerId, uint32_t uid, std::unordered_set<uint32_t> excludeLayerIds,
+            bool childrenOnly, const FloatRect& parentCrop);
 
     const sp<WindowInfosListenerInvoker> mWindowInfosListenerInvoker;
 
@@ -1369,6 +1408,18 @@
 
     bool mPowerHintSessionEnabled;
 
+    bool mLayerLifecycleManagerEnabled = false;
+    bool mLegacyFrontEndEnabled = true;
+
+    frontend::LayerLifecycleManager mLayerLifecycleManager;
+    frontend::LayerHierarchyBuilder mLayerHierarchyBuilder{{}};
+    frontend::LayerSnapshotBuilder mLayerSnapshotBuilder;
+
+    std::vector<uint32_t> mDestroyedHandles;
+    std::vector<std::unique_ptr<frontend::RequestedLayerState>> mNewLayers;
+    // These Layer objects do not store any client state; they only help manage transaction
+    // callbacks and stats.
+    std::unordered_map<uint32_t, sp<Layer>> mLegacyLayers;
     struct {
         bool late = false;
         bool early = false;
@@ -1376,6 +1427,7 @@
 
     TransactionHandler mTransactionHandler;
     display::DisplayMap<ui::LayerStack, frontend::DisplayInfo> mFrontEndDisplayInfos;
+    bool mFrontEndDisplayInfosChanged = false;
 };
 
 class SurfaceComposerAIDL : public gui::BnSurfaceComposer {
@@ -1385,6 +1437,7 @@
     binder::Status bootFinished() override;
     binder::Status createDisplayEventConnection(
             VsyncSource vsyncSource, EventRegistration eventRegistration,
+            const sp<IBinder>& layerHandle,
             sp<gui::IDisplayEventConnection>* outConnection) override;
     binder::Status createConnection(sp<gui::ISurfaceComposerClient>* outClient) override;
     binder::Status createDisplay(const std::string& displayName, bool secure,
diff --git a/services/surfaceflinger/TransactionCallbackInvoker.cpp b/services/surfaceflinger/TransactionCallbackInvoker.cpp
index e5de759..3da98d4 100644
--- a/services/surfaceflinger/TransactionCallbackInvoker.cpp
+++ b/services/surfaceflinger/TransactionCallbackInvoker.cpp
@@ -137,7 +137,6 @@
             sp<Fence> currentFence = future.get().value_or(Fence::NO_FENCE);
             if (prevFence == nullptr && currentFence->getStatus() != Fence::Status::Invalid) {
                 prevFence = std::move(currentFence);
-                handle->previousReleaseFence = prevFence;
             } else if (prevFence != nullptr) {
                 // If both fences are signaled or both are unsignaled, we need to merge
                 // them to get an accurate timestamp.
@@ -147,8 +146,7 @@
                     snprintf(fenceName, 32, "%.28s", handle->name.c_str());
                     sp<Fence> mergedFence = Fence::merge(fenceName, prevFence, currentFence);
                     if (mergedFence->isValid()) {
-                        handle->previousReleaseFence = std::move(mergedFence);
-                        prevFence = handle->previousReleaseFence;
+                        prevFence = std::move(mergedFence);
                     }
                 } else if (currentFence->getStatus() == Fence::Status::Unsignaled) {
                     // If one fence has signaled and the other hasn't, the unsignaled
@@ -158,10 +156,11 @@
                     // by this point, they will have both signaled and only the timestamp
                     // will be slightly off; any dependencies after this point will
                     // already have been met.
-                    handle->previousReleaseFence = std::move(currentFence);
+                    prevFence = std::move(currentFence);
                 }
             }
         }
+        handle->previousReleaseFence = prevFence;
         handle->previousReleaseFences.clear();
 
         FrameEventHistoryStats eventStats(handle->frameNumber,
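The reworked loop above accumulates a single "previous release fence": each incoming fence is folded into the running value (merged when both are in the same state, with a still-pending fence winning otherwise), and the result is written to the handle exactly once after the loop. A simplified, self-contained sketch of that fold, using a FakeFence stand-in instead of the real Fence/sp<Fence> types:

    #include <memory>
    #include <vector>

    struct FakeFence { bool signaled = false; };   // stand-in for a real fence
    using FencePtr = std::shared_ptr<FakeFence>;

    // Hypothetical merge; the real code uses Fence::merge() and fence status queries.
    FencePtr mergeFences(const FencePtr& a, const FencePtr& b) {
        auto merged = std::make_shared<FakeFence>();
        merged->signaled = a->signaled && b->signaled;
        return merged;
    }

    FencePtr accumulatePreviousFence(const std::vector<FencePtr>& fences) {
        FencePtr prev;
        for (const auto& current : fences) {
            if (!prev) {
                prev = current;                         // first fence seen
            } else if (prev->signaled == current->signaled) {
                prev = mergeFences(prev, current);      // both settled or both pending: merge
            } else if (!current->signaled) {
                prev = current;                         // keep the still-pending fence
            }
        }
        return prev;                                    // caller stores this exactly once
    }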
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h b/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
index cdffbb4..5303db3 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
@@ -226,27 +226,27 @@
     TestableScheduler(const std::shared_ptr<scheduler::RefreshRateSelector>& selectorPtr,
                       sp<VsyncModulator> modulatorPtr, ISchedulerCallback& callback)
           : TestableScheduler(std::make_unique<android::mock::VsyncController>(),
-                              std::make_unique<android::mock::VSyncTracker>(), selectorPtr,
+                              std::make_shared<android::mock::VSyncTracker>(), selectorPtr,
                               std::move(modulatorPtr), callback) {}
 
     TestableScheduler(std::unique_ptr<VsyncController> controller,
-                      std::unique_ptr<VSyncTracker> tracker,
+                      VsyncSchedule::TrackerPtr tracker,
                       std::shared_ptr<RefreshRateSelector> selectorPtr,
                       sp<VsyncModulator> modulatorPtr, ISchedulerCallback& callback)
           : Scheduler(*this, callback, Feature::kContentDetection, std::move(modulatorPtr)) {
-        mVsyncSchedule.emplace(VsyncSchedule(std::move(tracker), nullptr, std::move(controller)));
-
         const auto displayId = selectorPtr->getActiveMode().modePtr->getPhysicalDisplayId();
         registerDisplay(displayId, std::move(selectorPtr));
+        mVsyncSchedules.emplace_or_replace(displayId,
+                                           std::shared_ptr<VsyncSchedule>(
+                                                   new VsyncSchedule(displayId, std::move(tracker),
+                                                                     nullptr,
+                                                                     std::move(controller))));
     }
 
     ConnectionHandle createConnection(std::unique_ptr<EventThread> eventThread) {
         return Scheduler::createConnection(std::move(eventThread));
     }
 
-    auto &mutablePrimaryHWVsyncEnabled() { return mPrimaryHWVsyncEnabled; }
-    auto &mutableHWVsyncAvailable() { return mHWVsyncAvailable; }
-
     auto &mutableLayerHistory() { return mLayerHistory; }
 
     auto refreshRateSelector() { return leaderSelectorPtr(); }
@@ -649,10 +649,10 @@
 
     // The ISchedulerCallback argument can be nullptr for a no-op implementation.
     void setupScheduler(std::unique_ptr<scheduler::VsyncController> vsyncController,
-                        std::unique_ptr<scheduler::VSyncTracker> vsyncTracker,
+                        std::shared_ptr<scheduler::VSyncTracker> vsyncTracker,
                         std::unique_ptr<EventThread> appEventThread,
                         std::unique_ptr<EventThread> sfEventThread,
-                        scheduler::ISchedulerCallback *callback = nullptr,
+                        scheduler::ISchedulerCallback* callback = nullptr,
                         bool hasMultipleModes = false) {
         constexpr DisplayModeId kModeId60{0};
         DisplayModes modes = makeModes(mock::createDisplayMode(kModeId60, 60_Hz));
@@ -791,7 +791,7 @@
     }
 
 private:
-    void setVsyncEnabled(bool) override {}
+    void setVsyncEnabled(PhysicalDisplayId, bool) override {}
     void requestDisplayModes(std::vector<display::DisplayModeRequest>) override {}
     void kernelTimerChanged(bool) override {}
     void triggerOnFrameRateOverridesChanged() override {}
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp b/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp
index 11719c4..c088e7b 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_layer_fuzzer.cpp
@@ -148,7 +148,7 @@
     layer->fenceHasSignaled();
     layer->onPreComposition(mFdp.ConsumeIntegral<int64_t>());
     const std::vector<sp<CallbackHandle>> callbacks;
-    layer->setTransactionCompletedListeners(callbacks);
+    layer->setTransactionCompletedListeners(callbacks, mFdp.ConsumeBool());
 
     std::shared_ptr<renderengine::ExternalTexture> texture = std::make_shared<
             renderengine::mock::FakeExternalTexture>(mFdp.ConsumeIntegral<uint32_t>(),
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp b/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp
index 44805db..b7b42ab 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_scheduler_fuzzer.cpp
@@ -47,19 +47,23 @@
                                      PowerMode::DOZE_SUSPEND, PowerMode::ON_SUSPEND};
 
 constexpr uint16_t kRandomStringLength = 256;
-constexpr std::chrono::duration kSyncPeriod(16ms);
-
 template <typename T>
 void dump(T* component, FuzzedDataProvider* fdp) {
     std::string res = fdp->ConsumeRandomLengthString(kRandomStringLength);
     component->dump(res);
 }
 
-class SchedulerFuzzer {
+class SchedulerFuzzer : public IEventThreadCallback {
 public:
     SchedulerFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
     void process();
 
+    // IEventThreadCallback overrides.
+    bool isVsyncTargetForUid(TimePoint /* expectedVsyncTime */, uid_t) const override {
+        return true;
+    }
+    Fps getLeaderRenderFrameRate(uid_t) const override { return 60_Hz; }
+
 private:
     void fuzzRefreshRateSelection();
     void fuzzRefreshRateSelector();
@@ -76,7 +80,7 @@
 
     FuzzedDataProvider mFdp;
 
-    std::optional<scheduler::VsyncSchedule> mVsyncSchedule;
+    std::shared_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
 };
 
 PhysicalDisplayId SchedulerFuzzer::getPhysicalDisplayId() {
@@ -90,12 +94,12 @@
 }
 
 void SchedulerFuzzer::fuzzEventThread() {
-    mVsyncSchedule.emplace(scheduler::VsyncSchedule(std::make_unique<mock::VSyncTracker>(),
-                                                    std::make_unique<mock::VSyncDispatch>(),
-                                                    nullptr));
-    const auto getVsyncPeriod = [](uid_t /* uid */) { return kSyncPeriod.count(); };
+    mVsyncSchedule = std::shared_ptr<scheduler::VsyncSchedule>(
+            new scheduler::VsyncSchedule(getPhysicalDisplayId(),
+                                         std::make_shared<mock::VSyncTracker>(),
+                                         std::make_shared<mock::VSyncDispatch>(), nullptr));
     std::unique_ptr<android::impl::EventThread> thread = std::make_unique<
-            android::impl::EventThread>("fuzzer", *mVsyncSchedule, nullptr, nullptr, getVsyncPeriod,
+            android::impl::EventThread>("fuzzer", mVsyncSchedule, *this, nullptr /* TokenManager */,
                                         (std::chrono::nanoseconds)mFdp.ConsumeIntegral<uint64_t>(),
                                         (std::chrono::nanoseconds)mFdp.ConsumeIntegral<uint64_t>());
 
@@ -132,7 +136,7 @@
 }
 
 void SchedulerFuzzer::fuzzVSyncDispatchTimerQueue() {
-    FuzzImplVSyncTracker stubTracker{mFdp.ConsumeIntegral<nsecs_t>()};
+    auto stubTracker = std::make_shared<FuzzImplVSyncTracker>(mFdp.ConsumeIntegral<nsecs_t>());
     scheduler::VSyncDispatchTimerQueue
             mDispatch{std::make_unique<scheduler::ControllableClock>(), stubTracker,
                       mFdp.ConsumeIntegral<nsecs_t>() /*dispatchGroupThreshold*/,
@@ -145,17 +149,17 @@
     scheduler::VSyncDispatchTimerQueueEntry entry(
             "fuzz", [](auto, auto, auto) {},
             mFdp.ConsumeIntegral<nsecs_t>() /*vSyncMoveThreshold*/);
-    entry.update(stubTracker, 0);
+    entry.update(*stubTracker, 0);
     entry.schedule({.workDuration = mFdp.ConsumeIntegral<nsecs_t>(),
                     .readyDuration = mFdp.ConsumeIntegral<nsecs_t>(),
                     .earliestVsync = mFdp.ConsumeIntegral<nsecs_t>()},
-                   stubTracker, 0);
+                   *stubTracker, 0);
     entry.disarm();
     entry.ensureNotRunning();
     entry.schedule({.workDuration = mFdp.ConsumeIntegral<nsecs_t>(),
                     .readyDuration = mFdp.ConsumeIntegral<nsecs_t>(),
                     .earliestVsync = mFdp.ConsumeIntegral<nsecs_t>()},
-                   stubTracker, 0);
+                   *stubTracker, 0);
     auto const wakeup = entry.wakeupTime();
     auto const ready = entry.readyTime();
     entry.callback(entry.executing(), *wakeup, *ready);
@@ -169,8 +173,8 @@
     uint16_t now = mFdp.ConsumeIntegral<uint16_t>();
     uint16_t historySize = mFdp.ConsumeIntegralInRange<uint16_t>(1, UINT16_MAX);
     uint16_t minimumSamplesForPrediction = mFdp.ConsumeIntegralInRange<uint16_t>(1, UINT16_MAX);
-    scheduler::VSyncPredictor tracker{mFdp.ConsumeIntegral<uint16_t>() /*period*/, historySize,
-                                      minimumSamplesForPrediction,
+    scheduler::VSyncPredictor tracker{"predictor", mFdp.ConsumeIntegral<uint16_t>() /*period*/,
+                                      historySize, minimumSamplesForPrediction,
                                       mFdp.ConsumeIntegral<uint32_t>() /*outlierTolerancePercent*/};
     uint16_t period = mFdp.ConsumeIntegral<uint16_t>();
     tracker.setPeriod(period);
@@ -242,13 +246,15 @@
 
 void SchedulerFuzzer::fuzzVSyncReactor() {
     std::shared_ptr<FuzzImplVSyncTracker> vSyncTracker = std::make_shared<FuzzImplVSyncTracker>();
-    scheduler::VSyncReactor reactor(std::make_unique<ClockWrapper>(
+    scheduler::VSyncReactor reactor("fuzzer_reactor",
+                                    std::make_unique<ClockWrapper>(
                                             std::make_shared<FuzzImplClock>()),
                                     *vSyncTracker, mFdp.ConsumeIntegral<uint8_t>() /*pendingLimit*/,
                                     false);
 
-    reactor.startPeriodTransition(mFdp.ConsumeIntegral<nsecs_t>());
-    bool periodFlushed = mFdp.ConsumeBool();
+    reactor.startPeriodTransition(mFdp.ConsumeIntegral<nsecs_t>(), mFdp.ConsumeBool());
+    bool periodFlushed = false; // Value does not matter, since this is an out
+                                // param from addHwVsyncTimestamp.
     reactor.addHwVsyncTimestamp(0, std::nullopt, &periodFlushed);
     reactor.addHwVsyncTimestamp(mFdp.ConsumeIntegral<nsecs_t>() /*newPeriod*/, std::nullopt,
                                 &periodFlushed);
diff --git a/services/surfaceflinger/tests/unittests/CompositionTest.cpp b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
index 0416e93..419c818 100644
--- a/services/surfaceflinger/tests/unittests/CompositionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
@@ -139,7 +139,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index e0b508a..214b028 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -74,8 +74,8 @@
                                                              mock::EventThread::kCallingUid,
                                                              ResyncCallback())));
 
-    mFlinger.setupScheduler(std::unique_ptr<scheduler::VsyncController>(mVsyncController),
-                            std::unique_ptr<scheduler::VSyncTracker>(mVSyncTracker),
+    mFlinger.setupScheduler(std::make_unique<mock::VsyncController>(),
+                            std::make_shared<mock::VSyncTracker>(),
                             std::unique_ptr<EventThread>(mEventThread),
                             std::unique_ptr<EventThread>(mSFEventThread),
                             TestableSurfaceFlinger::SchedulerCallbackImpl::kMock);
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h b/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
index 223f4db..c9245d6 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
@@ -128,8 +128,6 @@
     renderengine::mock::RenderEngine* mRenderEngine = new renderengine::mock::RenderEngine();
     Hwc2::mock::Composer* mComposer = nullptr;
 
-    mock::VsyncController* mVsyncController = new mock::VsyncController;
-    mock::VSyncTracker* mVSyncTracker = new mock::VSyncTracker;
     mock::EventThread* mEventThread = new mock::EventThread;
     mock::EventThread* mSFEventThread = new mock::EventThread;
 
diff --git a/services/surfaceflinger/tests/unittests/EventThreadTest.cpp b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
index b3aba37..5cecb8e 100644
--- a/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
+++ b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
@@ -52,11 +52,9 @@
 constexpr PhysicalDisplayId DISPLAY_ID_64BIT =
         PhysicalDisplayId::fromEdid(0xffu, 0xffffu, 0xffff'ffffu);
 
-constexpr std::chrono::duration VSYNC_PERIOD(16ms);
-
 } // namespace
 
-class EventThreadTest : public testing::Test {
+class EventThreadTest : public testing::Test, public IEventThreadCallback {
 protected:
     static constexpr std::chrono::nanoseconds kWorkDuration = 0ms;
     static constexpr std::chrono::nanoseconds kReadyDuration = 3ms;
@@ -97,7 +95,7 @@
     void expectConfigChangedEventReceivedByConnection(PhysicalDisplayId expectedDisplayId,
                                                       int32_t expectedConfigId,
                                                       nsecs_t expectedVsyncPeriod);
-    void expectThrottleVsyncReceived(nsecs_t expectedTimestamp, uid_t);
+    void expectThrottleVsyncReceived(TimePoint expectedTimestamp, uid_t);
     void expectUidFrameRateMappingEventReceivedByConnection(PhysicalDisplayId expectedDisplayId,
                                                             std::vector<FrameRateOverride>);
 
@@ -106,6 +104,14 @@
         mThread->onVsync(expectedPresentationTime, timestamp, deadlineTimestamp);
     }
 
+    // IEventThreadCallback overrides.
+    bool isVsyncTargetForUid(TimePoint expectedVsyncTime, uid_t uid) const override {
+        mThrottleVsyncCallRecorder.getInvocable()(expectedVsyncTime, uid);
+        return uid != mThrottledConnectionUid;
+    }
+
+    Fps getLeaderRenderFrameRate(uid_t uid) const override { return 60_Hz; }
+
     AsyncCallRecorderWithCannedReturn<
             scheduler::ScheduleResult (*)(scheduler::VSyncDispatch::CallbackToken,
                                           scheduler::VSyncDispatch::ScheduleTiming)>
@@ -121,11 +127,11 @@
     AsyncCallRecorder<void (*)(scheduler::VSyncDispatch::CallbackToken)>
             mVSyncCallbackUnregisterRecorder;
     AsyncCallRecorder<void (*)()> mResyncCallRecorder;
-    AsyncCallRecorder<void (*)(nsecs_t, uid_t)> mThrottleVsyncCallRecorder;
+    mutable AsyncCallRecorder<void (*)(TimePoint, uid_t)> mThrottleVsyncCallRecorder;
     ConnectionEventRecorder mConnectionEventCallRecorder{0};
     ConnectionEventRecorder mThrottledConnectionEventCallRecorder{0};
 
-    std::optional<scheduler::VsyncSchedule> mVsyncSchedule;
+    std::shared_ptr<scheduler::VsyncSchedule> mVsyncSchedule;
     std::unique_ptr<impl::EventThread> mThread;
     sp<MockEventThreadConnection> mConnection;
     sp<MockEventThreadConnection> mThrottledConnection;
@@ -140,12 +146,12 @@
             ::testing::UnitTest::GetInstance()->current_test_info();
     ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
 
-    mVsyncSchedule.emplace(scheduler::VsyncSchedule(std::make_unique<mock::VSyncTracker>(),
-                                                    std::make_unique<mock::VSyncDispatch>(),
-                                                    nullptr));
-
-    mock::VSyncDispatch& mockDispatch =
-            *static_cast<mock::VSyncDispatch*>(&mVsyncSchedule->getDispatch());
+    auto mockDispatchPtr = std::make_shared<mock::VSyncDispatch>();
+    mVsyncSchedule = std::shared_ptr<scheduler::VsyncSchedule>(
+            new scheduler::VsyncSchedule(INTERNAL_DISPLAY_ID,
+                                         std::make_shared<mock::VSyncTracker>(), mockDispatchPtr,
+                                         nullptr));
+    mock::VSyncDispatch& mockDispatch = *mockDispatchPtr;
     EXPECT_CALL(mockDispatch, registerCallback(_, _))
             .WillRepeatedly(Invoke(mVSyncCallbackRegisterRecorder.getInvocable()));
     EXPECT_CALL(mockDispatch, schedule(_, _))
@@ -180,19 +186,10 @@
 }
 
 void EventThreadTest::createThread() {
-    const auto throttleVsync = [&](nsecs_t expectedVsyncTimestamp, uid_t uid) {
-        mThrottleVsyncCallRecorder.getInvocable()(expectedVsyncTimestamp, uid);
-        return (uid == mThrottledConnectionUid);
-    };
-    const auto getVsyncPeriod = [](uid_t uid) {
-        return VSYNC_PERIOD.count();
-    };
-
     mTokenManager = std::make_unique<frametimeline::impl::TokenManager>();
     mThread =
-            std::make_unique<impl::EventThread>(/*std::move(source), */ "EventThreadTest",
-                                                *mVsyncSchedule, mTokenManager.get(), throttleVsync,
-                                                getVsyncPeriod, kWorkDuration, kReadyDuration);
+            std::make_unique<impl::EventThread>("EventThreadTest", mVsyncSchedule, *this,
+                                                mTokenManager.get(), kWorkDuration, kReadyDuration);
 
     // EventThread should register itself as VSyncSource callback.
     EXPECT_TRUE(mVSyncCallbackRegisterRecorder.waitForCall().has_value());
@@ -225,7 +222,7 @@
     EXPECT_EQ(expectedReadyDuration.count(), std::get<1>(args.value()).readyDuration);
 }
 
-void EventThreadTest::expectThrottleVsyncReceived(nsecs_t expectedTimestamp, uid_t uid) {
+void EventThreadTest::expectThrottleVsyncReceived(TimePoint expectedTimestamp, uid_t uid) {
     auto args = mThrottleVsyncCallRecorder.waitForCall();
     ASSERT_TRUE(args.has_value());
     EXPECT_EQ(expectedTimestamp, std::get<0>(args.value()));
@@ -376,7 +373,7 @@
     // Use the received callback to signal a first vsync event.
     // The throttler should receive the event, as well as the connection.
     onVSyncEvent(123, 456, 789);
-    expectThrottleVsyncReceived(456, mConnectionUid);
+    expectThrottleVsyncReceived(TimePoint::fromNs(456), mConnectionUid);
     expectVsyncEventReceivedByConnection(123, 1u);
 
     // EventThread is requesting one more callback due to VsyncRequest::SingleSuppressCallback
@@ -494,17 +491,17 @@
     // Send a vsync event. EventThread should then make a call to the
     // throttler, and the connection.
     onVSyncEvent(123, 456, 789);
-    expectThrottleVsyncReceived(456, mConnectionUid);
+    expectThrottleVsyncReceived(TimePoint::fromNs(456), mConnectionUid);
     expectVsyncEventReceivedByConnection(123, 1u);
 
     // A second event should go to the same places.
     onVSyncEvent(456, 123, 0);
-    expectThrottleVsyncReceived(123, mConnectionUid);
+    expectThrottleVsyncReceived(TimePoint::fromNs(123), mConnectionUid);
     expectVsyncEventReceivedByConnection(456, 2u);
 
     // A third event should go to the same places.
     onVSyncEvent(789, 777, 111);
-    expectThrottleVsyncReceived(777, mConnectionUid);
+    expectThrottleVsyncReceived(TimePoint::fromNs(777), mConnectionUid);
     expectVsyncEventReceivedByConnection(789, 3u);
 }
 
@@ -743,7 +740,7 @@
     // Use the received callback to signal a first vsync event.
     // The throttler should receive the event, but not the connection.
     onVSyncEvent(123, 456, 789);
-    expectThrottleVsyncReceived(456, mThrottledConnectionUid);
+    expectThrottleVsyncReceived(TimePoint::fromNs(456), mThrottledConnectionUid);
     mThrottledConnectionEventCallRecorder.waitForUnexpectedCall();
     expectVSyncCallbackScheduleReceived(true);
 
@@ -751,7 +748,7 @@
     // The throttler should receive the event, but the connection should
     // not as it was only interested in the first.
     onVSyncEvent(456, 123, 0);
-    expectThrottleVsyncReceived(123, mThrottledConnectionUid);
+    expectThrottleVsyncReceived(TimePoint::fromNs(123), mThrottledConnectionUid);
     EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
     expectVSyncCallbackScheduleReceived(true);
 
diff --git a/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp b/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp
index 1cd9e49..248061c 100644
--- a/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FpsReporterTest.cpp
@@ -137,7 +137,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp b/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
index ac63a0e..ff7c2c9 100644
--- a/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
@@ -125,7 +125,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/GameModeTest.cpp b/services/surfaceflinger/tests/unittests/GameModeTest.cpp
index 29aa717..ddf871b 100644
--- a/services/surfaceflinger/tests/unittests/GameModeTest.cpp
+++ b/services/surfaceflinger/tests/unittests/GameModeTest.cpp
@@ -76,7 +76,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/HWComposerTest.cpp b/services/surfaceflinger/tests/unittests/HWComposerTest.cpp
index afbc57a..9534f3b 100644
--- a/services/surfaceflinger/tests/unittests/HWComposerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/HWComposerTest.cpp
@@ -118,6 +118,34 @@
     }
 }
 
+TEST_F(HWComposerTest, onVsync) {
+    constexpr hal::HWDisplayId kHwcDisplayId = 1;
+    expectHotplugConnect(kHwcDisplayId);
+
+    const auto info = mHwc.onHotplug(kHwcDisplayId, hal::Connection::CONNECTED);
+    ASSERT_TRUE(info);
+
+    const auto physicalDisplayId = info->id;
+
+    // Deliberately chosen not to match DisplayData.lastPresentTimestamp's
+    // initial value.
+    constexpr nsecs_t kTimestamp = 1;
+    auto displayIdOpt = mHwc.onVsync(kHwcDisplayId, kTimestamp);
+    ASSERT_TRUE(displayIdOpt);
+    EXPECT_EQ(physicalDisplayId, displayIdOpt);
+
+    // Sending the same timestamp again should be ignored.
+    displayIdOpt = mHwc.onVsync(kHwcDisplayId, kTimestamp);
+    EXPECT_FALSE(displayIdOpt);
+}
+
+TEST_F(HWComposerTest, onVsyncInvalid) {
+    constexpr hal::HWDisplayId kInvalidHwcDisplayId = 2;
+    constexpr nsecs_t kTimestamp = 1;
+    const auto displayIdOpt = mHwc.onVsync(kInvalidHwcDisplayId, kTimestamp);
+    EXPECT_FALSE(displayIdOpt);
+}
+
 struct MockHWC2ComposerCallback final : StrictMock<HWC2::ComposerCallback> {
     MOCK_METHOD2(onComposerHalHotplug, void(hal::HWDisplayId, hal::Connection));
     MOCK_METHOD1(onComposerHalRefresh, void(hal::HWDisplayId));
diff --git a/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp b/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp
index ee42e19..23506b1 100644
--- a/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp
+++ b/services/surfaceflinger/tests/unittests/LayerTestUtils.cpp
@@ -64,7 +64,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
index 7aa5201..8f1b450 100644
--- a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
@@ -67,12 +67,12 @@
 
 struct MessageQueueTest : testing::Test {
     void SetUp() override {
-        EXPECT_CALL(mVSyncDispatch, registerCallback(_, "sf")).WillOnce(Return(mCallbackToken));
+        EXPECT_CALL(*mVSyncDispatch, registerCallback(_, "sf")).WillOnce(Return(mCallbackToken));
         EXPECT_NO_FATAL_FAILURE(mEventQueue.initVsync(mVSyncDispatch, mTokenManager, kDuration));
-        EXPECT_CALL(mVSyncDispatch, unregisterCallback(mCallbackToken)).Times(1);
+        EXPECT_CALL(*mVSyncDispatch, unregisterCallback(mCallbackToken)).Times(1);
     }
 
-    mock::VSyncDispatch mVSyncDispatch;
+    std::shared_ptr<mock::VSyncDispatch> mVSyncDispatch = std::make_shared<mock::VSyncDispatch>();
     MockTokenManager mTokenManager;
     TestableMessageQueue mEventQueue;
 
@@ -90,7 +90,7 @@
                                                                  .earliestVsync = 0};
     EXPECT_FALSE(mEventQueue.getScheduledFrameTime());
 
-    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
+    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
@@ -103,13 +103,13 @@
                                                                  .readyDuration = 0,
                                                                  .earliestVsync = 0};
 
-    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
+    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
     EXPECT_EQ(1234, mEventQueue.getScheduledFrameTime()->time_since_epoch().count());
 
-    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(4567));
+    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(4567));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
@@ -122,7 +122,7 @@
                                                                  .readyDuration = 0,
                                                                  .earliestVsync = 0};
 
-    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
+    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
@@ -149,7 +149,7 @@
                                                      .readyDuration = 0,
                                                      .earliestVsync = kPresentTime.ns()};
 
-    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timingAfterCallback)).WillOnce(Return(0));
+    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timingAfterCallback)).WillOnce(Return(0));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 }
 
@@ -161,7 +161,7 @@
                                                      .readyDuration = 0,
                                                      .earliestVsync = 0};
 
-    EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(0));
+    EXPECT_CALL(*mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(0));
     EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 }
 
diff --git a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
index 4b15385..f0dd06d 100644
--- a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include <ftl/fake_guard.h>
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 #include <log/log.h>
@@ -176,7 +177,7 @@
     ASSERT_EQ(0u, mScheduler->getNumActiveLayers());
 
     constexpr hal::PowerMode kPowerModeOn = hal::PowerMode::ON;
-    mScheduler->setDisplayPowerMode(kPowerModeOn);
+    FTL_FAKE_GUARD(kMainThreadContext, mScheduler->setDisplayPowerMode(kDisplayId1, kPowerModeOn));
 
     constexpr uint32_t kDisplayArea = 999'999;
     mScheduler->onActiveDisplayAreaChanged(kDisplayArea);
@@ -248,7 +249,7 @@
     mScheduler->recordLayerHistory(layer.get(), 0, LayerHistory::LayerUpdateType::Buffer);
 
     constexpr hal::PowerMode kPowerModeOn = hal::PowerMode::ON;
-    mScheduler->setDisplayPowerMode(kPowerModeOn);
+    FTL_FAKE_GUARD(kMainThreadContext, mScheduler->setDisplayPowerMode(kDisplayId1, kPowerModeOn));
 
     constexpr uint32_t kDisplayArea = 999'999;
     mScheduler->onActiveDisplayAreaChanged(kDisplayArea);
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
index ad3bd35..31f948f 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
@@ -100,7 +100,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
index f553a23..98644aa 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
@@ -44,7 +44,10 @@
     // We expect a scheduled commit for the display transaction.
     EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
-    EXPECT_CALL(*mVSyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
+    EXPECT_CALL(static_cast<mock::VSyncTracker&>(
+                        mFlinger.scheduler()->getVsyncSchedule()->getTracker()),
+                nextAnticipatedVSyncTimeFrom(_))
+            .WillRepeatedly(Return(0));
 
     // --------------------------------------------------------------------
     // Invocation
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp
index 622717f..2a0f2ef 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_PowerHintTest.cpp
@@ -116,7 +116,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
index ab732ed..80ad22c 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
@@ -61,7 +61,7 @@
 struct EventThreadBaseSupportedVariant {
     static void setupVsyncAndEventThreadNoCallExpectations(DisplayTransactionTest* test) {
         // The callback should not be notified to toggle VSYNC.
-        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_)).Times(0);
+        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, _)).Times(0);
 
         // The event thread should not be notified.
         EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
@@ -71,24 +71,28 @@
 
 struct EventThreadNotSupportedVariant : public EventThreadBaseSupportedVariant {
     static void setupAcquireAndEnableVsyncCallExpectations(DisplayTransactionTest* test) {
-        // These calls are only expected for the primary display.
+        // The callback should be notified to enable VSYNC.
+        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, true)).Times(1);
 
-        // Instead expect no calls.
-        setupVsyncAndEventThreadNoCallExpectations(test);
+        // The event thread should not be notified.
+        EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
+        EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(0);
     }
 
     static void setupReleaseAndDisableVsyncCallExpectations(DisplayTransactionTest* test) {
-        // These calls are only expected for the primary display.
+        // The callback should be notified to disable VSYNC.
+        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, false)).Times(1);
 
-        // Instead expect no calls.
-        setupVsyncAndEventThreadNoCallExpectations(test);
+        // The event thread should not be notified.
+        EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
+        EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(0);
     }
 };
 
 struct EventThreadIsSupportedVariant : public EventThreadBaseSupportedVariant {
     static void setupAcquireAndEnableVsyncCallExpectations(DisplayTransactionTest* test) {
         // The callback should be notified to enable VSYNC.
-        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(true)).Times(1);
+        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, true)).Times(1);
 
         // The event thread should be notified that the screen was acquired.
         EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(1);
@@ -96,7 +100,7 @@
 
     static void setupReleaseAndDisableVsyncCallExpectations(DisplayTransactionTest* test) {
         // The callback should be notified to disable VSYNC.
-        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(false)).Times(1);
+        EXPECT_CALL(test->mFlinger.mockSchedulerCallback(), setVsyncEnabled(_, false)).Times(1);
 
         // The event thread should not be notified that the screen was released.
         EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(1);
@@ -105,8 +109,12 @@
 
 struct DispSyncIsSupportedVariant {
     static void setupResetModelCallExpectations(DisplayTransactionTest* test) {
-        EXPECT_CALL(*test->mVsyncController, startPeriodTransition(DEFAULT_VSYNC_PERIOD)).Times(1);
-        EXPECT_CALL(*test->mVSyncTracker, resetModel()).Times(1);
+        auto vsyncSchedule = test->mFlinger.scheduler()->getVsyncSchedule();
+        EXPECT_CALL(static_cast<mock::VsyncController&>(vsyncSchedule->getController()),
+                    startPeriodTransition(DEFAULT_VSYNC_PERIOD, false))
+                .Times(1);
+        EXPECT_CALL(static_cast<mock::VSyncTracker&>(vsyncSchedule->getTracker()), resetModel())
+                .Times(1);
     }
 };
 
@@ -262,8 +270,9 @@
         return display;
     }
 
-    static void setInitialPrimaryHWVsyncEnabled(DisplayTransactionTest* test, bool enabled) {
-        test->mFlinger.scheduler()->mutablePrimaryHWVsyncEnabled() = enabled;
+    static void setInitialHwVsyncEnabled(DisplayTransactionTest* test, PhysicalDisplayId id,
+                                         bool enabled) {
+        test->mFlinger.scheduler()->setInitialHwVsyncEnabled(id, enabled);
     }
 
     static void setupRepaintEverythingCallExpectations(DisplayTransactionTest* test) {
@@ -300,6 +309,11 @@
 // A sample configuration for the external display.
 // In addition to not having event thread support, we emulate not having doze
 // support.
+// FIXME (b/267483230): ExternalDisplay supports the features tracked in
+// DispSyncIsSupportedVariant, but is the follower, so the
+// expectations set by DispSyncIsSupportedVariant don't match (wrong schedule).
+// We need a way to retrieve the proper DisplayId from
+// setupResetModelCallExpectations (or pass it in).
 template <typename TransitionVariant>
 using ExternalDisplayPowerCase =
         DisplayPowerCase<ExternalDisplayVariant, DozeNotSupportedVariant<ExternalDisplayVariant>,
@@ -329,9 +343,12 @@
     Case::Doze::setupComposerCallExpectations(this);
     auto display =
             Case::injectDisplayWithInitialPowerMode(this, Case::Transition::INITIAL_POWER_MODE);
-    Case::setInitialPrimaryHWVsyncEnabled(this,
-                                          PowerModeInitialVSyncEnabled<
-                                                  Case::Transition::INITIAL_POWER_MODE>::value);
+    auto displayId = display->getId();
+    if (auto physicalDisplayId = PhysicalDisplayId::tryCast(displayId)) {
+        Case::setInitialHwVsyncEnabled(this, *physicalDisplayId,
+                                       PowerModeInitialVSyncEnabled<
+                                               Case::Transition::INITIAL_POWER_MODE>::value);
+    }
 
     // --------------------------------------------------------------------
     // Call Expectations
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp
index fed6a1a..7e14588 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_UpdateLayerMetadataSnapshotTest.cpp
@@ -34,7 +34,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/TestableScheduler.h b/services/surfaceflinger/tests/unittests/TestableScheduler.h
index 6cf6141..c360f93 100644
--- a/services/surfaceflinger/tests/unittests/TestableScheduler.h
+++ b/services/surfaceflinger/tests/unittests/TestableScheduler.h
@@ -37,19 +37,16 @@
 public:
     TestableScheduler(RefreshRateSelectorPtr selectorPtr, ISchedulerCallback& callback)
           : TestableScheduler(std::make_unique<mock::VsyncController>(),
-                              std::make_unique<mock::VSyncTracker>(), std::move(selectorPtr),
+                              std::make_shared<mock::VSyncTracker>(), std::move(selectorPtr),
                               /* modulatorPtr */ nullptr, callback) {}
 
     TestableScheduler(std::unique_ptr<VsyncController> controller,
-                      std::unique_ptr<VSyncTracker> tracker, RefreshRateSelectorPtr selectorPtr,
+                      std::shared_ptr<VSyncTracker> tracker, RefreshRateSelectorPtr selectorPtr,
                       sp<VsyncModulator> modulatorPtr, ISchedulerCallback& callback)
           : Scheduler(*this, callback, Feature::kContentDetection, std::move(modulatorPtr)) {
-        mVsyncSchedule.emplace(VsyncSchedule(std::move(tracker),
-                                             std::make_unique<mock::VSyncDispatch>(),
-                                             std::move(controller)));
-
         const auto displayId = selectorPtr->getActiveMode().modePtr->getPhysicalDisplayId();
-        registerDisplay(displayId, std::move(selectorPtr));
+        registerDisplay(displayId, std::move(selectorPtr), std::move(controller),
+                        std::move(tracker));
 
         ON_CALL(*this, postMessage).WillByDefault([](sp<MessageHandler>&& handler) {
             // Execute task to prevent broken promise exception on destruction.
@@ -66,13 +63,6 @@
         return Scheduler::createConnection(std::move(eventThread));
     }
 
-    /* ------------------------------------------------------------------------
-     * Read-write access to private data to set up preconditions and assert
-     * post-conditions.
-     */
-    auto& mutablePrimaryHWVsyncEnabled() { return mPrimaryHWVsyncEnabled; }
-    auto& mutableHWVsyncAvailable() { return mHWVsyncAvailable; }
-
     auto refreshRateSelector() { return leaderSelectorPtr(); }
 
     const auto& refreshRateSelectors() const NO_THREAD_SAFETY_ANALYSIS {
@@ -80,8 +70,21 @@
     }
 
     void registerDisplay(PhysicalDisplayId displayId, RefreshRateSelectorPtr selectorPtr) {
+        registerDisplay(displayId, std::move(selectorPtr),
+                        std::make_unique<mock::VsyncController>(),
+                        std::make_shared<mock::VSyncTracker>());
+    }
+
+    void registerDisplay(PhysicalDisplayId displayId, RefreshRateSelectorPtr selectorPtr,
+                         std::unique_ptr<VsyncController> controller,
+                         std::shared_ptr<VSyncTracker> tracker) {
         ftl::FakeGuard guard(kMainThreadContext);
-        Scheduler::registerDisplay(displayId, std::move(selectorPtr));
+        Scheduler::registerDisplayInternal(displayId, std::move(selectorPtr),
+                                           std::shared_ptr<VsyncSchedule>(
+                                                   new VsyncSchedule(displayId, std::move(tracker),
+                                                                     std::make_shared<
+                                                                             mock::VSyncDispatch>(),
+                                                                     std::move(controller))));
     }
 
     void unregisterDisplay(PhysicalDisplayId displayId) {
@@ -157,6 +160,13 @@
         Scheduler::onNonPrimaryDisplayModeChanged(handle, mode);
     }
 
+    void setInitialHwVsyncEnabled(PhysicalDisplayId id, bool enabled) {
+        auto schedule = getVsyncSchedule(id);
+        std::lock_guard<std::mutex> lock(schedule->mHwVsyncLock);
+        schedule->mHwVsyncState = enabled ? VsyncSchedule::HwVsyncState::Enabled
+                                          : VsyncSchedule::HwVsyncState::Disabled;
+    }
+
 private:
     // ICompositor overrides:
     void configure() override {}
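
Note for readers of the hunks above: a minimal sketch of how a test could drive the new TestableScheduler hooks, assuming a fixture that already provides kDisplayId, selectorPtr, and a scheduler instance; the mock types are the ones injected in this patch, everything else is illustrative.

    // Illustrative fixture fragment (kDisplayId, selectorPtr, scheduler are assumed names).
    auto controller = std::make_unique<mock::VsyncController>();
    auto tracker = std::make_shared<mock::VSyncTracker>();

    // New overload: the scheduler builds a per-display VsyncSchedule from the
    // injected controller/tracker instead of relying on a single scheduler-wide one.
    scheduler.registerDisplay(kDisplayId, selectorPtr, std::move(controller), std::move(tracker));

    // Replaces the removed mutablePrimaryHWVsyncEnabled()/mutableHWVsyncAvailable()
    // accessors: the precondition now lives on that display's VsyncSchedule.
    scheduler.setInitialHwVsyncEnabled(kDisplayId, /*enabled=*/true);
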
diff --git a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
index 68c738f..63b79a4 100644
--- a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
+++ b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
@@ -201,7 +201,7 @@
             std::variant<OneDisplayMode, TwoDisplayModes, RefreshRateSelectorPtr>;
 
     void setupScheduler(std::unique_ptr<scheduler::VsyncController> vsyncController,
-                        std::unique_ptr<scheduler::VSyncTracker> vsyncTracker,
+                        std::shared_ptr<scheduler::VSyncTracker> vsyncTracker,
                         std::unique_ptr<EventThread> appEventThread,
                         std::unique_ptr<EventThread> sfEventThread,
                         SchedulerCallbackImpl callbackImpl = SchedulerCallbackImpl::kNoOp,
@@ -253,7 +253,7 @@
                                                           std::move(modulatorPtr), callback);
         }
 
-        mScheduler->initVsync(mScheduler->getVsyncSchedule().getDispatch(), *mTokenManager, 0ms);
+        mScheduler->initVsync(mScheduler->getVsyncSchedule()->getDispatch(), *mTokenManager, 0ms);
 
         mFlinger->mAppConnectionHandle = mScheduler->createConnection(std::move(appEventThread));
         mFlinger->mSfConnectionHandle = mScheduler->createConnection(std::move(sfEventThread));
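
A minimal sketch of the calling pattern the new setupScheduler() signature enables, assuming the usual fixture locals (mFlinger, eventThread, sfEventThread) seen in the tests below; shared ownership of the tracker lets a test keep its own handle for expectations after the scheduler has taken it.

    auto vsyncController = std::make_unique<mock::VsyncController>();
    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();  // test keeps a copy

    // Expectations can be set before or after handing the tracker to the scheduler,
    // since the shared_ptr keeps the mock alive on both sides.
    EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));

    mFlinger.setupScheduler(std::move(vsyncController), vsyncTracker,
                            std::move(eventThread), std::move(sfEventThread));
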
diff --git a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
index 859f702..a9a617b 100644
--- a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
@@ -83,15 +83,14 @@
 
         mFlinger.setupComposer(std::make_unique<Hwc2::mock::Composer>());
         mFlinger.setupScheduler(std::unique_ptr<mock::VsyncController>(mVsyncController),
-                                std::unique_ptr<mock::VSyncTracker>(mVSyncTracker),
-                                std::move(eventThread), std::move(sfEventThread));
+                                mVSyncTracker, std::move(eventThread), std::move(sfEventThread));
         mFlinger.flinger()->addTransactionReadyFilters();
     }
 
     TestableSurfaceFlinger mFlinger;
 
     mock::VsyncController* mVsyncController = new mock::VsyncController();
-    mock::VSyncTracker* mVSyncTracker = new mock::VSyncTracker();
+    std::shared_ptr<mock::VSyncTracker> mVSyncTracker = std::make_shared<mock::VSyncTracker>();
 
     struct TransactionInfo {
         Vector<ComposerState> states;
diff --git a/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp b/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
index 1173d1c..b228bcb 100644
--- a/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
@@ -85,7 +85,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
index ae03db4..bfebecd 100644
--- a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
@@ -84,7 +84,7 @@
                                                                  ResyncCallback())));
 
         auto vsyncController = std::make_unique<mock::VsyncController>();
-        auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+        auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
         EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
         EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp b/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
index da87f1d..aa33716 100644
--- a/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
@@ -117,7 +117,7 @@
                                                              ResyncCallback())));
 
     auto vsyncController = std::make_unique<mock::VsyncController>();
-    auto vsyncTracker = std::make_unique<mock::VSyncTracker>();
+    auto vsyncTracker = std::make_shared<mock::VSyncTracker>();
 
     EXPECT_CALL(*vsyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
     EXPECT_CALL(*vsyncTracker, currentPeriod())
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
index 47c2dee..fcd2f56 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
@@ -109,7 +109,8 @@
 
 class RepeatingCallbackReceiver {
 public:
-    RepeatingCallbackReceiver(VSyncDispatch& dispatch, nsecs_t workload, nsecs_t readyDuration)
+    RepeatingCallbackReceiver(std::shared_ptr<VSyncDispatch> dispatch, nsecs_t workload,
+                              nsecs_t readyDuration)
           : mWorkload(workload),
             mReadyDuration(readyDuration),
             mCallback(
@@ -166,9 +167,10 @@
 };
 
 TEST_F(VSyncDispatchRealtimeTest, triple_alarm) {
-    FixedRateIdealStubTracker tracker;
-    VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
-                                     mVsyncMoveThreshold);
+    auto tracker = std::make_shared<FixedRateIdealStubTracker>();
+    auto dispatch =
+            std::make_shared<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
+                                                      mDispatchGroupThreshold, mVsyncMoveThreshold);
 
     static size_t constexpr num_clients = 3;
     std::array<RepeatingCallbackReceiver, num_clients>
@@ -195,14 +197,15 @@
 // starts at 333hz, slides down to 43hz
 TEST_F(VSyncDispatchRealtimeTest, vascillating_vrr) {
     auto next_vsync_interval = toNs(3ms);
-    VRRStubTracker tracker(next_vsync_interval);
-    VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
-                                     mVsyncMoveThreshold);
+    auto tracker = std::make_shared<VRRStubTracker>(next_vsync_interval);
+    auto dispatch =
+            std::make_shared<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
+                                                      mDispatchGroupThreshold, mVsyncMoveThreshold);
 
     RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms), toNs(5ms));
 
     auto const on_each_frame = [&](nsecs_t last_known) {
-        tracker.set_interval(next_vsync_interval += toNs(1ms), last_known);
+        tracker->set_interval(next_vsync_interval += toNs(1ms), last_known);
     };
 
     std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
@@ -213,9 +216,10 @@
 
 // starts at 333hz, jumps to 200hz at frame 10
 TEST_F(VSyncDispatchRealtimeTest, fixed_jump) {
-    VRRStubTracker tracker(toNs(3ms));
-    VSyncDispatchTimerQueue dispatch(std::make_unique<Timer>(), tracker, mDispatchGroupThreshold,
-                                     mVsyncMoveThreshold);
+    auto tracker = std::make_shared<VRRStubTracker>(toNs(3ms));
+    auto dispatch =
+            std::make_shared<VSyncDispatchTimerQueue>(std::make_unique<Timer>(), tracker,
+                                                      mDispatchGroupThreshold, mVsyncMoveThreshold);
 
     RepeatingCallbackReceiver cb_receiver(dispatch, toNs(1ms), toNs(5ms));
 
@@ -223,7 +227,7 @@
     auto constexpr jump_frame_at = 10u;
     auto const on_each_frame = [&](nsecs_t last_known) {
         if (jump_frame_counter++ == jump_frame_at) {
-            tracker.set_interval(toNs(5ms), last_known);
+            tracker->set_interval(toNs(5ms), last_known);
         }
     };
     std::thread eventThread([&] { cb_receiver.repeatedly_schedule(mIterations, on_each_frame); });
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
index 14a2860..82daffd 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
@@ -116,13 +116,14 @@
 
 class CountingCallback {
 public:
-    CountingCallback(VSyncDispatch& dispatch)
-          : mDispatch(dispatch),
-            mToken(dispatch.registerCallback(std::bind(&CountingCallback::counter, this,
-                                                       std::placeholders::_1, std::placeholders::_2,
-                                                       std::placeholders::_3),
-                                             "test")) {}
-    ~CountingCallback() { mDispatch.unregisterCallback(mToken); }
+    CountingCallback(std::shared_ptr<VSyncDispatch> dispatch)
+          : mDispatch(std::move(dispatch)),
+            mToken(mDispatch->registerCallback(std::bind(&CountingCallback::counter, this,
+                                                         std::placeholders::_1,
+                                                         std::placeholders::_2,
+                                                         std::placeholders::_3),
+                                               "test")) {}
+    ~CountingCallback() { mDispatch->unregisterCallback(mToken); }
 
     operator VSyncDispatch::CallbackToken() const { return mToken; }
 
@@ -132,7 +133,7 @@
         mReadyTime.push_back(readyTime);
     }
 
-    VSyncDispatch& mDispatch;
+    std::shared_ptr<VSyncDispatch> mDispatch;
     VSyncDispatch::CallbackToken mToken;
     std::vector<nsecs_t> mCalls;
     std::vector<nsecs_t> mWakeupTime;
@@ -141,12 +142,12 @@
 
 class PausingCallback {
 public:
-    PausingCallback(VSyncDispatch& dispatch, std::chrono::milliseconds pauseAmount)
-          : mDispatch(dispatch),
-            mToken(dispatch.registerCallback(std::bind(&PausingCallback::pause, this,
-                                                       std::placeholders::_1,
-                                                       std::placeholders::_2),
-                                             "test")),
+    PausingCallback(std::shared_ptr<VSyncDispatch> dispatch, std::chrono::milliseconds pauseAmount)
+          : mDispatch(std::move(dispatch)),
+            mToken(mDispatch->registerCallback(std::bind(&PausingCallback::pause, this,
+                                                         std::placeholders::_1,
+                                                         std::placeholders::_2),
+                                               "test")),
             mRegistered(true),
             mPauseAmount(pauseAmount) {}
     ~PausingCallback() { unregister(); }
@@ -181,12 +182,12 @@
 
     void unregister() {
         if (mRegistered) {
-            mDispatch.unregisterCallback(mToken);
+            mDispatch->unregisterCallback(mToken);
             mRegistered = false;
         }
     }
 
-    VSyncDispatch& mDispatch;
+    std::shared_ptr<VSyncDispatch> mDispatch;
     VSyncDispatch::CallbackToken mToken;
     bool mRegistered = true;
 
@@ -231,22 +232,26 @@
     static nsecs_t constexpr mDispatchGroupThreshold = 5;
     nsecs_t const mPeriod = 1000;
     nsecs_t const mVsyncMoveThreshold = 300;
-    NiceMock<MockVSyncTracker> mStubTracker{mPeriod};
-    VSyncDispatchTimerQueue mDispatch{createTimeKeeper(), mStubTracker, mDispatchGroupThreshold,
-                                      mVsyncMoveThreshold};
+    std::shared_ptr<NiceMock<MockVSyncTracker>> mStubTracker =
+            std::make_shared<NiceMock<MockVSyncTracker>>(mPeriod);
+    std::shared_ptr<VSyncDispatch> mDispatch =
+            std::make_shared<VSyncDispatchTimerQueue>(createTimeKeeper(), mStubTracker,
+                                                      mDispatchGroupThreshold, mVsyncMoveThreshold);
 };
 
 TEST_F(VSyncDispatchTimerQueueTest, unregistersSetAlarmOnDestruction) {
     EXPECT_CALL(mMockClock, alarmAt(_, 900));
     EXPECT_CALL(mMockClock, alarmCancel());
     {
-        VSyncDispatchTimerQueue mDispatch{createTimeKeeper(), mStubTracker, mDispatchGroupThreshold,
-                                          mVsyncMoveThreshold};
+        std::shared_ptr<VSyncDispatch> mDispatch =
+                std::make_shared<VSyncDispatchTimerQueue>(createTimeKeeper(), mStubTracker,
+                                                          mDispatchGroupThreshold,
+                                                          mVsyncMoveThreshold);
         CountingCallback cb(mDispatch);
-        const auto result = mDispatch.schedule(cb,
-                                               {.workDuration = 100,
-                                                .readyDuration = 0,
-                                                .earliestVsync = 1000});
+        const auto result = mDispatch->schedule(cb,
+                                                {.workDuration = 100,
+                                                 .readyDuration = 0,
+                                                 .earliestVsync = 1000});
         EXPECT_TRUE(result.has_value());
         EXPECT_EQ(900, *result);
     }
@@ -257,10 +262,10 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 900));
 
     CountingCallback cb(mDispatch);
-    const auto result = mDispatch.schedule(cb,
-                                           {.workDuration = 100,
-                                            .readyDuration = 0,
-                                            .earliestVsync = intended});
+    const auto result = mDispatch->schedule(cb,
+                                            {.workDuration = 100,
+                                             .readyDuration = 0,
+                                             .earliestVsync = intended});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
 
@@ -277,14 +282,15 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 700)).InSequence(seq);
 
     CountingCallback cb(mDispatch);
-    auto result = mDispatch.schedule(cb,
-                                     {.workDuration = 100,
-                                      .readyDuration = 0,
-                                      .earliestVsync = intended});
+    auto result = mDispatch->schedule(cb,
+                                      {.workDuration = 100,
+                                       .readyDuration = 0,
+                                       .earliestVsync = intended});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
 
-    result = mDispatch.update(cb,
+    result =
+            mDispatch->update(cb,
                               {.workDuration = 300, .readyDuration = 0, .earliestVsync = intended});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(700, *result);
@@ -302,17 +308,17 @@
 
     CountingCallback cb(mDispatch);
     const auto result =
-            mDispatch.update(cb,
-                             {.workDuration = 300, .readyDuration = 0, .earliestVsync = intended});
+            mDispatch->update(cb,
+                              {.workDuration = 300, .readyDuration = 0, .earliestVsync = intended});
     EXPECT_FALSE(result.has_value());
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, basicAlarmSettingFutureWithAdjustmentToTrueVsync) {
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000)).WillOnce(Return(1150));
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000)).WillOnce(Return(1150));
     EXPECT_CALL(mMockClock, alarmAt(_, 1050));
 
     CountingCallback cb(mDispatch);
-    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
     advanceToNextCallback();
 
     ASSERT_THAT(cb.mCalls.size(), Eq(1));
@@ -323,15 +329,15 @@
     auto const now = 234;
     mMockClock.advanceBy(234);
     auto const workDuration = 10 * mPeriod;
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(now + workDuration))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(now + workDuration))
             .WillOnce(Return(mPeriod * 11));
     EXPECT_CALL(mMockClock, alarmAt(_, mPeriod));
 
     CountingCallback cb(mDispatch);
-    const auto result = mDispatch.schedule(cb,
-                                           {.workDuration = workDuration,
-                                            .readyDuration = 0,
-                                            .earliestVsync = mPeriod});
+    const auto result = mDispatch->schedule(cb,
+                                            {.workDuration = workDuration,
+                                             .readyDuration = 0,
+                                             .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod, *result);
 }
@@ -341,12 +347,13 @@
     EXPECT_CALL(mMockClock, alarmCancel());
 
     CountingCallback cb(mDispatch);
-    const auto result =
-            mDispatch.schedule(cb,
-                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+    const auto result = mDispatch->schedule(cb,
+                                            {.workDuration = 100,
+                                             .readyDuration = 0,
+                                             .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod - 100, *result);
-    EXPECT_EQ(mDispatch.cancel(cb), CancelResult::Cancelled);
+    EXPECT_EQ(mDispatch->cancel(cb), CancelResult::Cancelled);
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, basicAlarmCancelTooLate) {
@@ -354,13 +361,14 @@
     EXPECT_CALL(mMockClock, alarmCancel());
 
     CountingCallback cb(mDispatch);
-    const auto result =
-            mDispatch.schedule(cb,
-                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+    const auto result = mDispatch->schedule(cb,
+                                            {.workDuration = 100,
+                                             .readyDuration = 0,
+                                             .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod - 100, *result);
     mMockClock.advanceBy(950);
-    EXPECT_EQ(mDispatch.cancel(cb), CancelResult::TooLate);
+    EXPECT_EQ(mDispatch->cancel(cb), CancelResult::TooLate);
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, basicAlarmCancelTooLateWhenRunning) {
@@ -368,15 +376,16 @@
     EXPECT_CALL(mMockClock, alarmCancel());
 
     PausingCallback cb(mDispatch, std::chrono::duration_cast<std::chrono::milliseconds>(1s));
-    const auto result =
-            mDispatch.schedule(cb,
-                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+    const auto result = mDispatch->schedule(cb,
+                                            {.workDuration = 100,
+                                             .readyDuration = 0,
+                                             .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod - 100, *result);
 
     std::thread pausingThread([&] { mMockClock.advanceToNextCallback(); });
     EXPECT_TRUE(cb.waitForPause());
-    EXPECT_EQ(mDispatch.cancel(cb), CancelResult::TooLate);
+    EXPECT_EQ(mDispatch->cancel(cb), CancelResult::TooLate);
     cb.unpause();
     pausingThread.join();
 }
@@ -389,9 +398,10 @@
 
     PausingCallback cb(mDispatch, 50ms);
     cb.stashResource(resource);
-    const auto result =
-            mDispatch.schedule(cb,
-                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+    const auto result = mDispatch->schedule(cb,
+                                            {.workDuration = 100,
+                                             .readyDuration = 0,
+                                             .earliestVsync = mPeriod});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(mPeriod - 100, *result);
 
@@ -408,7 +418,7 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, basicTwoAlarmSetting) {
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
             .Times(4)
             .WillOnce(Return(1055))
             .WillOnce(Return(1063))
@@ -423,8 +433,8 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
-    mDispatch.schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
+    mDispatch->schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod});
+    mDispatch->schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
 
     advanceToNextCallback();
     advanceToNextCallback();
@@ -436,7 +446,7 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, noCloseCallbacksAfterPeriodChange) {
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
             .Times(4)
             .WillOnce(Return(1000))
             .WillOnce(Return(2000))
@@ -450,21 +460,21 @@
 
     CountingCallback cb(mDispatch);
 
-    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 0});
+    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 0});
 
     advanceToNextCallback();
 
     ASSERT_THAT(cb.mCalls.size(), Eq(1));
     EXPECT_THAT(cb.mCalls[0], Eq(1000));
 
-    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
 
     ASSERT_THAT(cb.mCalls.size(), Eq(2));
     EXPECT_THAT(cb.mCalls[1], Eq(2000));
 
-    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
 
     advanceToNextCallback();
 
@@ -473,7 +483,7 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, rearmsFaroutTimeoutWhenCancellingCloseOne) {
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
             .Times(4)
             .WillOnce(Return(10000))
             .WillOnce(Return(1000))
@@ -488,10 +498,10 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch.schedule(cb0,
-                       {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod * 10});
-    mDispatch.schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
-    mDispatch.cancel(cb1);
+    mDispatch->schedule(cb0,
+                        {.workDuration = 100, .readyDuration = 0, .earliestVsync = mPeriod * 10});
+    mDispatch->schedule(cb1, {.workDuration = 250, .readyDuration = 0, .earliestVsync = mPeriod});
+    mDispatch->cancel(cb1);
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, noUnnecessaryRearmsWhenRescheduling) {
@@ -502,9 +512,9 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb1, {.workDuration = 300, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb1, {.workDuration = 300, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
 }
 
@@ -517,9 +527,9 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
 }
 
@@ -537,10 +547,10 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb1,
-                       {.workDuration = closeOffset, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb1,
+                        {.workDuration = closeOffset, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
     ASSERT_THAT(cb0.mCalls.size(), Eq(1));
@@ -548,9 +558,11 @@
     ASSERT_THAT(cb1.mCalls.size(), Eq(1));
     EXPECT_THAT(cb1.mCalls[0], Eq(mPeriod));
 
-    mDispatch.schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 2000});
-    mDispatch.schedule(cb1,
-                       {.workDuration = notCloseOffset, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch->schedule(cb0, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch->schedule(cb1,
+                        {.workDuration = notCloseOffset,
+                         .readyDuration = 0,
+                         .earliestVsync = 2000});
     advanceToNextCallback();
     ASSERT_THAT(cb1.mCalls.size(), Eq(2));
     EXPECT_THAT(cb1.mCalls[1], Eq(2000));
@@ -570,32 +582,32 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb1, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
-    EXPECT_EQ(mDispatch.cancel(cb0), CancelResult::Cancelled);
+    EXPECT_EQ(mDispatch->cancel(cb0), CancelResult::Cancelled);
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, setAlarmCallsAtCorrectTimeWithChangingVsync) {
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
             .Times(3)
             .WillOnce(Return(950))
             .WillOnce(Return(1975))
             .WillOnce(Return(2950));
 
     CountingCallback cb(mDispatch);
-    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 920});
+    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 920});
 
     mMockClock.advanceBy(850);
     EXPECT_THAT(cb.mCalls.size(), Eq(1));
 
-    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1900});
+    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1900});
     mMockClock.advanceBy(900);
     EXPECT_THAT(cb.mCalls.size(), Eq(1));
     mMockClock.advanceBy(125);
     EXPECT_THAT(cb.mCalls.size(), Eq(2));
 
-    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2900});
+    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2900});
     mMockClock.advanceBy(975);
     EXPECT_THAT(cb.mCalls.size(), Eq(3));
 }
@@ -606,48 +618,48 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 1900)).InSequence(seq);
 
     VSyncDispatch::CallbackToken tmp;
-    tmp = mDispatch.registerCallback(
+    tmp = mDispatch->registerCallback(
             [&](auto, auto, auto) {
-                mDispatch.schedule(tmp,
-                                   {.workDuration = 100,
-                                    .readyDuration = 0,
-                                    .earliestVsync = 2000});
+                mDispatch->schedule(tmp,
+                                    {.workDuration = 100,
+                                     .readyDuration = 0,
+                                     .earliestVsync = 2000});
             },
             "o.o");
 
-    mDispatch.schedule(tmp, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(tmp, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, callbackReentrantWithPastWakeup) {
     VSyncDispatch::CallbackToken tmp;
     std::optional<nsecs_t> lastTarget;
-    tmp = mDispatch.registerCallback(
+    tmp = mDispatch->registerCallback(
             [&](auto timestamp, auto, auto) {
                 auto result =
-                        mDispatch.schedule(tmp,
-                                           {.workDuration = 400,
-                                            .readyDuration = 0,
-                                            .earliestVsync = timestamp - mVsyncMoveThreshold});
-                EXPECT_TRUE(result.has_value());
-                EXPECT_EQ(mPeriod + timestamp - 400, *result);
-                result = mDispatch.schedule(tmp,
+                        mDispatch->schedule(tmp,
                                             {.workDuration = 400,
                                              .readyDuration = 0,
-                                             .earliestVsync = timestamp});
+                                             .earliestVsync = timestamp - mVsyncMoveThreshold});
                 EXPECT_TRUE(result.has_value());
                 EXPECT_EQ(mPeriod + timestamp - 400, *result);
-                result = mDispatch.schedule(tmp,
-                                            {.workDuration = 400,
-                                             .readyDuration = 0,
-                                             .earliestVsync = timestamp + mVsyncMoveThreshold});
+                result = mDispatch->schedule(tmp,
+                                             {.workDuration = 400,
+                                              .readyDuration = 0,
+                                              .earliestVsync = timestamp});
+                EXPECT_TRUE(result.has_value());
+                EXPECT_EQ(mPeriod + timestamp - 400, *result);
+                result = mDispatch->schedule(tmp,
+                                             {.workDuration = 400,
+                                              .readyDuration = 0,
+                                              .earliestVsync = timestamp + mVsyncMoveThreshold});
                 EXPECT_TRUE(result.has_value());
                 EXPECT_EQ(mPeriod + timestamp - 400, *result);
                 lastTarget = timestamp;
             },
             "oo");
 
-    mDispatch.schedule(tmp, {.workDuration = 999, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(tmp, {.workDuration = 999, .readyDuration = 0, .earliestVsync = 1000});
     advanceToNextCallback();
     EXPECT_THAT(lastTarget, Eq(1000));
 
@@ -663,16 +675,16 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 1900)).InSequence(seq);
 
     CountingCallback cb(mDispatch);
-    mDispatch.schedule(cb, {.workDuration = 0, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb, {.workDuration = 0, .readyDuration = 0, .earliestVsync = 1000});
 
     mMockClock.advanceBy(750);
-    mDispatch.schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
-    mDispatch.schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch->schedule(cb, {.workDuration = 50, .readyDuration = 0, .earliestVsync = 2000});
 
     mMockClock.advanceBy(800);
-    mDispatch.schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch->schedule(cb, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, lateModifications) {
@@ -685,12 +697,12 @@
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
 
-    mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb1, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb1, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
-    mDispatch.schedule(cb0, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 2000});
-    mDispatch.schedule(cb1, {.workDuration = 150, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb0, {.workDuration = 200, .readyDuration = 0, .earliestVsync = 2000});
+    mDispatch->schedule(cb1, {.workDuration = 150, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
     advanceToNextCallback();
@@ -702,8 +714,8 @@
 
     CountingCallback cb0(mDispatch);
     CountingCallback cb1(mDispatch);
-    mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 20000});
+    mDispatch->schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb1, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 20000});
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, setsTimerAfterCancellation) {
@@ -713,29 +725,30 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 900)).InSequence(seq);
 
     CountingCallback cb0(mDispatch);
-    mDispatch.schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.cancel(cb0);
-    mDispatch.schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb0, {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->cancel(cb0);
+    mDispatch->schedule(cb0, {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, makingUpIdsError) {
     VSyncDispatch::CallbackToken token(100);
-    EXPECT_FALSE(mDispatch
-                         .schedule(token,
-                                   {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000})
-                         .has_value());
-    EXPECT_THAT(mDispatch.cancel(token), Eq(CancelResult::Error));
+    EXPECT_FALSE(
+            mDispatch
+                    ->schedule(token,
+                               {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000})
+                    .has_value());
+    EXPECT_THAT(mDispatch->cancel(token), Eq(CancelResult::Error));
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, canMoveCallbackBackwardsInTime) {
     CountingCallback cb0(mDispatch);
     auto result =
-            mDispatch.schedule(cb0,
-                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb0,
+                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
-    result = mDispatch.schedule(cb0,
-                                {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch->schedule(cb0,
+                                 {.workDuration = 100, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
 }
@@ -745,14 +758,14 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 500));
     CountingCallback cb(mDispatch);
     auto result =
-            mDispatch.schedule(cb,
-                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb,
+                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
     mMockClock.advanceBy(400);
 
-    result = mDispatch.schedule(cb,
-                                {.workDuration = 800, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch->schedule(cb,
+                                 {.workDuration = 800, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1200, *result);
     advanceToNextCallback();
@@ -760,19 +773,19 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueTest, targetOffsetMovingBackALittleCanStillSchedule) {
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
             .Times(2)
             .WillOnce(Return(1000))
             .WillOnce(Return(1002));
     CountingCallback cb(mDispatch);
     auto result =
-            mDispatch.schedule(cb,
-                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb,
+                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
     mMockClock.advanceBy(400);
-    result = mDispatch.schedule(cb,
-                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch->schedule(cb,
+                                 {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(602, *result);
 }
@@ -780,13 +793,13 @@
 TEST_F(VSyncDispatchTimerQueueTest, canScheduleNegativeOffsetAgainstDifferentPeriods) {
     CountingCallback cb0(mDispatch);
     auto result =
-            mDispatch.schedule(cb0,
-                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb0,
+                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
     advanceToNextCallback();
-    result = mDispatch.schedule(cb0,
-                                {.workDuration = 1100, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch->schedule(cb0,
+                                 {.workDuration = 1100, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
 }
@@ -797,13 +810,13 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 1100)).InSequence(seq);
     CountingCallback cb0(mDispatch);
     auto result =
-            mDispatch.schedule(cb0,
-                               {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb0,
+                                {.workDuration = 500, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(500, *result);
     advanceToNextCallback();
-    result = mDispatch.schedule(cb0,
-                                {.workDuration = 1900, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch->schedule(cb0,
+                                 {.workDuration = 1900, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1100, *result);
 }
@@ -813,13 +826,13 @@
 
     CountingCallback cb(mDispatch);
     auto result =
-            mDispatch.schedule(cb,
-                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb,
+                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
 
-    result = mDispatch.schedule(cb,
-                                {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch->schedule(cb,
+                                 {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
 
@@ -865,16 +878,16 @@
     CountingCallback cb2(mDispatch);
 
     auto result =
-            mDispatch.schedule(cb1,
-                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb1,
+                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
 
     mMockClock.setLag(100);
     mMockClock.advanceBy(620);
 
-    result = mDispatch.schedule(cb2,
-                                {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch->schedule(cb2,
+                                 {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1900, *result);
     mMockClock.advanceBy(80);
@@ -893,16 +906,16 @@
     CountingCallback cb(mDispatch);
 
     auto result =
-            mDispatch.schedule(cb,
-                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb,
+                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
 
     mMockClock.setLag(100);
     mMockClock.advanceBy(620);
 
-    result = mDispatch.schedule(cb,
-                                {.workDuration = 370, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch->schedule(cb,
+                                 {.workDuration = 370, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1630, *result);
     mMockClock.advanceBy(80);
@@ -919,19 +932,19 @@
     CountingCallback cb2(mDispatch);
 
     auto result =
-            mDispatch.schedule(cb1,
-                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb1,
+                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
-    result = mDispatch.schedule(cb2,
-                                {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch->schedule(cb2,
+                                 {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1900, *result);
 
     mMockClock.setLag(100);
     mMockClock.advanceBy(620);
 
-    EXPECT_EQ(mDispatch.cancel(cb2), CancelResult::Cancelled);
+    EXPECT_EQ(mDispatch->cancel(cb2), CancelResult::Cancelled);
 
     mMockClock.advanceBy(80);
 
@@ -948,19 +961,19 @@
     CountingCallback cb2(mDispatch);
 
     auto result =
-            mDispatch.schedule(cb1,
-                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb1,
+                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
-    result = mDispatch.schedule(cb2,
-                                {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
+    result = mDispatch->schedule(cb2,
+                                 {.workDuration = 100, .readyDuration = 0, .earliestVsync = 2000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(1900, *result);
 
     mMockClock.setLag(100);
     mMockClock.advanceBy(620);
 
-    EXPECT_EQ(mDispatch.cancel(cb1), CancelResult::Cancelled);
+    EXPECT_EQ(mDispatch->cancel(cb1), CancelResult::Cancelled);
 
     EXPECT_THAT(cb1.mCalls.size(), Eq(0));
     EXPECT_THAT(cb2.mCalls.size(), Eq(0));
@@ -975,21 +988,21 @@
     CountingCallback cb2(mDispatch);
 
     Sequence seq;
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
             .InSequence(seq)
             .WillOnce(Return(1000));
     EXPECT_CALL(mMockClock, alarmAt(_, 600)).InSequence(seq);
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000))
             .InSequence(seq)
             .WillOnce(Return(1000));
 
     auto result =
-            mDispatch.schedule(cb1,
-                               {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+            mDispatch->schedule(cb1,
+                                {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(600, *result);
-    result = mDispatch.schedule(cb2,
-                                {.workDuration = 390, .readyDuration = 0, .earliestVsync = 1000});
+    result = mDispatch->schedule(cb2,
+                                 {.workDuration = 390, .readyDuration = 0, .earliestVsync = 1000});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(610, *result);
 
@@ -1011,10 +1024,10 @@
     EXPECT_CALL(mMockClock, alarmAt(_, 900));
 
     CountingCallback cb(mDispatch);
-    const auto result = mDispatch.schedule(cb,
-                                           {.workDuration = 70,
-                                            .readyDuration = 30,
-                                            .earliestVsync = intended});
+    const auto result = mDispatch->schedule(cb,
+                                            {.workDuration = 70,
+                                             .readyDuration = 30,
+                                             .earliestVsync = intended});
     EXPECT_TRUE(result.has_value());
     EXPECT_EQ(900, *result);
     advanceToNextCallback();
@@ -1033,8 +1046,8 @@
 
     CountingCallback cb(mDispatch);
 
-    mDispatch.schedule(cb, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
-    mDispatch.schedule(cb, {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb, {.workDuration = 400, .readyDuration = 0, .earliestVsync = 1000});
+    mDispatch->schedule(cb, {.workDuration = 1400, .readyDuration = 0, .earliestVsync = 1000});
 
     advanceToNextCallback();
 
@@ -1052,7 +1065,8 @@
 protected:
     nsecs_t const mPeriod = 1000;
     nsecs_t const mVsyncMoveThreshold = 200;
-    NiceMock<MockVSyncTracker> mStubTracker{mPeriod};
+    std::shared_ptr<NiceMock<MockVSyncTracker>> mStubTracker =
+            std::make_shared<NiceMock<MockVSyncTracker>>(mPeriod);
 };
 
 TEST_F(VSyncDispatchTimerQueueEntryTest, stateAfterInitialization) {
@@ -1070,7 +1084,7 @@
 
     EXPECT_FALSE(entry.wakeupTime());
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
@@ -1084,7 +1098,7 @@
     auto const duration = 500;
     auto const now = 8750;
 
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(now + duration))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(now + duration))
             .Times(1)
             .WillOnce(Return(10000));
     VSyncDispatchTimerQueueEntry entry(
@@ -1092,7 +1106,7 @@
 
     EXPECT_FALSE(entry.wakeupTime());
     EXPECT_TRUE(entry.schedule({.workDuration = 500, .readyDuration = 0, .earliestVsync = 994},
-                               mStubTracker, now)
+                               *mStubTracker.get(), now)
                         .has_value());
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
@@ -1115,7 +1129,7 @@
             mVsyncMoveThreshold);
 
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
@@ -1137,7 +1151,7 @@
 }
 
 TEST_F(VSyncDispatchTimerQueueEntryTest, updateCallback) {
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(_))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(_))
             .Times(2)
             .WillOnce(Return(1000))
             .WillOnce(Return(1020));
@@ -1146,17 +1160,17 @@
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
 
     EXPECT_FALSE(entry.wakeupTime());
-    entry.update(mStubTracker, 0);
+    entry.update(*mStubTracker.get(), 0);
     EXPECT_FALSE(entry.wakeupTime());
 
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     auto wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
     EXPECT_THAT(wakeup, Eq(900));
 
-    entry.update(mStubTracker, 0);
+    entry.update(*mStubTracker.get(), 0);
     wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
     EXPECT_THAT(*wakeup, Eq(920));
@@ -1166,9 +1180,9 @@
     VSyncDispatchTimerQueueEntry entry(
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
-    entry.update(mStubTracker, 0);
+    entry.update(*mStubTracker.get(), 0);
 
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
@@ -1179,24 +1193,24 @@
     VSyncDispatchTimerQueueEntry entry(
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     entry.executing(); // 1000 is executing
     // had 1000 not been executing, this could have been scheduled for time 800.
     EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     EXPECT_THAT(*entry.wakeupTime(), Eq(1800));
     EXPECT_THAT(*entry.readyTime(), Eq(2000));
 
     EXPECT_TRUE(entry.schedule({.workDuration = 50, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     EXPECT_THAT(*entry.wakeupTime(), Eq(1950));
     EXPECT_THAT(*entry.readyTime(), Eq(2000));
 
     EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 1001},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     EXPECT_THAT(*entry.wakeupTime(), Eq(1800));
     EXPECT_THAT(*entry.readyTime(), Eq(2000));
@@ -1208,24 +1222,24 @@
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
 
     Sequence seq;
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(500))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(500))
             .InSequence(seq)
             .WillOnce(Return(1000));
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(500))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(500))
             .InSequence(seq)
             .WillOnce(Return(1000));
-    EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000 + mVsyncMoveThreshold))
+    EXPECT_CALL(*mStubTracker.get(), nextAnticipatedVSyncTimeFrom(1000 + mVsyncMoveThreshold))
             .InSequence(seq)
             .WillOnce(Return(2000));
 
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
 
     entry.executing(); // 1000 is executing
 
     EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
 }
 
@@ -1233,16 +1247,16 @@
     VSyncDispatchTimerQueueEntry entry(
             "test", [](auto, auto, auto) {}, mVsyncMoveThreshold);
     EXPECT_TRUE(entry.schedule({.workDuration = 100, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     EXPECT_TRUE(entry.schedule({.workDuration = 200, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     EXPECT_TRUE(entry.schedule({.workDuration = 50, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     EXPECT_TRUE(entry.schedule({.workDuration = 1200, .readyDuration = 0, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
 }
 
@@ -1255,7 +1269,7 @@
     entry.addPendingWorkloadUpdate(
             {.workDuration = effectualOffset, .readyDuration = 0, .earliestVsync = 400});
     EXPECT_TRUE(entry.hasPendingWorkloadUpdate());
-    entry.update(mStubTracker, 0);
+    entry.update(*mStubTracker.get(), 0);
     EXPECT_FALSE(entry.hasPendingWorkloadUpdate());
     EXPECT_THAT(*entry.wakeupTime(), Eq(mPeriod - effectualOffset));
 }
@@ -1276,7 +1290,7 @@
             mVsyncMoveThreshold);
 
     EXPECT_TRUE(entry.schedule({.workDuration = 70, .readyDuration = 30, .earliestVsync = 500},
-                               mStubTracker, 0)
+                               *mStubTracker.get(), 0)
                         .has_value());
     auto const wakeup = entry.wakeupTime();
     ASSERT_TRUE(wakeup);
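
For completeness, the ownership pattern that the CountingCallback/PausingCallback changes above settle on, restated as a small self-contained sketch (names other than VSyncDispatch are illustrative): the helper stores the dispatch by shared_ptr so the token it registers can always be unregistered in its destructor, even if the fixture swaps out its own copy of the dispatch.

    class ExampleCallback {  // hypothetical helper, mirrors CountingCallback
    public:
        explicit ExampleCallback(std::shared_ptr<VSyncDispatch> dispatch)
              : mDispatch(std::move(dispatch)),
                mToken(mDispatch->registerCallback([](auto, auto, auto) {}, "example")) {}
        ~ExampleCallback() { mDispatch->unregisterCallback(mToken); }

    private:
        std::shared_ptr<VSyncDispatch> mDispatch;  // keeps the dispatch alive with the token
        VSyncDispatch::CallbackToken mToken;
    };
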
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
index 3095e8a..7947a5e 100644
--- a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -55,7 +55,7 @@
     static constexpr size_t kOutlierTolerancePercent = 25;
     static constexpr nsecs_t mMaxRoundingError = 100;
 
-    VSyncPredictor tracker{mPeriod, kHistorySize, kMinimumSamplesForPrediction,
+    VSyncPredictor tracker{"tracker", mPeriod, kHistorySize, kMinimumSamplesForPrediction,
                            kOutlierTolerancePercent};
 };
 
@@ -376,7 +376,8 @@
 
 // See b/151146131
 TEST_F(VSyncPredictorTest, hasEnoughPrecision) {
-    VSyncPredictor tracker{mPeriod, 20, kMinimumSamplesForPrediction, kOutlierTolerancePercent};
+    VSyncPredictor tracker{"tracker", mPeriod, 20, kMinimumSamplesForPrediction,
+                           kOutlierTolerancePercent};
     std::vector<nsecs_t> const simulatedVsyncs{840873348817, 840890049444, 840906762675,
                                                840923581635, 840940161584, 840956868096,
                                                840973702473, 840990256277, 841007116851,
diff --git a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
index 1fb2709..a2de136 100644
--- a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
@@ -96,8 +96,8 @@
     VSyncReactorTest()
           : mMockTracker(std::make_shared<NiceMock<MockVSyncTracker>>()),
             mMockClock(std::make_shared<NiceMock<MockClock>>()),
-            mReactor(std::make_unique<ClockWrapper>(mMockClock), *mMockTracker, kPendingLimit,
-                     false /* supportKernelIdleTimer */) {
+            mReactor("reactor", std::make_unique<ClockWrapper>(mMockClock), *mMockTracker,
+                     kPendingLimit, false /* supportKernelIdleTimer */) {
         ON_CALL(*mMockClock, now()).WillByDefault(Return(mFakeNow));
         ON_CALL(*mMockTracker, currentPeriod()).WillByDefault(Return(period));
     }
@@ -192,7 +192,7 @@
     mReactor.setIgnorePresentFences(true);
 
     nsecs_t const newPeriod = 5000;
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
 
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(0, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
@@ -205,7 +205,7 @@
 TEST_F(VSyncReactorTest, setPeriodCalledOnceConfirmedChange) {
     nsecs_t const newPeriod = 5000;
     EXPECT_CALL(*mMockTracker, setPeriod(_)).Times(0);
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
 
     bool periodFlushed = true;
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(10000, std::nullopt, &periodFlushed));
@@ -224,7 +224,7 @@
 TEST_F(VSyncReactorTest, changingPeriodBackAbortsConfirmationProcess) {
     nsecs_t sampleTime = 0;
     nsecs_t const newPeriod = 5000;
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
     bool periodFlushed = true;
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
@@ -232,7 +232,7 @@
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
 
-    mReactor.startPeriodTransition(period);
+    mReactor.startPeriodTransition(period, false);
     EXPECT_FALSE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
 }
@@ -242,13 +242,13 @@
     nsecs_t const secondPeriod = 5000;
     nsecs_t const thirdPeriod = 2000;
 
-    mReactor.startPeriodTransition(secondPeriod);
+    mReactor.startPeriodTransition(secondPeriod, false);
     bool periodFlushed = true;
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(sampleTime += period, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
-    mReactor.startPeriodTransition(thirdPeriod);
+    mReactor.startPeriodTransition(thirdPeriod, false);
     EXPECT_TRUE(
             mReactor.addHwVsyncTimestamp(sampleTime += secondPeriod, std::nullopt, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
@@ -289,14 +289,14 @@
 
 TEST_F(VSyncReactorTest, presentFenceAdditionDoesNotInterruptConfirmationProcess) {
     nsecs_t const newPeriod = 5000;
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
     EXPECT_TRUE(mReactor.addPresentFence(generateSignalledFenceWithTime(0)));
 }
 
 TEST_F(VSyncReactorTest, setPeriodCalledFirstTwoEventsNewPeriod) {
     nsecs_t const newPeriod = 5000;
     EXPECT_CALL(*mMockTracker, setPeriod(_)).Times(0);
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
 
     bool periodFlushed = true;
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(5000, std::nullopt, &periodFlushed));
@@ -321,7 +321,7 @@
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
 
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
 
     auto time = 0;
     auto constexpr numTimestampSubmissions = 10;
@@ -346,7 +346,7 @@
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
 
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
 
     auto time = 0;
     // If the power mode is not DOZE or DOZE_SUSPEND, it is still collecting timestamps.
@@ -363,7 +363,7 @@
     auto time = 0;
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
 
     time += period;
     mReactor.addHwVsyncTimestamp(time, std::nullopt, &periodFlushed);
@@ -379,7 +379,7 @@
     auto time = 0;
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
 
     static auto constexpr numSamplesWithNewPeriod = 4;
     Sequence seq;
@@ -406,7 +406,7 @@
     auto time = 0;
     bool periodFlushed = false;
     nsecs_t const newPeriod = 4000;
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
 
     Sequence seq;
     EXPECT_CALL(*mMockTracker, needsMoreSamples())
@@ -426,7 +426,7 @@
     nsecs_t const newPeriod1 = 4000;
     nsecs_t const newPeriod2 = 7000;
 
-    mReactor.startPeriodTransition(newPeriod1);
+    mReactor.startPeriodTransition(newPeriod1, false);
 
     Sequence seq;
     EXPECT_CALL(*mMockTracker, needsMoreSamples())
@@ -445,7 +445,7 @@
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod1, std::nullopt, &periodFlushed));
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod1, std::nullopt, &periodFlushed));
 
-    mReactor.startPeriodTransition(newPeriod2);
+    mReactor.startPeriodTransition(newPeriod2, false);
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod1, std::nullopt, &periodFlushed));
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod2, std::nullopt, &periodFlushed));
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(time += newPeriod2, std::nullopt, &periodFlushed));
@@ -458,7 +458,7 @@
     mReactor.setIgnorePresentFences(true);
 
     nsecs_t const newPeriod = 5000;
-    mReactor.startPeriodTransition(newPeriod);
+    mReactor.startPeriodTransition(newPeriod, false);
 
     EXPECT_TRUE(mReactor.addHwVsyncTimestamp(0, 0, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
@@ -472,8 +472,9 @@
 
 TEST_F(VSyncReactorTest, periodIsMeasuredIfIgnoringComposer) {
     // Create a reactor which supports the kernel idle timer
-    auto idleReactor = VSyncReactor(std::make_unique<ClockWrapper>(mMockClock), *mMockTracker,
-                                    kPendingLimit, true /* supportKernelIdleTimer */);
+    auto idleReactor =
+            VSyncReactor("reactor", std::make_unique<ClockWrapper>(mMockClock), *mMockTracker,
+                         kPendingLimit, true /* supportKernelIdleTimer */);
 
     bool periodFlushed = true;
     EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(4);
@@ -481,7 +482,7 @@
 
     // First, set the same period, which should only be confirmed when we receive two
     // matching callbacks
-    idleReactor.startPeriodTransition(10000);
+    idleReactor.startPeriodTransition(10000, false);
     EXPECT_TRUE(idleReactor.addHwVsyncTimestamp(0, 0, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
     // Correct period but incorrect timestamp delta
@@ -494,7 +495,7 @@
     // Then, set a new period, which should be confirmed as soon as we receive a callback
     // reporting the new period
     nsecs_t const newPeriod = 5000;
-    idleReactor.startPeriodTransition(newPeriod);
+    idleReactor.startPeriodTransition(newPeriod, false);
     // Incorrect timestamp delta and period
     EXPECT_TRUE(idleReactor.addHwVsyncTimestamp(20000, 10000, &periodFlushed));
     EXPECT_FALSE(periodFlushed);
diff --git a/services/surfaceflinger/tests/unittests/mock/MockEventThread.h b/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
index f8567bd..3a6068a 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockEventThread.h
@@ -29,27 +29,29 @@
     EventThread();
     ~EventThread() override;
 
-    MOCK_CONST_METHOD2(createEventConnection,
-                       sp<EventThreadConnection>(ResyncCallback, EventRegistrationFlags));
-    MOCK_METHOD0(onScreenReleased, void());
-    MOCK_METHOD0(onScreenAcquired, void());
-    MOCK_METHOD2(onHotplugReceived, void(PhysicalDisplayId, bool));
-    MOCK_METHOD1(onModeChanged, void(const scheduler::FrameRateMode &));
-    MOCK_METHOD2(onFrameRateOverridesChanged,
-                 void(PhysicalDisplayId, std::vector<FrameRateOverride>));
-    MOCK_CONST_METHOD1(dump, void(std::string&));
-    MOCK_METHOD2(setDuration,
-                 void(std::chrono::nanoseconds workDuration,
-                      std::chrono::nanoseconds readyDuration));
-    MOCK_METHOD1(registerDisplayEventConnection,
-                 status_t(const sp<android::EventThreadConnection> &));
-    MOCK_METHOD2(setVsyncRate, void(uint32_t, const sp<android::EventThreadConnection> &));
-    MOCK_METHOD1(requestNextVsync, void(const sp<android::EventThreadConnection> &));
+    MOCK_METHOD(sp<EventThreadConnection>, createEventConnection,
+                (ResyncCallback, EventRegistrationFlags), (const, override));
+    MOCK_METHOD(void, onScreenReleased, (), (override));
+    MOCK_METHOD(void, onScreenAcquired, (), (override));
+    MOCK_METHOD(void, onHotplugReceived, (PhysicalDisplayId, bool), (override));
+    MOCK_METHOD(void, onModeChanged, (const scheduler::FrameRateMode&), (override));
+    MOCK_METHOD(void, onFrameRateOverridesChanged,
+                (PhysicalDisplayId, std::vector<FrameRateOverride>), (override));
+    MOCK_METHOD(void, dump, (std::string&), (const, override));
+    MOCK_METHOD(void, setDuration,
+                (std::chrono::nanoseconds workDuration, std::chrono::nanoseconds readyDuration),
+                (override));
+    MOCK_METHOD(status_t, registerDisplayEventConnection,
+                (const sp<android::EventThreadConnection>&), (override));
+    MOCK_METHOD(void, setVsyncRate, (uint32_t, const sp<android::EventThreadConnection>&),
+                (override));
+    MOCK_METHOD(void, requestNextVsync, (const sp<android::EventThreadConnection>&), (override));
     MOCK_METHOD(VsyncEventData, getLatestVsyncEventData,
-                (const sp<android::EventThreadConnection> &), (const));
-    MOCK_METHOD1(requestLatestConfig, void(const sp<android::EventThreadConnection> &));
-    MOCK_METHOD1(pauseVsyncCallback, void(bool));
-    MOCK_METHOD0(getEventThreadConnectionCount, size_t());
+                (const sp<android::EventThreadConnection>&), (const, override));
+    MOCK_METHOD(void, requestLatestConfig, (const sp<android::EventThreadConnection>&));
+    MOCK_METHOD(void, pauseVsyncCallback, (bool));
+    MOCK_METHOD(size_t, getEventThreadConnectionCount, (), (override));
+    MOCK_METHOD(void, onNewVsyncSchedule, (std::shared_ptr<scheduler::VsyncSchedule>), (override));
 };
 
 } // namespace android::mock
diff --git a/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h b/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h
index 7d4b159..a8eca21 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockSchedulerCallback.h
@@ -18,19 +18,19 @@
 
 #include <gmock/gmock.h>
 
-#include "Scheduler/Scheduler.h"
+#include "Scheduler/ISchedulerCallback.h"
 
 namespace android::scheduler::mock {
 
 struct SchedulerCallback final : ISchedulerCallback {
-    MOCK_METHOD(void, setVsyncEnabled, (bool), (override));
+    MOCK_METHOD(void, setVsyncEnabled, (PhysicalDisplayId, bool), (override));
     MOCK_METHOD(void, requestDisplayModes, (std::vector<display::DisplayModeRequest>), (override));
     MOCK_METHOD(void, kernelTimerChanged, (bool), (override));
     MOCK_METHOD(void, triggerOnFrameRateOverridesChanged, (), (override));
 };
 
 struct NoOpSchedulerCallback final : ISchedulerCallback {
-    void setVsyncEnabled(bool) override {}
+    void setVsyncEnabled(PhysicalDisplayId, bool) override {}
     void requestDisplayModes(std::vector<display::DisplayModeRequest>) override {}
     void kernelTimerChanged(bool) override {}
     void triggerOnFrameRateOverridesChanged() override {}
diff --git a/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h b/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h
index 4ef91da..69ec60a 100644
--- a/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h
+++ b/services/surfaceflinger/tests/unittests/mock/MockVsyncController.h
@@ -28,12 +28,12 @@
     ~VsyncController() override;
 
     MOCK_METHOD(bool, addPresentFence, (std::shared_ptr<FenceTime>), (override));
-    MOCK_METHOD3(addHwVsyncTimestamp, bool(nsecs_t, std::optional<nsecs_t>, bool*));
-    MOCK_METHOD1(startPeriodTransition, void(nsecs_t));
-    MOCK_METHOD1(setIgnorePresentFences, void(bool));
+    MOCK_METHOD(bool, addHwVsyncTimestamp, (nsecs_t, std::optional<nsecs_t>, bool*), (override));
+    MOCK_METHOD(void, startPeriodTransition, (nsecs_t, bool), (override));
+    MOCK_METHOD(void, setIgnorePresentFences, (bool), (override));
     MOCK_METHOD(void, setDisplayPowerMode, (hal::PowerMode), (override));
 
-    MOCK_CONST_METHOD1(dump, void(std::string&));
+    MOCK_METHOD(void, dump, (std::string&), (const, override));
 };
 
 } // namespace android::mock