Merge "TouchMode (6.2/n) Fully detaching touch mode from focus event (native)"
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 4dd4f79..3b2a9e7 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,3 +1,4 @@
 [Builtin Hooks]
+bpfmt = true
 clang_format = true
 
diff --git a/cmds/installd/InstalldNativeService.cpp b/cmds/installd/InstalldNativeService.cpp
index 39ef0b5..8d23efc 100644
--- a/cmds/installd/InstalldNativeService.cpp
+++ b/cmds/installd/InstalldNativeService.cpp
@@ -1455,7 +1455,7 @@
 }
 
 binder::Status InstalldNativeService::freeCache(const std::optional<std::string>& uuid,
-        int64_t targetFreeBytes, int64_t cacheReservedBytes, int32_t flags) {
+        int64_t targetFreeBytes, int32_t flags) {
     ENFORCE_UID(AID_SYSTEM);
     CHECK_ARGUMENT_UUID(uuid);
     std::lock_guard<std::recursive_mutex> lock(mLock);
@@ -1558,12 +1558,6 @@
                 break;
             }
 
-            // Only keep clearing when we haven't pushed into reserved area
-            if (cacheReservedBytes > 0 && cleared >= (cacheTotal - cacheReservedBytes)) {
-                LOG(DEBUG) << "Refusing to clear cached data in reserved space";
-                break;
-            }
-
             // Find the best tracker to work with; this might involve swapping
             // if the active tracker is no longer the most over quota
             bool nextBetter = active && !queue.empty()
diff --git a/cmds/installd/InstalldNativeService.h b/cmds/installd/InstalldNativeService.h
index 8cfda01..3fdb01a 100644
--- a/cmds/installd/InstalldNativeService.h
+++ b/cmds/installd/InstalldNativeService.h
@@ -145,7 +145,7 @@
 
     binder::Status rmPackageDir(const std::string& packageDir);
     binder::Status freeCache(const std::optional<std::string>& uuid, int64_t targetFreeBytes,
-            int64_t cacheReservedBytes, int32_t flags);
+            int32_t flags);
     binder::Status linkNativeLibraryDirectory(const std::optional<std::string>& uuid,
             const std::string& packageName, const std::string& nativeLibPath32, int32_t userId);
     binder::Status createOatDir(const std::string& oatDir, const std::string& instructionSet);
diff --git a/cmds/installd/binder/android/os/IInstalld.aidl b/cmds/installd/binder/android/os/IInstalld.aidl
index 637a9f2..9c51ff7 100644
--- a/cmds/installd/binder/android/os/IInstalld.aidl
+++ b/cmds/installd/binder/android/os/IInstalld.aidl
@@ -86,8 +86,7 @@
     void destroyProfileSnapshot(@utf8InCpp String packageName, @utf8InCpp String profileName);
 
     void rmPackageDir(@utf8InCpp String packageDir);
-    void freeCache(@nullable @utf8InCpp String uuid, long targetFreeBytes,
-            long cacheReservedBytes, int flags);
+    void freeCache(@nullable @utf8InCpp String uuid, long targetFreeBytes, int flags);
     void linkNativeLibraryDirectory(@nullable @utf8InCpp String uuid,
             @utf8InCpp String packageName, @utf8InCpp String nativeLibPath32, int userId);
     void createOatDir(@utf8InCpp String oatDir, @utf8InCpp String instructionSet);
diff --git a/cmds/installd/tests/installd_cache_test.cpp b/cmds/installd/tests/installd_cache_test.cpp
index 863cdfe..9a1e17e 100644
--- a/cmds/installd/tests/installd_cache_test.cpp
+++ b/cmds/installd/tests/installd_cache_test.cpp
@@ -145,7 +145,7 @@
     EXPECT_EQ(0, exists("com.example/cache/foo/one"));
     EXPECT_EQ(0, exists("com.example/cache/foo/two"));
 
-    service->freeCache(testUuid, kTbInBytes, 0,
+    service->freeCache(testUuid, kTbInBytes,
             FLAG_FREE_CACHE_V2 | FLAG_FREE_CACHE_V2_DEFY_QUOTA);
 
     EXPECT_EQ(0, exists("com.example/normal"));
@@ -153,6 +153,33 @@
     EXPECT_EQ(-1, exists("com.example/cache/foo/two"));
 }
 
+TEST_F(CacheTest, FreeCache_NonAggressive) {
+    LOG(INFO) << "FreeCache_NonAggressive";
+
+    mkdir("com.example");
+    touch("com.example/normal", 1 * kMbInBytes, 60);
+    mkdir("com.example/cache");
+    mkdir("com.example/cache/foo");
+    touch("com.example/cache/foo/one", 65 * kMbInBytes, 60);
+    touch("com.example/cache/foo/two", 2 * kMbInBytes, 120);
+
+    EXPECT_EQ(0, exists("com.example/normal"));
+    EXPECT_EQ(0, exists("com.example/cache/foo/one"));
+    EXPECT_EQ(0, exists("com.example/cache/foo/two"));
+
+    service->freeCache(testUuid, kTbInBytes, FLAG_FREE_CACHE_V2);
+
+    EXPECT_EQ(0, exists("com.example/normal"));
+    EXPECT_EQ(-1, exists("com.example/cache/foo/one"));
+    EXPECT_EQ(0, exists("com.example/cache/foo/two"));
+
+    service->freeCache(testUuid, kTbInBytes, FLAG_FREE_CACHE_V2);
+
+    EXPECT_EQ(0, exists("com.example/normal"));
+    EXPECT_EQ(-1, exists("com.example/cache/foo/one"));
+    EXPECT_EQ(0, exists("com.example/cache/foo/two"));
+}
+
 TEST_F(CacheTest, FreeCache_Age) {
     LOG(INFO) << "FreeCache_Age";
 
@@ -162,13 +189,13 @@
     touch("com.example/cache/foo/one", kMbInBytes, 60);
     touch("com.example/cache/foo/two", kMbInBytes, 120);
 
-    service->freeCache(testUuid, free() + kKbInBytes, 0,
+    service->freeCache(testUuid, free() + kKbInBytes,
             FLAG_FREE_CACHE_V2 | FLAG_FREE_CACHE_V2_DEFY_QUOTA);
 
     EXPECT_EQ(-1, exists("com.example/cache/foo/one"));
     EXPECT_EQ(0, exists("com.example/cache/foo/two"));
 
-    service->freeCache(testUuid, free() + kKbInBytes, 0,
+    service->freeCache(testUuid, free() + kKbInBytes,
             FLAG_FREE_CACHE_V2 | FLAG_FREE_CACHE_V2_DEFY_QUOTA);
 
     EXPECT_EQ(-1, exists("com.example/cache/foo/one"));
@@ -196,7 +223,7 @@
     EXPECT_EQ(2 * kMbInBytes, size("com.example/cache/bar/bar1"));
     EXPECT_EQ(2 * kMbInBytes, size("com.example/cache/bar/bar2"));
 
-    service->freeCache(testUuid, kTbInBytes, 0,
+    service->freeCache(testUuid, kTbInBytes,
             FLAG_FREE_CACHE_V2 | FLAG_FREE_CACHE_V2_DEFY_QUOTA);
 
     EXPECT_EQ(-1, exists("com.example/cache/foo/foo1"));
@@ -218,7 +245,7 @@
 
     setxattr("com.example/cache/foo", "user.cache_group");
 
-    service->freeCache(testUuid, free() + kKbInBytes, 0,
+    service->freeCache(testUuid, free() + kKbInBytes,
             FLAG_FREE_CACHE_V2 | FLAG_FREE_CACHE_V2_DEFY_QUOTA);
 
     EXPECT_EQ(-1, exists("com.example/cache/foo/foo1"));
@@ -263,7 +290,7 @@
     setxattr("com.example/cache/tomb", "user.cache_tombstone");
     setxattr("com.example/cache/tomb/group", "user.cache_group");
 
-    service->freeCache(testUuid, free() + kKbInBytes, 0,
+    service->freeCache(testUuid, free() + kKbInBytes,
             FLAG_FREE_CACHE_V2 | FLAG_FREE_CACHE_V2_DEFY_QUOTA);
 
     EXPECT_EQ(kMbInBytes, size("com.example/cache/group/file1"));
@@ -284,7 +311,7 @@
     EXPECT_EQ(0, size("com.example/cache/tomb/group/dir/file1"));
     EXPECT_EQ(0, size("com.example/cache/tomb/group/dir/file2"));
 
-    service->freeCache(testUuid, free() + kKbInBytes, 0,
+    service->freeCache(testUuid, free() + kKbInBytes,
             FLAG_FREE_CACHE_V2 | FLAG_FREE_CACHE_V2_DEFY_QUOTA);
 
     EXPECT_EQ(-1, size("com.example/cache/group/file1"));
@@ -305,7 +332,7 @@
     EXPECT_EQ(0, size("com.example/cache/tomb/group/dir/file1"));
     EXPECT_EQ(0, size("com.example/cache/tomb/group/dir/file2"));
 
-    service->freeCache(testUuid, kTbInBytes, 0,
+    service->freeCache(testUuid, kTbInBytes,
             FLAG_FREE_CACHE_V2 | FLAG_FREE_CACHE_V2_DEFY_QUOTA);
 
     EXPECT_EQ(-1, size("com.example/cache/group/file1"));
diff --git a/data/etc/Android.bp b/data/etc/Android.bp
index 5fe4ea1..931c5e3 100644
--- a/data/etc/Android.bp
+++ b/data/etc/Android.bp
@@ -89,6 +89,12 @@
 }
 
 prebuilt_etc {
+    name: "android.hardware.fingerprint.prebuilt.xml",
+    src: "android.hardware.fingerprint.xml",
+    defaults: ["frameworks_native_data_etc_defaults"],
+}
+
+prebuilt_etc {
     name: "android.hardware.location.gps.prebuilt.xml",
     src: "android.hardware.location.gps.xml",
     defaults: ["frameworks_native_data_etc_defaults"],
diff --git a/libs/battery/MultiStateCounter.h b/libs/battery/MultiStateCounter.h
index 03e1f2a..a2b59a8 100644
--- a/libs/battery/MultiStateCounter.h
+++ b/libs/battery/MultiStateCounter.h
@@ -195,10 +195,18 @@
                         << ", which is lower than the previous value " << valueToString(lastValue)
                         << "\n";
                     ALOGE("%s", str.str().c_str());
+
+                    for (int i = 0; i < stateCount; i++) {
+                        states[i].timeInStateSinceUpdate = 0;
+                    }
                 }
             } else if (timestamp < lastUpdateTimestamp) {
                 ALOGE("updateValue is called with an earlier timestamp: %lu, previous: %lu\n",
                       (unsigned long)timestamp, (unsigned long)lastUpdateTimestamp);
+
+                for (int i = 0; i < stateCount; i++) {
+                    states[i].timeInStateSinceUpdate = 0;
+                }
             }
         }
     }
diff --git a/libs/battery/MultiStateCounterTest.cpp b/libs/battery/MultiStateCounterTest.cpp
index 848fd10..876bf74 100644
--- a/libs/battery/MultiStateCounterTest.cpp
+++ b/libs/battery/MultiStateCounterTest.cpp
@@ -176,7 +176,7 @@
     testCounter.setState(0, 0);
     testCounter.updateValue(6.0, 2000);
 
-    // Time moves back. The negative delta from 2000 to 1000 is ignored
+    // Time moves back. The delta over the negative interval from 2000 to 1000 is ignored
     testCounter.updateValue(8.0, 1000);
     double delta = testCounter.updateValue(11.0, 3000);
 
@@ -189,6 +189,27 @@
     EXPECT_DOUBLE_EQ(3.0, delta);
 }
 
+TEST_F(MultiStateCounterTest, updateValue_nonmonotonic) {
+    DoubleMultiStateCounter testCounter(2, 0);
+    testCounter.updateValue(0, 0);
+    testCounter.setState(0, 0);
+    testCounter.updateValue(6.0, 2000);
+
+    // Value goes down. The negative delta from 6.0 to 4.0 is ignored
+    testCounter.updateValue(4.0, 3000);
+
+    // Value goes up again. The positive delta from 4.0 to 7.0 is accumulated.
+    double delta = testCounter.updateValue(7.0, 4000);
+
+    // The total accumulated count is:
+    //  6.0          // For the period 0-2000
+    //  +(7.0-4.0)   // For the period 3000-4000
+    EXPECT_DOUBLE_EQ(9.0, testCounter.getCount(0));
+
+    //  7.0-4.0
+    EXPECT_DOUBLE_EQ(3.0, delta);
+}
+
 TEST_F(MultiStateCounterTest, addValue) {
     DoubleMultiStateCounter testCounter(1, 0);
     testCounter.updateValue(0, 0);
diff --git a/libs/binder/include_rpc_unstable/binder_rpc_unstable.hpp b/libs/binder/include_rpc_unstable/binder_rpc_unstable.hpp
index 34f1cbf..5baa4d7 100644
--- a/libs/binder/include_rpc_unstable/binder_rpc_unstable.hpp
+++ b/libs/binder/include_rpc_unstable/binder_rpc_unstable.hpp
@@ -32,9 +32,11 @@
 bool RunRpcServerCallback(AIBinder* service, unsigned int port, void (*readyCallback)(void* param),
                           void* param);
 
-// Starts an RPC server on a given port and a given root IBinder object.
-// This function sets up the server, calls readyCallback with a given param, and
-// then joins before returning.
+// Starts an RPC server on a given port and a given root IBinder factory.
+// RunRpcServerWithFactory acts like RunRpcServerCallback, but instead of
+// assigning a single root IBinder object to all connections, the factory is
+// called whenever a client connects, making it possible to assign a unique
+// IBinder object to each client.
 bool RunRpcServerWithFactory(AIBinder* (*factory)(unsigned int cid, void* context),
                           void* factoryContext, unsigned int port);
 
diff --git a/libs/binder/ndk/libbinder_ndk.map.txt b/libs/binder/ndk/libbinder_ndk.map.txt
index d63a8d0..197c0a1 100644
--- a/libs/binder/ndk/libbinder_ndk.map.txt
+++ b/libs/binder/ndk/libbinder_ndk.map.txt
@@ -119,11 +119,11 @@
     AIBinder_setRequestingSid; # apex
     AParcel_markSensitive; # systemapi llndk
     AServiceManager_forEachDeclaredInstance; # apex llndk
-    AServiceManager_forceLazyServicesPersist; # llndk
+    AServiceManager_forceLazyServicesPersist; # apex llndk
     AServiceManager_isDeclared; # apex llndk
     AServiceManager_isUpdatableViaApex; # apex
     AServiceManager_reRegister; # llndk
-    AServiceManager_registerLazyService; # llndk
+    AServiceManager_registerLazyService; # apex llndk
     AServiceManager_setActiveServicesCallback; # llndk
     AServiceManager_tryUnregister; # llndk
     AServiceManager_waitForService; # apex llndk
diff --git a/libs/binder/rust/Android.bp b/libs/binder/rust/Android.bp
index d323022..4561d6e 100644
--- a/libs/binder/rust/Android.bp
+++ b/libs/binder/rust/Android.bp
@@ -20,6 +20,7 @@
         "libdowncast_rs",
     ],
     host_supported: true,
+    vendor_available: true,
     target: {
         darwin: {
             enabled: false,
@@ -64,6 +65,7 @@
         "libbinder_ndk",
     ],
     host_supported: true,
+    vendor_available: true,
     target: {
         darwin: {
             enabled: false,
@@ -105,6 +107,7 @@
         "libbinder_ndk",
     ],
     host_supported: true,
+    vendor_available: true,
 
     // Currently necessary for host builds
     // TODO(b/31559095): bionic on host should define this
diff --git a/libs/binder/rust/src/binder.rs b/libs/binder/rust/src/binder.rs
index 4e048d7..d09ac83 100644
--- a/libs/binder/rust/src/binder.rs
+++ b/libs/binder/rust/src/binder.rs
@@ -17,7 +17,7 @@
 //! Trait definitions for binder objects
 
 use crate::error::{status_t, Result, StatusCode};
-use crate::parcel::{OwnedParcel, Parcel};
+use crate::parcel::{Parcel, BorrowedParcel};
 use crate::proxy::{DeathRecipient, SpIBinder, WpIBinder};
 use crate::sys;
 
@@ -129,7 +129,7 @@
     /// Handle and reply to a request to invoke a transaction on this object.
     ///
     /// `reply` may be [`None`] if the sender does not expect a reply.
-    fn on_transact(&self, code: TransactionCode, data: &Parcel, reply: &mut Parcel) -> Result<()>;
+    fn on_transact(&self, code: TransactionCode, data: &BorrowedParcel<'_>, reply: &mut BorrowedParcel<'_>) -> Result<()>;
 
     /// Handle a request to invoke the dump transaction on this
     /// object.
@@ -167,6 +167,7 @@
     fn ping_binder(&mut self) -> Result<()>;
 
     /// Indicate that the service intends to receive caller security contexts.
+    #[cfg(not(android_vndk))]
     fn set_requesting_sid(&mut self, enable: bool);
 
     /// Dump this object to the given file handle
@@ -177,25 +178,25 @@
     fn get_extension(&mut self) -> Result<Option<SpIBinder>>;
 
     /// Create a Parcel that can be used with `submit_transact`.
-    fn prepare_transact(&self) -> Result<OwnedParcel>;
+    fn prepare_transact(&self) -> Result<Parcel>;
 
     /// Perform a generic operation with the object.
     ///
-    /// The provided [`OwnedParcel`] must have been created by a call to
+    /// The provided [`Parcel`] must have been created by a call to
     /// `prepare_transact` on the same binder.
     ///
     /// # Arguments
     ///
     /// * `code` - Transaction code for the operation.
-    /// * `data` - [`OwnedParcel`] with input data.
+    /// * `data` - [`Parcel`] with input data.
     /// * `flags` - Transaction flags, e.g. marking the transaction as
     ///   asynchronous ([`FLAG_ONEWAY`](FLAG_ONEWAY)).
     fn submit_transact(
         &self,
         code: TransactionCode,
-        data: OwnedParcel,
+        data: Parcel,
         flags: TransactionFlags,
-    ) -> Result<OwnedParcel>;
+    ) -> Result<Parcel>;
 
     /// Perform a generic operation with the object. This is a convenience
     /// method that internally calls `prepare_transact` followed by
@@ -206,15 +207,15 @@
     /// * `flags` - Transaction flags, e.g. marking the transaction as
     ///   asynchronous ([`FLAG_ONEWAY`](FLAG_ONEWAY))
     /// * `input_callback` A callback for building the `Parcel`.
-    fn transact<F: FnOnce(&mut Parcel) -> Result<()>>(
+    fn transact<F: FnOnce(BorrowedParcel<'_>) -> Result<()>>(
         &self,
         code: TransactionCode,
         flags: TransactionFlags,
         input_callback: F,
     ) -> Result<Parcel> {
         let mut parcel = self.prepare_transact()?;
-        input_callback(&mut parcel.borrowed())?;
-        self.submit_transact(code, parcel, flags).map(OwnedParcel::into_parcel)
+        input_callback(parcel.borrowed())?;
+        self.submit_transact(code, parcel, flags)
     }
 }
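For reference, a minimal client-side sketch of the reworked flow above: prepare_transact() now yields an owned Parcel, the closure receives a BorrowedParcel by value, and submit_transact() returns the reply as an owned Parcel. Names are hypothetical, and this assumes the crate root re-exports IBinder and IBinderInternal:

    use binder::{IBinder, IBinderInternal, SpIBinder};

    // Hypothetical one-method protocol: send an i32, read an i32 back.
    fn query_answer(binder: &SpIBinder) -> binder::Result<i32> {
        let reply = binder.transact(SpIBinder::FIRST_CALL_TRANSACTION, 0, |mut data| {
            // The closure fills in the request Parcel prepared by prepare_transact().
            data.write(&42i32)
        })?;
        // submit_transact() returns an owned reply Parcel that can be read directly.
        reply.read::<i32>()
    }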
 
@@ -475,8 +476,8 @@
 ///     fn on_transact(
 ///         &self,
 ///         code: TransactionCode,
-///         data: &Parcel,
-///         reply: &mut Parcel,
+///         data: &BorrowedParcel,
+///         reply: &mut BorrowedParcel,
 ///     ) -> Result<()> {
 ///         // ...
 ///     }
@@ -635,6 +636,7 @@
 pub struct BinderFeatures {
     /// Indicates that the service intends to receive caller security contexts. This must be true
     /// for `ThreadState::with_calling_sid` to work.
+    #[cfg(not(android_vndk))]
     pub set_requesting_sid: bool,
     // Ensure that clients include a ..BinderFeatures::default() to preserve backwards compatibility
     // when new fields are added. #[non_exhaustive] doesn't work because it prevents struct
@@ -655,13 +657,13 @@
 /// have the following type:
 ///
 /// ```
-/// # use binder::{Interface, TransactionCode, Parcel};
+/// # use binder::{Interface, TransactionCode, BorrowedParcel};
 /// # trait Placeholder {
 /// fn on_transact(
 ///     service: &dyn Interface,
 ///     code: TransactionCode,
-///     data: &Parcel,
-///     reply: &mut Parcel,
+///     data: &BorrowedParcel,
+///     reply: &mut BorrowedParcel,
 /// ) -> binder::Result<()>;
 /// # }
 /// ```
@@ -676,7 +678,7 @@
 /// using the provided function, `on_transact`.
 ///
 /// ```
-/// use binder::{declare_binder_interface, Binder, Interface, TransactionCode, Parcel};
+/// use binder::{declare_binder_interface, Binder, Interface, TransactionCode, BorrowedParcel};
 ///
 /// pub trait IServiceManager: Interface {
 ///     // remote methods...
@@ -692,8 +694,8 @@
 /// fn on_transact(
 ///     service: &dyn IServiceManager,
 ///     code: TransactionCode,
-///     data: &Parcel,
-///     reply: &mut Parcel,
+///     data: &BorrowedParcel,
+///     reply: &mut BorrowedParcel,
 /// ) -> binder::Result<()> {
 ///     // ...
 ///     Ok(())
@@ -837,6 +839,7 @@
             /// Create a new binder service.
             pub fn new_binder<T: $interface + Sync + Send + 'static>(inner: T, features: $crate::BinderFeatures) -> $crate::Strong<dyn $interface> {
                 let mut binder = $crate::Binder::new_with_stability($native(Box::new(inner)), $stability);
+                #[cfg(not(android_vndk))]
                 $crate::IBinderInternal::set_requesting_sid(&mut binder, features.set_requesting_sid);
                 $crate::Strong::new(Box::new(binder))
             }
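A hedged usage sketch of the generated constructor above (BnTestService and MyService are hypothetical stand-ins for a declare_binder_interface! stub and its implementation; the set_requesting_sid field only exists on non-VNDK builds, per the cfg attributes in this change):

    use binder::{add_service, BinderFeatures, Interface};

    // `BnTestService` stands in for a stub produced by declare_binder_interface!,
    // and `MyService` for a local implementation of its interface trait.
    fn register(inner: MyService) -> binder::Result<()> {
        let service = BnTestService::new_binder(
            inner,
            BinderFeatures {
                // Required for ThreadState::with_calling_sid on the server side.
                set_requesting_sid: true,
                ..BinderFeatures::default()
            },
        );
        add_service("testservice", service.as_binder())
    }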
@@ -847,7 +850,7 @@
                 $descriptor
             }
 
-            fn on_transact(&self, code: $crate::TransactionCode, data: &$crate::Parcel, reply: &mut $crate::Parcel) -> $crate::Result<()> {
+            fn on_transact(&self, code: $crate::TransactionCode, data: &$crate::BorrowedParcel<'_>, reply: &mut $crate::BorrowedParcel<'_>) -> $crate::Result<()> {
                 match $on_transact(&*self.0, code, data, reply) {
                     // The C++ backend converts UNEXPECTED_NULL into an exception
                     Err($crate::StatusCode::UNEXPECTED_NULL) => {
@@ -922,14 +925,14 @@
         where
             dyn $interface: $crate::Interface
         {
-            fn serialize(&self, parcel: &mut $crate::parcel::Parcel) -> $crate::Result<()> {
+            fn serialize(&self, parcel: &mut $crate::parcel::BorrowedParcel<'_>) -> $crate::Result<()> {
                 let binder = $crate::Interface::as_binder(self);
                 parcel.write(&binder)
             }
         }
 
         impl $crate::parcel::SerializeOption for dyn $interface + '_ {
-            fn serialize_option(this: Option<&Self>, parcel: &mut $crate::parcel::Parcel) -> $crate::Result<()> {
+            fn serialize_option(this: Option<&Self>, parcel: &mut $crate::parcel::BorrowedParcel<'_>) -> $crate::Result<()> {
                 parcel.write(&this.map($crate::Interface::as_binder))
             }
         }
@@ -988,14 +991,14 @@
         }
 
         impl<P: $crate::BinderAsyncPool> $crate::parcel::Serialize for dyn $async_interface<P> + '_ {
-            fn serialize(&self, parcel: &mut $crate::parcel::Parcel) -> $crate::Result<()> {
+            fn serialize(&self, parcel: &mut $crate::parcel::BorrowedParcel<'_>) -> $crate::Result<()> {
                 let binder = $crate::Interface::as_binder(self);
                 parcel.write(&binder)
             }
         }
 
         impl<P: $crate::BinderAsyncPool> $crate::parcel::SerializeOption for dyn $async_interface<P> + '_ {
-            fn serialize_option(this: Option<&Self>, parcel: &mut $crate::parcel::Parcel) -> $crate::Result<()> {
+            fn serialize_option(this: Option<&Self>, parcel: &mut $crate::parcel::BorrowedParcel<'_>) -> $crate::Result<()> {
                 parcel.write(&this.map($crate::Interface::as_binder))
             }
         }
@@ -1040,26 +1043,26 @@
         }
 
         impl $crate::parcel::Serialize for $enum {
-            fn serialize(&self, parcel: &mut $crate::parcel::Parcel) -> $crate::Result<()> {
+            fn serialize(&self, parcel: &mut $crate::parcel::BorrowedParcel<'_>) -> $crate::Result<()> {
                 parcel.write(&self.0)
             }
         }
 
         impl $crate::parcel::SerializeArray for $enum {
-            fn serialize_array(slice: &[Self], parcel: &mut $crate::parcel::Parcel) -> $crate::Result<()> {
+            fn serialize_array(slice: &[Self], parcel: &mut $crate::parcel::BorrowedParcel<'_>) -> $crate::Result<()> {
                 let v: Vec<$backing> = slice.iter().map(|x| x.0).collect();
                 <$backing as binder::parcel::SerializeArray>::serialize_array(&v[..], parcel)
             }
         }
 
         impl $crate::parcel::Deserialize for $enum {
-            fn deserialize(parcel: &$crate::parcel::Parcel) -> $crate::Result<Self> {
+            fn deserialize(parcel: &$crate::parcel::BorrowedParcel<'_>) -> $crate::Result<Self> {
                 parcel.read().map(Self)
             }
         }
 
         impl $crate::parcel::DeserializeArray for $enum {
-            fn deserialize_array(parcel: &$crate::parcel::Parcel) -> $crate::Result<Option<Vec<Self>>> {
+            fn deserialize_array(parcel: &$crate::parcel::BorrowedParcel<'_>) -> $crate::Result<Option<Vec<Self>>> {
                 let v: Option<Vec<$backing>> =
                     <$backing as binder::parcel::DeserializeArray>::deserialize_array(parcel)?;
                 Ok(v.map(|v| v.into_iter().map(Self).collect()))
diff --git a/libs/binder/rust/src/lib.rs b/libs/binder/rust/src/lib.rs
index 2ac2d2f..cce55c0 100644
--- a/libs/binder/rust/src/lib.rs
+++ b/libs/binder/rust/src/lib.rs
@@ -50,8 +50,8 @@
 //! fn on_transact(
 //!     service: &dyn ITest,
 //!     code: TransactionCode,
-//!     _data: &Parcel,
-//!     reply: &mut Parcel,
+//!     _data: &BorrowedParcel,
+//!     reply: &mut BorrowedParcel,
 //! ) -> binder::Result<()> {
 //!     match code {
 //!         SpIBinder::FIRST_CALL_TRANSACTION => {
@@ -115,7 +115,7 @@
 pub use crate::binder_async::{BoxFuture, BinderAsyncPool};
 pub use error::{status_t, ExceptionCode, Result, Status, StatusCode};
 pub use native::{add_service, force_lazy_services_persist, register_lazy_service, Binder};
-pub use parcel::{OwnedParcel, Parcel};
+pub use parcel::{BorrowedParcel, Parcel};
 pub use proxy::{get_interface, get_service, wait_for_interface, wait_for_service};
 pub use proxy::{AssociateClass, DeathRecipient, Proxy, SpIBinder, WpIBinder};
 pub use state::{ProcessState, ThreadState};
diff --git a/libs/binder/rust/src/native.rs b/libs/binder/rust/src/native.rs
index a91092e..e183ea3 100644
--- a/libs/binder/rust/src/native.rs
+++ b/libs/binder/rust/src/native.rs
@@ -18,7 +18,7 @@
     AsNative, Interface, InterfaceClassMethods, Remotable, Stability, TransactionCode,
 };
 use crate::error::{status_result, status_t, Result, StatusCode};
-use crate::parcel::{Parcel, Serialize};
+use crate::parcel::{BorrowedParcel, Serialize};
 use crate::proxy::SpIBinder;
 use crate::sys;
 
@@ -161,8 +161,8 @@
     ///        # fn on_transact(
     ///        #     service: &dyn IBar,
     ///        #     code: TransactionCode,
-    ///        #     data: &Parcel,
-    ///        #     reply: &mut Parcel,
+    ///        #     data: &BorrowedParcel,
+    ///        #     reply: &mut BorrowedParcel,
     ///        # ) -> binder::Result<()> {
     ///        #     Ok(())
     ///        # }
@@ -212,7 +212,7 @@
 
     /// Mark this binder object with local stability, which is vendor if we are
     /// building for the VNDK and system otherwise.
-    #[cfg(vendor_ndk)]
+    #[cfg(any(vendor_ndk, android_vndk))]
     fn mark_local_stability(&mut self) {
         unsafe {
             // Safety: Self always contains a valid `AIBinder` pointer, so
@@ -223,7 +223,7 @@
 
     /// Mark this binder object with local stability, which is vendor if we are
     /// building for the VNDK and system otherwise.
-    #[cfg(not(vendor_ndk))]
+    #[cfg(not(any(vendor_ndk, android_vndk)))]
     fn mark_local_stability(&mut self) {
         unsafe {
             // Safety: Self always contains a valid `AIBinder` pointer, so
@@ -277,8 +277,8 @@
         reply: *mut sys::AParcel,
     ) -> status_t {
         let res = {
-            let mut reply = Parcel::borrowed(reply).unwrap();
-            let data = Parcel::borrowed(data as *mut sys::AParcel).unwrap();
+            let mut reply = BorrowedParcel::from_raw(reply).unwrap();
+            let data = BorrowedParcel::from_raw(data as *mut sys::AParcel).unwrap();
             let object = sys::AIBinder_getUserData(binder);
             let binder: &T = &*(object as *const T);
             binder.on_transact(code, &data, &mut reply)
@@ -384,7 +384,7 @@
 }
 
 impl<B: Remotable> Serialize for Binder<B> {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         parcel.write_binder(Some(&self.as_binder()))
     }
 }
@@ -503,8 +503,8 @@
     fn on_transact(
         &self,
         _code: TransactionCode,
-        _data: &Parcel,
-        _reply: &mut Parcel,
+        _data: &BorrowedParcel<'_>,
+        _reply: &mut BorrowedParcel<'_>,
     ) -> Result<()> {
         Ok(())
     }
diff --git a/libs/binder/rust/src/parcel.rs b/libs/binder/rust/src/parcel.rs
index a0e1478..206b90c 100644
--- a/libs/binder/rust/src/parcel.rs
+++ b/libs/binder/rust/src/parcel.rs
@@ -21,11 +21,10 @@
 use crate::proxy::SpIBinder;
 use crate::sys;
 
-use std::cell::RefCell;
 use std::convert::TryInto;
 use std::marker::PhantomData;
 use std::mem::ManuallyDrop;
-use std::ptr;
+use std::ptr::{self, NonNull};
 use std::fmt;
 
 mod file_descriptor;
@@ -46,53 +45,41 @@
 /// other side of the IPC, and references to live Binder objects that will
 /// result in the other side receiving a proxy Binder connected with the
 /// original Binder in the Parcel.
-pub enum Parcel {
-    /// Owned parcel pointer
-    Owned(*mut sys::AParcel),
-    /// Borrowed parcel pointer (will not be destroyed on drop)
-    Borrowed(*mut sys::AParcel),
-}
-
-/// A variant of Parcel that is known to be owned.
-pub struct OwnedParcel {
-    ptr: *mut sys::AParcel,
+///
+/// This type represents a parcel that is owned by Rust code.
+#[repr(transparent)]
+pub struct Parcel {
+    ptr: NonNull<sys::AParcel>,
 }
 
 /// # Safety
 ///
 /// This type guarantees that it owns the AParcel and that all access to
-/// the AParcel happens through the OwnedParcel, so it is ok to send across
+/// the AParcel happens through the Parcel, so it is ok to send across
 /// threads.
-unsafe impl Send for OwnedParcel {}
+unsafe impl Send for Parcel {}
 
-/// A variant of Parcel that is known to be borrowed.
+/// Container for a message (data and object references) that can be sent
+/// through Binder.
+///
+/// This object is a borrowed variant of [`Parcel`]. It is a separate type from
+/// `&mut Parcel` because it is not valid to `mem::swap` two parcels.
+#[repr(transparent)]
 pub struct BorrowedParcel<'a> {
-    inner: Parcel,
+    ptr: NonNull<sys::AParcel>,
     _lifetime: PhantomData<&'a mut Parcel>,
 }
 
-impl OwnedParcel {
-    /// Create a new empty `OwnedParcel`.
-    pub fn new() -> OwnedParcel {
+impl Parcel {
+    /// Create a new empty `Parcel`.
+    pub fn new() -> Parcel {
         let ptr = unsafe {
             // Safety: If `AParcel_create` succeeds, it always returns
             // a valid pointer. If it fails, the process will crash.
             sys::AParcel_create()
         };
-        assert!(!ptr.is_null());
-        Self { ptr }
-    }
-
-    /// Convert the provided parcel to an owned parcel, or return `None` if it
-    /// is borrowed.
-    pub fn try_from(parcel: Parcel) -> Option<OwnedParcel> {
-        match &parcel {
-            Parcel::Owned(ptr) => {
-                let ptr = *ptr;
-                std::mem::forget(parcel);
-                Some(OwnedParcel { ptr })
-            }
-            Parcel::Borrowed(_) => None,
+        Self {
+            ptr: NonNull::new(ptr).expect("AParcel_create returned null pointer")
         }
     }
 
@@ -107,108 +94,43 @@
     ///
     /// Additionally, the caller must guarantee that it is valid to take
     /// ownership of the AParcel object. All future access to the AParcel
-    /// must happen through this `OwnedParcel`.
+    /// must happen through this `Parcel`.
     ///
-    /// Because `OwnedParcel` implements `Send`, the pointer must never point
-    /// to any thread-local data, e.g., a variable on the stack, either directly
-    /// or indirectly.
-    pub unsafe fn from_raw(ptr: *mut sys::AParcel) -> Option<OwnedParcel> {
-        ptr.as_mut().map(|ptr| Self { ptr })
+    /// Because `Parcel` implements `Send`, the pointer must never point to any
+    /// thread-local data, e.g., a variable on the stack, either directly or
+    /// indirectly.
+    pub unsafe fn from_raw(ptr: *mut sys::AParcel) -> Option<Parcel> {
+        NonNull::new(ptr).map(|ptr| Self { ptr })
     }
 
     /// Consume the parcel, transferring ownership to the caller.
     pub(crate) fn into_raw(self) -> *mut sys::AParcel {
-        let ptr = self.ptr;
+        let ptr = self.ptr.as_ptr();
         let _ = ManuallyDrop::new(self);
         ptr
     }
 
-    /// Convert this `OwnedParcel` into an owned `Parcel`.
-    pub fn into_parcel(self) -> Parcel {
-        Parcel::Owned(self.into_raw())
-    }
-
     /// Get a borrowed view into the contents of this `Parcel`.
     pub fn borrowed(&mut self) -> BorrowedParcel<'_> {
+        // Safety: The raw pointer is a valid pointer to an AParcel, and the
+        // lifetime of the returned `BorrowedParcel` is tied to `self`, so the
+        // borrow checker will ensure that the `AParcel` can only be accessed
+        // via the `BorrowedParcel` until it goes out of scope.
         BorrowedParcel {
-            inner: Parcel::Borrowed(self.ptr),
+            ptr: self.ptr,
             _lifetime: PhantomData,
         }
     }
-}
 
-impl Default for OwnedParcel {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-impl Clone for OwnedParcel {
-    fn clone(&self) -> Self {
-        let mut new_parcel = Self::new();
-        new_parcel
-            .borrowed()
-            .append_all_from(&Parcel::Borrowed(self.ptr))
-            .expect("Failed to append from Parcel");
-        new_parcel
-    }
-}
-
-impl<'a> std::ops::Deref for BorrowedParcel<'a> {
-    type Target = Parcel;
-    fn deref(&self) -> &Parcel {
-        &self.inner
-    }
-}
-impl<'a> std::ops::DerefMut for BorrowedParcel<'a> {
-    fn deref_mut(&mut self) -> &mut Parcel {
-        &mut self.inner
-    }
-}
-
-/// # Safety
-///
-/// The `Parcel` constructors guarantee that a `Parcel` object will always
-/// contain a valid pointer to an `AParcel`.
-unsafe impl AsNative<sys::AParcel> for Parcel {
-    fn as_native(&self) -> *const sys::AParcel {
-        match *self {
-            Self::Owned(x) | Self::Borrowed(x) => x,
+    /// Get an immutable borrowed view into the contents of this `Parcel`.
+    pub fn borrowed_ref(&self) -> &BorrowedParcel<'_> {
+        // Safety: Parcel and BorrowedParcel are both represented in the same
+        // way as a NonNull<sys::AParcel> due to their use of repr(transparent),
+        // so casting references as done here is valid.
+        unsafe {
+            &*(self as *const Parcel as *const BorrowedParcel<'_>)
         }
     }
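The cast in borrowed_ref() relies only on the repr(transparent) layout guarantee; the same pattern shown standalone, with stand-in types rather than the binder types themselves:

    use std::marker::PhantomData;
    use std::ptr::NonNull;

    #[repr(transparent)]
    struct Owned(NonNull<u8>);

    #[repr(transparent)]
    struct Borrowed<'a>(NonNull<u8>, PhantomData<&'a mut Owned>);

    // Sound only because both wrappers are repr(transparent) over NonNull<u8>,
    // so they are guaranteed to share the same layout.
    fn borrowed_ref(owned: &Owned) -> &Borrowed<'_> {
        unsafe { &*(owned as *const Owned as *const Borrowed<'_>) }
    }

    fn main() {
        let mut byte = 0u8;
        let owned = Owned(NonNull::from(&mut byte));
        assert_eq!(owned.0, borrowed_ref(&owned).0);
    }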
-
-    fn as_native_mut(&mut self) -> *mut sys::AParcel {
-        match *self {
-            Self::Owned(x) | Self::Borrowed(x) => x,
-        }
-    }
-}
-
-impl Parcel {
-    /// Create a new empty `Parcel`.
-    ///
-    /// Creates a new owned empty parcel that can be written to
-    /// using the serialization methods and appended to and
-    /// from using `append_from` and `append_from_all`.
-    pub fn new() -> Parcel {
-        let parcel = unsafe {
-            // Safety: If `AParcel_create` succeeds, it always returns
-            // a valid pointer. If it fails, the process will crash.
-            sys::AParcel_create()
-        };
-        assert!(!parcel.is_null());
-        Self::Owned(parcel)
-    }
-
-    /// Create a borrowed reference to a parcel object from a raw pointer.
-    ///
-    /// # Safety
-    ///
-    /// This constructor is safe if the raw pointer parameter is either null
-    /// (resulting in `None`), or a valid pointer to an `AParcel` object.
-    pub(crate) unsafe fn borrowed(ptr: *mut sys::AParcel) -> Option<Parcel> {
-        ptr.as_mut().map(|ptr| Self::Borrowed(ptr))
-    }
 }
 
 impl Default for Parcel {
@@ -221,14 +143,77 @@
     fn clone(&self) -> Self {
         let mut new_parcel = Self::new();
         new_parcel
-            .append_all_from(self)
+            .borrowed()
+            .append_all_from(self.borrowed_ref())
             .expect("Failed to append from Parcel");
         new_parcel
     }
 }
 
+impl<'a> BorrowedParcel<'a> {
+    /// Create a borrowed reference to a parcel object from a raw pointer.
+    ///
+    /// # Safety
+    ///
+    /// This constructor is safe if the raw pointer parameter is either null
+    /// (resulting in `None`), or a valid pointer to an `AParcel` object.
+    ///
+    /// Since the raw pointer is not restricted by any lifetime, the lifetime on
+    /// the returned `BorrowedParcel` object can be chosen arbitrarily by the
+    /// caller. The caller must ensure it is valid to mutably borrow the AParcel
+    /// for the duration of the lifetime that the caller chooses. Note that
+    /// since this is a mutable borrow, it must have exclusive access to the
+    /// AParcel for the duration of the borrow.
+    pub unsafe fn from_raw(ptr: *mut sys::AParcel) -> Option<BorrowedParcel<'a>> {
+        Some(Self {
+            ptr: NonNull::new(ptr)?,
+            _lifetime: PhantomData,
+        })
+    }
+
+    /// Get a sub-reference to this reference to the parcel.
+    pub fn reborrow(&mut self) -> BorrowedParcel<'_> {
+        // Safety: The raw pointer is a valid pointer to an AParcel, and the
+        // lifetime of the returned `BorrowedParcel` is tied to `self`, so the
+        // borrow checker will ensure that the `AParcel` can only be accessed
+        // via the `BorrowedParcel` until it goes out of scope.
+        BorrowedParcel {
+            ptr: self.ptr,
+            _lifetime: PhantomData,
+        }
+    }
+}
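Since a BorrowedParcel is passed by value, the reborrow() method above is what lets a caller hand out a temporary sub-borrow and keep writing afterwards; a short sketch with a hypothetical helper:

    use binder::BorrowedParcel;

    fn write_magic(mut p: BorrowedParcel<'_>) -> binder::Result<()> {
        p.write(&0x4243u32)
    }

    fn write_payload(mut p: BorrowedParcel<'_>) -> binder::Result<()> {
        write_magic(p.reborrow())?; // sub-borrow ends here, so `p` is usable again
        p.write(&1u32)?;
        p.write(&2u32)
    }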
+
+/// # Safety
+///
+/// The `Parcel` constructors guarantee that a `Parcel` object will always
+/// contain a valid pointer to an `AParcel`.
+unsafe impl AsNative<sys::AParcel> for Parcel {
+    fn as_native(&self) -> *const sys::AParcel {
+        self.ptr.as_ptr()
+    }
+
+    fn as_native_mut(&mut self) -> *mut sys::AParcel {
+        self.ptr.as_ptr()
+    }
+}
+
+/// # Safety
+///
+/// The `BorrowedParcel` constructors guarantee that a `BorrowedParcel` object
+/// will always contain a valid pointer to an `AParcel`.
+unsafe impl<'a> AsNative<sys::AParcel> for BorrowedParcel<'a> {
+    fn as_native(&self) -> *const sys::AParcel {
+        self.ptr.as_ptr()
+    }
+
+    fn as_native_mut(&mut self) -> *mut sys::AParcel {
+        self.ptr.as_ptr()
+    }
+}
+
 // Data serialization methods
-impl Parcel {
+impl<'a> BorrowedParcel<'a> {
     /// Data written to parcelable is zero'd before being deleted or reallocated.
     pub fn mark_sensitive(&mut self) {
         unsafe {
@@ -237,12 +222,12 @@
         }
     }
 
-    /// Write a type that implements [`Serialize`] to the `Parcel`.
+    /// Write a type that implements [`Serialize`] to the parcel.
     pub fn write<S: Serialize + ?Sized>(&mut self, parcelable: &S) -> Result<()> {
         parcelable.serialize(self)
     }
 
-    /// Writes the length of a slice to the `Parcel`.
+    /// Writes the length of a slice to the parcel.
     ///
     /// This is used in AIDL-generated client side code to indicate the
     /// allocated space for an output array parameter.
@@ -255,7 +240,7 @@
         }
     }
 
-    /// Perform a series of writes to the `Parcel`, prepended with the length
+    /// Perform a series of writes to the parcel, prepended with the length
     /// (in bytes) of the written data.
     ///
     /// The length `0i32` will be written to the parcel first, followed by the
@@ -269,7 +254,7 @@
     ///
     /// ```
     /// # use binder::{Binder, Interface, Parcel};
-    /// # let mut parcel = Parcel::Owned(std::ptr::null_mut());
+    /// # let mut parcel = Parcel::new();
     /// parcel.sized_write(|subparcel| {
     ///     subparcel.write(&1u32)?;
     ///     subparcel.write(&2u32)?;
@@ -283,14 +268,14 @@
     /// [16i32, 1u32, 2u32, 3u32]
     /// ```
     pub fn sized_write<F>(&mut self, f: F) -> Result<()>
-    where for<'a>
-        F: Fn(&'a WritableSubParcel<'a>) -> Result<()>
+    where
+        for<'b> F: FnOnce(&'b mut WritableSubParcel<'b>) -> Result<()>
     {
         let start = self.get_data_position();
         self.write(&0i32)?;
         {
-            let subparcel = WritableSubParcel(RefCell::new(self));
-            f(&subparcel)?;
+            let mut subparcel = WritableSubParcel(self.reborrow());
+            f(&mut subparcel)?;
         }
         let end = self.get_data_position();
         unsafe {
@@ -307,8 +292,8 @@
     /// Returns the current position in the parcel data.
     pub fn get_data_position(&self) -> i32 {
         unsafe {
-            // Safety: `Parcel` always contains a valid pointer to an `AParcel`,
-            // and this call is otherwise safe.
+            // Safety: `BorrowedParcel` always contains a valid pointer to an
+            // `AParcel`, and this call is otherwise safe.
             sys::AParcel_getDataPosition(self.as_native())
         }
     }
@@ -316,8 +301,8 @@
     /// Returns the total size of the parcel.
     pub fn get_data_size(&self) -> i32 {
         unsafe {
-            // Safety: `Parcel` always contains a valid pointer to an `AParcel`,
-            // and this call is otherwise safe.
+            // Safety: `BorrowedParcel` always contains a valid pointer to an
+            // `AParcel`, and this call is otherwise safe.
             sys::AParcel_getDataSize(self.as_native())
         }
     }
@@ -335,11 +320,11 @@
         status_result(sys::AParcel_setDataPosition(self.as_native(), pos))
     }
 
-    /// Append a subset of another `Parcel`.
+    /// Append a subset of another parcel.
     ///
     /// This appends `size` bytes of data from `other` starting at offset
-    /// `start` to the current `Parcel`, or returns an error if not possible.
-    pub fn append_from(&mut self, other: &Self, start: i32, size: i32) -> Result<()> {
+    /// `start` to the current parcel, or returns an error if not possible.
+    pub fn append_from(&mut self, other: &impl AsNative<sys::AParcel>, start: i32, size: i32) -> Result<()> {
         let status = unsafe {
             // Safety: `Parcel::appendFrom` from C++ checks that `start`
             // and `size` are in bounds, and returns an error otherwise.
@@ -354,33 +339,125 @@
         status_result(status)
     }
 
-    /// Append the contents of another `Parcel`.
-    pub fn append_all_from(&mut self, other: &Self) -> Result<()> {
-        self.append_from(other, 0, other.get_data_size())
+    /// Append the contents of another parcel.
+    pub fn append_all_from(&mut self, other: &impl AsNative<sys::AParcel>) -> Result<()> {
+        // Safety: `AsNative` guarantees that `as_native()` returns a valid
+        // pointer to an `AParcel`, and this call is otherwise safe.
+        let size = unsafe { sys::AParcel_getDataSize(other.as_native()) };
+        self.append_from(other, 0, size)
     }
 }
 
-/// A segment of a writable parcel, used for [`Parcel::sized_write`].
-pub struct WritableSubParcel<'a>(RefCell<&'a mut Parcel>);
+/// A segment of a writable parcel, used for [`BorrowedParcel::sized_write`].
+pub struct WritableSubParcel<'a>(BorrowedParcel<'a>);
 
 impl<'a> WritableSubParcel<'a> {
     /// Write a type that implements [`Serialize`] to the sub-parcel.
-    pub fn write<S: Serialize + ?Sized>(&self, parcelable: &S) -> Result<()> {
-        parcelable.serialize(&mut *self.0.borrow_mut())
+    pub fn write<S: Serialize + ?Sized>(&mut self, parcelable: &S) -> Result<()> {
+        parcelable.serialize(&mut self.0)
+    }
+}
+
+impl Parcel {
+    /// Data written to parcelable is zero'd before being deleted or reallocated.
+    pub fn mark_sensitive(&mut self) {
+        self.borrowed().mark_sensitive()
+    }
+
+    /// Write a type that implements [`Serialize`] to the parcel.
+    pub fn write<S: Serialize + ?Sized>(&mut self, parcelable: &S) -> Result<()> {
+        self.borrowed().write(parcelable)
+    }
+
+    /// Writes the length of a slice to the parcel.
+    ///
+    /// This is used in AIDL-generated client side code to indicate the
+    /// allocated space for an output array parameter.
+    pub fn write_slice_size<T>(&mut self, slice: Option<&[T]>) -> Result<()> {
+        self.borrowed().write_slice_size(slice)
+    }
+
+    /// Perform a series of writes to the parcel, prepended with the length
+    /// (in bytes) of the written data.
+    ///
+    /// The length `0i32` will be written to the parcel first, followed by the
+    /// writes performed by the callback. The initial length will then be
+    /// updated to the length of all data written by the callback, plus the
+    /// size of the length element itself (4 bytes).
+    ///
+    /// # Examples
+    ///
+    /// After the following call:
+    ///
+    /// ```
+    /// # use binder::{Binder, Interface, Parcel};
+    /// # let mut parcel = Parcel::new();
+    /// parcel.sized_write(|subparcel| {
+    ///     subparcel.write(&1u32)?;
+    ///     subparcel.write(&2u32)?;
+    ///     subparcel.write(&3u32)
+    /// });
+    /// ```
+    ///
+    /// `parcel` will contain the following:
+    ///
+    /// ```ignore
+    /// [16i32, 1u32, 2u32, 3u32]
+    /// ```
+    pub fn sized_write<F>(&mut self, f: F) -> Result<()>
+    where
+        for<'b> F: FnOnce(&'b mut WritableSubParcel<'b>) -> Result<()>
+    {
+        self.borrowed().sized_write(f)
+    }
+
+    /// Returns the current position in the parcel data.
+    pub fn get_data_position(&self) -> i32 {
+        self.borrowed_ref().get_data_position()
+    }
+
+    /// Returns the total size of the parcel.
+    pub fn get_data_size(&self) -> i32 {
+        self.borrowed_ref().get_data_size()
+    }
+
+    /// Move the current read/write position in the parcel.
+    ///
+    /// # Safety
+    ///
+    /// This method is safe if `pos` is less than the current size of the parcel
+    /// data buffer. Otherwise, we are relying on correct bounds checking in the
+    /// Parcel C++ code on every subsequent read or write to this parcel. If all
+    /// accesses are bounds checked, this call is still safe, but we can't rely
+    /// on that.
+    pub unsafe fn set_data_position(&self, pos: i32) -> Result<()> {
+        self.borrowed_ref().set_data_position(pos)
+    }
+
+    /// Append a subset of another parcel.
+    ///
+    /// This appends `size` bytes of data from `other` starting at offset
+    /// `start` to the current parcel, or returns an error if not possible.
+    pub fn append_from(&mut self, other: &impl AsNative<sys::AParcel>, start: i32, size: i32) -> Result<()> {
+        self.borrowed().append_from(other, start, size)
+    }
+
+    /// Append the contents of another parcel.
+    pub fn append_all_from(&mut self, other: &impl AsNative<sys::AParcel>) -> Result<()> {
+        self.borrowed().append_all_from(other)
     }
 }
 
 // Data deserialization methods
-impl Parcel {
-    /// Attempt to read a type that implements [`Deserialize`] from this
-    /// `Parcel`.
+impl<'a> BorrowedParcel<'a> {
+    /// Attempt to read a type that implements [`Deserialize`] from this parcel.
     pub fn read<D: Deserialize>(&self) -> Result<D> {
         D::deserialize(self)
     }
 
-    /// Attempt to read a type that implements [`Deserialize`] from this
-    /// `Parcel` onto an existing value. This operation will overwrite the old
-    /// value partially or completely, depending on how much data is available.
+    /// Attempt to read a type that implements [`Deserialize`] from this parcel
+    /// onto an existing value. This operation will overwrite the old value
+    /// partially or completely, depending on how much data is available.
     pub fn read_onto<D: Deserialize>(&self, x: &mut D) -> Result<()> {
         x.deserialize_from(self)
     }
@@ -413,9 +490,9 @@
     /// });
     /// ```
     ///
-    pub fn sized_read<F>(&self, mut f: F) -> Result<()>
+    pub fn sized_read<F>(&self, f: F) -> Result<()>
     where
-        for<'a> F: FnMut(ReadableSubParcel<'a>) -> Result<()>
+        for<'b> F: FnOnce(ReadableSubParcel<'b>) -> Result<()>
     {
         let start = self.get_data_position();
         let parcelable_size: i32 = self.read()?;
@@ -430,7 +507,10 @@
         }
 
         let subparcel = ReadableSubParcel {
-            parcel: self,
+            parcel: BorrowedParcel {
+                ptr: self.ptr,
+                _lifetime: PhantomData,
+            },
             end_position: end,
         };
         f(subparcel)?;
@@ -444,8 +524,8 @@
         Ok(())
     }
 
-    /// Read a vector size from the `Parcel` and resize the given output vector
-    /// to be correctly sized for that amount of data.
+    /// Read a vector size from the parcel and resize the given output vector to
+    /// be correctly sized for that amount of data.
     ///
     /// This method is used in AIDL-generated server side code for methods that
     /// take a mutable slice reference parameter.
@@ -463,7 +543,7 @@
         Ok(())
     }
 
-    /// Read a vector size from the `Parcel` and either create a correctly sized
+    /// Read a vector size from the parcel and either create a correctly sized
     /// vector for that amount of data or set the output parameter to None if
     /// the vector should be null.
     ///
@@ -491,7 +571,7 @@
 
 /// A segment of a readable parcel, used for [`Parcel::sized_read`].
 pub struct ReadableSubParcel<'a> {
-    parcel: &'a Parcel,
+    parcel: BorrowedParcel<'a>,
     end_position: i32,
 }
 
@@ -501,7 +581,7 @@
         // The caller should have checked this,
         // but it can't hurt to double-check
         assert!(self.has_more_data());
-        D::deserialize(self.parcel)
+        D::deserialize(&self.parcel)
     }
 
     /// Check if the sub-parcel has more data to read
@@ -510,11 +590,82 @@
     }
 }
 
-// Internal APIs
 impl Parcel {
+    /// Attempt to read a type that implements [`Deserialize`] from this parcel.
+    pub fn read<D: Deserialize>(&self) -> Result<D> {
+        self.borrowed_ref().read()
+    }
+
+    /// Attempt to read a type that implements [`Deserialize`] from this parcel
+    /// onto an existing value. This operation will overwrite the old value
+    /// partially or completely, depending on how much data is available.
+    pub fn read_onto<D: Deserialize>(&self, x: &mut D) -> Result<()> {
+        self.borrowed_ref().read_onto(x)
+    }
+
+    /// Safely read a sized parcelable.
+    ///
+    /// Read the size of a parcelable, compute the end position
+    /// of that parcelable, then build a sized readable sub-parcel
+    /// and call a closure with the sub-parcel as its parameter.
+    /// The closure can keep reading data from the sub-parcel
+    /// until it runs out of input data. The closure is responsible
+    /// for calling [`ReadableSubParcel::has_more_data`] to check for
+    /// more data before every read, at least until Rust generators
+    /// are stabilized.
+    /// After the closure returns, skip to the end of the current
+    /// parcelable regardless of how much the closure has read.
+    ///
+    /// # Examples
+    ///
+    /// ```no_run
+    /// let mut parcelable = Default::default();
+    /// parcel.sized_read(|subparcel| {
+    ///     if subparcel.has_more_data() {
+    ///         parcelable.a = subparcel.read()?;
+    ///     }
+    ///     if subparcel.has_more_data() {
+    ///         parcelable.b = subparcel.read()?;
+    ///     }
+    ///     Ok(())
+    /// });
+    /// ```
+    ///
+    pub fn sized_read<F>(&self, f: F) -> Result<()>
+    where
+        for<'b> F: FnOnce(ReadableSubParcel<'b>) -> Result<()>
+    {
+        self.borrowed_ref().sized_read(f)
+    }
+
+    /// Read a vector size from the parcel and resize the given output vector to
+    /// be correctly sized for that amount of data.
+    ///
+    /// This method is used in AIDL-generated server side code for methods that
+    /// take a mutable slice reference parameter.
+    pub fn resize_out_vec<D: Default + Deserialize>(&self, out_vec: &mut Vec<D>) -> Result<()> {
+        self.borrowed_ref().resize_out_vec(out_vec)
+    }
+
+    /// Read a vector size from the parcel and either create a correctly sized
+    /// vector for that amount of data or set the output parameter to None if
+    /// the vector should be null.
+    ///
+    /// This method is used in AIDL-generated server side code for methods that
+    /// take a mutable slice reference parameter.
+    pub fn resize_nullable_out_vec<D: Default + Deserialize>(
+        &self,
+        out_vec: &mut Option<Vec<D>>,
+    ) -> Result<()> {
+        self.borrowed_ref().resize_nullable_out_vec(out_vec)
+    }
+}
+
+// Internal APIs
+impl<'a> BorrowedParcel<'a> {
     pub(crate) fn write_binder(&mut self, binder: Option<&SpIBinder>) -> Result<()> {
         unsafe {
-            // Safety: `Parcel` always contains a valid pointer to an
+            // Safety: `BorrowedParcel` always contains a valid pointer to an
             // `AParcel`. `AsNative` for `Option<SpIBinder>` will either return
             // null or a valid pointer to an `AIBinder`, both of which are
             // valid, safe inputs to `AParcel_writeStrongBinder`.
@@ -534,7 +685,7 @@
     pub(crate) fn read_binder(&self) -> Result<Option<SpIBinder>> {
         let mut binder = ptr::null_mut();
         let status = unsafe {
-            // Safety: `Parcel` always contains a valid pointer to an
+            // Safety: `BorrowedParcel` always contains a valid pointer to an
             // `AParcel`. We pass a valid, mutable out pointer to the `binder`
             // parameter. After this call, `binder` will be either null or a
             // valid pointer to an `AIBinder` owned by the caller.
@@ -554,25 +705,11 @@
 impl Drop for Parcel {
     fn drop(&mut self) {
         // Run the C++ Parcel complete object destructor
-        if let Self::Owned(ptr) = *self {
-            unsafe {
-                // Safety: `Parcel` always contains a valid pointer to an
-                // `AParcel`. If we own the parcel, we can safely delete it
-                // here.
-                sys::AParcel_delete(ptr)
-            }
-        }
-    }
-}
-
-impl Drop for OwnedParcel {
-    fn drop(&mut self) {
-        // Run the C++ Parcel complete object destructor
         unsafe {
-            // Safety: `OwnedParcel` always contains a valid pointer to an
+            // Safety: `Parcel` always contains a valid pointer to an
             // `AParcel`. Since we own the parcel, we can safely delete it
             // here.
-            sys::AParcel_delete(self.ptr)
+            sys::AParcel_delete(self.ptr.as_ptr())
         }
     }
 }
@@ -584,9 +721,9 @@
     }
 }
 
-impl fmt::Debug for OwnedParcel {
+impl<'a> fmt::Debug for BorrowedParcel<'a> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("OwnedParcel")
+        f.debug_struct("BorrowedParcel")
             .finish()
     }
 }
@@ -608,7 +745,7 @@
     assert_eq!(parcel.read::<Option<String>>(), Ok(None));
     assert_eq!(parcel.read::<String>(), Err(StatusCode::UNEXPECTED_NULL));
 
-    assert_eq!(parcel.read_binder().err(), Some(StatusCode::BAD_TYPE));
+    assert_eq!(parcel.borrowed_ref().read_binder().err(), Some(StatusCode::BAD_TYPE));
 
     parcel.write(&1i32).unwrap();
 
diff --git a/libs/binder/rust/src/parcel/file_descriptor.rs b/libs/binder/rust/src/parcel/file_descriptor.rs
index 8bcc5d0..b0dea94 100644
--- a/libs/binder/rust/src/parcel/file_descriptor.rs
+++ b/libs/binder/rust/src/parcel/file_descriptor.rs
@@ -15,7 +15,7 @@
  */
 
 use super::{
-    Deserialize, DeserializeArray, DeserializeOption, Parcel, Serialize, SerializeArray,
+    Deserialize, DeserializeArray, DeserializeOption, BorrowedParcel, Serialize, SerializeArray,
     SerializeOption,
 };
 use crate::binder::AsNative;
@@ -61,7 +61,7 @@
 }
 
 impl Serialize for ParcelFileDescriptor {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         let fd = self.0.as_raw_fd();
         let status = unsafe {
             // Safety: `Parcel` always contains a valid pointer to an
@@ -78,7 +78,7 @@
 impl SerializeArray for ParcelFileDescriptor {}
 
 impl SerializeOption for ParcelFileDescriptor {
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         if let Some(f) = this {
             f.serialize(parcel)
         } else {
@@ -95,7 +95,7 @@
 }
 
 impl DeserializeOption for ParcelFileDescriptor {
-    fn deserialize_option(parcel: &Parcel) -> Result<Option<Self>> {
+    fn deserialize_option(parcel: &BorrowedParcel<'_>) -> Result<Option<Self>> {
         let mut fd = -1i32;
         unsafe {
             // Safety: `Parcel` always contains a valid pointer to an
@@ -125,7 +125,7 @@
 }
 
 impl Deserialize for ParcelFileDescriptor {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         Deserialize::deserialize(parcel)
             .transpose()
             .unwrap_or(Err(StatusCode::UNEXPECTED_NULL))
diff --git a/libs/binder/rust/src/parcel/parcelable.rs b/libs/binder/rust/src/parcel/parcelable.rs
index db9d8b0..9007cba 100644
--- a/libs/binder/rust/src/parcel/parcelable.rs
+++ b/libs/binder/rust/src/parcel/parcelable.rs
@@ -16,14 +16,14 @@
 
 use crate::binder::{AsNative, FromIBinder, Stability, Strong};
 use crate::error::{status_result, status_t, Result, Status, StatusCode};
-use crate::parcel::Parcel;
+use crate::parcel::BorrowedParcel;
 use crate::proxy::SpIBinder;
 use crate::sys;
 
 use std::convert::{TryFrom, TryInto};
 use std::ffi::c_void;
 use std::os::raw::{c_char, c_ulong};
-use std::mem::{self, MaybeUninit};
+use std::mem::{self, MaybeUninit, ManuallyDrop};
 use std::ptr;
 use std::slice;
 
@@ -39,7 +39,7 @@
     /// `Serialize::serialize` and its variants are generally
     /// preferred over this function, since the former also
     /// prepend a header.
-    fn write_to_parcel(&self, parcel: &mut Parcel) -> Result<()>;
+    fn write_to_parcel(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()>;
 
     /// Internal deserialization function for parcelables.
     ///
@@ -47,26 +47,26 @@
     /// `Deserialize::deserialize` and its variants are generally
     /// preferred over this function, since the former also
     /// parse the additional header.
-    fn read_from_parcel(&mut self, parcel: &Parcel) -> Result<()>;
+    fn read_from_parcel(&mut self, parcel: &BorrowedParcel<'_>) -> Result<()>;
 }
 
 /// A struct whose instances can be written to a [`Parcel`].
 // Might be able to hook this up as a serde backend in the future?
 pub trait Serialize {
     /// Serialize this instance into the given [`Parcel`].
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()>;
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()>;
 }
 
 /// A struct whose instances can be restored from a [`Parcel`].
 // Might be able to hook this up as a serde backend in the future?
 pub trait Deserialize: Sized {
     /// Deserialize an instance from the given [`Parcel`].
-    fn deserialize(parcel: &Parcel) -> Result<Self>;
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self>;
 
     /// Deserialize an instance from the given [`Parcel`] onto the
     /// current object. This operation will overwrite the old value
     /// partially or completely, depending on how much data is available.
-    fn deserialize_from(&mut self, parcel: &Parcel) -> Result<()> {
+    fn deserialize_from(&mut self, parcel: &BorrowedParcel<'_>) -> Result<()> {
         *self = Self::deserialize(parcel)?;
         Ok(())
     }
@@ -80,8 +80,8 @@
 // We want the default implementation for most types, but an override for
 // a few special ones like `readByteArray` for `u8`.
 pub trait SerializeArray: Serialize + Sized {
-    /// Serialize an array of this type into the given [`Parcel`].
-    fn serialize_array(slice: &[Self], parcel: &mut Parcel) -> Result<()> {
+    /// Serialize an array of this type into the given parcel.
+    fn serialize_array(slice: &[Self], parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         let res = unsafe {
             // Safety: Safe FFI, slice will always be a safe pointer to pass.
             sys::AParcel_writeParcelableArray(
@@ -111,7 +111,7 @@
 
     let slice: &[T] = slice::from_raw_parts(array.cast(), index+1);
 
-    let mut parcel = match Parcel::borrowed(parcel) {
+    let mut parcel = match BorrowedParcel::from_raw(parcel) {
         None => return StatusCode::UNEXPECTED_NULL as status_t,
         Some(p) => p,
     };
@@ -126,8 +126,8 @@
 /// Defaults to calling Deserialize::deserialize() manually for every element,
 /// but can be overridden for custom implementations like `readByteArray`.
 pub trait DeserializeArray: Deserialize {
-    /// Deserialize an array of type from the given [`Parcel`].
-    fn deserialize_array(parcel: &Parcel) -> Result<Option<Vec<Self>>> {
+    /// Deserialize an array of this type from the given parcel.
+    fn deserialize_array(parcel: &BorrowedParcel<'_>) -> Result<Option<Vec<Self>>> {
         let mut vec: Option<Vec<MaybeUninit<Self>>> = None;
         let res = unsafe {
             // Safety: Safe FFI, vec is the correct opaque type expected by
@@ -173,7 +173,7 @@
         None => return StatusCode::BAD_INDEX as status_t,
     };
 
-    let parcel = match Parcel::borrowed(parcel as *mut _) {
+    let parcel = match BorrowedParcel::from_raw(parcel as *mut _) {
         None => return StatusCode::UNEXPECTED_NULL as status_t,
         Some(p) => p,
     };
@@ -205,8 +205,8 @@
 // We also use it to provide a default implementation for AIDL-generated
 // parcelables.
 pub trait SerializeOption: Serialize {
-    /// Serialize an Option of this type into the given [`Parcel`].
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    /// Serialize an Option of this type into the given parcel.
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         if let Some(inner) = this {
             parcel.write(&NON_NULL_PARCELABLE_FLAG)?;
             parcel.write(inner)
@@ -218,8 +218,8 @@
 
 /// Helper trait for types that can be nullable when deserialized.
 pub trait DeserializeOption: Deserialize {
-    /// Deserialize an Option of this type from the given [`Parcel`].
-    fn deserialize_option(parcel: &Parcel) -> Result<Option<Self>> {
+    /// Deserialize an Option of this type from the given parcel.
+    fn deserialize_option(parcel: &BorrowedParcel<'_>) -> Result<Option<Self>> {
         let null: i32 = parcel.read()?;
         if null == NULL_PARCELABLE_FLAG {
             Ok(None)
@@ -228,10 +228,10 @@
         }
     }
 
-    /// Deserialize an Option of this type from the given [`Parcel`] onto the
+    /// Deserialize an Option of this type from the given parcel onto the
     /// current object. This operation will overwrite the current value
     /// partially or completely, depending on how much data is available.
-    fn deserialize_option_from(this: &mut Option<Self>, parcel: &Parcel) -> Result<()> {
+    fn deserialize_option_from(this: &mut Option<Self>, parcel: &BorrowedParcel<'_>) -> Result<()> {
         *this = Self::deserialize_option(parcel)?;
         Ok(())
     }
@@ -297,10 +297,23 @@
     };
 }
 
+/// Safety: All elements in the vector must be properly initialized.
+unsafe fn vec_assume_init<T>(vec: Vec<MaybeUninit<T>>) -> Vec<T> {
+    // We can convert from Vec<MaybeUninit<T>> to Vec<T> because MaybeUninit<T>
+    // has the same alignment and size as T, so the pointer to the vector
+    // allocation will be compatible.
+    let mut vec = ManuallyDrop::new(vec);
+    Vec::from_raw_parts(
+        vec.as_mut_ptr().cast(),
+        vec.len(),
+        vec.capacity(),
+    )
+}
+
 macro_rules! impl_parcelable {
     {Serialize, $ty:ty, $write_fn:path} => {
         impl Serialize for $ty {
-            fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+            fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
                 unsafe {
                     // Safety: `Parcel` always contains a valid pointer to an
                     // `AParcel`, and any `$ty` literal value is safe to pass to
@@ -313,7 +326,7 @@
 
     {Deserialize, $ty:ty, $read_fn:path} => {
         impl Deserialize for $ty {
-            fn deserialize(parcel: &Parcel) -> Result<Self> {
+            fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
                 let mut val = Self::default();
                 unsafe {
                     // Safety: `Parcel` always contains a valid pointer to an
@@ -329,7 +342,7 @@
 
     {SerializeArray, $ty:ty, $write_array_fn:path} => {
         impl SerializeArray for $ty {
-            fn serialize_array(slice: &[Self], parcel: &mut Parcel) -> Result<()> {
+            fn serialize_array(slice: &[Self], parcel: &mut BorrowedParcel<'_>) -> Result<()> {
                 let status = unsafe {
                     // Safety: `Parcel` always contains a valid pointer to an
                     // `AParcel`. If the slice is > 0 length, `slice.as_ptr()`
@@ -353,7 +366,7 @@
 
     {DeserializeArray, $ty:ty, $read_array_fn:path} => {
         impl DeserializeArray for $ty {
-            fn deserialize_array(parcel: &Parcel) -> Result<Option<Vec<Self>>> {
+            fn deserialize_array(parcel: &BorrowedParcel<'_>) -> Result<Option<Vec<Self>>> {
                 let mut vec: Option<Vec<MaybeUninit<Self>>> = None;
                 let status = unsafe {
                     // Safety: `Parcel` always contains a valid pointer to an
@@ -371,11 +384,8 @@
                     // Safety: We are assuming that the NDK correctly
                     // initialized every element of the vector by now, so we
                     // know that all the MaybeUninits are now properly
-                    // initialized. We can transmute from Vec<MaybeUninit<T>> to
-                    // Vec<T> because MaybeUninit<T> has the same alignment and
-                    // size as T, so the pointer to the vector allocation will
-                    // be compatible.
-                    mem::transmute(vec)
+                    // initialized.
+                    vec.map(|vec| vec_assume_init(vec))
                 };
                 Ok(vec)
             }
@@ -443,19 +453,19 @@
 impl DeserializeArray for bool {}
 
 impl Serialize for u8 {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         (*self as i8).serialize(parcel)
     }
 }
 
 impl Deserialize for u8 {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         i8::deserialize(parcel).map(|v| v as u8)
     }
 }
 
 impl SerializeArray for u8 {
-    fn serialize_array(slice: &[Self], parcel: &mut Parcel) -> Result<()> {
+    fn serialize_array(slice: &[Self], parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         let status = unsafe {
             // Safety: `Parcel` always contains a valid pointer to an
             // `AParcel`. If the slice is > 0 length, `slice.as_ptr()` will be a
@@ -474,19 +484,19 @@
 }
 
 impl Serialize for i16 {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         (*self as u16).serialize(parcel)
     }
 }
 
 impl Deserialize for i16 {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         u16::deserialize(parcel).map(|v| v as i16)
     }
 }
 
 impl SerializeArray for i16 {
-    fn serialize_array(slice: &[Self], parcel: &mut Parcel) -> Result<()> {
+    fn serialize_array(slice: &[Self], parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         let status = unsafe {
             // Safety: `Parcel` always contains a valid pointer to an
             // `AParcel`. If the slice is > 0 length, `slice.as_ptr()` will be a
@@ -505,7 +515,7 @@
 }
 
 impl SerializeOption for str {
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         match this {
             None => unsafe {
                 // Safety: `Parcel` always contains a valid pointer to an
@@ -541,7 +551,7 @@
 }
 
 impl Serialize for str {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         Some(self).serialize(parcel)
     }
 }
@@ -549,7 +559,7 @@
 impl SerializeArray for &str {}
 
 impl Serialize for String {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         Some(self.as_str()).serialize(parcel)
     }
 }
@@ -557,13 +567,13 @@
 impl SerializeArray for String {}
 
 impl SerializeOption for String {
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         SerializeOption::serialize_option(this.map(String::as_str), parcel)
     }
 }
 
 impl Deserialize for Option<String> {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         let mut vec: Option<Vec<u8>> = None;
         let status = unsafe {
             // Safety: `Parcel` always contains a valid pointer to an `AParcel`.
@@ -591,7 +601,7 @@
 impl DeserializeArray for Option<String> {}
 
 impl Deserialize for String {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         Deserialize::deserialize(parcel)
             .transpose()
             .unwrap_or(Err(StatusCode::UNEXPECTED_NULL))
@@ -601,19 +611,19 @@
 impl DeserializeArray for String {}
 
 impl<T: SerializeArray> Serialize for [T] {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         SerializeArray::serialize_array(self, parcel)
     }
 }
 
 impl<T: SerializeArray> Serialize for Vec<T> {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         SerializeArray::serialize_array(&self[..], parcel)
     }
 }
 
 impl<T: SerializeArray> SerializeOption for [T] {
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         if let Some(v) = this {
             SerializeArray::serialize_array(v, parcel)
         } else {
@@ -623,13 +633,13 @@
 }
 
 impl<T: SerializeArray> SerializeOption for Vec<T> {
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         SerializeOption::serialize_option(this.map(Vec::as_slice), parcel)
     }
 }
 
 impl<T: DeserializeArray> Deserialize for Vec<T> {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         DeserializeArray::deserialize_array(parcel)
             .transpose()
             .unwrap_or(Err(StatusCode::UNEXPECTED_NULL))
@@ -637,25 +647,25 @@
 }
 
 impl<T: DeserializeArray> DeserializeOption for Vec<T> {
-    fn deserialize_option(parcel: &Parcel) -> Result<Option<Self>> {
+    fn deserialize_option(parcel: &BorrowedParcel<'_>) -> Result<Option<Self>> {
         DeserializeArray::deserialize_array(parcel)
     }
 }
 
 impl Serialize for Stability {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         i32::from(*self).serialize(parcel)
     }
 }
 
 impl Deserialize for Stability {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         i32::deserialize(parcel).and_then(Stability::try_from)
     }
 }
 
 impl Serialize for Status {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         unsafe {
             // Safety: `Parcel` always contains a valid pointer to an `AParcel`
             // and `Status` always contains a valid pointer to an `AStatus`, so
@@ -670,7 +680,7 @@
 }
 
 impl Deserialize for Status {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         let mut status_ptr = ptr::null_mut();
         let ret_status = unsafe {
             // Safety: `Parcel` always contains a valid pointer to an
@@ -691,13 +701,13 @@
 }
 
 impl<T: Serialize + FromIBinder + ?Sized> Serialize for Strong<T> {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         Serialize::serialize(&**self, parcel)
     }
 }
 
 impl<T: SerializeOption + FromIBinder + ?Sized> SerializeOption for Strong<T> {
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         SerializeOption::serialize_option(this.map(|b| &**b), parcel)
     }
 }
@@ -705,14 +715,14 @@
 impl<T: Serialize + FromIBinder + ?Sized> SerializeArray for Strong<T> {}
 
 impl<T: FromIBinder + ?Sized> Deserialize for Strong<T> {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         let ibinder: SpIBinder = parcel.read()?;
         FromIBinder::try_from(ibinder)
     }
 }
 
 impl<T: FromIBinder + ?Sized> DeserializeOption for Strong<T> {
-    fn deserialize_option(parcel: &Parcel) -> Result<Option<Self>> {
+    fn deserialize_option(parcel: &BorrowedParcel<'_>) -> Result<Option<Self>> {
         let ibinder: Option<SpIBinder> = parcel.read()?;
         ibinder.map(FromIBinder::try_from).transpose()
     }
@@ -722,29 +732,29 @@
 
 // We need these to support Option<&T> for all T
 impl<T: Serialize + ?Sized> Serialize for &T {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         Serialize::serialize(*self, parcel)
     }
 }
 
 impl<T: SerializeOption + ?Sized> SerializeOption for &T {
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         SerializeOption::serialize_option(this.copied(), parcel)
     }
 }
 
 impl<T: SerializeOption> Serialize for Option<T> {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         SerializeOption::serialize_option(self.as_ref(), parcel)
     }
 }
 
 impl<T: DeserializeOption> Deserialize for Option<T> {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         DeserializeOption::deserialize_option(parcel)
     }
 
-    fn deserialize_from(&mut self, parcel: &Parcel) -> Result<()> {
+    fn deserialize_from(&mut self, parcel: &BorrowedParcel<'_>) -> Result<()> {
         DeserializeOption::deserialize_option_from(self, parcel)
     }
 }
@@ -762,7 +772,7 @@
         impl $crate::parcel::Serialize for $parcelable {
             fn serialize(
                 &self,
-                parcel: &mut $crate::parcel::Parcel,
+                parcel: &mut $crate::parcel::BorrowedParcel<'_>,
             ) -> $crate::Result<()> {
                 <Self as $crate::parcel::SerializeOption>::serialize_option(
                     Some(self),
@@ -776,7 +786,7 @@
         impl $crate::parcel::SerializeOption for $parcelable {
             fn serialize_option(
                 this: Option<&Self>,
-                parcel: &mut $crate::parcel::Parcel,
+                parcel: &mut $crate::parcel::BorrowedParcel<'_>,
             ) -> $crate::Result<()> {
                 if let Some(this) = this {
                     use $crate::parcel::Parcelable;
@@ -796,13 +806,12 @@
 /// `Deserialize`, `DeserializeArray` and `DeserializeOption` for
 /// structured parcelables. The target type must implement the
 /// `Parcelable` trait.
-/// ```
 #[macro_export]
 macro_rules! impl_deserialize_for_parcelable {
     ($parcelable:ident) => {
         impl $crate::parcel::Deserialize for $parcelable {
             fn deserialize(
-                parcel: &$crate::parcel::Parcel,
+                parcel: &$crate::parcel::BorrowedParcel<'_>,
             ) -> $crate::Result<Self> {
                 $crate::parcel::DeserializeOption::deserialize_option(parcel)
                     .transpose()
@@ -810,7 +819,7 @@
             }
             fn deserialize_from(
                 &mut self,
-                parcel: &$crate::parcel::Parcel,
+                parcel: &$crate::parcel::BorrowedParcel<'_>,
             ) -> $crate::Result<()> {
                 let status: i32 = parcel.read()?;
                 if status == $crate::parcel::NULL_PARCELABLE_FLAG {
@@ -826,7 +835,7 @@
 
         impl $crate::parcel::DeserializeOption for $parcelable {
             fn deserialize_option(
-                parcel: &$crate::parcel::Parcel,
+                parcel: &$crate::parcel::BorrowedParcel<'_>,
             ) -> $crate::Result<Option<Self>> {
                 let mut result = None;
                 Self::deserialize_option_from(&mut result, parcel)?;
@@ -834,7 +843,7 @@
             }
             fn deserialize_option_from(
                 this: &mut Option<Self>,
-                parcel: &$crate::parcel::Parcel,
+                parcel: &$crate::parcel::BorrowedParcel<'_>,
             ) -> $crate::Result<()> {
                 let status: i32 = parcel.read()?;
                 if status == $crate::parcel::NULL_PARCELABLE_FLAG {
@@ -851,326 +860,332 @@
 }
 
 impl<T: Serialize> Serialize for Box<T> {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         Serialize::serialize(&**self, parcel)
     }
 }
 
 impl<T: Deserialize> Deserialize for Box<T> {
-    fn deserialize(parcel: &Parcel) -> Result<Self> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
         Deserialize::deserialize(parcel).map(Box::new)
     }
 }
 
 impl<T: SerializeOption> SerializeOption for Box<T> {
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         SerializeOption::serialize_option(this.map(|inner| &**inner), parcel)
     }
 }
 
 impl<T: DeserializeOption> DeserializeOption for Box<T> {
-    fn deserialize_option(parcel: &Parcel) -> Result<Option<Self>> {
+    fn deserialize_option(parcel: &BorrowedParcel<'_>) -> Result<Option<Self>> {
         DeserializeOption::deserialize_option(parcel).map(|t| t.map(Box::new))
     }
 }
 
-#[test]
-fn test_custom_parcelable() {
-    struct Custom(u32, bool, String, Vec<String>);
+#[cfg(test)]
+mod tests {
+    use crate::Parcel;
+    use super::*;
 
-    impl Serialize for Custom {
-        fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
-            self.0.serialize(parcel)?;
-            self.1.serialize(parcel)?;
-            self.2.serialize(parcel)?;
-            self.3.serialize(parcel)
+    #[test]
+    fn test_custom_parcelable() {
+        struct Custom(u32, bool, String, Vec<String>);
+
+        impl Serialize for Custom {
+            fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
+                self.0.serialize(parcel)?;
+                self.1.serialize(parcel)?;
+                self.2.serialize(parcel)?;
+                self.3.serialize(parcel)
+            }
         }
-    }
 
-    impl Deserialize for Custom {
-        fn deserialize(parcel: &Parcel) -> Result<Self> {
-            Ok(Custom(
-                parcel.read()?,
-                parcel.read()?,
-                parcel.read()?,
-                parcel.read::<Option<Vec<String>>>()?.unwrap(),
-            ))
+        impl Deserialize for Custom {
+            fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
+                Ok(Custom(
+                    parcel.read()?,
+                    parcel.read()?,
+                    parcel.read()?,
+                    parcel.read::<Option<Vec<String>>>()?.unwrap(),
+                ))
+            }
         }
+
+        let string8 = "Custom Parcelable".to_string();
+
+        let s1 = "str1".to_string();
+        let s2 = "str2".to_string();
+        let s3 = "str3".to_string();
+
+        let strs = vec![s1, s2, s3];
+
+        let custom = Custom(123_456_789, true, string8, strs);
+
+        let mut parcel = Parcel::new();
+        let start = parcel.get_data_position();
+
+        assert!(custom.serialize(&mut parcel.borrowed()).is_ok());
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let custom2 = Custom::deserialize(parcel.borrowed_ref()).unwrap();
+
+        assert_eq!(custom2.0, 123_456_789);
+        assert!(custom2.1);
+        assert_eq!(custom2.2, custom.2);
+        assert_eq!(custom2.3, custom.3);
     }
 
-    let string8 = "Custom Parcelable".to_string();
+    #[test]
+    #[allow(clippy::excessive_precision)]
+    fn test_slice_parcelables() {
+        let bools = [true, false, false, true];
 
-    let s1 = "str1".to_string();
-    let s2 = "str2".to_string();
-    let s3 = "str3".to_string();
+        let mut parcel = Parcel::new();
+        let start = parcel.get_data_position();
 
-    let strs = vec![s1, s2, s3];
+        assert!(bools.serialize(&mut parcel.borrowed()).is_ok());
 
-    let custom = Custom(123_456_789, true, string8, strs);
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
 
-    let mut parcel = Parcel::new();
-    let start = parcel.get_data_position();
+        assert_eq!(parcel.read::<u32>().unwrap(), 4);
+        assert_eq!(parcel.read::<u32>().unwrap(), 1);
+        assert_eq!(parcel.read::<u32>().unwrap(), 0);
+        assert_eq!(parcel.read::<u32>().unwrap(), 0);
+        assert_eq!(parcel.read::<u32>().unwrap(), 1);
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
 
-    assert!(custom.serialize(&mut parcel).is_ok());
+        let vec = Vec::<bool>::deserialize(parcel.borrowed_ref()).unwrap();
 
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
+        assert_eq!(vec, [true, false, false, true]);
+
+        let u8s = [101u8, 255, 42, 117];
+
+        let mut parcel = Parcel::new();
+        let start = parcel.get_data_position();
+
+        assert!(parcel.write(&u8s[..]).is_ok());
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
+        assert_eq!(parcel.read::<u32>().unwrap(), 0x752aff65); // bytes
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<u8>::deserialize(parcel.borrowed_ref()).unwrap();
+        assert_eq!(vec, [101, 255, 42, 117]);
+
+        let i8s = [-128i8, 127, 42, -117];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        assert!(parcel.write(&i8s[..]).is_ok());
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
+        assert_eq!(parcel.read::<u32>().unwrap(), 0x8b2a7f80); // bytes
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<u8>::deserialize(parcel.borrowed_ref()).unwrap();
+        assert_eq!(vec, [-128i8 as u8, 127, 42, -117i8 as u8]);
+
+        let u16s = [u16::max_value(), 12_345, 42, 117];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+        assert!(u16s.serialize(&mut parcel.borrowed()).is_ok());
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
+        assert_eq!(parcel.read::<u32>().unwrap(), 0xffff); // u16::max_value()
+        assert_eq!(parcel.read::<u32>().unwrap(), 12345); // 12,345
+        assert_eq!(parcel.read::<u32>().unwrap(), 42); // 42
+        assert_eq!(parcel.read::<u32>().unwrap(), 117); // 117
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<u16>::deserialize(parcel.borrowed_ref()).unwrap();
+
+        assert_eq!(vec, [u16::max_value(), 12_345, 42, 117]);
+
+        let i16s = [i16::max_value(), i16::min_value(), 42, -117];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+        assert!(i16s.serialize(&mut parcel.borrowed()).is_ok());
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
+        assert_eq!(parcel.read::<u32>().unwrap(), 0x7fff); // i16::max_value()
+        assert_eq!(parcel.read::<u32>().unwrap(), 0x8000); // i16::min_value()
+        assert_eq!(parcel.read::<u32>().unwrap(), 42); // 42
+        assert_eq!(parcel.read::<u32>().unwrap(), 0xff8b); // -117
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<i16>::deserialize(parcel.borrowed_ref()).unwrap();
+
+        assert_eq!(vec, [i16::max_value(), i16::min_value(), 42, -117]);
+
+        let u32s = [u32::max_value(), 12_345, 42, 117];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+        assert!(u32s.serialize(&mut parcel.borrowed()).is_ok());
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
+        assert_eq!(parcel.read::<u32>().unwrap(), 0xffffffff); // u32::max_value()
+        assert_eq!(parcel.read::<u32>().unwrap(), 12345); // 12,345
+        assert_eq!(parcel.read::<u32>().unwrap(), 42); // 42
+        assert_eq!(parcel.read::<u32>().unwrap(), 117); // 117
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<u32>::deserialize(parcel.borrowed_ref()).unwrap();
+
+        assert_eq!(vec, [u32::max_value(), 12_345, 42, 117]);
+
+        let i32s = [i32::max_value(), i32::min_value(), 42, -117];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+        assert!(i32s.serialize(&mut parcel.borrowed()).is_ok());
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
+        assert_eq!(parcel.read::<u32>().unwrap(), 0x7fffffff); // i32::max_value()
+        assert_eq!(parcel.read::<u32>().unwrap(), 0x80000000); // i32::min_value()
+        assert_eq!(parcel.read::<u32>().unwrap(), 42); // 42
+        assert_eq!(parcel.read::<u32>().unwrap(), 0xffffff8b); // -117
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<i32>::deserialize(parcel.borrowed_ref()).unwrap();
+
+        assert_eq!(vec, [i32::max_value(), i32::min_value(), 42, -117]);
+
+        let u64s = [u64::max_value(), 12_345, 42, 117];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+        assert!(u64s.serialize(&mut parcel.borrowed()).is_ok());
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<u64>::deserialize(parcel.borrowed_ref()).unwrap();
+
+        assert_eq!(vec, [u64::max_value(), 12_345, 42, 117]);
+
+        let i64s = [i64::max_value(), i64::min_value(), 42, -117];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+        assert!(i64s.serialize(&mut parcel.borrowed()).is_ok());
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<i64>::deserialize(parcel.borrowed_ref()).unwrap();
+
+        assert_eq!(vec, [i64::max_value(), i64::min_value(), 42, -117]);
+
+        let f32s = [
+            std::f32::NAN,
+            std::f32::INFINITY,
+            1.23456789,
+            std::f32::EPSILON,
+        ];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+        assert!(f32s.serialize(&mut parcel.borrowed()).is_ok());
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<f32>::deserialize(parcel.borrowed_ref()).unwrap();
+
+        // NAN != NAN so we can't use it in the assert_eq:
+        assert!(vec[0].is_nan());
+        assert_eq!(vec[1..], f32s[1..]);
+
+        let f64s = [
+            std::f64::NAN,
+            std::f64::INFINITY,
+            1.234567890123456789,
+            std::f64::EPSILON,
+        ];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+        assert!(f64s.serialize(&mut parcel.borrowed()).is_ok());
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<f64>::deserialize(parcel.borrowed_ref()).unwrap();
+
+        // NAN != NAN so we can't use it in the assert_eq:
+        assert!(vec[0].is_nan());
+        assert_eq!(vec[1..], f64s[1..]);
+
+        let s1 = "Hello, Binder!";
+        let s2 = "This is a utf8 string.";
+        let s3 = "Some more text here.";
+        let s4 = "Embedded nulls \0 \0";
+
+        let strs = [s1, s2, s3, s4];
+
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+        assert!(strs.serialize(&mut parcel.borrowed()).is_ok());
+        unsafe {
+            assert!(parcel.set_data_position(start).is_ok());
+        }
+
+        let vec = Vec::<String>::deserialize(parcel.borrowed_ref()).unwrap();
+
+        assert_eq!(vec, strs);
     }
-
-    let custom2 = Custom::deserialize(&parcel).unwrap();
-
-    assert_eq!(custom2.0, 123_456_789);
-    assert!(custom2.1);
-    assert_eq!(custom2.2, custom.2);
-    assert_eq!(custom2.3, custom.3);
-}
-
-#[test]
-#[allow(clippy::excessive_precision)]
-fn test_slice_parcelables() {
-    let bools = [true, false, false, true];
-
-    let mut parcel = Parcel::new();
-    let start = parcel.get_data_position();
-
-    assert!(bools.serialize(&mut parcel).is_ok());
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    assert_eq!(parcel.read::<u32>().unwrap(), 4);
-    assert_eq!(parcel.read::<u32>().unwrap(), 1);
-    assert_eq!(parcel.read::<u32>().unwrap(), 0);
-    assert_eq!(parcel.read::<u32>().unwrap(), 0);
-    assert_eq!(parcel.read::<u32>().unwrap(), 1);
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<bool>::deserialize(&parcel).unwrap();
-
-    assert_eq!(vec, [true, false, false, true]);
-
-    let u8s = [101u8, 255, 42, 117];
-
-    let mut parcel = Parcel::new();
-    let start = parcel.get_data_position();
-
-    assert!(parcel.write(&u8s[..]).is_ok());
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
-    assert_eq!(parcel.read::<u32>().unwrap(), 0x752aff65); // bytes
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<u8>::deserialize(&parcel).unwrap();
-    assert_eq!(vec, [101, 255, 42, 117]);
-
-    let i8s = [-128i8, 127, 42, -117];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    assert!(parcel.write(&i8s[..]).is_ok());
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
-    assert_eq!(parcel.read::<u32>().unwrap(), 0x8b2a7f80); // bytes
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<u8>::deserialize(&parcel).unwrap();
-    assert_eq!(vec, [-128i8 as u8, 127, 42, -117i8 as u8]);
-
-    let u16s = [u16::max_value(), 12_345, 42, 117];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-    assert!(u16s.serialize(&mut parcel).is_ok());
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
-    assert_eq!(parcel.read::<u32>().unwrap(), 0xffff); // u16::max_value()
-    assert_eq!(parcel.read::<u32>().unwrap(), 12345); // 12,345
-    assert_eq!(parcel.read::<u32>().unwrap(), 42); // 42
-    assert_eq!(parcel.read::<u32>().unwrap(), 117); // 117
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<u16>::deserialize(&parcel).unwrap();
-
-    assert_eq!(vec, [u16::max_value(), 12_345, 42, 117]);
-
-    let i16s = [i16::max_value(), i16::min_value(), 42, -117];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-    assert!(i16s.serialize(&mut parcel).is_ok());
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
-    assert_eq!(parcel.read::<u32>().unwrap(), 0x7fff); // i16::max_value()
-    assert_eq!(parcel.read::<u32>().unwrap(), 0x8000); // i16::min_value()
-    assert_eq!(parcel.read::<u32>().unwrap(), 42); // 42
-    assert_eq!(parcel.read::<u32>().unwrap(), 0xff8b); // -117
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<i16>::deserialize(&parcel).unwrap();
-
-    assert_eq!(vec, [i16::max_value(), i16::min_value(), 42, -117]);
-
-    let u32s = [u32::max_value(), 12_345, 42, 117];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-    assert!(u32s.serialize(&mut parcel).is_ok());
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
-    assert_eq!(parcel.read::<u32>().unwrap(), 0xffffffff); // u32::max_value()
-    assert_eq!(parcel.read::<u32>().unwrap(), 12345); // 12,345
-    assert_eq!(parcel.read::<u32>().unwrap(), 42); // 42
-    assert_eq!(parcel.read::<u32>().unwrap(), 117); // 117
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<u32>::deserialize(&parcel).unwrap();
-
-    assert_eq!(vec, [u32::max_value(), 12_345, 42, 117]);
-
-    let i32s = [i32::max_value(), i32::min_value(), 42, -117];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-    assert!(i32s.serialize(&mut parcel).is_ok());
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    assert_eq!(parcel.read::<u32>().unwrap(), 4); // 4 items
-    assert_eq!(parcel.read::<u32>().unwrap(), 0x7fffffff); // i32::max_value()
-    assert_eq!(parcel.read::<u32>().unwrap(), 0x80000000); // i32::min_value()
-    assert_eq!(parcel.read::<u32>().unwrap(), 42); // 42
-    assert_eq!(parcel.read::<u32>().unwrap(), 0xffffff8b); // -117
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<i32>::deserialize(&parcel).unwrap();
-
-    assert_eq!(vec, [i32::max_value(), i32::min_value(), 42, -117]);
-
-    let u64s = [u64::max_value(), 12_345, 42, 117];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-    assert!(u64s.serialize(&mut parcel).is_ok());
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<u64>::deserialize(&parcel).unwrap();
-
-    assert_eq!(vec, [u64::max_value(), 12_345, 42, 117]);
-
-    let i64s = [i64::max_value(), i64::min_value(), 42, -117];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-    assert!(i64s.serialize(&mut parcel).is_ok());
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<i64>::deserialize(&parcel).unwrap();
-
-    assert_eq!(vec, [i64::max_value(), i64::min_value(), 42, -117]);
-
-    let f32s = [
-        std::f32::NAN,
-        std::f32::INFINITY,
-        1.23456789,
-        std::f32::EPSILON,
-    ];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-    assert!(f32s.serialize(&mut parcel).is_ok());
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<f32>::deserialize(&parcel).unwrap();
-
-    // NAN != NAN so we can't use it in the assert_eq:
-    assert!(vec[0].is_nan());
-    assert_eq!(vec[1..], f32s[1..]);
-
-    let f64s = [
-        std::f64::NAN,
-        std::f64::INFINITY,
-        1.234567890123456789,
-        std::f64::EPSILON,
-    ];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-    assert!(f64s.serialize(&mut parcel).is_ok());
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<f64>::deserialize(&parcel).unwrap();
-
-    // NAN != NAN so we can't use it in the assert_eq:
-    assert!(vec[0].is_nan());
-    assert_eq!(vec[1..], f64s[1..]);
-
-    let s1 = "Hello, Binder!";
-    let s2 = "This is a utf8 string.";
-    let s3 = "Some more text here.";
-    let s4 = "Embedded nulls \0 \0";
-
-    let strs = [s1, s2, s3, s4];
-
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-    assert!(strs.serialize(&mut parcel).is_ok());
-    unsafe {
-        assert!(parcel.set_data_position(start).is_ok());
-    }
-
-    let vec = Vec::<String>::deserialize(&parcel).unwrap();
-
-    assert_eq!(vec, strs);
 }
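
The parcelable.rs hunks above migrate every Serialize/Deserialize impl from &Parcel / &mut Parcel to BorrowedParcel<'_>, with the owned Parcel only handed out at the top level. Below is a minimal sketch of the resulting call pattern, mirroring test_custom_parcelable above; the `Pair` type, `round_trip` helper, and exact import paths are illustrative, not part of this change:

    use binder::parcel::{BorrowedParcel, Deserialize, Parcel, Serialize};
    use binder::Result;

    struct Pair(i32, String);

    impl Serialize for Pair {
        fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
            self.0.serialize(parcel)?;
            self.1.serialize(parcel)
        }
    }

    impl Deserialize for Pair {
        fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<Self> {
            Ok(Pair(parcel.read()?, parcel.read()?))
        }
    }

    fn round_trip(value: &Pair) -> Result<Pair> {
        // An owned Parcel is created once; serialization and deserialization
        // then work on short-lived borrows of it.
        let mut parcel = Parcel::new();
        let start = parcel.get_data_position();
        value.serialize(&mut parcel.borrowed())?;
        // Safety: `start` was returned by get_data_position on this same parcel.
        unsafe { parcel.set_data_position(start)? };
        Pair::deserialize(parcel.borrowed_ref())
    }
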
diff --git a/libs/binder/rust/src/parcel/parcelable_holder.rs b/libs/binder/rust/src/parcel/parcelable_holder.rs
index bccfd2d..b4282b2 100644
--- a/libs/binder/rust/src/parcel/parcelable_holder.rs
+++ b/libs/binder/rust/src/parcel/parcelable_holder.rs
@@ -16,7 +16,7 @@
 
 use crate::binder::Stability;
 use crate::error::{Result, StatusCode};
-use crate::parcel::{OwnedParcel, Parcel, Parcelable};
+use crate::parcel::{Parcel, BorrowedParcel, Parcelable};
 use crate::{impl_deserialize_for_parcelable, impl_serialize_for_parcelable};
 
 use downcast_rs::{impl_downcast, DowncastSync};
@@ -50,7 +50,7 @@
         parcelable: Arc<dyn AnyParcelable>,
         name: String,
     },
-    Parcel(OwnedParcel),
+    Parcel(Parcel),
 }
 
 impl Default for ParcelableHolderData {
@@ -148,7 +148,6 @@
                 }
             }
             ParcelableHolderData::Parcel(ref mut parcel) => {
-                let parcel = parcel.borrowed();
                 unsafe {
                     // Safety: 0 should always be a valid position.
                     parcel.set_data_position(0)?;
@@ -160,7 +159,7 @@
                 }
 
                 let mut parcelable = T::default();
-                parcelable.read_from_parcel(&parcel)?;
+                parcelable.read_from_parcel(parcel.borrowed_ref())?;
 
                 let parcelable = Arc::new(parcelable);
                 let result = Arc::clone(&parcelable);
@@ -181,7 +180,7 @@
 impl_deserialize_for_parcelable!(ParcelableHolder);
 
 impl Parcelable for ParcelableHolder {
-    fn write_to_parcel(&self, parcel: &mut Parcel) -> Result<()> {
+    fn write_to_parcel(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         parcel.write(&self.stability)?;
 
         let mut data = self.data.lock().unwrap();
@@ -214,14 +213,13 @@
                 Ok(())
             }
             ParcelableHolderData::Parcel(ref mut p) => {
-                let p = p.borrowed();
                 parcel.write(&p.get_data_size())?;
-                parcel.append_all_from(&p)
+                parcel.append_all_from(&*p)
             }
         }
     }
 
-    fn read_from_parcel(&mut self, parcel: &Parcel) -> Result<()> {
+    fn read_from_parcel(&mut self, parcel: &BorrowedParcel<'_>) -> Result<()> {
         self.stability = parcel.read()?;
 
         let data_size: i32 = parcel.read()?;
@@ -242,10 +240,8 @@
             .checked_add(data_size)
             .ok_or(StatusCode::BAD_VALUE)?;
 
-        let mut new_parcel = OwnedParcel::new();
-        new_parcel
-            .borrowed()
-            .append_from(parcel, data_start, data_size)?;
+        let mut new_parcel = Parcel::new();
+        new_parcel.append_from(parcel, data_start, data_size)?;
         *self.data.get_mut().unwrap() = ParcelableHolderData::Parcel(new_parcel);
 
         unsafe {
diff --git a/libs/binder/rust/src/proxy.rs b/libs/binder/rust/src/proxy.rs
index a8d0c33..760d862 100644
--- a/libs/binder/rust/src/proxy.rs
+++ b/libs/binder/rust/src/proxy.rs
@@ -22,8 +22,7 @@
 };
 use crate::error::{status_result, Result, StatusCode};
 use crate::parcel::{
-    Deserialize, DeserializeArray, DeserializeOption, OwnedParcel, Parcel, Serialize, SerializeArray,
-    SerializeOption,
+    Parcel, BorrowedParcel, Deserialize, DeserializeArray, DeserializeOption, Serialize, SerializeArray, SerializeOption,
 };
 use crate::sys;
 
@@ -235,7 +234,7 @@
 }
 
 impl<T: AsNative<sys::AIBinder>> IBinderInternal for T {
-    fn prepare_transact(&self) -> Result<OwnedParcel> {
+    fn prepare_transact(&self) -> Result<Parcel> {
         let mut input = ptr::null_mut();
         let status = unsafe {
             // Safety: `SpIBinder` guarantees that `self` always contains a
@@ -255,16 +254,16 @@
             // Safety: At this point, `input` is either a valid, owned `AParcel`
-            // pointer, or null. `OwnedParcel::from_raw` safely handles both cases,
+            // pointer, or null. `Parcel::from_raw` safely handles both cases,
             // taking ownership of the parcel.
-            OwnedParcel::from_raw(input).ok_or(StatusCode::UNEXPECTED_NULL)
+            Parcel::from_raw(input).ok_or(StatusCode::UNEXPECTED_NULL)
         }
     }
 
     fn submit_transact(
         &self,
         code: TransactionCode,
-        data: OwnedParcel,
+        data: Parcel,
         flags: TransactionFlags,
-    ) -> Result<OwnedParcel> {
+    ) -> Result<Parcel> {
         let mut reply = ptr::null_mut();
         let status = unsafe {
             // Safety: `SpIBinder` guarantees that `self` always contains a
@@ -299,7 +298,7 @@
             // construct a `Parcel` out of it. `AIBinder_transact` passes
             // ownership of the `reply` parcel to Rust, so we need to
             // construct an owned variant.
-            OwnedParcel::from_raw(reply).ok_or(StatusCode::UNEXPECTED_NULL)
+            Parcel::from_raw(reply).ok_or(StatusCode::UNEXPECTED_NULL)
         }
     }
 
@@ -324,6 +323,7 @@
         status_result(status)
     }
 
+    #[cfg(not(android_vndk))]
     fn set_requesting_sid(&mut self, enable: bool) {
         unsafe { sys::AIBinder_setRequestingSid(self.as_native_mut(), enable) };
     }
@@ -415,13 +415,13 @@
 }
 
 impl Serialize for SpIBinder {
-    fn serialize(&self, parcel: &mut Parcel) -> Result<()> {
+    fn serialize(&self, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         parcel.write_binder(Some(self))
     }
 }
 
 impl SerializeOption for SpIBinder {
-    fn serialize_option(this: Option<&Self>, parcel: &mut Parcel) -> Result<()> {
+    fn serialize_option(this: Option<&Self>, parcel: &mut BorrowedParcel<'_>) -> Result<()> {
         parcel.write_binder(this)
     }
 }
@@ -429,7 +429,7 @@
 impl SerializeArray for SpIBinder {}
 
 impl Deserialize for SpIBinder {
-    fn deserialize(parcel: &Parcel) -> Result<SpIBinder> {
+    fn deserialize(parcel: &BorrowedParcel<'_>) -> Result<SpIBinder> {
         parcel
             .read_binder()
             .transpose()
@@ -438,7 +438,7 @@
 }
 
 impl DeserializeOption for SpIBinder {
-    fn deserialize_option(parcel: &Parcel) -> Result<Option<SpIBinder>> {
+    fn deserialize_option(parcel: &BorrowedParcel<'_>) -> Result<Option<SpIBinder>> {
         parcel.read_binder()
     }
 }
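
With OwnedParcel folded into Parcel, prepare_transact and submit_transact above now return an owned Parcel directly, so reply parcels can be read without the OwnedParcel::into_parcel step that the integration tests below used to need. A small sketch of the client-side call, using the same IBinderInternal::transact entry point the tests exercise; the `fetch_string` helper name and its error handling are illustrative:

    use binder::{IBinderInternal, Result, SpIBinder, TransactionCode};

    fn fetch_string(binder: &SpIBinder, code: TransactionCode) -> Result<String> {
        // The closure fills the request parcel; an empty request is fine here.
        let reply = binder.transact(code, 0, |_data| Ok(()))?;
        // `reply` is an owned Parcel, so it can be read directly.
        reply.read()
    }
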
diff --git a/libs/binder/rust/tests/integration.rs b/libs/binder/rust/tests/integration.rs
index ebfe879..1fd2ead 100644
--- a/libs/binder/rust/tests/integration.rs
+++ b/libs/binder/rust/tests/integration.rs
@@ -17,7 +17,7 @@
 //! Rust Binder crate integration tests
 
 use binder::declare_binder_interface;
-use binder::parcel::{Parcel, OwnedParcel};
+use binder::parcel::BorrowedParcel;
 use binder::{
     Binder, BinderFeatures, IBinderInternal, Interface, StatusCode, ThreadState, TransactionCode,
     FIRST_CALL_TRANSACTION,
@@ -179,8 +179,8 @@
 fn on_transact(
     service: &dyn ITest,
     code: TransactionCode,
-    _data: &Parcel,
-    reply: &mut Parcel,
+    _data: &BorrowedParcel<'_>,
+    reply: &mut BorrowedParcel<'_>,
 ) -> binder::Result<()> {
     match code.try_into()? {
         TestTransactionCode::Test => reply.write(&service.test()?),
@@ -218,24 +218,24 @@
     fn test(&self) -> binder::BoxFuture<'static, binder::Result<String>> {
         let binder = self.binder.clone();
         P::spawn(
-            move || binder.transact(TestTransactionCode::Test as TransactionCode, 0, |_| Ok(())).map(|p| OwnedParcel::try_from(p).unwrap()),
-            |reply| async move { reply?.into_parcel().read() }
+            move || binder.transact(TestTransactionCode::Test as TransactionCode, 0, |_| Ok(())),
+            |reply| async move { reply?.read() }
         )
     }
 
     fn get_dump_args(&self) -> binder::BoxFuture<'static, binder::Result<Vec<String>>> {
         let binder = self.binder.clone();
         P::spawn(
-            move || binder.transact(TestTransactionCode::GetDumpArgs as TransactionCode, 0, |_| Ok(())).map(|p| OwnedParcel::try_from(p).unwrap()),
-            |reply| async move { reply?.into_parcel().read() }
+            move || binder.transact(TestTransactionCode::GetDumpArgs as TransactionCode, 0, |_| Ok(())),
+            |reply| async move { reply?.read() }
         )
     }
 
     fn get_selinux_context(&self) -> binder::BoxFuture<'static, binder::Result<String>> {
         let binder = self.binder.clone();
         P::spawn(
-            move || binder.transact(TestTransactionCode::GetSelinuxContext as TransactionCode, 0, |_| Ok(())).map(|p| OwnedParcel::try_from(p).unwrap()),
-            |reply| async move { reply?.into_parcel().read() }
+            move || binder.transact(TestTransactionCode::GetSelinuxContext as TransactionCode, 0, |_| Ok(())),
+            |reply| async move { reply?.read() }
         )
     }
 }
@@ -284,8 +284,8 @@
 fn on_transact_same_descriptor(
     _service: &dyn ITestSameDescriptor,
     _code: TransactionCode,
-    _data: &Parcel,
-    _reply: &mut Parcel,
+    _data: &BorrowedParcel<'_>,
+    _reply: &mut BorrowedParcel<'_>,
 ) -> binder::Result<()> {
     Ok(())
 }
diff --git a/libs/binder/rust/tests/serialization.rs b/libs/binder/rust/tests/serialization.rs
index 66ba846..1fc761e 100644
--- a/libs/binder/rust/tests/serialization.rs
+++ b/libs/binder/rust/tests/serialization.rs
@@ -20,7 +20,7 @@
 use binder::declare_binder_interface;
 use binder::parcel::ParcelFileDescriptor;
 use binder::{
-    Binder, BinderFeatures, ExceptionCode, Interface, Parcel, Result, SpIBinder, Status,
+    Binder, BinderFeatures, BorrowedParcel, ExceptionCode, Interface, Result, SpIBinder, Status,
     StatusCode, TransactionCode,
 };
 
@@ -111,8 +111,8 @@
 fn on_transact(
     _service: &dyn ReadParcelTest,
     code: TransactionCode,
-    parcel: &Parcel,
-    reply: &mut Parcel,
+    parcel: &BorrowedParcel<'_>,
+    reply: &mut BorrowedParcel<'_>,
 ) -> Result<()> {
     match code {
         bindings::Transaction_TEST_BOOL => {
diff --git a/libs/gui/BLASTBufferQueue.cpp b/libs/gui/BLASTBufferQueue.cpp
index 9080822..5570321 100644
--- a/libs/gui/BLASTBufferQueue.cpp
+++ b/libs/gui/BLASTBufferQueue.cpp
@@ -138,7 +138,7 @@
         mSize(width, height),
         mRequestedSize(mSize),
         mFormat(format),
-        mNextTransaction(nullptr) {
+        mSyncTransaction(nullptr) {
     createBufferQueue(&mProducer, &mConsumer);
     // since the adapter is in the client process, set dequeue timeout
     // explicitly so that dequeueBuffer will block
@@ -290,7 +290,7 @@
                 // case, we don't actually want to flush the frames in between since they will get
                 // processed and merged with the sync transaction and released earlier than if they
                 // were sent to SF
-                if (mWaitForTransactionCallback && mNextTransaction == nullptr &&
+                if (mWaitForTransactionCallback && mSyncTransaction == nullptr &&
                     currFrameNumber >= mLastAcquiredFrameNumber) {
                     mWaitForTransactionCallback = false;
                     flushShadowQueue();
@@ -407,20 +407,11 @@
 
     // Release all buffers that are beyond the ones that we need to hold
     while (mPendingRelease.size() > numPendingBuffersToHold) {
-        const auto releaseBuffer = mPendingRelease.front();
+        const auto releasedBuffer = mPendingRelease.front();
         mPendingRelease.pop_front();
-        auto it = mSubmitted.find(releaseBuffer.callbackId);
-        if (it == mSubmitted.end()) {
-            BQA_LOGE("ERROR: releaseBufferCallback without corresponding submitted buffer %s",
-                     releaseBuffer.callbackId.to_string().c_str());
-            return;
-        }
-        mNumAcquired--;
-        BQA_LOGV("released %s", releaseBuffer.callbackId.to_string().c_str());
-        mBufferItemConsumer->releaseBuffer(it->second, releaseBuffer.releaseFence);
-        mSubmitted.erase(it);
+        releaseBuffer(releasedBuffer.callbackId, releasedBuffer.releaseFence);
         // Don't process the transactions here if mWaitForTransactionCallback is set. Instead, let
-        // onFrameAvailable handle processing them since it will merge with the nextTransaction.
+        // onFrameAvailable handle processing them since it will merge with the syncTransaction.
         if (!mWaitForTransactionCallback) {
             acquireNextBufferLocked(std::nullopt);
         }
@@ -432,6 +423,20 @@
     mCallbackCV.notify_all();
 }
 
+void BLASTBufferQueue::releaseBuffer(const ReleaseCallbackId& callbackId,
+                                     const sp<Fence>& releaseFence) {
+    auto it = mSubmitted.find(callbackId);
+    if (it == mSubmitted.end()) {
+        BQA_LOGE("ERROR: releaseBufferCallback without corresponding submitted buffer %s",
+                 callbackId.to_string().c_str());
+        return;
+    }
+    mNumAcquired--;
+    BQA_LOGV("released %s", callbackId.to_string().c_str());
+    mBufferItemConsumer->releaseBuffer(it->second, releaseFence);
+    mSubmitted.erase(it);
+}
+
 void BLASTBufferQueue::acquireNextBufferLocked(
         const std::optional<SurfaceComposerClient::Transaction*> transaction) {
     ATRACE_CALL();
@@ -589,34 +594,57 @@
     mBufferItemConsumer->releaseBuffer(bufferItem, bufferItem.mFence);
 }
 
+void BLASTBufferQueue::flushAndWaitForFreeBuffer(std::unique_lock<std::mutex>& lock) {
+    if (mWaitForTransactionCallback && mNumFrameAvailable > 0) {
+        // We are waiting on a previous sync's transaction callback so allow another sync
+        // transaction to proceed.
+        //
+        // We need to first flush out the transactions that were in between the two syncs.
+        // We do this by merging them into mSyncTransaction so that any merged buffers will have
+        // their release callbacks invoked. The release callbacks are async, so we need to wait
+        // on max acquired to make sure we have the capacity to acquire another buffer.
+        if (maxBuffersAcquired(false /* includeExtraAcquire */)) {
+            BQA_LOGD("waiting to flush shadow queue...");
+            mCallbackCV.wait(lock);
+        }
+        while (mNumFrameAvailable > 0) {
+            // flush out the shadow queue
+            acquireAndReleaseBuffer();
+        }
+    }
+
+    while (maxBuffersAcquired(false /* includeExtraAcquire */)) {
+        BQA_LOGD("waiting for free buffer.");
+        mCallbackCV.wait(lock);
+    }
+}
+
 void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
     ATRACE_CALL();
     std::unique_lock _lock{mMutex};
 
-    const bool nextTransactionSet = mNextTransaction != nullptr;
-    BQA_LOGV("onFrameAvailable-start nextTransactionSet=%s", boolToString(nextTransactionSet));
-    if (nextTransactionSet) {
-        if (mWaitForTransactionCallback) {
-            // We are waiting on a previous sync's transaction callback so allow another sync
-            // transaction to proceed.
-            //
-            // We need to first flush out the transactions that were in between the two syncs.
-            // We do this by merging them into mNextTransaction so any buffer merging will get
-            // a release callback invoked. The release callback will be async so we need to wait
-            // on max acquired to make sure we have the capacity to acquire another buffer.
-            if (maxBuffersAcquired(false /* includeExtraAcquire */)) {
-                BQA_LOGD("waiting to flush shadow queue...");
-                mCallbackCV.wait(_lock);
-            }
-            while (mNumFrameAvailable > 0) {
-                // flush out the shadow queue
-                acquireAndReleaseBuffer();
+    const bool syncTransactionSet = mSyncTransaction != nullptr;
+    BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));
+
+    if (syncTransactionSet) {
+        bool mayNeedToWaitForBuffer = true;
+        // If we are going to re-use the same mSyncTransaction, release the buffer that may already
+        // be set in the Transaction. This frees up a slot early so we can continue processing
+        // a new buffer.
+        if (!mAcquireSingleBuffer) {
+            auto bufferData = mSyncTransaction->getAndClearBuffer(mSurfaceControl);
+            if (bufferData) {
+                BQA_LOGD("Releasing previous buffer when syncing: framenumber=%" PRIu64,
+                         bufferData->frameNumber);
+                releaseBuffer(bufferData->releaseCallbackId, bufferData->acquireFence);
+                // Because we just released a buffer, we know there's no need to wait for a free
+                // buffer.
+                mayNeedToWaitForBuffer = false;
             }
         }
 
-        while (maxBuffersAcquired(false /* includeExtraAcquire */)) {
-            BQA_LOGD("waiting for free buffer.");
-            mCallbackCV.wait(_lock);
+        if (mayNeedToWaitForBuffer) {
+            flushAndWaitForFreeBuffer(_lock);
         }
     }
 
@@ -625,12 +653,14 @@
     ATRACE_INT(mQueuedBufferTrace.c_str(),
                mNumFrameAvailable + mNumAcquired - mPendingRelease.size());
 
-    BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " nextTransactionSet=%s", item.mFrameNumber,
-             boolToString(nextTransactionSet));
+    BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " syncTransactionSet=%s", item.mFrameNumber,
+             boolToString(syncTransactionSet));
 
-    if (nextTransactionSet) {
-        acquireNextBufferLocked(std::move(mNextTransaction));
-        mNextTransaction = nullptr;
+    if (syncTransactionSet) {
+        acquireNextBufferLocked(mSyncTransaction);
+        if (mAcquireSingleBuffer) {
+            mSyncTransaction = nullptr;
+        }
         mWaitForTransactionCallback = true;
     } else if (!mWaitForTransactionCallback) {
         acquireNextBufferLocked(std::nullopt);
@@ -652,9 +682,11 @@
     mDequeueTimestamps.erase(bufferId);
 };
 
-void BLASTBufferQueue::setNextTransaction(SurfaceComposerClient::Transaction* t) {
+void BLASTBufferQueue::setSyncTransaction(SurfaceComposerClient::Transaction* t,
+                                          bool acquireSingleBuffer) {
     std::lock_guard _lock{mMutex};
-    mNextTransaction = t;
+    mSyncTransaction = t;
+    mAcquireSingleBuffer = mSyncTransaction ? acquireSingleBuffer : true;
 }
 
 bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
diff --git a/libs/gui/CpuConsumer.cpp b/libs/gui/CpuConsumer.cpp
index 8edf604..a626970 100644
--- a/libs/gui/CpuConsumer.cpp
+++ b/libs/gui/CpuConsumer.cpp
@@ -71,6 +71,7 @@
         case HAL_PIXEL_FORMAT_Y8:
         case HAL_PIXEL_FORMAT_Y16:
         case HAL_PIXEL_FORMAT_RAW16:
+        case HAL_PIXEL_FORMAT_RAW12:
         case HAL_PIXEL_FORMAT_RAW10:
         case HAL_PIXEL_FORMAT_RAW_OPAQUE:
         case HAL_PIXEL_FORMAT_BLOB:
diff --git a/libs/gui/DisplayEventDispatcher.cpp b/libs/gui/DisplayEventDispatcher.cpp
index c986b82..8379675 100644
--- a/libs/gui/DisplayEventDispatcher.cpp
+++ b/libs/gui/DisplayEventDispatcher.cpp
@@ -33,10 +33,13 @@
 // using just a few large reads.
 static const size_t EVENT_BUFFER_SIZE = 100;
 
+static constexpr nsecs_t WAITING_FOR_VSYNC_TIMEOUT = ms2ns(300);
+
 DisplayEventDispatcher::DisplayEventDispatcher(
         const sp<Looper>& looper, ISurfaceComposer::VsyncSource vsyncSource,
         ISurfaceComposer::EventRegistrationFlags eventRegistration)
-      : mLooper(looper), mReceiver(vsyncSource, eventRegistration), mWaitingForVsync(false) {
+      : mLooper(looper), mReceiver(vsyncSource, eventRegistration), mWaitingForVsync(false),
+        mLastVsyncCount(0), mLastScheduleVsyncTime(0) {
     ALOGV("dispatcher %p ~ Initializing display event dispatcher.", this);
 }
 
@@ -86,6 +89,7 @@
         }
 
         mWaitingForVsync = true;
+        mLastScheduleVsyncTime = systemTime(SYSTEM_TIME_MONOTONIC);
     }
     return OK;
 }
@@ -124,9 +128,21 @@
               this, ns2ms(vsyncTimestamp), to_string(vsyncDisplayId).c_str(), vsyncCount,
               vsyncEventData.id);
         mWaitingForVsync = false;
+        mLastVsyncCount = vsyncCount;
         dispatchVsync(vsyncTimestamp, vsyncDisplayId, vsyncCount, vsyncEventData);
     }
 
+    if (mWaitingForVsync) {
+        const nsecs_t currentTime = systemTime(SYSTEM_TIME_MONOTONIC);
+        const nsecs_t vsyncScheduleDelay = currentTime - mLastScheduleVsyncTime;
+        if (vsyncScheduleDelay > WAITING_FOR_VSYNC_TIMEOUT) {
+            ALOGW("Vsync time out! vsyncScheduleDelay=%" PRId64 "ms", ns2ms(vsyncScheduleDelay));
+            mWaitingForVsync = false;
+            dispatchVsync(currentTime, vsyncDisplayId /* displayId is not used */,
+                          ++mLastVsyncCount, vsyncEventData /* empty data */);
+        }
+    }
+
     return 1; // keep the callback
 }
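
The timeout path added above is a simple watchdog: if a vsync was scheduled but nothing arrives within WAITING_FOR_VSYNC_TIMEOUT (300 ms), a synthetic vsync is dispatched so the client does not stall waiting for a callback that never comes. A minimal, self-contained sketch of the same pattern, with purely illustrative names (this is not the DisplayEventDispatcher API):

    #include <chrono>
    #include <cstdio>

    class VsyncWatchdog {
    public:
        void scheduleVsync() {
            mWaiting = true;
            mLastSchedule = Clock::now();
        }

        // Called whenever the event loop wakes up for any other reason. Returns true
        // if a fallback vsync had to be synthesized because the real one never came.
        bool pollForTimeout() {
            if (!mWaiting || Clock::now() - mLastSchedule <= kTimeout) {
                return false;
            }
            mWaiting = false;
            std::printf("vsync timed out, dispatching fallback #%u\n", ++mCount);
            return true; // the real code dispatches a synthetic vsync here
        }

    private:
        using Clock = std::chrono::steady_clock;
        static constexpr std::chrono::milliseconds kTimeout{300};
        bool mWaiting = false;
        Clock::time_point mLastSchedule{};
        unsigned mCount = 0;
    };

    int main() {
        VsyncWatchdog w;
        w.scheduleVsync();
        w.pollForTimeout(); // fires only if 300 ms elapsed without a real vsync
    }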
 
diff --git a/libs/gui/SurfaceComposerClient.cpp b/libs/gui/SurfaceComposerClient.cpp
index 2713be0..b139cf1 100644
--- a/libs/gui/SurfaceComposerClient.cpp
+++ b/libs/gui/SurfaceComposerClient.cpp
@@ -413,6 +413,14 @@
     return callback;
 }
 
+void TransactionCompletedListener::removeReleaseBufferCallback(
+        const ReleaseCallbackId& callbackId) {
+    {
+        std::scoped_lock<std::mutex> lock(mMutex);
+        popReleaseBufferCallbackLocked(callbackId);
+    }
+}
+
 // ---------------------------------------------------------------------------
 
 void removeDeadBufferCallback(void* /*context*/, uint64_t graphicBufferId);
@@ -1307,6 +1315,28 @@
     return *this;
 }
 
+std::optional<BufferData> SurfaceComposerClient::Transaction::getAndClearBuffer(
+        const sp<SurfaceControl>& sc) {
+    layer_state_t* s = getLayerState(sc);
+    if (!s) {
+        return std::nullopt;
+    }
+    if (!(s->what & layer_state_t::eBufferChanged)) {
+        return std::nullopt;
+    }
+
+    BufferData bufferData = s->bufferData;
+
+    TransactionCompletedListener::getInstance()->removeReleaseBufferCallback(
+            bufferData.releaseCallbackId);
+    BufferData emptyBufferData;
+    s->what &= ~layer_state_t::eBufferChanged;
+    s->bufferData = emptyBufferData;
+
+    mContainsBuffer = false;
+    return bufferData;
+}
+
 SurfaceComposerClient::Transaction& SurfaceComposerClient::Transaction::setBuffer(
         const sp<SurfaceControl>& sc, const sp<GraphicBuffer>& buffer,
         const std::optional<sp<Fence>>& fence, const std::optional<uint64_t>& frameNumber,
diff --git a/libs/gui/include/gui/BLASTBufferQueue.h b/libs/gui/include/gui/BLASTBufferQueue.h
index 3881620..d766173 100644
--- a/libs/gui/include/gui/BLASTBufferQueue.h
+++ b/libs/gui/include/gui/BLASTBufferQueue.h
@@ -88,13 +88,13 @@
     void onFrameDequeued(const uint64_t) override;
     void onFrameCancelled(const uint64_t) override;
 
-    virtual void transactionCommittedCallback(nsecs_t latchTime, const sp<Fence>& presentFence,
-                                              const std::vector<SurfaceControlStats>& stats);
-    void transactionCallback(nsecs_t latchTime, const sp<Fence>& presentFence,
-            const std::vector<SurfaceControlStats>& stats);
+    void transactionCommittedCallback(nsecs_t latchTime, const sp<Fence>& presentFence,
+                                      const std::vector<SurfaceControlStats>& stats);
+    virtual void transactionCallback(nsecs_t latchTime, const sp<Fence>& presentFence,
+                                     const std::vector<SurfaceControlStats>& stats);
     void releaseBufferCallback(const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
                                std::optional<uint32_t> currentMaxAcquiredBufferCount);
-    void setNextTransaction(SurfaceComposerClient::Transaction *t);
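+    // Sets the Transaction that subsequently queued buffers are acquired into; the caller is
+    // responsible for applying it. When acquireSingleBuffer is true (the default), only the next
+    // buffer is captured and the sync transaction is then cleared. When false, buffers keep being
+    // captured (each new buffer releasing the previously captured one) until this is called again
+    // with nullptr.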
+    void setSyncTransaction(SurfaceComposerClient::Transaction* t, bool acquireSingleBuffer = true);
     void mergeWithNextTransaction(SurfaceComposerClient::Transaction* t, uint64_t frameNumber);
     void applyPendingTransactions(uint64_t frameNumber);
 
@@ -132,6 +132,9 @@
 
     void flushShadowQueue() REQUIRES(mMutex);
     void acquireAndReleaseBuffer() REQUIRES(mMutex);
+    void releaseBuffer(const ReleaseCallbackId& callbackId, const sp<Fence>& releaseFence)
+            REQUIRES(mMutex);
+    void flushAndWaitForFreeBuffer(std::unique_lock<std::mutex>& lock);
 
     std::string mName;
     // Represents the queued buffer count from buffer queue,
@@ -208,7 +211,7 @@
     sp<IGraphicBufferProducer> mProducer;
     sp<BLASTBufferItemConsumer> mBufferItemConsumer;
 
-    SurfaceComposerClient::Transaction* mNextTransaction GUARDED_BY(mMutex);
+    SurfaceComposerClient::Transaction* mSyncTransaction GUARDED_BY(mMutex);
     std::vector<std::tuple<uint64_t /* framenumber */, SurfaceComposerClient::Transaction>>
             mPendingTransactions GUARDED_BY(mMutex);
 
@@ -239,7 +242,11 @@
     std::queue<sp<SurfaceControl>> mSurfaceControlsWithPendingCallback GUARDED_BY(mMutex);
 
     uint32_t mCurrentMaxAcquiredBufferCount;
-    bool mWaitForTransactionCallback = false;
+    bool mWaitForTransactionCallback GUARDED_BY(mMutex) = false;
+
+    // Flag to determine whether the sync transaction should acquire only a single buffer and then
+    // be cleared, or continue to acquire buffers until it is explicitly cleared.
+    bool mAcquireSingleBuffer GUARDED_BY(mMutex) = true;
 };
 
 } // namespace android
diff --git a/libs/gui/include/gui/DisplayEventDispatcher.h b/libs/gui/include/gui/DisplayEventDispatcher.h
index 92c89b8..8a3a476 100644
--- a/libs/gui/include/gui/DisplayEventDispatcher.h
+++ b/libs/gui/include/gui/DisplayEventDispatcher.h
@@ -80,6 +80,8 @@
     sp<Looper> mLooper;
     DisplayEventReceiver mReceiver;
     bool mWaitingForVsync;
+    uint32_t mLastVsyncCount;
+    nsecs_t mLastScheduleVsyncTime;
 
     std::vector<FrameRateOverride> mFrameRateOverrides;
 
diff --git a/libs/gui/include/gui/SurfaceComposerClient.h b/libs/gui/include/gui/SurfaceComposerClient.h
index e62c76e..e05c364 100644
--- a/libs/gui/include/gui/SurfaceComposerClient.h
+++ b/libs/gui/include/gui/SurfaceComposerClient.h
@@ -493,6 +493,7 @@
                                const std::optional<uint64_t>& frameNumber = std::nullopt,
                                const ReleaseCallbackId& id = ReleaseCallbackId::INVALID_ID,
                                ReleaseBufferCallback callback = nullptr);
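+        // Returns the BufferData currently staged on |sc| (if any) and removes it from this
+        // Transaction, also unregistering its release callback so the caller can release the
+        // buffer directly. Returns std::nullopt if no buffer is set.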
+        std::optional<BufferData> getAndClearBuffer(const sp<SurfaceControl>& sc);
         Transaction& setDataspace(const sp<SurfaceControl>& sc, ui::Dataspace dataspace);
         Transaction& setHdrMetadata(const sp<SurfaceControl>& sc, const HdrMetadata& hdrMetadata);
         Transaction& setSurfaceDamageRegion(const sp<SurfaceControl>& sc,
@@ -751,6 +752,8 @@
     void onReleaseBuffer(ReleaseCallbackId, sp<Fence> releaseFence,
                          uint32_t currentMaxAcquiredBufferCount) override;
 
+    void removeReleaseBufferCallback(const ReleaseCallbackId& callbackId);
+
     // For Testing Only
     static void setInstance(const sp<TransactionCompletedListener>&);
 
diff --git a/libs/gui/tests/BLASTBufferQueue_test.cpp b/libs/gui/tests/BLASTBufferQueue_test.cpp
index b2d5048..48b8621 100644
--- a/libs/gui/tests/BLASTBufferQueue_test.cpp
+++ b/libs/gui/tests/BLASTBufferQueue_test.cpp
@@ -72,31 +72,30 @@
                          int height, int32_t format)
           : BLASTBufferQueue(name, surface, width, height, format) {}
 
-    void transactionCommittedCallback(nsecs_t latchTime, const sp<Fence>& presentFence,
-                                      const std::vector<SurfaceControlStats>& stats) override {
-        BLASTBufferQueue::transactionCommittedCallback(latchTime, presentFence, stats);
-
+    void transactionCallback(nsecs_t latchTime, const sp<Fence>& presentFence,
+                             const std::vector<SurfaceControlStats>& stats) override {
+        BLASTBufferQueue::transactionCallback(latchTime, presentFence, stats);
         uint64_t frameNumber = stats[0].frameEventStats.frameNumber;
 
         {
             std::unique_lock lock{frameNumberMutex};
-            mLastTransactionCommittedFrameNumber = frameNumber;
-            mCommittedCV.notify_all();
+            mLastTransactionFrameNumber = frameNumber;
+            mWaitForCallbackCV.notify_all();
         }
     }
 
     void waitForCallback(int64_t frameNumber) {
         std::unique_lock lock{frameNumberMutex};
         // Wait until all but one of the submitted buffers have been released.
-        while (mLastTransactionCommittedFrameNumber < frameNumber) {
-            mCommittedCV.wait(lock);
+        while (mLastTransactionFrameNumber < frameNumber) {
+            mWaitForCallbackCV.wait(lock);
         }
     }
 
 private:
     std::mutex frameNumberMutex;
-    std::condition_variable mCommittedCV;
-    int64_t mLastTransactionCommittedFrameNumber = -1;
+    std::condition_variable mWaitForCallbackCV;
+    int64_t mLastTransactionFrameNumber = -1;
 };
 
 class BLASTBufferQueueHelper {
@@ -110,15 +109,15 @@
         mBlastBufferQueueAdapter->update(sc, width, height, PIXEL_FORMAT_RGBA_8888);
     }
 
-    void setNextTransaction(Transaction* next) {
-        mBlastBufferQueueAdapter->setNextTransaction(next);
+    void setSyncTransaction(Transaction* next, bool acquireSingleBuffer = true) {
+        mBlastBufferQueueAdapter->setSyncTransaction(next, acquireSingleBuffer);
     }
 
     int getWidth() { return mBlastBufferQueueAdapter->mSize.width; }
 
     int getHeight() { return mBlastBufferQueueAdapter->mSize.height; }
 
-    Transaction* getNextTransaction() { return mBlastBufferQueueAdapter->mNextTransaction; }
+    Transaction* getSyncTransaction() { return mBlastBufferQueueAdapter->mSyncTransaction; }
 
     sp<IGraphicBufferProducer> getIGraphicBufferProducer() {
         return mBlastBufferQueueAdapter->getIGraphicBufferProducer();
@@ -144,6 +143,11 @@
         mBlastBufferQueueAdapter->waitForCallback(frameNumber);
     }
 
+    void validateNumFramesSubmitted(int64_t numFramesSubmitted) {
+        std::unique_lock lock{mBlastBufferQueueAdapter->mMutex};
+        ASSERT_EQ(numFramesSubmitted, mBlastBufferQueueAdapter->mSubmitted.size());
+    }
+
 private:
     sp<TestBLASTBufferQueue> mBlastBufferQueueAdapter;
 };
@@ -299,7 +303,7 @@
         auto ret = igbp->dequeueBuffer(&slot, &fence, mDisplayWidth, mDisplayHeight,
                                        PIXEL_FORMAT_RGBA_8888, GRALLOC_USAGE_SW_WRITE_OFTEN,
                                        nullptr, nullptr);
-        ASSERT_EQ(IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION, ret);
+        ASSERT_TRUE(ret == IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION || ret == NO_ERROR);
         ASSERT_EQ(OK, igbp->requestBuffer(slot, &buf));
 
         uint32_t* bufData;
@@ -338,7 +342,7 @@
     ASSERT_EQ(mSurfaceControl, adapter.getSurfaceControl());
     ASSERT_EQ(mDisplayWidth, adapter.getWidth());
     ASSERT_EQ(mDisplayHeight, adapter.getHeight());
-    ASSERT_EQ(nullptr, adapter.getNextTransaction());
+    ASSERT_EQ(nullptr, adapter.getSyncTransaction());
 }
 
 TEST_F(BLASTBufferQueueTest, Update) {
@@ -359,11 +363,11 @@
     ASSERT_EQ(mDisplayHeight / 2, height);
 }
 
-TEST_F(BLASTBufferQueueTest, SetNextTransaction) {
+TEST_F(BLASTBufferQueueTest, SetSyncTransaction) {
     BLASTBufferQueueHelper adapter(mSurfaceControl, mDisplayWidth, mDisplayHeight);
-    Transaction next;
-    adapter.setNextTransaction(&next);
-    ASSERT_EQ(&next, adapter.getNextTransaction());
+    Transaction sync;
+    adapter.setSyncTransaction(&sync);
+    ASSERT_EQ(&sync, adapter.getSyncTransaction());
 }
 
 TEST_F(BLASTBufferQueueTest, DISABLED_onFrameAvailable_ApplyDesiredPresentTime) {
@@ -802,8 +806,8 @@
     sp<IGraphicBufferProducer> igbProducer;
     setUpProducer(adapter, igbProducer);
 
-    Transaction next;
-    adapter.setNextTransaction(&next);
+    Transaction sync;
+    adapter.setSyncTransaction(&sync);
     queueBuffer(igbProducer, 0, 255, 0, 0);
 
     // queue non sync buffer, so this one should get blocked
@@ -812,14 +816,14 @@
     queueBuffer(igbProducer, r, g, b, presentTimeDelay);
 
     CallbackHelper transactionCallback;
-    next.addTransactionCompletedCallback(transactionCallback.function,
+    sync.addTransactionCompletedCallback(transactionCallback.function,
                                          transactionCallback.getContext())
             .apply();
 
     CallbackData callbackData;
     transactionCallback.getCallbackData(&callbackData);
 
-    // capture screen and verify that it is red
+    // capture screen and verify that it is green
     ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
     ASSERT_NO_FATAL_FAILURE(
             checkScreenCapture(0, 255, 0, {0, 0, (int32_t)mDisplayWidth, (int32_t)mDisplayHeight}));
@@ -842,16 +846,16 @@
 
     Transaction mainTransaction;
 
-    Transaction next;
-    adapter.setNextTransaction(&next);
+    Transaction sync;
+    adapter.setSyncTransaction(&sync);
     queueBuffer(igbProducer, 0, 255, 0, 0);
 
-    mainTransaction.merge(std::move(next));
+    mainTransaction.merge(std::move(sync));
 
-    adapter.setNextTransaction(&next);
+    adapter.setSyncTransaction(&sync);
     queueBuffer(igbProducer, r, g, b, 0);
 
-    mainTransaction.merge(std::move(next));
+    mainTransaction.merge(std::move(sync));
     // Expect 1 buffer to be released even before sending to SurfaceFlinger
     mProducerListener->waitOnNumberReleased(1);
 
@@ -882,24 +886,24 @@
 
     Transaction mainTransaction;
 
-    Transaction next;
+    Transaction sync;
     // queue a sync transaction
-    adapter.setNextTransaction(&next);
+    adapter.setSyncTransaction(&sync);
     queueBuffer(igbProducer, 0, 255, 0, 0);
 
-    mainTransaction.merge(std::move(next));
+    mainTransaction.merge(std::move(sync));
 
-    // queue another buffer without setting next transaction
+    // queue another buffer without setting sync transaction
     queueBuffer(igbProducer, 0, 0, 255, 0);
 
     // queue another sync transaction
-    adapter.setNextTransaction(&next);
+    adapter.setSyncTransaction(&sync);
     queueBuffer(igbProducer, r, g, b, 0);
     // Expect 1 buffer to be released because the non sync transaction should merge
     // with the sync
     mProducerListener->waitOnNumberReleased(1);
 
-    mainTransaction.merge(std::move(next));
+    mainTransaction.merge(std::move(sync));
     // Expect 2 buffers to be released due to merging the two syncs.
     mProducerListener->waitOnNumberReleased(2);
 
@@ -930,26 +934,26 @@
 
     Transaction mainTransaction;
 
-    Transaction next;
+    Transaction sync;
     // queue a sync transaction
-    adapter.setNextTransaction(&next);
+    adapter.setSyncTransaction(&sync);
     queueBuffer(igbProducer, 0, 255, 0, 0);
 
-    mainTransaction.merge(std::move(next));
+    mainTransaction.merge(std::move(sync));
 
-    // queue a few buffers without setting next transaction
+    // queue a few buffers without setting sync transaction
     queueBuffer(igbProducer, 0, 0, 255, 0);
     queueBuffer(igbProducer, 0, 0, 255, 0);
     queueBuffer(igbProducer, 0, 0, 255, 0);
 
     // queue another sync transaction
-    adapter.setNextTransaction(&next);
+    adapter.setSyncTransaction(&sync);
     queueBuffer(igbProducer, r, g, b, 0);
     // Expect 3 buffers to be released because the non sync transactions should merge
     // with the sync
     mProducerListener->waitOnNumberReleased(3);
 
-    mainTransaction.merge(std::move(next));
+    mainTransaction.merge(std::move(sync));
     // Expect 4 buffers to be released due to merging the two syncs.
     mProducerListener->waitOnNumberReleased(4);
 
@@ -987,14 +991,14 @@
     // Send a buffer to SF
     queueBuffer(igbProducer, 0, 255, 0, 0);
 
-    Transaction next;
+    Transaction sync;
     // queue a sync transaction
-    adapter.setNextTransaction(&next);
+    adapter.setSyncTransaction(&sync);
     queueBuffer(igbProducer, 0, 255, 0, 0);
 
-    mainTransaction.merge(std::move(next));
+    mainTransaction.merge(std::move(sync));
 
-    // queue a few buffers without setting next transaction
+    // queue a few buffers without setting sync transaction
     queueBuffer(igbProducer, 0, 0, 255, 0);
     queueBuffer(igbProducer, 0, 0, 255, 0);
     queueBuffer(igbProducer, 0, 0, 255, 0);
@@ -1003,13 +1007,13 @@
     mainTransaction.apply();
 
     // queue another sync transaction
-    adapter.setNextTransaction(&next);
+    adapter.setSyncTransaction(&sync);
     queueBuffer(igbProducer, r, g, b, 0);
     // Expect 2 buffers to be released because the non sync transactions should merge
     // with the sync
     mProducerListener->waitOnNumberReleased(3);
 
-    mainTransaction.merge(std::move(next));
+    mainTransaction.merge(std::move(sync));
 
     CallbackHelper transactionCallback;
     mainTransaction
@@ -1026,6 +1030,45 @@
             checkScreenCapture(r, g, b, {0, 0, (int32_t)mDisplayWidth, (int32_t)mDisplayHeight}));
 }
 
+TEST_F(BLASTBufferQueueTest, SetSyncTransactionAcquireMultipleBuffers) {
+    BLASTBufferQueueHelper adapter(mSurfaceControl, mDisplayWidth, mDisplayHeight);
+
+    sp<IGraphicBufferProducer> igbProducer;
+    setUpProducer(adapter, igbProducer);
+
+    Transaction next;
+    adapter.setSyncTransaction(&next, false);
+    queueBuffer(igbProducer, 0, 255, 0, 0);
+    queueBuffer(igbProducer, 0, 0, 255, 0);
+    // There should only be one frame submitted since the first frame will be released.
+    adapter.validateNumFramesSubmitted(1);
+    adapter.setSyncTransaction(nullptr);
+
+    // queue non sync buffer, so this one should get blocked
+    // Add a present delay to allow the first screenshot to get taken.
+    nsecs_t presentTimeDelay = std::chrono::nanoseconds(500ms).count();
+    queueBuffer(igbProducer, 255, 0, 0, presentTimeDelay);
+
+    CallbackHelper transactionCallback;
+    next.addTransactionCompletedCallback(transactionCallback.function,
+                                         transactionCallback.getContext())
+            .apply();
+
+    CallbackData callbackData;
+    transactionCallback.getCallbackData(&callbackData);
+
+    // capture screen and verify that it is blue
+    ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
+    ASSERT_NO_FATAL_FAILURE(
+            checkScreenCapture(0, 0, 255, {0, 0, (int32_t)mDisplayWidth, (int32_t)mDisplayHeight}));
+
+    mProducerListener->waitOnNumberReleased(2);
+    // capture screen and verify that it is red
+    ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
+    ASSERT_NO_FATAL_FAILURE(
+            checkScreenCapture(255, 0, 0, {0, 0, (int32_t)mDisplayWidth, (int32_t)mDisplayHeight}));
+}
+
 class TestProducerListener : public BnProducerListener {
 public:
     sp<IGraphicBufferProducer> mIgbp;
diff --git a/libs/renderengine/RenderEngine.cpp b/libs/renderengine/RenderEngine.cpp
index a9ea690..65d4895 100644
--- a/libs/renderengine/RenderEngine.cpp
+++ b/libs/renderengine/RenderEngine.cpp
@@ -26,55 +26,39 @@
 namespace android {
 namespace renderengine {
 
-std::unique_ptr<RenderEngine> RenderEngine::create(const RenderEngineCreationArgs& args) {
-    RenderEngineType renderEngineType = args.renderEngineType;
-
+std::unique_ptr<RenderEngine> RenderEngine::create(RenderEngineCreationArgs args) {
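+    // args is taken by value so that the debug-property override below, and the copies captured
+    // by the lambdas handed to RenderEngineThreaded::create, all see the final renderEngineType.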
     // Keep the ability to override by PROPERTIES:
     char prop[PROPERTY_VALUE_MAX];
     property_get(PROPERTY_DEBUG_RENDERENGINE_BACKEND, prop, "");
     if (strcmp(prop, "gles") == 0) {
-        renderEngineType = RenderEngineType::GLES;
+        args.renderEngineType = RenderEngineType::GLES;
     }
     if (strcmp(prop, "threaded") == 0) {
-        renderEngineType = RenderEngineType::THREADED;
+        args.renderEngineType = RenderEngineType::THREADED;
     }
     if (strcmp(prop, "skiagl") == 0) {
-        renderEngineType = RenderEngineType::SKIA_GL;
+        args.renderEngineType = RenderEngineType::SKIA_GL;
     }
     if (strcmp(prop, "skiaglthreaded") == 0) {
-        renderEngineType = RenderEngineType::SKIA_GL_THREADED;
+        args.renderEngineType = RenderEngineType::SKIA_GL_THREADED;
     }
 
-    switch (renderEngineType) {
+    switch (args.renderEngineType) {
         case RenderEngineType::THREADED:
             ALOGD("Threaded RenderEngine with GLES Backend");
             return renderengine::threaded::RenderEngineThreaded::create(
                     [args]() { return android::renderengine::gl::GLESRenderEngine::create(args); },
-                    renderEngineType);
+                    args.renderEngineType);
         case RenderEngineType::SKIA_GL:
             ALOGD("RenderEngine with SkiaGL Backend");
             return renderengine::skia::SkiaGLRenderEngine::create(args);
         case RenderEngineType::SKIA_GL_THREADED: {
-            // These need to be recreated, since they are a constant reference, and we need to
-            // let SkiaRE know that it's running as threaded, and all GL operation will happen on
-            // the same thread.
-            RenderEngineCreationArgs skiaArgs =
-                    RenderEngineCreationArgs::Builder()
-                            .setPixelFormat(args.pixelFormat)
-                            .setImageCacheSize(args.imageCacheSize)
-                            .setUseColorManagerment(args.useColorManagement)
-                            .setEnableProtectedContext(args.enableProtectedContext)
-                            .setPrecacheToneMapperShaderOnly(args.precacheToneMapperShaderOnly)
-                            .setSupportsBackgroundBlur(args.supportsBackgroundBlur)
-                            .setContextPriority(args.contextPriority)
-                            .setRenderEngineType(renderEngineType)
-                            .build();
             ALOGD("Threaded RenderEngine with SkiaGL Backend");
             return renderengine::threaded::RenderEngineThreaded::create(
-                    [skiaArgs]() {
-                        return android::renderengine::skia::SkiaGLRenderEngine::create(skiaArgs);
+                    [args]() {
+                        return android::renderengine::skia::SkiaGLRenderEngine::create(args);
                     },
-                    renderEngineType);
+                    args.renderEngineType);
         }
         case RenderEngineType::GLES:
         default:
diff --git a/libs/renderengine/include/renderengine/RenderEngine.h b/libs/renderengine/include/renderengine/RenderEngine.h
index b9cc648..6b85c57 100644
--- a/libs/renderengine/include/renderengine/RenderEngine.h
+++ b/libs/renderengine/include/renderengine/RenderEngine.h
@@ -99,7 +99,7 @@
         SKIA_GL_THREADED = 4,
     };
 
-    static std::unique_ptr<RenderEngine> create(const RenderEngineCreationArgs& args);
+    static std::unique_ptr<RenderEngine> create(RenderEngineCreationArgs args);
 
     virtual ~RenderEngine() = 0;
 
diff --git a/services/sensorservice/SensorDevice.cpp b/services/sensorservice/SensorDevice.cpp
index db1a1cc..b118d8f 100644
--- a/services/sensorservice/SensorDevice.cpp
+++ b/services/sensorservice/SensorDevice.cpp
@@ -23,31 +23,31 @@
 
 #include <android-base/logging.h>
 #include <android/util/ProtoOutputStream.h>
+#include <cutils/atomic.h>
 #include <frameworks/base/core/proto/android/service/sensor_service.proto.h>
 #include <sensors/convert.h>
-#include <cutils/atomic.h>
 #include <utils/Errors.h>
 #include <utils/Singleton.h>
 
-#include <cstddef>
 #include <chrono>
 #include <cinttypes>
+#include <cstddef>
 #include <thread>
 
 using namespace android::hardware::sensors;
 using namespace android::hardware::sensors::V1_0;
 using namespace android::hardware::sensors::V1_0::implementation;
+using android::hardware::hidl_vec;
+using android::hardware::Return;
 using android::hardware::sensors::V2_0::EventQueueFlagBits;
 using android::hardware::sensors::V2_0::WakeLockQueueFlagBits;
 using android::hardware::sensors::V2_1::ISensorsCallback;
-using android::hardware::sensors::V2_1::implementation::convertToOldSensorInfo;
-using android::hardware::sensors::V2_1::implementation::convertToNewSensorInfos;
 using android::hardware::sensors::V2_1::implementation::convertToNewEvents;
+using android::hardware::sensors::V2_1::implementation::convertToNewSensorInfos;
+using android::hardware::sensors::V2_1::implementation::convertToOldSensorInfo;
 using android::hardware::sensors::V2_1::implementation::ISensorsWrapperV1_0;
 using android::hardware::sensors::V2_1::implementation::ISensorsWrapperV2_0;
 using android::hardware::sensors::V2_1::implementation::ISensorsWrapperV2_1;
-using android::hardware::hidl_vec;
-using android::hardware::Return;
 using android::SensorDeviceUtils::HidlServiceRegistrationWaiter;
 using android::util::ProtoOutputStream;
 
@@ -73,7 +73,7 @@
     }
 }
 
-template<typename EnumType>
+template <typename EnumType>
 constexpr typename std::underlying_type<EnumType>::type asBaseType(EnumType value) {
     return static_cast<typename std::underlying_type<EnumType>::type>(value);
 }
@@ -81,14 +81,13 @@
 // Used internally by the framework to wake the Event FMQ. These values must start after
 // the last value of EventQueueFlagBits
 enum EventQueueFlagBitsInternal : uint32_t {
-    INTERNAL_WAKE =  1 << 16,
+    INTERNAL_WAKE = 1 << 16,
 };
 
-}  // anonymous namespace
+} // anonymous namespace
 
 void SensorsHalDeathReceivier::serviceDied(
-        uint64_t /* cookie */,
-        const wp<::android::hidl::base::V1_0::IBase>& /* service */) {
+        uint64_t /* cookie */, const wp<::android::hidl::base::V1_0::IBase>& /* service */) {
     ALOGW("Sensors HAL died, attempting to reconnect.");
     SensorDevice::getInstance().prepareForReconnect();
 }
@@ -98,29 +97,29 @@
     using SensorInfo = ::android::hardware::sensors::V2_1::SensorInfo;
 
     Return<void> onDynamicSensorsConnected_2_1(
-            const hidl_vec<SensorInfo> &dynamicSensorsAdded) override {
+            const hidl_vec<SensorInfo>& dynamicSensorsAdded) override {
         return SensorDevice::getInstance().onDynamicSensorsConnected(dynamicSensorsAdded);
     }
 
     Return<void> onDynamicSensorsConnected(
-            const hidl_vec<V1_0::SensorInfo> &dynamicSensorsAdded) override {
+            const hidl_vec<V1_0::SensorInfo>& dynamicSensorsAdded) override {
         return SensorDevice::getInstance().onDynamicSensorsConnected(
                 convertToNewSensorInfos(dynamicSensorsAdded));
     }
 
     Return<void> onDynamicSensorsDisconnected(
-            const hidl_vec<int32_t> &dynamicSensorHandlesRemoved) override {
+            const hidl_vec<int32_t>& dynamicSensorHandlesRemoved) override {
         return SensorDevice::getInstance().onDynamicSensorsDisconnected(
                 dynamicSensorHandlesRemoved);
     }
 };
 
 SensorDevice::SensorDevice()
-        : mHidlTransportErrors(20),
-          mRestartWaiter(new HidlServiceRegistrationWaiter()),
-          mEventQueueFlag(nullptr),
-          mWakeLockQueueFlag(nullptr),
-          mReconnecting(false) {
+      : mHidlTransportErrors(20),
+        mRestartWaiter(new HidlServiceRegistrationWaiter()),
+        mEventQueueFlag(nullptr),
+        mWakeLockQueueFlag(nullptr),
+        mReconnecting(false) {
     if (!connectHidlService()) {
         return;
     }
@@ -132,61 +131,59 @@
 }
 
 void SensorDevice::initializeSensorList() {
-    checkReturn(mSensors->getSensorsList(
-            [&](const auto &list) {
-                const size_t count = list.size();
+    checkReturn(mSensors->getSensorsList([&](const auto& list) {
+        const size_t count = list.size();
 
-                mActivationCount.setCapacity(count);
-                Info model;
-                for (size_t i=0 ; i < count; i++) {
-                    sensor_t sensor;
-                    convertToSensor(convertToOldSensorInfo(list[i]), &sensor);
+        mActivationCount.setCapacity(count);
+        Info model;
+        for (size_t i = 0; i < count; i++) {
+            sensor_t sensor;
+            convertToSensor(convertToOldSensorInfo(list[i]), &sensor);
 
-                    if (sensor.type < static_cast<int>(SensorType::DEVICE_PRIVATE_BASE)) {
-                        sensor.resolution = SensorDeviceUtils::resolutionForSensor(sensor);
+            if (sensor.type < static_cast<int>(SensorType::DEVICE_PRIVATE_BASE)) {
+                sensor.resolution = SensorDeviceUtils::resolutionForSensor(sensor);
 
-                        // Some sensors don't have a default resolution and will be left at 0.
-                        // Don't crash in this case since CTS will verify that devices don't go to
-                        // production with a resolution of 0.
-                        if (sensor.resolution != 0) {
-                            float quantizedRange = sensor.maxRange;
-                            SensorDeviceUtils::quantizeValue(
-                                    &quantizedRange, sensor.resolution, /*factor=*/ 1);
-                            // Only rewrite maxRange if the requantization produced a "significant"
-                            // change, which is fairly arbitrarily defined as resolution / 8.
-                            // Smaller deltas are permitted, as they may simply be due to floating
-                            // point representation error, etc.
-                            if (fabsf(sensor.maxRange - quantizedRange) > sensor.resolution / 8) {
-                                ALOGW("%s's max range %.12f is not a multiple of the resolution "
-                                      "%.12f - updated to %.12f", sensor.name, sensor.maxRange,
-                                      sensor.resolution, quantizedRange);
-                                sensor.maxRange = quantizedRange;
-                            }
-                        } else {
-                            // Don't crash here or the device will go into a crashloop.
-                            ALOGW("%s should have a non-zero resolution", sensor.name);
-                        }
+                // Some sensors don't have a default resolution and will be left at 0.
+                // Don't crash in this case since CTS will verify that devices don't go to
+                // production with a resolution of 0.
+                if (sensor.resolution != 0) {
+                    float quantizedRange = sensor.maxRange;
+                    SensorDeviceUtils::quantizeValue(&quantizedRange, sensor.resolution,
+                                                     /*factor=*/1);
+                    // Only rewrite maxRange if the requantization produced a "significant"
+                    // change, which is fairly arbitrarily defined as resolution / 8.
+                    // Smaller deltas are permitted, as they may simply be due to floating
+                    // point representation error, etc.
+                    if (fabsf(sensor.maxRange - quantizedRange) > sensor.resolution / 8) {
+                        ALOGW("%s's max range %.12f is not a multiple of the resolution "
+                              "%.12f - updated to %.12f",
+                              sensor.name, sensor.maxRange, sensor.resolution, quantizedRange);
+                        sensor.maxRange = quantizedRange;
                     }
-
-                    // Check and clamp power if it is 0 (or close)
-                    constexpr float MIN_POWER_MA = 0.001; // 1 microAmp
-                    if (sensor.power < MIN_POWER_MA) {
-                        ALOGI("%s's reported power %f invalid, clamped to %f",
-                              sensor.name, sensor.power, MIN_POWER_MA);
-                        sensor.power = MIN_POWER_MA;
-                    }
-                    mSensorList.push_back(sensor);
-
-                    mActivationCount.add(list[i].sensorHandle, model);
-
-                    // Only disable all sensors on HAL 1.0 since HAL 2.0
-                    // handles this in its initialize method
-                    if (!mSensors->supportsMessageQueues()) {
-                        checkReturn(mSensors->activate(list[i].sensorHandle,
-                                    0 /* enabled */));
-                    }
+                } else {
+                    // Don't crash here or the device will go into a crashloop.
+                    ALOGW("%s should have a non-zero resolution", sensor.name);
                 }
-            }));
+            }
+
+            // Check and clamp power if it is 0 (or close)
+            constexpr float MIN_POWER_MA = 0.001; // 1 microAmp
+            if (sensor.power < MIN_POWER_MA) {
+                ALOGI("%s's reported power %f invalid, clamped to %f", sensor.name, sensor.power,
+                      MIN_POWER_MA);
+                sensor.power = MIN_POWER_MA;
+            }
+            mSensorList.push_back(sensor);
+
+            mActivationCount.add(list[i].sensorHandle, model);
+
+            // Only disable all sensors on HAL 1.0 since HAL 2.0
+            // handles this in its initialize method
+            if (!mSensors->supportsMessageQueues()) {
+                checkReturn(mSensors->activate(list[i].sensorHandle, 0 /* enabled */));
+            }
+        }
+    }));
 }
 
 SensorDevice::~SensorDevice() {
@@ -231,7 +228,7 @@
         // Poke ISensor service. If it has lingering connection from previous generation of
         // system server, it will kill itself. There is no intention to handle the poll result,
         // which will be done since the size is 0.
-        if(mSensors->poll(0, [](auto, const auto &, const auto &) {}).isOk()) {
+        if (mSensors->poll(0, [](auto, const auto&, const auto&) {}).isOk()) {
             // ok to continue
             connectionStatus = HalConnectionStatus::CONNECTED;
             break;
@@ -278,23 +275,22 @@
 SensorDevice::HalConnectionStatus SensorDevice::initializeHidlServiceV2_X() {
     HalConnectionStatus connectionStatus = HalConnectionStatus::UNKNOWN;
 
-    mWakeLockQueue = std::make_unique<WakeLockQueue>(
-            SensorEventQueue::MAX_RECEIVE_BUFFER_EVENT_COUNT,
-            true /* configureEventFlagWord */);
+    mWakeLockQueue =
+            std::make_unique<WakeLockQueue>(SensorEventQueue::MAX_RECEIVE_BUFFER_EVENT_COUNT,
+                                            true /* configureEventFlagWord */);
 
     hardware::EventFlag::deleteEventFlag(&mEventQueueFlag);
-    hardware::EventFlag::createEventFlag(mSensors->getEventQueue()->getEventFlagWord(), &mEventQueueFlag);
+    hardware::EventFlag::createEventFlag(mSensors->getEventQueue()->getEventFlagWord(),
+                                         &mEventQueueFlag);
 
     hardware::EventFlag::deleteEventFlag(&mWakeLockQueueFlag);
-    hardware::EventFlag::createEventFlag(mWakeLockQueue->getEventFlagWord(),
-                                            &mWakeLockQueueFlag);
+    hardware::EventFlag::createEventFlag(mWakeLockQueue->getEventFlagWord(), &mWakeLockQueueFlag);
 
-    CHECK(mSensors != nullptr && mWakeLockQueue != nullptr &&
-            mEventQueueFlag != nullptr && mWakeLockQueueFlag != nullptr);
+    CHECK(mSensors != nullptr && mWakeLockQueue != nullptr && mEventQueueFlag != nullptr &&
+          mWakeLockQueueFlag != nullptr);
 
-    status_t status = checkReturnAndGetStatus(mSensors->initialize(
-            *mWakeLockQueue->getDesc(),
-            new SensorsCallback()));
+    status_t status = checkReturnAndGetStatus(
+            mSensors->initialize(*mWakeLockQueue->getDesc(), new SensorsCallback()));
 
     if (status != NO_ERROR) {
         connectionStatus = HalConnectionStatus::FAILED_TO_CONNECT;
@@ -326,7 +322,7 @@
     mActivationCount.clear();
     mSensorList.clear();
 
-    if (connectHidlServiceV2_0() == HalConnectionStatus::CONNECTED) {
+    if (connectHidlService()) {
         initializeSensorList();
 
         if (sensorHandlesChanged(previousSensorList, mSensorList)) {
@@ -338,8 +334,8 @@
     mReconnecting = false;
 }
 
-bool SensorDevice::sensorHandlesChanged(const Vector<sensor_t>& oldSensorList,
-                                        const Vector<sensor_t>& newSensorList) {
+bool SensorDevice::sensorHandlesChanged(const std::vector<sensor_t>& oldSensorList,
+                                        const std::vector<sensor_t>& newSensorList) {
     bool didChange = false;
 
     if (oldSensorList.size() != newSensorList.size()) {
@@ -375,19 +371,17 @@
 bool SensorDevice::sensorIsEquivalent(const sensor_t& prevSensor, const sensor_t& newSensor) {
     bool equivalent = true;
     if (prevSensor.handle != newSensor.handle ||
-            (strcmp(prevSensor.vendor, newSensor.vendor) != 0) ||
-            (strcmp(prevSensor.stringType, newSensor.stringType) != 0) ||
-            (strcmp(prevSensor.requiredPermission, newSensor.requiredPermission) != 0) ||
-            (prevSensor.version != newSensor.version) ||
-            (prevSensor.type != newSensor.type) ||
-            (std::abs(prevSensor.maxRange - newSensor.maxRange) > 0.001f) ||
-            (std::abs(prevSensor.resolution - newSensor.resolution) > 0.001f) ||
-            (std::abs(prevSensor.power - newSensor.power) > 0.001f) ||
-            (prevSensor.minDelay != newSensor.minDelay) ||
-            (prevSensor.fifoReservedEventCount != newSensor.fifoReservedEventCount) ||
-            (prevSensor.fifoMaxEventCount != newSensor.fifoMaxEventCount) ||
-            (prevSensor.maxDelay != newSensor.maxDelay) ||
-            (prevSensor.flags != newSensor.flags)) {
+        (strcmp(prevSensor.vendor, newSensor.vendor) != 0) ||
+        (strcmp(prevSensor.stringType, newSensor.stringType) != 0) ||
+        (strcmp(prevSensor.requiredPermission, newSensor.requiredPermission) != 0) ||
+        (prevSensor.version != newSensor.version) || (prevSensor.type != newSensor.type) ||
+        (std::abs(prevSensor.maxRange - newSensor.maxRange) > 0.001f) ||
+        (std::abs(prevSensor.resolution - newSensor.resolution) > 0.001f) ||
+        (std::abs(prevSensor.power - newSensor.power) > 0.001f) ||
+        (prevSensor.minDelay != newSensor.minDelay) ||
+        (prevSensor.fifoReservedEventCount != newSensor.fifoReservedEventCount) ||
+        (prevSensor.fifoMaxEventCount != newSensor.fifoMaxEventCount) ||
+        (prevSensor.maxDelay != newSensor.maxDelay) || (prevSensor.flags != newSensor.flags)) {
         equivalent = false;
     }
     return equivalent;
@@ -405,7 +399,7 @@
         for (size_t j = 0; j < info.batchParams.size(); j++) {
             const BatchParams& batchParams = info.batchParams[j];
             status_t res = batchLocked(info.batchParams.keyAt(j), handle, 0 /* flags */,
-                    batchParams.mTSample, batchParams.mTBatch);
+                                       batchParams.mTSample, batchParams.mTBatch);
 
             if (res == NO_ERROR) {
                 activateLocked(info.batchParams.keyAt(j), handle, true /* enabled */);
@@ -433,7 +427,7 @@
                         mSensorList.size(), mActivationCount.size(), mDisabledClients.size());
 
     Mutex::Autolock _l(mLock);
-    for (const auto & s : mSensorList) {
+    for (const auto& s : mSensorList) {
         int32_t handle = s.handle;
         const Info& info = mActivationCount.valueFor(handle);
         if (info.numActiveClients() == 0) continue;
@@ -444,8 +438,9 @@
         for (size_t j = 0; j < info.batchParams.size(); j++) {
             const BatchParams& params = info.batchParams[j];
             result.appendFormat("%.1f%s%s", params.mTSample / 1e6f,
-                isClientDisabledLocked(info.batchParams.keyAt(j)) ? "(disabled)" : "",
-                (j < info.batchParams.size() - 1) ? ", " : "");
+                                isClientDisabledLocked(info.batchParams.keyAt(j)) ? "(disabled)"
+                                                                                  : "",
+                                (j < info.batchParams.size() - 1) ? ", " : "");
         }
         result.appendFormat("}, selected = %.2f ms; ", info.bestBatchParams.mTSample / 1e6f);
 
@@ -453,8 +448,9 @@
         for (size_t j = 0; j < info.batchParams.size(); j++) {
             const BatchParams& params = info.batchParams[j];
             result.appendFormat("%.1f%s%s", params.mTBatch / 1e6f,
-                    isClientDisabledLocked(info.batchParams.keyAt(j)) ? "(disabled)" : "",
-                    (j < info.batchParams.size() - 1) ? ", " : "");
+                                isClientDisabledLocked(info.batchParams.keyAt(j)) ? "(disabled)"
+                                                                                  : "",
+                                (j < info.batchParams.size() - 1) ? ", " : "");
         }
         result.appendFormat("}, selected = %.2f ms\n", info.bestBatchParams.mTBatch / 1e6f);
     }
@@ -472,29 +468,29 @@
 void SensorDevice::dump(ProtoOutputStream* proto) const {
     using namespace service::SensorDeviceProto;
     if (mSensors == nullptr) {
-        proto->write(INITIALIZED , false);
+        proto->write(INITIALIZED, false);
         return;
     }
-    proto->write(INITIALIZED , true);
-    proto->write(TOTAL_SENSORS , int(mSensorList.size()));
-    proto->write(ACTIVE_SENSORS , int(mActivationCount.size()));
+    proto->write(INITIALIZED, true);
+    proto->write(TOTAL_SENSORS, int(mSensorList.size()));
+    proto->write(ACTIVE_SENSORS, int(mActivationCount.size()));
 
     Mutex::Autolock _l(mLock);
-    for (const auto & s : mSensorList) {
+    for (const auto& s : mSensorList) {
         int32_t handle = s.handle;
         const Info& info = mActivationCount.valueFor(handle);
         if (info.numActiveClients() == 0) continue;
 
         uint64_t token = proto->start(SENSORS);
-        proto->write(SensorProto::HANDLE , handle);
-        proto->write(SensorProto::ACTIVE_COUNT , int(info.batchParams.size()));
+        proto->write(SensorProto::HANDLE, handle);
+        proto->write(SensorProto::ACTIVE_COUNT, int(info.batchParams.size()));
         for (size_t j = 0; j < info.batchParams.size(); j++) {
             const BatchParams& params = info.batchParams[j];
-            proto->write(SensorProto::SAMPLING_PERIOD_MS , params.mTSample / 1e6f);
-            proto->write(SensorProto::BATCHING_PERIOD_MS , params.mTBatch / 1e6f);
+            proto->write(SensorProto::SAMPLING_PERIOD_MS, params.mTSample / 1e6f);
+            proto->write(SensorProto::BATCHING_PERIOD_MS, params.mTBatch / 1e6f);
         }
-        proto->write(SensorProto::SAMPLING_PERIOD_SELECTED , info.bestBatchParams.mTSample / 1e6f);
-        proto->write(SensorProto::BATCHING_PERIOD_SELECTED , info.bestBatchParams.mTBatch / 1e6f);
+        proto->write(SensorProto::SAMPLING_PERIOD_SELECTED, info.bestBatchParams.mTSample / 1e6f);
+        proto->write(SensorProto::BATCHING_PERIOD_SELECTED, info.bestBatchParams.mTBatch / 1e6f);
         proto->end(token);
     }
 }
@@ -531,20 +527,19 @@
 
     do {
         auto ret = mSensors->poll(
-                count,
-                [&](auto result,
-                    const auto &events,
-                    const auto &dynamicSensorsAdded) {
+                count, [&](auto result, const auto& events, const auto& dynamicSensorsAdded) {
                     if (result == Result::OK) {
                         convertToSensorEventsAndQuantize(convertToNewEvents(events),
-                                convertToNewSensorInfos(dynamicSensorsAdded), buffer);
+                                                         convertToNewSensorInfos(
+                                                                 dynamicSensorsAdded),
+                                                         buffer);
                         err = (ssize_t)events.size();
                     } else {
                         err = statusFromResult(result);
                     }
                 });
 
-        if (ret.isOk())  {
+        if (ret.isOk()) {
             hidlTransportError = false;
         } else {
             hidlTransportError = true;
@@ -559,7 +554,7 @@
         }
     } while (hidlTransportError);
 
-    if(numHidlTransportErrors > 0) {
+    if (numHidlTransportErrors > 0) {
         ALOGE("Saw %d Hidl transport failures", numHidlTransportErrors);
         HidlTransportErrorLog errLog(time(nullptr), numHidlTransportErrors);
         mHidlTransportErrors.add(errLog);
@@ -581,7 +576,8 @@
         // events is not available, then read() would return no events, possibly introducing
         // additional latency in delivering events to applications.
         mEventQueueFlag->wait(asBaseType(EventQueueFlagBits::READ_AND_PROCESS) |
-                              asBaseType(INTERNAL_WAKE), &eventFlagState);
+                                      asBaseType(INTERNAL_WAKE),
+                              &eventFlagState);
         availableEvents = mSensors->getEventQueue()->availableToRead();
 
         if ((eventFlagState & asBaseType(INTERNAL_WAKE)) && mReconnecting) {
@@ -600,12 +596,13 @@
             for (size_t i = 0; i < eventsToRead; i++) {
                 convertToSensorEvent(mEventBuffer[i], &buffer[i]);
                 android::SensorDeviceUtils::quantizeSensorEventValues(&buffer[i],
-                        getResolutionForSensor(buffer[i].sensor));
+                                                                      getResolutionForSensor(
+                                                                              buffer[i].sensor));
             }
             eventsRead = eventsToRead;
         } else {
-            ALOGW("Failed to read %zu events, currently %zu events available",
-                    eventsToRead, availableEvents);
+            ALOGW("Failed to read %zu events, currently %zu events available", eventsToRead,
+                  availableEvents);
         }
     }
 
@@ -613,22 +610,21 @@
 }
 
 Return<void> SensorDevice::onDynamicSensorsConnected(
-        const hidl_vec<SensorInfo> &dynamicSensorsAdded) {
+        const hidl_vec<SensorInfo>& dynamicSensorsAdded) {
     std::unique_lock<std::mutex> lock(mDynamicSensorsMutex);
 
     // Allocate a sensor_t structure for each dynamic sensor added and insert
     // it into the dictionary of connected dynamic sensors keyed by handle.
     for (size_t i = 0; i < dynamicSensorsAdded.size(); ++i) {
-        const SensorInfo &info = dynamicSensorsAdded[i];
+        const SensorInfo& info = dynamicSensorsAdded[i];
 
         auto it = mConnectedDynamicSensors.find(info.sensorHandle);
         CHECK(it == mConnectedDynamicSensors.end());
 
-        sensor_t *sensor = new sensor_t();
+        sensor_t* sensor = new sensor_t();
         convertToSensor(convertToOldSensorInfo(info), sensor);
 
-        mConnectedDynamicSensors.insert(
-                std::make_pair(sensor->handle, sensor));
+        mConnectedDynamicSensors.insert(std::make_pair(sensor->handle, sensor));
     }
 
     mDynamicSensorsCv.notify_all();
@@ -637,8 +633,8 @@
 }
 
 Return<void> SensorDevice::onDynamicSensorsDisconnected(
-        const hidl_vec<int32_t> &dynamicSensorHandlesRemoved) {
-    (void) dynamicSensorHandlesRemoved;
+        const hidl_vec<int32_t>& dynamicSensorHandlesRemoved) {
+    (void)dynamicSensorHandlesRemoved;
     // TODO: Currently dynamic sensors do not seem to be removed
     return Return<void>();
 }
@@ -653,7 +649,7 @@
     }
 }
 
-void SensorDevice::autoDisable(void *ident, int handle) {
+void SensorDevice::autoDisable(void* ident, int handle) {
     Mutex::Autolock _l(mLock);
     ssize_t activationIndex = mActivationCount.indexOfKey(handle);
     if (activationIndex < 0) {
@@ -687,15 +683,15 @@
     Info& info(mActivationCount.editValueAt(activationIndex));
 
     ALOGD_IF(DEBUG_CONNECTIONS,
-             "SensorDevice::activate: ident=%p, handle=0x%08x, enabled=%d, count=%zu",
-             ident, handle, enabled, info.batchParams.size());
+             "SensorDevice::activate: ident=%p, handle=0x%08x, enabled=%d, count=%zu", ident,
+             handle, enabled, info.batchParams.size());
 
     if (enabled) {
         ALOGD_IF(DEBUG_CONNECTIONS, "enable index=%zd", info.batchParams.indexOfKey(ident));
 
         if (isClientDisabledLocked(ident)) {
-            ALOGW("SensorDevice::activate, isClientDisabledLocked(%p):true, handle:%d",
-                    ident, handle);
+            ALOGW("SensorDevice::activate, isClientDisabledLocked(%p):true, handle:%d", ident,
+                  handle);
             return NO_ERROR;
         }
 
@@ -726,11 +722,10 @@
                 // Call batch for this sensor with the previously calculated best effort
                 // batch_rate and timeout. One of the apps has unregistered for sensor
                 // events, and the best effort batch parameters might have changed.
-                ALOGD_IF(DEBUG_CONNECTIONS,
-                         "\t>>> actuating h/w batch 0x%08x %" PRId64 " %" PRId64, handle,
-                         info.bestBatchParams.mTSample, info.bestBatchParams.mTBatch);
-                checkReturn(mSensors->batch(
-                        handle, info.bestBatchParams.mTSample, info.bestBatchParams.mTBatch));
+                ALOGD_IF(DEBUG_CONNECTIONS, "\t>>> actuating h/w batch 0x%08x %" PRId64 " %" PRId64,
+                         handle, info.bestBatchParams.mTSample, info.bestBatchParams.mTBatch);
+                checkReturn(mSensors->batch(handle, info.bestBatchParams.mTSample,
+                                            info.bestBatchParams.mTBatch));
             }
         } else {
             // sensor wasn't enabled for this ident
@@ -767,12 +762,8 @@
     return err;
 }
 
-status_t SensorDevice::batch(
-        void* ident,
-        int handle,
-        int flags,
-        int64_t samplingPeriodNs,
-        int64_t maxBatchReportLatencyNs) {
+status_t SensorDevice::batch(void* ident, int handle, int flags, int64_t samplingPeriodNs,
+                             int64_t maxBatchReportLatencyNs) {
     if (mSensors == nullptr) return NO_INIT;
 
     if (samplingPeriodNs < MINIMUM_EVENTS_PERIOD) {
@@ -783,7 +774,8 @@
     }
 
     ALOGD_IF(DEBUG_CONNECTIONS,
-             "SensorDevice::batch: ident=%p, handle=0x%08x, flags=%d, period_ns=%" PRId64 " timeout=%" PRId64,
+             "SensorDevice::batch: ident=%p, handle=0x%08x, flags=%d, period_ns=%" PRId64
+             " timeout=%" PRId64,
              ident, handle, flags, samplingPeriodNs, maxBatchReportLatencyNs);
 
     Mutex::Autolock _l(mLock);
@@ -807,25 +799,24 @@
         info.setBatchParamsForIdent(ident, flags, samplingPeriodNs, maxBatchReportLatencyNs);
     }
 
-    status_t err =  updateBatchParamsLocked(handle, info);
+    status_t err = updateBatchParamsLocked(handle, info);
     if (err != NO_ERROR) {
-        ALOGE("sensor batch failed %p 0x%08x %" PRId64 " %" PRId64 " err=%s",
-              mSensors.get(), handle, info.bestBatchParams.mTSample,
-              info.bestBatchParams.mTBatch, strerror(-err));
+        ALOGE("sensor batch failed %p 0x%08x %" PRId64 " %" PRId64 " err=%s", mSensors.get(),
+              handle, info.bestBatchParams.mTSample, info.bestBatchParams.mTBatch, strerror(-err));
         info.removeBatchParamsForIdent(ident);
     }
 
     return err;
 }
 
-status_t SensorDevice::updateBatchParamsLocked(int handle, Info &info) {
+status_t SensorDevice::updateBatchParamsLocked(int handle, Info& info) {
     BatchParams prevBestBatchParams = info.bestBatchParams;
     // Find the minimum of all timeouts and batch_rates for this sensor.
     info.selectBatchParams();
 
     ALOGD_IF(DEBUG_CONNECTIONS,
-             "\t>>> curr_period=%" PRId64 " min_period=%" PRId64
-             " curr_timeout=%" PRId64 " min_timeout=%" PRId64,
+             "\t>>> curr_period=%" PRId64 " min_period=%" PRId64 " curr_timeout=%" PRId64
+             " min_timeout=%" PRId64,
              prevBestBatchParams.mTSample, info.bestBatchParams.mTSample,
              prevBestBatchParams.mTBatch, info.bestBatchParams.mTBatch);
 
@@ -834,8 +825,8 @@
     if (prevBestBatchParams != info.bestBatchParams && info.numActiveClients() > 0) {
         ALOGD_IF(DEBUG_CONNECTIONS, "\t>>> actuating h/w BATCH 0x%08x %" PRId64 " %" PRId64, handle,
                  info.bestBatchParams.mTSample, info.bestBatchParams.mTBatch);
-        err = checkReturnAndGetStatus(mSensors->batch(
-                handle, info.bestBatchParams.mTSample, info.bestBatchParams.mTBatch));
+        err = checkReturnAndGetStatus(mSensors->batch(handle, info.bestBatchParams.mTSample,
+                                                      info.bestBatchParams.mTBatch));
     }
 
     return err;
@@ -866,8 +857,8 @@
     return mDisabledClients.count(ident) > 0;
 }
 
-std::vector<void *> SensorDevice::getDisabledClientsLocked() const {
-    std::vector<void *> vec;
+std::vector<void*> SensorDevice::getDisabledClientsLocked() const {
+    std::vector<void*> vec;
     for (const auto& it : mDisabledClients) {
         vec.push_back(it.first);
     }
@@ -896,7 +887,7 @@
         addDisabledReasonForIdentLocked(ident, DisabledReason::DISABLED_REASON_UID_IDLE);
     }
 
-    for (size_t i = 0; i< mActivationCount.size(); ++i) {
+    for (size_t i = 0; i < mActivationCount.size(); ++i) {
         int handle = mActivationCount.keyAt(i);
         Info& info = mActivationCount.editValueAt(i);
 
@@ -905,8 +896,7 @@
             bool disable = info.numActiveClients() == 0 && info.isActive;
             bool enable = info.numActiveClients() > 0 && !info.isActive;
 
-            if ((enable || disable) &&
-                doActivateHardwareLocked(handle, enable) == NO_ERROR) {
+            if ((enable || disable) && doActivateHardwareLocked(handle, enable) == NO_ERROR) {
                 info.isActive = enable;
             }
         }
@@ -941,22 +931,21 @@
     if (mSensors == nullptr) return;
     Mutex::Autolock _l(mLock);
 
-    for (void *client : getDisabledClientsLocked()) {
-        removeDisabledReasonForIdentLocked(
-            client, DisabledReason::DISABLED_REASON_SERVICE_RESTRICTED);
+    for (void* client : getDisabledClientsLocked()) {
+        removeDisabledReasonForIdentLocked(client,
+                                           DisabledReason::DISABLED_REASON_SERVICE_RESTRICTED);
     }
 
-    for (size_t i = 0; i< mActivationCount.size(); ++i) {
+    for (size_t i = 0; i < mActivationCount.size(); ++i) {
         Info& info = mActivationCount.editValueAt(i);
         if (info.batchParams.isEmpty()) continue;
         info.selectBatchParams();
         const int sensor_handle = mActivationCount.keyAt(i);
         ALOGD_IF(DEBUG_CONNECTIONS, "\t>> reenable actuating h/w sensor enable handle=%d ",
-                   sensor_handle);
-        status_t err = checkReturnAndGetStatus(mSensors->batch(
-                sensor_handle,
-                info.bestBatchParams.mTSample,
-                info.bestBatchParams.mTBatch));
+                 sensor_handle);
+        status_t err = checkReturnAndGetStatus(mSensors->batch(sensor_handle,
+                                                               info.bestBatchParams.mTSample,
+                                                               info.bestBatchParams.mTBatch));
         ALOGE_IF(err, "Error calling batch on sensor %d (%s)", sensor_handle, strerror(-err));
 
         if (err == NO_ERROR) {
@@ -973,38 +962,36 @@
 void SensorDevice::disableAllSensors() {
     if (mSensors == nullptr) return;
     Mutex::Autolock _l(mLock);
-    for (size_t i = 0; i< mActivationCount.size(); ++i) {
+    for (size_t i = 0; i < mActivationCount.size(); ++i) {
         Info& info = mActivationCount.editValueAt(i);
         // Check if this sensor has been activated previously and disable it.
         if (info.batchParams.size() > 0) {
-           const int sensor_handle = mActivationCount.keyAt(i);
-           ALOGD_IF(DEBUG_CONNECTIONS, "\t>> actuating h/w sensor disable handle=%d ",
-                   sensor_handle);
-           checkReturn(mSensors->activate(sensor_handle, 0 /* enabled */));
+            const int sensor_handle = mActivationCount.keyAt(i);
+            ALOGD_IF(DEBUG_CONNECTIONS, "\t>> actuating h/w sensor disable handle=%d ",
+                     sensor_handle);
+            checkReturn(mSensors->activate(sensor_handle, 0 /* enabled */));
 
-           // Add all the connections that were registered for this sensor to the disabled
-           // clients list.
-           for (size_t j = 0; j < info.batchParams.size(); ++j) {
-               addDisabledReasonForIdentLocked(
-                   info.batchParams.keyAt(j), DisabledReason::DISABLED_REASON_SERVICE_RESTRICTED);
-               ALOGI("added %p to mDisabledClients", info.batchParams.keyAt(j));
-           }
+            // Add all the connections that were registered for this sensor to the disabled
+            // clients list.
+            for (size_t j = 0; j < info.batchParams.size(); ++j) {
+                addDisabledReasonForIdentLocked(info.batchParams.keyAt(j),
+                                                DisabledReason::DISABLED_REASON_SERVICE_RESTRICTED);
+                ALOGI("added %p to mDisabledClients", info.batchParams.keyAt(j));
+            }
 
-           info.isActive = false;
+            info.isActive = false;
         }
     }
 }
 
-status_t SensorDevice::injectSensorData(
-        const sensors_event_t *injected_sensor_event) {
+status_t SensorDevice::injectSensorData(const sensors_event_t* injected_sensor_event) {
     if (mSensors == nullptr) return NO_INIT;
     ALOGD_IF(DEBUG_CONNECTIONS,
-            "sensor_event handle=%d ts=%" PRId64 " data=%.2f, %.2f, %.2f %.2f %.2f %.2f",
-            injected_sensor_event->sensor,
-            injected_sensor_event->timestamp, injected_sensor_event->data[0],
-            injected_sensor_event->data[1], injected_sensor_event->data[2],
-            injected_sensor_event->data[3], injected_sensor_event->data[4],
-            injected_sensor_event->data[5]);
+             "sensor_event handle=%d ts=%" PRId64 " data=%.2f, %.2f, %.2f %.2f %.2f %.2f",
+             injected_sensor_event->sensor, injected_sensor_event->timestamp,
+             injected_sensor_event->data[0], injected_sensor_event->data[1],
+             injected_sensor_event->data[2], injected_sensor_event->data[3],
+             injected_sensor_event->data[4], injected_sensor_event->data[5]);
 
     Event ev;
     V2_1::implementation::convertFromSensorEvent(*injected_sensor_event, &ev);
@@ -1014,8 +1001,8 @@
 
 status_t SensorDevice::setMode(uint32_t mode) {
     if (mSensors == nullptr) return NO_INIT;
-    return checkReturnAndGetStatus(mSensors->setOperationMode(
-            static_cast<hardware::sensors::V1_0::OperationMode>(mode)));
+    return checkReturnAndGetStatus(
+            mSensors->setOperationMode(static_cast<hardware::sensors::V1_0::OperationMode>(mode)));
 }
 
 int32_t SensorDevice::registerDirectChannel(const sensors_direct_mem_t* memory) {
@@ -1041,21 +1028,20 @@
     format = SharedMemFormat::SENSORS_EVENT;
 
     SharedMemInfo mem = {
-        .type = type,
-        .format = format,
-        .size = static_cast<uint32_t>(memory->size),
-        .memoryHandle = memory->handle,
+            .type = type,
+            .format = format,
+            .size = static_cast<uint32_t>(memory->size),
+            .memoryHandle = memory->handle,
     };
 
     int32_t ret;
-    checkReturn(mSensors->registerDirectChannel(mem,
-            [&ret](auto result, auto channelHandle) {
-                if (result == Result::OK) {
-                    ret = channelHandle;
-                } else {
-                    ret = statusFromResult(result);
-                }
-            }));
+    checkReturn(mSensors->registerDirectChannel(mem, [&ret](auto result, auto channelHandle) {
+        if (result == Result::OK) {
+            ret = channelHandle;
+        } else {
+            ret = statusFromResult(result);
+        }
+    }));
     return ret;
 }
 
@@ -1065,13 +1051,13 @@
     checkReturn(mSensors->unregisterDirectChannel(channelHandle));
 }
 
-int32_t SensorDevice::configureDirectChannel(int32_t sensorHandle,
-        int32_t channelHandle, const struct sensors_direct_cfg_t *config) {
+int32_t SensorDevice::configureDirectChannel(int32_t sensorHandle, int32_t channelHandle,
+                                             const struct sensors_direct_cfg_t* config) {
     if (mSensors == nullptr) return NO_INIT;
     Mutex::Autolock _l(mLock);
 
     RateLevel rate;
-    switch(config->rate_level) {
+    switch (config->rate_level) {
         case SENSOR_DIRECT_RATE_STOP:
             rate = RateLevel::STOP;
             break;
@@ -1090,17 +1076,17 @@
 
     int32_t ret;
     checkReturn(mSensors->configDirectReport(sensorHandle, channelHandle, rate,
-            [&ret, rate] (auto result, auto token) {
-                if (rate == RateLevel::STOP) {
-                    ret = statusFromResult(result);
-                } else {
-                    if (result == Result::OK) {
-                        ret = token;
-                    } else {
-                        ret = statusFromResult(result);
-                    }
-                }
-            }));
+                                             [&ret, rate](auto result, auto token) {
+                                                 if (rate == RateLevel::STOP) {
+                                                     ret = statusFromResult(result);
+                                                 } else {
+                                                     if (result == Result::OK) {
+                                                         ret = token;
+                                                     } else {
+                                                         ret = statusFromResult(result);
+                                                     }
+                                                 }
+                                             }));
 
     return ret;
 }
@@ -1118,13 +1104,12 @@
     return num;
 }
 
-status_t SensorDevice::Info::setBatchParamsForIdent(void* ident, int,
-                                                    int64_t samplingPeriodNs,
+status_t SensorDevice::Info::setBatchParamsForIdent(void* ident, int, int64_t samplingPeriodNs,
                                                     int64_t maxBatchReportLatencyNs) {
     ssize_t index = batchParams.indexOfKey(ident);
     if (index < 0) {
-        ALOGE("Info::setBatchParamsForIdent(ident=%p, period_ns=%" PRId64
-              " timeout=%" PRId64 ") failed (%s)",
+        ALOGE("Info::setBatchParamsForIdent(ident=%p, period_ns=%" PRId64 " timeout=%" PRId64
+              ") failed (%s)",
               ident, samplingPeriodNs, maxBatchReportLatencyNs, strerror(-index));
         return BAD_INDEX;
     }
@@ -1168,12 +1153,11 @@
     return mIsDirectReportSupported;
 }
 
-void SensorDevice::convertToSensorEvent(
-        const Event &src, sensors_event_t *dst) {
+void SensorDevice::convertToSensorEvent(const Event& src, sensors_event_t* dst) {
     V2_1::implementation::convertToSensorEvent(src, dst);
 
     if (src.sensorType == V2_1::SensorType::DYNAMIC_SENSOR_META) {
-        const DynamicSensorInfo &dyn = src.u.dynamic;
+        const DynamicSensorInfo& dyn = src.u.dynamic;
 
         dst->dynamic_sensor_meta.connected = dyn.connected;
         dst->dynamic_sensor_meta.handle = dyn.sensorHandle;
@@ -1184,10 +1168,9 @@
             // marks it as oneway.
             auto it = mConnectedDynamicSensors.find(dyn.sensorHandle);
             if (it == mConnectedDynamicSensors.end()) {
-                mDynamicSensorsCv.wait_for(lock, MAX_DYN_SENSOR_WAIT,
-                        [&, dyn]{
-                            return mConnectedDynamicSensors.find(dyn.sensorHandle)
-                                    != mConnectedDynamicSensors.end();
+                mDynamicSensorsCv.wait_for(lock, MAX_DYN_SENSOR_WAIT, [&, dyn] {
+                    return mConnectedDynamicSensors.find(dyn.sensorHandle) !=
+                            mConnectedDynamicSensors.end();
                 });
                 it = mConnectedDynamicSensors.find(dyn.sensorHandle);
                 CHECK(it != mConnectedDynamicSensors.end());
@@ -1195,18 +1178,15 @@
 
             dst->dynamic_sensor_meta.sensor = it->second;
 
-            memcpy(dst->dynamic_sensor_meta.uuid,
-                   dyn.uuid.data(),
+            memcpy(dst->dynamic_sensor_meta.uuid, dyn.uuid.data(),
                    sizeof(dst->dynamic_sensor_meta.uuid));
         }
     }
 }
 
-void SensorDevice::convertToSensorEventsAndQuantize(
-        const hidl_vec<Event> &src,
-        const hidl_vec<SensorInfo> &dynamicSensorsAdded,
-        sensors_event_t *dst) {
-
+void SensorDevice::convertToSensorEventsAndQuantize(const hidl_vec<Event>& src,
+                                                    const hidl_vec<SensorInfo>& dynamicSensorsAdded,
+                                                    sensors_event_t* dst) {
     if (dynamicSensorsAdded.size() > 0) {
         onDynamicSensorsConnected(dynamicSensorsAdded);
     }
@@ -1214,26 +1194,27 @@
     for (size_t i = 0; i < src.size(); ++i) {
         V2_1::implementation::convertToSensorEvent(src[i], &dst[i]);
         android::SensorDeviceUtils::quantizeSensorEventValues(&dst[i],
-                getResolutionForSensor(dst[i].sensor));
+                                                              getResolutionForSensor(
+                                                                      dst[i].sensor));
     }
 }
 
 float SensorDevice::getResolutionForSensor(int sensorHandle) {
     for (size_t i = 0; i < mSensorList.size(); i++) {
-      if (sensorHandle == mSensorList[i].handle) {
-        return mSensorList[i].resolution;
-      }
+        if (sensorHandle == mSensorList[i].handle) {
+            return mSensorList[i].resolution;
+        }
     }
 
     auto it = mConnectedDynamicSensors.find(sensorHandle);
     if (it != mConnectedDynamicSensors.end()) {
-      return it->second->resolution;
+        return it->second->resolution;
     }
 
     return 0;
 }
 
-void SensorDevice::handleHidlDeath(const std::string & detail) {
+void SensorDevice::handleHidlDeath(const std::string& detail) {
     if (!mSensors->supportsMessageQueues()) {
         // restart is the only option at present.
         LOG_ALWAYS_FATAL("Abort due to ISensors hidl service failure, detail: %s.", detail.c_str());
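
convertToSensorEventsAndQuantize() above snaps every converted event to the resolution reported by the owning sensor, with getResolutionForSensor() falling back to 0 when the handle is unknown. A rough, self-contained sketch of what quantizing to a resolution usually amounts to; the real SensorDeviceUtils helper may handle per-type value counts and edge cases differently:

#include <cmath>
#include <cstdio>

// Hypothetical stand-in for the quantization step: snap a raw reading to the
// nearest multiple of the sensor's reported resolution. A resolution of 0
// (unknown sensor) leaves the value untouched, matching the fallback above.
float quantizeToResolution(float value, float resolution) {
    if (resolution == 0.0f) return value;
    return std::round(value / resolution) * resolution;
}

int main() {
    std::printf("%.4f\n", quantizeToResolution(9.8137f, 0.01f)); // 9.8100
    std::printf("%.4f\n", quantizeToResolution(9.8137f, 0.0f));  // unchanged
    return 0;
}
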
diff --git a/services/sensorservice/SensorDevice.h b/services/sensorservice/SensorDevice.h
index bc8d20f..2f24e5f 100644
--- a/services/sensorservice/SensorDevice.h
+++ b/services/sensorservice/SensorDevice.h
@@ -17,14 +17,14 @@
 #ifndef ANDROID_SENSOR_DEVICE_H
 #define ANDROID_SENSOR_DEVICE_H
 
+#include "ISensorsWrapper.h"
 #include "SensorDeviceUtils.h"
 #include "SensorService.h"
 #include "SensorServiceUtils.h"
-#include "ISensorsWrapper.h"
 
 #include <fmq/MessageQueue.h>
-#include <sensor/SensorEventQueue.h>
 #include <sensor/Sensor.h>
+#include <sensor/SensorEventQueue.h>
 #include <stdint.h>
 #include <sys/types.h>
 #include <utils/KeyedVector.h>
@@ -32,9 +32,10 @@
 #include <utils/String8.h>
 #include <utils/Timers.h>
 
+#include <algorithm> //std::max std::min
 #include <string>
 #include <unordered_map>
-#include <algorithm> //std::max std::min
+#include <vector>
 
 #include "RingBuffer.h"
 
@@ -48,12 +49,10 @@
                              const wp<::android::hidl::base::V1_0::IBase>& service) override;
 };
 
-class SensorDevice : public Singleton<SensorDevice>,
-                     public SensorServiceUtil::Dumpable {
+class SensorDevice : public Singleton<SensorDevice>, public SensorServiceUtil::Dumpable {
 public:
     class HidlTransportErrorLog {
-     public:
-
+    public:
         HidlTransportErrorLog() {
             mTs = 0;
             mCount = 0;
@@ -66,7 +65,7 @@
 
         String8 toString() const {
             String8 result;
-            struct tm *timeInfo = localtime(&mTs);
+            struct tm* timeInfo = localtime(&mTs);
             result.appendFormat("%02d:%02d:%02d :: %d", timeInfo->tm_hour, timeInfo->tm_min,
                                 timeInfo->tm_sec, mCount);
             return result;
@@ -74,7 +73,7 @@
 
     private:
         time_t mTs; // timestamp of the error
-        int mCount;   // number of transport errors observed
+        int mCount; // number of transport errors observed
     };
 
     ~SensorDevice();
@@ -99,29 +98,27 @@
     status_t setMode(uint32_t mode);
 
     bool isDirectReportSupported() const;
-    int32_t registerDirectChannel(const sensors_direct_mem_t *memory);
+    int32_t registerDirectChannel(const sensors_direct_mem_t* memory);
     void unregisterDirectChannel(int32_t channelHandle);
-    int32_t configureDirectChannel(int32_t sensorHandle,
-            int32_t channelHandle, const struct sensors_direct_cfg_t *config);
+    int32_t configureDirectChannel(int32_t sensorHandle, int32_t channelHandle,
+                                   const struct sensors_direct_cfg_t* config);
 
     void disableAllSensors();
     void enableAllSensors();
-    void autoDisable(void *ident, int handle);
+    void autoDisable(void* ident, int handle);
 
-    status_t injectSensorData(const sensors_event_t *event);
-    void notifyConnectionDestroyed(void *ident);
+    status_t injectSensorData(const sensors_event_t* event);
+    void notifyConnectionDestroyed(void* ident);
 
     using Result = ::android::hardware::sensors::V1_0::Result;
     hardware::Return<void> onDynamicSensorsConnected(
-            const hardware::hidl_vec<hardware::sensors::V2_1::SensorInfo> &dynamicSensorsAdded);
+            const hardware::hidl_vec<hardware::sensors::V2_1::SensorInfo>& dynamicSensorsAdded);
     hardware::Return<void> onDynamicSensorsDisconnected(
-            const hardware::hidl_vec<int32_t> &dynamicSensorHandlesRemoved);
+            const hardware::hidl_vec<int32_t>& dynamicSensorHandlesRemoved);
 
     void setUidStateForConnection(void* ident, SensorService::UidState state);
 
-    bool isReconnecting() const {
-        return mReconnecting;
-    }
+    bool isReconnecting() const { return mReconnecting; }
 
     bool isSensorActive(int handle) const;
 
@@ -132,11 +129,12 @@
     // Dumpable
     virtual std::string dump() const override;
     virtual void dump(util::ProtoOutputStream* proto) const override;
+
 private:
     friend class Singleton<SensorDevice>;
 
     sp<::android::hardware::sensors::V2_1::implementation::ISensorsWrapperBase> mSensors;
-    Vector<sensor_t> mSensorList;
+    std::vector<sensor_t> mSensorList;
     std::unordered_map<int32_t, sensor_t*> mConnectedDynamicSensors;
 
     // A bug in the Sensors HIDL spec which marks onDynamicSensorsConnected as oneway causes dynamic
@@ -147,26 +145,26 @@
     std::condition_variable mDynamicSensorsCv;
     static constexpr std::chrono::seconds MAX_DYN_SENSOR_WAIT{5};
 
-    static const nsecs_t MINIMUM_EVENTS_PERIOD =   1000000; // 1000 Hz
-    mutable Mutex mLock; // protect mActivationCount[].batchParams
+    static const nsecs_t MINIMUM_EVENTS_PERIOD = 1000000; // 1000 Hz
+    mutable Mutex mLock;                                  // protect mActivationCount[].batchParams
     // fixed-size array after construction
 
     // Struct to store all the parameters(samplingPeriod, maxBatchReportLatency and flags) from
     // batch call. For continous mode clients, maxBatchReportLatency is set to zero.
     struct BatchParams {
-      nsecs_t mTSample, mTBatch;
-      BatchParams() : mTSample(INT64_MAX), mTBatch(INT64_MAX) {}
-      BatchParams(nsecs_t tSample, nsecs_t tBatch): mTSample(tSample), mTBatch(tBatch) {}
-      bool operator != (const BatchParams& other) {
-          return !(mTSample == other.mTSample && mTBatch == other.mTBatch);
-      }
-      // Merge another parameter with this one. The updated mTSample will be the min of the two.
-      // The update mTBatch will be the min of original mTBatch and the apparent batch period
-      // of the other. the apparent batch is the maximum of mTBatch and mTSample,
-      void merge(const BatchParams &other) {
-          mTSample = std::min(mTSample, other.mTSample);
-          mTBatch = std::min(mTBatch, std::max(other.mTBatch, other.mTSample));
-      }
+        nsecs_t mTSample, mTBatch;
+        BatchParams() : mTSample(INT64_MAX), mTBatch(INT64_MAX) {}
+        BatchParams(nsecs_t tSample, nsecs_t tBatch) : mTSample(tSample), mTBatch(tBatch) {}
+        bool operator!=(const BatchParams& other) {
+            return !(mTSample == other.mTSample && mTBatch == other.mTBatch);
+        }
+        // Merge another parameter with this one. The updated mTSample will be the min of the two.
+        // The updated mTBatch will be the min of the original mTBatch and the apparent batch
+        // period of the other. The apparent batch period is the maximum of mTBatch and mTSample.
+        void merge(const BatchParams& other) {
+            mTSample = std::min(mTSample, other.mTSample);
+            mTBatch = std::min(mTBatch, std::max(other.mTBatch, other.mTSample));
+        }
     };
 
     // Store batch parameters in the KeyedVector and the optimal batch_rate and timeout in
@@ -224,7 +222,7 @@
     static_assert(DisabledReason::DISABLED_REASON_MAX < sizeof(uint8_t) * CHAR_BIT);
 
     // Use this map to determine which client is activated or deactivated.
-    std::unordered_map<void *, uint8_t> mDisabledClients;
+    std::unordered_map<void*, uint8_t> mDisabledClients;
 
     void addDisabledReasonForIdentLocked(void* ident, DisabledReason reason);
     void removeDisabledReasonForIdentLocked(void* ident, DisabledReason reason);
@@ -233,13 +231,13 @@
     bool connectHidlService();
     void initializeSensorList();
     void reactivateSensors(const DefaultKeyedVector<int, Info>& previousActivations);
-    static bool sensorHandlesChanged(const Vector<sensor_t>& oldSensorList,
-                                     const Vector<sensor_t>& newSensorList);
+    static bool sensorHandlesChanged(const std::vector<sensor_t>& oldSensorList,
+                                     const std::vector<sensor_t>& newSensorList);
     static bool sensorIsEquivalent(const sensor_t& prevSensor, const sensor_t& newSensor);
 
     enum HalConnectionStatus {
-        CONNECTED, // Successfully connected to the HAL
-        DOES_NOT_EXIST, // Could not find the HAL
+        CONNECTED,         // Successfully connected to the HAL
+        DOES_NOT_EXIST,    // Could not find the HAL
         FAILED_TO_CONNECT, // Found the HAL but failed to connect/initialize
         UNKNOWN,
     };
@@ -257,32 +255,31 @@
     status_t updateBatchParamsLocked(int handle, Info& info);
     status_t doActivateHardwareLocked(int handle, bool enable);
 
-    void handleHidlDeath(const std::string &detail);
-    template<typename T>
+    void handleHidlDeath(const std::string& detail);
+    template <typename T>
     void checkReturn(const Return<T>& ret) {
         if (!ret.isOk()) {
             handleHidlDeath(ret.description());
         }
     }
     status_t checkReturnAndGetStatus(const Return<Result>& ret);
-    //TODO(b/67425500): remove waiter after bug is resolved.
+    // TODO(b/67425500): remove waiter after bug is resolved.
     sp<SensorDeviceUtils::HidlServiceRegistrationWaiter> mRestartWaiter;
 
     bool isClientDisabled(void* ident) const;
     bool isClientDisabledLocked(void* ident) const;
-    std::vector<void *> getDisabledClientsLocked() const;
+    std::vector<void*> getDisabledClientsLocked() const;
 
     bool clientHasNoAccessLocked(void* ident) const;
 
     using Event = hardware::sensors::V2_1::Event;
     using SensorInfo = hardware::sensors::V2_1::SensorInfo;
 
-    void convertToSensorEvent(const Event &src, sensors_event_t *dst);
+    void convertToSensorEvent(const Event& src, sensors_event_t* dst);
 
-    void convertToSensorEventsAndQuantize(
-            const hardware::hidl_vec<Event> &src,
-            const hardware::hidl_vec<SensorInfo> &dynamicSensorsAdded,
-            sensors_event_t *dst);
+    void convertToSensorEventsAndQuantize(const hardware::hidl_vec<Event>& src,
+                                          const hardware::hidl_vec<SensorInfo>& dynamicSensorsAdded,
+                                          sensors_event_t* dst);
 
     float getResolutionForSensor(int sensorHandle);
 
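
The BatchParams struct reformatted above is the whole merging model for this header: each client contributes a requested sampling period and batch period, and merge() folds them into the single configuration handed to the HAL. A standalone sketch of that semantics, with illustrative values only:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

using nsecs_t = int64_t;

// Copy of the merge rule shown above: the effective sampling period is the
// fastest request, and the effective batch period is the smallest "apparent"
// batch period, where a client's apparent batch period is never shorter than
// its own sampling period.
struct BatchParams {
    nsecs_t mTSample = INT64_MAX;
    nsecs_t mTBatch = INT64_MAX;
    void merge(const BatchParams& other) {
        mTSample = std::min(mTSample, other.mTSample);
        mTBatch = std::min(mTBatch, std::max(other.mTBatch, other.mTSample));
    }
};

int main() {
    // One client sampling at 20 ms that wants delivery within 20 ms, and one
    // sampling at 5 ms that tolerates up to 1 s of batching.
    std::vector<BatchParams> clients = {{20'000'000, 20'000'000},
                                        {5'000'000, 1'000'000'000}};
    BatchParams best; // starts unconstrained (INT64_MAX)
    for (const auto& p : clients) best.merge(p);
    std::printf("sample=%lld ns batch=%lld ns\n",
                static_cast<long long>(best.mTSample),
                static_cast<long long>(best.mTBatch));
    // -> sample=5000000 ns, batch=20000000 ns: the 20 ms client caps how long
    //    events may be held back, while the 5 ms client sets the sampling rate.
    return 0;
}
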
diff --git a/services/sensorservice/SensorService.cpp b/services/sensorservice/SensorService.cpp
index 32a0110..9bc7b8e 100644
--- a/services/sensorservice/SensorService.cpp
+++ b/services/sensorservice/SensorService.cpp
@@ -164,7 +164,6 @@
         sensor_t const* list;
         ssize_t count = dev.getSensorList(&list);
         if (count > 0) {
-            ssize_t orientationIndex = -1;
             bool hasGyro = false, hasAccel = false, hasMag = false;
             uint32_t virtualSensorsNeeds =
                     (1<<SENSOR_TYPE_GRAVITY) |
@@ -183,9 +182,6 @@
                     case SENSOR_TYPE_MAGNETIC_FIELD:
                         hasMag = true;
                         break;
-                    case SENSOR_TYPE_ORIENTATION:
-                        orientationIndex = i;
-                        break;
                     case SENSOR_TYPE_GYROSCOPE:
                     case SENSOR_TYPE_GYROSCOPE_UNCALIBRATED:
                         hasGyro = true;
@@ -201,6 +197,8 @@
                             virtualSensorsNeeds &= ~(1<<list[i].type);
                         }
                         break;
+                    default:
+                        break;
                 }
                 if (useThisSensor) {
                     if (list[i].type == SENSOR_TYPE_PROXIMITY) {
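
The surrounding initialization keeps one bit per virtual sensor type in virtualSensorsNeeds and clears a bit whenever the HAL already exposes a sensor of that type, so only the remaining types get a software implementation. A minimal sketch of that bookkeeping, using stand-in enum values rather than the real SENSOR_TYPE_* constants:

#include <cstdint>
#include <cstdio>

// Stand-in type values for illustration only.
enum VirtualType { GRAVITY = 0, LINEAR_ACCELERATION = 1, ROTATION_VECTOR = 2 };

int main() {
    uint32_t virtualSensorsNeeds =
            (1u << GRAVITY) | (1u << LINEAR_ACCELERATION) | (1u << ROTATION_VECTOR);

    // Pretend the HAL list already contains gravity and rotation vector sensors.
    const int halProvidedTypes[] = {GRAVITY, ROTATION_VECTOR};
    for (int type : halProvidedTypes) {
        virtualSensorsNeeds &= ~(1u << type); // hardware covers it
    }

    std::printf("need software linear acceleration: %s\n",
                (virtualSensorsNeeds & (1u << LINEAR_ACCELERATION)) ? "yes" : "no");
    return 0;
}
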
diff --git a/services/surfaceflinger/Android.bp b/services/surfaceflinger/Android.bp
index 6691575..fb42cc0 100644
--- a/services/surfaceflinger/Android.bp
+++ b/services/surfaceflinger/Android.bp
@@ -201,7 +201,7 @@
         "SurfaceFlinger.cpp",
         "SurfaceFlingerDefaultFactory.cpp",
         "SurfaceInterceptor.cpp",
-        "SurfaceTracing.cpp",
+        "Tracing/LayerTracing.cpp",
         "Tracing/TransactionProtoParser.cpp",
         "TransactionCallbackInvoker.cpp",
         "TunnelModeEnabledReporter.cpp",
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index e606e55..968a49d 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -2005,7 +2005,7 @@
     writeToProtoDrawingState(layerProto, traceFlags, display);
     writeToProtoCommonState(layerProto, LayerVector::StateSet::Drawing, traceFlags);
 
-    if (traceFlags & SurfaceTracing::TRACE_COMPOSITION) {
+    if (traceFlags & LayerTracing::TRACE_COMPOSITION) {
         // Only populate for the primary display.
         if (display) {
             const Hwc2::IComposerClient::Composition compositionType = getCompositionType(*display);
@@ -2023,43 +2023,38 @@
 void Layer::writeToProtoDrawingState(LayerProto* layerInfo, uint32_t traceFlags,
                                      const DisplayDevice* display) {
     const ui::Transform transform = getTransform();
+    auto buffer = getBuffer();
+    if (buffer != nullptr) {
+        LayerProtoHelper::writeToProto(buffer,
+                                       [&]() { return layerInfo->mutable_active_buffer(); });
+        LayerProtoHelper::writeToProtoDeprecated(ui::Transform(getBufferTransform()),
+                                                 layerInfo->mutable_buffer_transform());
+    }
+    layerInfo->set_invalidate(contentDirty);
+    layerInfo->set_is_protected(isProtected());
+    layerInfo->set_dataspace(dataspaceDetails(static_cast<android_dataspace>(getDataSpace())));
+    layerInfo->set_queued_frames(getQueuedFrameCount());
+    layerInfo->set_refresh_pending(isBufferLatched());
+    layerInfo->set_curr_frame(mCurrentFrameNumber);
+    layerInfo->set_effective_scaling_mode(getEffectiveScalingMode());
 
-    if (traceFlags & SurfaceTracing::TRACE_CRITICAL) {
+    layerInfo->set_requested_corner_radius(getDrawingState().cornerRadius);
+    layerInfo->set_corner_radius(getRoundedCornerState().radius);
+    layerInfo->set_background_blur_radius(getBackgroundBlurRadius());
+    layerInfo->set_is_trusted_overlay(isTrustedOverlay());
+    LayerProtoHelper::writeToProtoDeprecated(transform, layerInfo->mutable_transform());
+    LayerProtoHelper::writePositionToProto(transform.tx(), transform.ty(),
+                                           [&]() { return layerInfo->mutable_position(); });
+    LayerProtoHelper::writeToProto(mBounds, [&]() { return layerInfo->mutable_bounds(); });
+    if (traceFlags & LayerTracing::TRACE_COMPOSITION) {
+        LayerProtoHelper::writeToProto(getVisibleRegion(display),
+                                       [&]() { return layerInfo->mutable_visible_region(); });
+    }
+    LayerProtoHelper::writeToProto(surfaceDamageRegion,
+                                   [&]() { return layerInfo->mutable_damage_region(); });
 
-        auto buffer = getBuffer();
-        if (buffer != nullptr) {
-            LayerProtoHelper::writeToProto(buffer,
-                                           [&]() { return layerInfo->mutable_active_buffer(); });
-            LayerProtoHelper::writeToProtoDeprecated(ui::Transform(getBufferTransform()),
-                                                     layerInfo->mutable_buffer_transform());
-        }
-        layerInfo->set_invalidate(contentDirty);
-        layerInfo->set_is_protected(isProtected());
-        layerInfo->set_dataspace(dataspaceDetails(static_cast<android_dataspace>(getDataSpace())));
-        layerInfo->set_queued_frames(getQueuedFrameCount());
-        layerInfo->set_refresh_pending(isBufferLatched());
-        layerInfo->set_curr_frame(mCurrentFrameNumber);
-        layerInfo->set_effective_scaling_mode(getEffectiveScalingMode());
-
-        layerInfo->set_requested_corner_radius(getDrawingState().cornerRadius);
-        layerInfo->set_corner_radius(getRoundedCornerState().radius);
-        layerInfo->set_background_blur_radius(getBackgroundBlurRadius());
-        layerInfo->set_is_trusted_overlay(isTrustedOverlay());
-        LayerProtoHelper::writeToProtoDeprecated(transform, layerInfo->mutable_transform());
-        LayerProtoHelper::writePositionToProto(transform.tx(), transform.ty(),
-                                               [&]() { return layerInfo->mutable_position(); });
-        LayerProtoHelper::writeToProto(mBounds, [&]() { return layerInfo->mutable_bounds(); });
-        if (traceFlags & SurfaceTracing::TRACE_COMPOSITION) {
-            LayerProtoHelper::writeToProto(getVisibleRegion(display),
-                                           [&]() { return layerInfo->mutable_visible_region(); });
-        }
-        LayerProtoHelper::writeToProto(surfaceDamageRegion,
-                                       [&]() { return layerInfo->mutable_damage_region(); });
-
-        if (hasColorTransform()) {
-            LayerProtoHelper::writeToProto(getColorTransform(),
-                                           layerInfo->mutable_color_transform());
-        }
+    if (hasColorTransform()) {
+        LayerProtoHelper::writeToProto(getColorTransform(), layerInfo->mutable_color_transform());
     }
 
     LayerProtoHelper::writeToProto(mSourceBounds,
@@ -2079,70 +2074,66 @@
 
     ui::Transform requestedTransform = state.transform;
 
-    if (traceFlags & SurfaceTracing::TRACE_CRITICAL) {
-        layerInfo->set_id(sequence);
-        layerInfo->set_name(getName().c_str());
-        layerInfo->set_type(getType());
+    layerInfo->set_id(sequence);
+    layerInfo->set_name(getName().c_str());
+    layerInfo->set_type(getType());
 
-        for (const auto& child : children) {
-            layerInfo->add_children(child->sequence);
-        }
-
-        for (const wp<Layer>& weakRelative : state.zOrderRelatives) {
-            sp<Layer> strongRelative = weakRelative.promote();
-            if (strongRelative != nullptr) {
-                layerInfo->add_relatives(strongRelative->sequence);
-            }
-        }
-
-        LayerProtoHelper::writeToProto(state.activeTransparentRegion_legacy,
-                                       [&]() { return layerInfo->mutable_transparent_region(); });
-
-        layerInfo->set_layer_stack(getLayerStack().id);
-        layerInfo->set_z(state.z);
-
-        LayerProtoHelper::writePositionToProto(requestedTransform.tx(), requestedTransform.ty(),
-                                               [&]() {
-                                                   return layerInfo->mutable_requested_position();
-                                               });
-
-        LayerProtoHelper::writeSizeToProto(state.width, state.height,
-                                           [&]() { return layerInfo->mutable_size(); });
-
-        LayerProtoHelper::writeToProto(state.crop, [&]() { return layerInfo->mutable_crop(); });
-
-        layerInfo->set_is_opaque(isOpaque(state));
-
-
-        layerInfo->set_pixel_format(decodePixelFormat(getPixelFormat()));
-        LayerProtoHelper::writeToProto(getColor(), [&]() { return layerInfo->mutable_color(); });
-        LayerProtoHelper::writeToProto(state.color,
-                                       [&]() { return layerInfo->mutable_requested_color(); });
-        layerInfo->set_flags(state.flags);
-
-        LayerProtoHelper::writeToProtoDeprecated(requestedTransform,
-                                                 layerInfo->mutable_requested_transform());
-
-        auto parent = useDrawing ? mDrawingParent.promote() : mCurrentParent.promote();
-        if (parent != nullptr) {
-            layerInfo->set_parent(parent->sequence);
-        } else {
-            layerInfo->set_parent(-1);
-        }
-
-        auto zOrderRelativeOf = state.zOrderRelativeOf.promote();
-        if (zOrderRelativeOf != nullptr) {
-            layerInfo->set_z_order_relative_of(zOrderRelativeOf->sequence);
-        } else {
-            layerInfo->set_z_order_relative_of(-1);
-        }
-
-        layerInfo->set_is_relative_of(state.isRelativeOf);
-
-        layerInfo->set_owner_uid(mOwnerUid);
+    for (const auto& child : children) {
+        layerInfo->add_children(child->sequence);
     }
 
-    if (traceFlags & SurfaceTracing::TRACE_INPUT) {
+    for (const wp<Layer>& weakRelative : state.zOrderRelatives) {
+        sp<Layer> strongRelative = weakRelative.promote();
+        if (strongRelative != nullptr) {
+            layerInfo->add_relatives(strongRelative->sequence);
+        }
+    }
+
+    LayerProtoHelper::writeToProto(state.activeTransparentRegion_legacy,
+                                   [&]() { return layerInfo->mutable_transparent_region(); });
+
+    layerInfo->set_layer_stack(getLayerStack().id);
+    layerInfo->set_z(state.z);
+
+    LayerProtoHelper::writePositionToProto(requestedTransform.tx(), requestedTransform.ty(), [&]() {
+        return layerInfo->mutable_requested_position();
+    });
+
+    LayerProtoHelper::writeSizeToProto(state.width, state.height,
+                                       [&]() { return layerInfo->mutable_size(); });
+
+    LayerProtoHelper::writeToProto(state.crop, [&]() { return layerInfo->mutable_crop(); });
+
+    layerInfo->set_is_opaque(isOpaque(state));
+
+    layerInfo->set_pixel_format(decodePixelFormat(getPixelFormat()));
+    LayerProtoHelper::writeToProto(getColor(), [&]() { return layerInfo->mutable_color(); });
+    LayerProtoHelper::writeToProto(state.color,
+                                   [&]() { return layerInfo->mutable_requested_color(); });
+    layerInfo->set_flags(state.flags);
+
+    LayerProtoHelper::writeToProtoDeprecated(requestedTransform,
+                                             layerInfo->mutable_requested_transform());
+
+    auto parent = useDrawing ? mDrawingParent.promote() : mCurrentParent.promote();
+    if (parent != nullptr) {
+        layerInfo->set_parent(parent->sequence);
+    } else {
+        layerInfo->set_parent(-1);
+    }
+
+    auto zOrderRelativeOf = state.zOrderRelativeOf.promote();
+    if (zOrderRelativeOf != nullptr) {
+        layerInfo->set_z_order_relative_of(zOrderRelativeOf->sequence);
+    } else {
+        layerInfo->set_z_order_relative_of(-1);
+    }
+
+    layerInfo->set_is_relative_of(state.isRelativeOf);
+
+    layerInfo->set_owner_uid(mOwnerUid);
+
+    if (traceFlags & LayerTracing::TRACE_INPUT) {
         WindowInfo info;
         if (useDrawing) {
             info = fillInputInfo(ui::Transform(), /* displayIsSecure */ true);
@@ -2154,7 +2145,7 @@
                                        [&]() { return layerInfo->mutable_input_window_info(); });
     }
 
-    if (traceFlags & SurfaceTracing::TRACE_EXTRA) {
+    if (traceFlags & LayerTracing::TRACE_EXTRA) {
         auto protoMap = layerInfo->mutable_metadata();
         for (const auto& entry : state.metadata.mMap) {
             (*protoMap)[entry.first] = std::string(entry.second.cbegin(), entry.second.cend());
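
Throughout writeToProtoDrawingState() and writeToProtoCommonState(), proto fields are filled via LayerProtoHelper::writeToProto(value, getter), where the getter lambda returns the mutable destination field. A small sketch of that shape; the assumption here (not stated in the change itself) is that the helper only invokes the getter when there is something to write, so empty regions never allocate a submessage in the trace:

#include <cstdio>
#include <functional>

struct FakeRegion {
    int left = 0, top = 0, right = 0, bottom = 0;
    bool isEmpty() const { return left == right || top == bottom; }
};

// Deliberately simplified helper: fetch/allocate the destination only when
// the input is non-empty.
void writeRegionToProto(const FakeRegion& region, std::function<FakeRegion*()> getDestination) {
    if (region.isEmpty()) return; // destination never requested for empty input
    *getDestination() = region;
}

int main() {
    FakeRegion visible{0, 0, 1080, 1920};
    FakeRegion dst;
    bool requested = false;
    writeRegionToProto(visible, [&]() { requested = true; return &dst; });
    std::printf("requested=%d right=%d\n", requested ? 1 : 0, dst.right);
    return 0;
}
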
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 84355ea..bda1c28 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -57,7 +57,7 @@
 #include "Scheduler/LayerInfo.h"
 #include "Scheduler/Seamlessness.h"
 #include "SurfaceFlinger.h"
-#include "SurfaceTracing.h"
+#include "Tracing/LayerTracing.h"
 #include "TransactionCallbackInvoker.h"
 
 using namespace android::surfaceflinger;
@@ -693,7 +693,7 @@
     // external mStateLock. If writing drawing state, this function should be called on the
     // main or tracing thread.
     void writeToProtoCommonState(LayerProto* layerInfo, LayerVector::StateSet,
-                                 uint32_t traceFlags = SurfaceTracing::TRACE_ALL);
+                                 uint32_t traceFlags = LayerTracing::TRACE_ALL);
 
     gui::WindowInfo::Type getWindowType() const { return mWindowType; }
 
diff --git a/services/surfaceflinger/Scheduler/MessageQueue.cpp b/services/surfaceflinger/Scheduler/MessageQueue.cpp
index 043a536..a020e2c 100644
--- a/services/surfaceflinger/Scheduler/MessageQueue.cpp
+++ b/services/surfaceflinger/Scheduler/MessageQueue.cpp
@@ -30,41 +30,30 @@
 
 namespace android::impl {
 
-void MessageQueue::Handler::dispatchComposite() {
-    if ((mEventMask.fetch_or(kComposite) & kComposite) == 0) {
-        mQueue.mLooper->sendMessage(this, Message(kComposite));
-    }
-}
-
-void MessageQueue::Handler::dispatchCommit(int64_t vsyncId, nsecs_t expectedVsyncTime) {
-    if ((mEventMask.fetch_or(kCommit) & kCommit) == 0) {
+void MessageQueue::Handler::dispatchFrame(int64_t vsyncId, nsecs_t expectedVsyncTime) {
+    if (!mFramePending.exchange(true)) {
         mVsyncId = vsyncId;
         mExpectedVsyncTime = expectedVsyncTime;
-        mQueue.mLooper->sendMessage(this, Message(kCommit));
+        mQueue.mLooper->sendMessage(this, Message());
     }
 }
 
 bool MessageQueue::Handler::isFramePending() const {
-    constexpr auto kPendingMask = kCommit | kComposite;
-    return (mEventMask.load() & kPendingMask) != 0;
+    return mFramePending.load();
 }
 
-void MessageQueue::Handler::handleMessage(const Message& message) {
+void MessageQueue::Handler::handleMessage(const Message&) {
+    mFramePending.store(false);
+
     const nsecs_t frameTime = systemTime();
-    switch (message.what) {
-        case kCommit:
-            mEventMask.fetch_and(~kCommit);
-            if (!mQueue.mCompositor.commit(frameTime, mVsyncId, mExpectedVsyncTime)) {
-                return;
-            }
-            // Composite immediately, rather than after pending tasks through scheduleComposite.
-            [[fallthrough]];
-        case kComposite:
-            mEventMask.fetch_and(~kComposite);
-            mQueue.mCompositor.composite(frameTime);
-            mQueue.mCompositor.sample();
-            break;
+    auto& compositor = mQueue.mCompositor;
+
+    if (!compositor.commit(frameTime, mVsyncId, mExpectedVsyncTime)) {
+        return;
     }
+
+    compositor.composite(frameTime);
+    compositor.sample();
 }
 
 MessageQueue::MessageQueue(ICompositor& compositor)
@@ -122,7 +111,7 @@
     const auto vsyncId = mVsync.tokenManager->generateTokenForPredictions(
             {targetWakeupTime, readyTime, vsyncTime});
 
-    mHandler->dispatchCommit(vsyncId, vsyncTime);
+    mHandler->dispatchFrame(vsyncId, vsyncTime);
 }
 
 void MessageQueue::initVsync(scheduler::VSyncDispatch& dispatch,
@@ -176,7 +165,7 @@
     mLooper->sendMessage(handler, Message());
 }
 
-void MessageQueue::scheduleCommit() {
+void MessageQueue::scheduleFrame() {
     ATRACE_CALL();
 
     {
@@ -195,18 +184,14 @@
                                            .earliestVsync = mVsync.lastCallbackTime.count()});
 }
 
-void MessageQueue::scheduleComposite() {
-    mHandler->dispatchComposite();
-}
-
 void MessageQueue::injectorCallback() {
     ssize_t n;
     DisplayEventReceiver::Event buffer[8];
     while ((n = DisplayEventReceiver::getEvents(&mInjector.tube, buffer, 8)) > 0) {
         for (int i = 0; i < n; i++) {
             if (buffer[i].header.type == DisplayEventReceiver::DISPLAY_EVENT_VSYNC) {
-                mHandler->dispatchCommit(buffer[i].vsync.vsyncId,
-                                         buffer[i].vsync.expectedVSyncTimestamp);
+                auto& vsync = buffer[i].vsync;
+                mHandler->dispatchFrame(vsync.vsyncId, vsync.expectedVSyncTimestamp);
                 break;
             }
         }
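
dispatchFrame() above coalesces duplicate requests behind a single atomic flag: only the transition from "no frame pending" to "frame pending" posts a Looper message, and handleMessage() clears the flag before running commit and composite. A standalone model of that latch:

#include <atomic>
#include <cstdio>

class FrameLatch {
public:
    // Returns true only for the caller that should actually post a message;
    // later callers are coalesced into the already-pending frame.
    bool tryDispatch() { return !mFramePending.exchange(true); }

    // Called at the start of handling, so a new frame can be requested while
    // the current one is still being processed.
    void onHandled() { mFramePending.store(false); }

private:
    std::atomic_bool mFramePending{false};
};

int main() {
    FrameLatch latch;
    std::printf("%d\n", latch.tryDispatch() ? 1 : 0); // 1: posts a frame
    std::printf("%d\n", latch.tryDispatch() ? 1 : 0); // 0: coalesced
    latch.onHandled();
    std::printf("%d\n", latch.tryDispatch() ? 1 : 0); // 1: next frame
    return 0;
}
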
diff --git a/services/surfaceflinger/Scheduler/MessageQueue.h b/services/surfaceflinger/Scheduler/MessageQueue.h
index 2c908a6..dd69d60 100644
--- a/services/surfaceflinger/Scheduler/MessageQueue.h
+++ b/services/surfaceflinger/Scheduler/MessageQueue.h
@@ -71,8 +71,7 @@
     virtual void setInjector(sp<EventThreadConnection>) = 0;
     virtual void waitMessage() = 0;
     virtual void postMessage(sp<MessageHandler>&&) = 0;
-    virtual void scheduleCommit() = 0;
-    virtual void scheduleComposite() = 0;
+    virtual void scheduleFrame() = 0;
 
     using Clock = std::chrono::steady_clock;
     virtual std::optional<Clock::time_point> getScheduledFrameTime() const = 0;
@@ -83,11 +82,8 @@
 class MessageQueue : public android::MessageQueue {
 protected:
     class Handler : public MessageHandler {
-        static constexpr uint32_t kCommit = 0b1;
-        static constexpr uint32_t kComposite = 0b10;
-
         MessageQueue& mQueue;
-        std::atomic<uint32_t> mEventMask = 0;
+        std::atomic_bool mFramePending = false;
         std::atomic<int64_t> mVsyncId = 0;
         std::atomic<nsecs_t> mExpectedVsyncTime = 0;
 
@@ -97,8 +93,7 @@
 
         bool isFramePending() const;
 
-        virtual void dispatchCommit(int64_t vsyncId, nsecs_t expectedVsyncTime);
-        void dispatchComposite();
+        virtual void dispatchFrame(int64_t vsyncId, nsecs_t expectedVsyncTime);
     };
 
     friend class Handler;
@@ -147,8 +142,7 @@
     void waitMessage() override;
     void postMessage(sp<MessageHandler>&&) override;
 
-    void scheduleCommit() override;
-    void scheduleComposite() override;
+    void scheduleFrame() override;
 
     std::optional<Clock::time_point> getScheduledFrameTime() const override;
 };
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index eeea25d..4d72798 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -117,16 +117,10 @@
     }
 };
 
-Scheduler::Scheduler(const std::shared_ptr<scheduler::RefreshRateConfigs>& configs,
-                     ISchedulerCallback& callback)
-      : Scheduler(configs, callback,
-                  {.useContentDetection = sysprop::use_content_detection_for_refresh_rate(false)}) {
-}
+Scheduler::Scheduler(ICompositor& compositor, ISchedulerCallback& callback, Options options)
+      : impl::MessageQueue(compositor), mOptions(options), mSchedulerCallback(callback) {}
 
-Scheduler::Scheduler(const std::shared_ptr<scheduler::RefreshRateConfigs>& configs,
-                     ISchedulerCallback& callback, Options options)
-      : Scheduler(createVsyncSchedule(configs->supportsKernelIdleTimer()), configs, callback,
-                  createLayerHistory(), options) {
+void Scheduler::startTimers() {
     using namespace sysprop;
 
     if (const int64_t millis = set_touch_timer_ms(0); millis > 0) {
@@ -147,22 +141,6 @@
     }
 }
 
-Scheduler::Scheduler(VsyncSchedule schedule,
-                     const std::shared_ptr<scheduler::RefreshRateConfigs>& configs,
-                     ISchedulerCallback& schedulerCallback,
-                     std::unique_ptr<LayerHistory> layerHistory, Options options)
-      : mOptions(options),
-        mVsyncSchedule(std::move(schedule)),
-        mLayerHistory(std::move(layerHistory)),
-        mSchedulerCallback(schedulerCallback),
-        mPredictedVsyncTracer(
-                base::GetBoolProperty("debug.sf.show_predicted_vsync", false)
-                        ? std::make_unique<PredictedVsyncTracer>(*mVsyncSchedule.dispatch)
-                        : nullptr) {
-    setRefreshRateConfigs(configs);
-    mSchedulerCallback.setVsyncEnabled(false);
-}
-
 Scheduler::~Scheduler() {
     // Ensure the OneShotTimer threads are joined before we start destroying state.
     mDisplayPowerTimer.reset();
@@ -170,7 +148,13 @@
     mRefreshRateConfigs.reset();
 }
 
-Scheduler::VsyncSchedule Scheduler::createVsyncSchedule(bool supportKernelTimer) {
+void Scheduler::run() {
+    while (true) {
+        waitMessage();
+    }
+}
+
+void Scheduler::createVsyncSchedule(bool supportKernelTimer) {
     auto clock = std::make_unique<scheduler::SystemClock>();
     auto tracker = createVSyncTracker();
     auto dispatch = createVSyncDispatch(*tracker);
@@ -180,11 +164,11 @@
     auto controller =
             std::make_unique<scheduler::VSyncReactor>(std::move(clock), *tracker, pendingFenceLimit,
                                                       supportKernelTimer);
-    return {std::move(controller), std::move(tracker), std::move(dispatch)};
-}
+    mVsyncSchedule = {std::move(controller), std::move(tracker), std::move(dispatch)};
 
-std::unique_ptr<LayerHistory> Scheduler::createLayerHistory() {
-    return std::make_unique<scheduler::LayerHistory>();
+    if (base::GetBoolProperty("debug.sf.show_predicted_vsync", false)) {
+        mPredictedVsyncTracer = std::make_unique<PredictedVsyncTracer>(*mVsyncSchedule.dispatch);
+    }
 }
 
 std::unique_ptr<VSyncSource> Scheduler::makePrimaryDispSyncSource(
@@ -594,11 +578,11 @@
     // If the content detection feature is off, we still keep the layer history,
     // since we use it for other features (like Frame Rate API), so layers
     // still need to be registered.
-    mLayerHistory->registerLayer(layer, voteType);
+    mLayerHistory.registerLayer(layer, voteType);
 }
 
 void Scheduler::deregisterLayer(Layer* layer) {
-    mLayerHistory->deregisterLayer(layer);
+    mLayerHistory.deregisterLayer(layer);
 }
 
 void Scheduler::recordLayerHistory(Layer* layer, nsecs_t presentTime,
@@ -608,11 +592,11 @@
         if (!mRefreshRateConfigs->canSwitch()) return;
     }
 
-    mLayerHistory->record(layer, presentTime, systemTime(), updateType);
+    mLayerHistory.record(layer, presentTime, systemTime(), updateType);
 }
 
 void Scheduler::setModeChangePending(bool pending) {
-    mLayerHistory->setModeChangePending(pending);
+    mLayerHistory.setModeChangePending(pending);
 }
 
 void Scheduler::chooseRefreshRateForContent() {
@@ -625,7 +609,7 @@
 
     const auto refreshRateConfigs = holdRefreshRateConfigs();
     scheduler::LayerHistory::Summary summary =
-            mLayerHistory->summarize(*refreshRateConfigs, systemTime());
+            mLayerHistory.summarize(*refreshRateConfigs, systemTime());
     scheduler::RefreshRateConfigs::GlobalSignals consideredSignals;
     DisplayModePtr newMode;
     bool frameRateChanged;
@@ -686,7 +670,7 @@
 
     // Display Power event will boost the refresh rate to performance.
     // Clear Layer History to get fresh FPS detection
-    mLayerHistory->clear();
+    mLayerHistory.clear();
 }
 
 void Scheduler::kernelIdleTimerCallback(TimerState state) {
@@ -730,7 +714,7 @@
     // NOTE: Instead of checking all the layers, we should be checking the layer
     // that is currently on top. b/142507166 will give us this capability.
     if (handleTimerStateChanged(&mFeatures.touch, touch)) {
-        mLayerHistory->clear();
+        mLayerHistory.clear();
     }
     ATRACE_INT("TouchState", static_cast<int>(touch));
 }
@@ -747,7 +731,7 @@
                   mTouchTimer ? mTouchTimer->dump().c_str() : "off");
     StringAppendF(&result, "+  Content detection: %s %s\n\n",
                   toContentDetectionString(mOptions.useContentDetection),
-                  mLayerHistory ? mLayerHistory->dump().c_str() : "(no layer history)");
+                  mLayerHistory.dump().c_str());
 
     {
         std::lock_guard lock(mFrameRateOverridesLock);
@@ -911,7 +895,7 @@
 }
 
 void Scheduler::onActiveDisplayAreaChanged(uint32_t displayArea) {
-    mLayerHistory->setDisplayArea(displayArea);
+    mLayerHistory.setDisplayArea(displayArea);
 }
 
 void Scheduler::setPreferredRefreshRateForUid(FrameRateOverride frameRateOverride) {
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index 8397738..f3c5b35 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -18,6 +18,7 @@
 
 #include <atomic>
 #include <functional>
+#include <future>
 #include <memory>
 #include <mutex>
 #include <optional>
@@ -32,6 +33,7 @@
 
 #include "EventThread.h"
 #include "LayerHistory.h"
+#include "MessageQueue.h"
 #include "OneShotTimer.h"
 #include "RefreshRateConfigs.h"
 #include "SchedulerUtils.h"
@@ -70,14 +72,41 @@
     ~ISchedulerCallback() = default;
 };
 
-class Scheduler {
+class Scheduler : impl::MessageQueue {
+    using Impl = impl::MessageQueue;
+
 public:
     using RefreshRate = scheduler::RefreshRateConfigs::RefreshRate;
     using ModeEvent = scheduler::RefreshRateConfigEvent;
 
-    Scheduler(const std::shared_ptr<scheduler::RefreshRateConfigs>&, ISchedulerCallback&);
+    struct Options {
+        // Whether to use content detection at all.
+        bool useContentDetection;
+    };
+
+    Scheduler(ICompositor&, ISchedulerCallback&, Options);
     ~Scheduler();
 
+    void createVsyncSchedule(bool supportKernelIdleTimer);
+    void startTimers();
+    void run();
+
+    using Impl::initVsync;
+    using Impl::setInjector;
+
+    using Impl::getScheduledFrameTime;
+    using Impl::setDuration;
+
+    using Impl::scheduleFrame;
+
+    // Schedule an asynchronous or synchronous task on the main thread.
+    template <typename F, typename T = std::invoke_result_t<F>>
+    [[nodiscard]] std::future<T> schedule(F&& f) {
+        auto [task, future] = makeTask(std::move(f));
+        postMessage(std::move(task));
+        return std::move(future);
+    }
+
     using ConnectionHandle = scheduler::ConnectionHandle;
     ConnectionHandle createConnection(const char* connectionName, frametimeline::TokenManager*,
                                       std::chrono::nanoseconds workDuration,
@@ -213,27 +242,12 @@
     enum class TimerState { Reset, Expired };
     enum class TouchState { Inactive, Active };
 
-    struct Options {
-        // Whether to use content detection at all.
-        bool useContentDetection;
-    };
-
     struct VsyncSchedule {
         std::unique_ptr<scheduler::VsyncController> controller;
         std::unique_ptr<scheduler::VSyncTracker> tracker;
         std::unique_ptr<scheduler::VSyncDispatch> dispatch;
     };
 
-    // Unlike the testing constructor, this creates the VsyncSchedule, LayerHistory, and timers.
-    Scheduler(const std::shared_ptr<scheduler::RefreshRateConfigs>&, ISchedulerCallback&, Options);
-
-    // Used by tests to inject mocks.
-    Scheduler(VsyncSchedule, const std::shared_ptr<scheduler::RefreshRateConfigs>&,
-              ISchedulerCallback&, std::unique_ptr<LayerHistory>, Options);
-
-    static VsyncSchedule createVsyncSchedule(bool supportKernelIdleTimer);
-    static std::unique_ptr<LayerHistory> createLayerHistory();
-
     // Create a connection on the given EventThread.
     ConnectionHandle createConnection(std::unique_ptr<EventThread>);
     sp<EventThreadConnection> createConnectionInternal(
@@ -297,7 +311,7 @@
     VsyncSchedule mVsyncSchedule;
 
     // Used to choose refresh rate if content detection is enabled.
-    std::unique_ptr<LayerHistory> mLayerHistory;
+    LayerHistory mLayerHistory;
 
     // Timer used to monitor touch events.
     std::optional<scheduler::OneShotTimer> mTouchTimer;
@@ -338,7 +352,7 @@
             GUARDED_BY(mVsyncTimelineLock);
     static constexpr std::chrono::nanoseconds MAX_VSYNC_APPLIED_TIME = 200ms;
 
-    const std::unique_ptr<PredictedVsyncTracer> mPredictedVsyncTracer;
+    std::unique_ptr<PredictedVsyncTracer> mPredictedVsyncTracer;
 
     // The frame rate override lists need their own mutex as they are being read
     // by SurfaceFlinger, Scheduler and EventThread (as a callback) to prevent deadlocks
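
Scheduler::schedule() above posts work to the main thread and hands the caller a std::future for the result. makeTask() itself is not shown in this change; assuming it wraps the callable roughly the way std::packaged_task does, an equivalent standalone shape is:

#include <cstdio>
#include <deque>
#include <functional>
#include <future>
#include <memory>
#include <type_traits>
#include <utility>

class MainThreadQueue {
public:
    // Queue the callable for the "main thread" and return a future for its
    // result, mirroring the [[nodiscard]] schedule() template above.
    template <typename F, typename T = std::invoke_result_t<F>>
    [[nodiscard]] std::future<T> schedule(F&& f) {
        auto task = std::make_shared<std::packaged_task<T()>>(std::forward<F>(f));
        std::future<T> future = task->get_future();
        mMessages.push_back([task] { (*task)(); });
        return future;
    }

    // Stand-in for the waitMessage()/handleMessage() loop.
    void drain() {
        while (!mMessages.empty()) {
            mMessages.front()();
            mMessages.pop_front();
        }
    }

private:
    std::deque<std::function<void()>> mMessages;
};

int main() {
    MainThreadQueue queue;
    auto result = queue.schedule([] { return 6 * 7; }); // queued, not yet run
    queue.drain();                                       // "main thread" runs it
    std::printf("%d\n", result.get());                   // 42
    return 0;
}
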
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 45e8d04..45e0a3f 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -125,7 +125,6 @@
 #include "Scheduler/DispSyncSource.h"
 #include "Scheduler/EventThread.h"
 #include "Scheduler/LayerHistory.h"
-#include "Scheduler/MessageQueue.h"
 #include "Scheduler/Scheduler.h"
 #include "Scheduler/VsyncConfiguration.h"
 #include "Scheduler/VsyncController.h"
@@ -362,7 +361,6 @@
         mTimeStats(std::make_shared<impl::TimeStats>()),
         mFrameTracer(mFactory.createFrameTracer()),
         mFrameTimeline(mFactory.createFrameTimeline(mTimeStats, getpid())),
-        mEventQueue(mFactory.createMessageQueue(*this)),
         mCompositionEngine(mFactory.createCompositionEngine()),
         mHwcServiceName(base::GetProperty("debug.sf.hwc_service_name"s, "default"s)),
         mTunnelModeEnabledReporter(new TunnelModeEnabledReporter()),
@@ -509,8 +507,8 @@
     // the window manager died on us. prepare its eulogy.
     mBootFinished = false;
 
-    // Sever the link to inputflinger since its gone as well.
-    static_cast<void>(schedule([=] { mInputFlinger = nullptr; }));
+    // Sever the link to inputflinger since it's gone as well.
+    static_cast<void>(mScheduler->schedule([=] { mInputFlinger = nullptr; }));
 
     // restore initial conditions (default device unblank, etc)
     initializeDisplays();
@@ -520,16 +518,7 @@
 }
 
 void SurfaceFlinger::run() {
-    while (true) {
-        mEventQueue->waitMessage();
-    }
-}
-
-template <typename F, typename T>
-inline std::future<T> SurfaceFlinger::schedule(F&& f) {
-    auto [task, future] = makeTask(std::move(f));
-    mEventQueue->postMessage(std::move(task));
-    return std::move(future);
+    mScheduler->run();
 }
 
 sp<ISurfaceComposerClient> SurfaceFlinger::createConnection() {
@@ -730,7 +719,7 @@
 
     sp<IBinder> input(defaultServiceManager()->getService(String16("inputflinger")));
 
-    static_cast<void>(schedule([=] {
+    static_cast<void>(mScheduler->schedule([=] {
         if (input == nullptr) {
             ALOGE("Failed to link to input service");
         } else {
@@ -771,7 +760,7 @@
     if (std::this_thread::get_id() == mMainThreadId) {
         return genTextures();
     } else {
-        return schedule(genTextures).get();
+        return mScheduler->schedule(genTextures).get();
     }
 }
 
@@ -1120,7 +1109,7 @@
         return BAD_VALUE;
     }
 
-    auto future = schedule([=]() -> status_t {
+    auto future = mScheduler->schedule([=]() -> status_t {
         const auto display = ON_MAIN_THREAD(getDisplayDeviceLocked(displayToken));
         if (!display) {
             ALOGE("Attempt to set allowed display modes for invalid display token %p",
@@ -1258,7 +1247,7 @@
         const auto displayModeAllowed =
                 display->refreshRateConfigs().isModeAllowed(desiredActiveMode->mode->getId());
         if (!displayModeAllowed) {
-            desiredActiveModeChangeDone(display);
+            clearDesiredActiveModeState(display);
             continue;
         }
 
@@ -1284,7 +1273,7 @@
 }
 
 void SurfaceFlinger::disableExpensiveRendering() {
-    schedule([=]() MAIN_THREAD {
+    auto future = mScheduler->schedule([=]() MAIN_THREAD {
         ATRACE_CALL();
         if (mPowerAdvisor.isUsingExpensiveRendering()) {
             const auto& displays = ON_MAIN_THREAD(mDisplays);
@@ -1293,7 +1282,9 @@
                 mPowerAdvisor.setExpensiveRenderingExpected(display->getId(), kDisable);
             }
         }
-    }).wait();
+    });
+
+    future.wait();
 }
 
 std::vector<ColorMode> SurfaceFlinger::getDisplayColorModes(const DisplayDevice& display) {
@@ -1332,7 +1323,7 @@
         return BAD_VALUE;
     }
 
-    auto future = schedule([=]() MAIN_THREAD -> status_t {
+    auto future = mScheduler->schedule([=]() MAIN_THREAD -> status_t {
         const auto display = getDisplayDeviceLocked(displayToken);
         if (!display) {
             ALOGE("Attempt to set active color mode %s (%d) for invalid display token %p",
@@ -1367,22 +1358,24 @@
 }
 
 void SurfaceFlinger::setAutoLowLatencyMode(const sp<IBinder>& displayToken, bool on) {
-    static_cast<void>(schedule([=]() MAIN_THREAD {
+    const char* const whence = __func__;
+    static_cast<void>(mScheduler->schedule([=]() MAIN_THREAD {
         if (const auto displayId = getPhysicalDisplayIdLocked(displayToken)) {
             getHwComposer().setAutoLowLatencyMode(*displayId, on);
         } else {
-            ALOGE("%s: Invalid display token %p", __FUNCTION__, displayToken.get());
+            ALOGE("%s: Invalid display token %p", whence, displayToken.get());
         }
     }));
 }
 
 void SurfaceFlinger::setGameContentType(const sp<IBinder>& displayToken, bool on) {
-    static_cast<void>(schedule([=]() MAIN_THREAD {
+    const char* const whence = __func__;
+    static_cast<void>(mScheduler->schedule([=]() MAIN_THREAD {
         if (const auto displayId = getPhysicalDisplayIdLocked(displayToken)) {
             const auto type = on ? hal::ContentType::GAME : hal::ContentType::NONE;
             getHwComposer().setContentType(*displayId, type);
         } else {
-            ALOGE("%s: Invalid display token %p", __FUNCTION__, displayToken.get());
+            ALOGE("%s: Invalid display token %p", whence, displayToken.get());
         }
     }));
 }
@@ -1441,17 +1434,18 @@
 status_t SurfaceFlinger::setDisplayContentSamplingEnabled(const sp<IBinder>& displayToken,
                                                           bool enable, uint8_t componentMask,
                                                           uint64_t maxFrames) {
-    return schedule([=]() MAIN_THREAD -> status_t {
-               if (const auto displayId = getPhysicalDisplayIdLocked(displayToken)) {
-                   return getHwComposer().setDisplayContentSamplingEnabled(*displayId, enable,
-                                                                           componentMask,
-                                                                           maxFrames);
-               } else {
-                   ALOGE("%s: Invalid display token %p", __FUNCTION__, displayToken.get());
-                   return NAME_NOT_FOUND;
-               }
-           })
-            .get();
+    const char* const whence = __func__;
+    auto future = mScheduler->schedule([=]() MAIN_THREAD -> status_t {
+        if (const auto displayId = getPhysicalDisplayIdLocked(displayToken)) {
+            return getHwComposer().setDisplayContentSamplingEnabled(*displayId, enable,
+                                                                    componentMask, maxFrames);
+        } else {
+            ALOGE("%s: Invalid display token %p", whence, displayToken.get());
+            return NAME_NOT_FOUND;
+        }
+    });
+
+    return future.get();
 }
 
 status_t SurfaceFlinger::getDisplayedContentSample(const sp<IBinder>& displayToken,
@@ -1493,14 +1487,15 @@
 }
 
 status_t SurfaceFlinger::enableVSyncInjections(bool enable) {
-    schedule([=] {
+    auto future = mScheduler->schedule([=] {
         Mutex::Autolock lock(mStateLock);
 
         if (const auto handle = mScheduler->enableVSyncInjection(enable)) {
-            mEventQueue->setInjector(enable ? mScheduler->getEventConnection(handle) : nullptr);
+            mScheduler->setInjector(enable ? mScheduler->getEventConnection(handle) : nullptr);
         }
-    }).wait();
+    });
 
+    future.wait();
     return NO_ERROR;
 }
 
@@ -1516,12 +1511,14 @@
 
 status_t SurfaceFlinger::getLayerDebugInfo(std::vector<LayerDebugInfo>* outLayers) {
     outLayers->clear();
-    schedule([=] {
+    auto future = mScheduler->schedule([=] {
         const auto display = ON_MAIN_THREAD(getDefaultDisplayDeviceLocked());
         mDrawingState.traverseInZOrder([&](Layer* layer) {
             outLayers->push_back(layer->getLayerDebugInfo(display.get()));
         });
-    }).wait();
+    });
+
+    future.wait();
     return NO_ERROR;
 }
 
@@ -1616,7 +1613,8 @@
         return BAD_VALUE;
     }
 
-    return ftl::chain(schedule([=]() MAIN_THREAD {
+    const char* const whence = __func__;
+    return ftl::chain(mScheduler->schedule([=]() MAIN_THREAD {
                if (const auto display = getDisplayDeviceLocked(displayToken)) {
                    if (enableSdrDimming) {
                        display->getCompositionDisplay()
@@ -1626,7 +1624,7 @@
                    return getHwComposer().setDisplayBrightness(display->getPhysicalId(),
                                                                brightness.displayBrightness);
                } else {
-                   ALOGE("%s: Invalid display token %p", __FUNCTION__, displayToken.get());
+                   ALOGE("%s: Invalid display token %p", whence, displayToken.get());
                    return ftl::yield<status_t>(NAME_NOT_FOUND);
                }
            }))
@@ -1704,7 +1702,7 @@
         mScheduler->resetIdleTimer();
     }
     mPowerAdvisor.notifyDisplayUpdateImminent();
-    mEventQueue->scheduleCommit();
+    mScheduler->scheduleFrame();
 }
 
 void SurfaceFlinger::scheduleComposite(FrameHint hint) {
@@ -1718,7 +1716,7 @@
 }
 
 void SurfaceFlinger::scheduleSample() {
-    static_cast<void>(schedule([this] { sample(); }));
+    static_cast<void>(mScheduler->schedule([this] { sample(); }));
 }
 
 nsecs_t SurfaceFlinger::getVsyncPeriodFromHWC() const {
@@ -1813,7 +1811,7 @@
     ATRACE_CALL();
 
     // On main thread to avoid race conditions with display power state.
-    static_cast<void>(schedule([=]() MAIN_THREAD {
+    static_cast<void>(mScheduler->schedule([=]() MAIN_THREAD {
         mHWCVsyncPendingState = enabled ? hal::Vsync::ENABLE : hal::Vsync::DISABLE;
 
         if (const auto display = getDefaultDisplayDeviceLocked();
@@ -1930,7 +1928,7 @@
     // fired yet just wait for the next commit.
     if (mSetActiveModePending) {
         if (framePending) {
-            mEventQueue->scheduleCommit();
+            mScheduler->scheduleFrame();
             return false;
         }
 
@@ -1948,7 +1946,7 @@
     }
 
     if (mTracingEnabledChanged) {
-        mTracingEnabled = mTracing.isEnabled();
+        mTracingEnabled = mLayerTracing.isEnabled();
         mTracingEnabledChanged = false;
     }
 
@@ -1962,24 +1960,37 @@
     // Composite if transactions were committed, or if requested by HWC.
     bool mustComposite = mMustComposite.exchange(false);
     {
-        mTracePostComposition = mTracing.flagIsSet(SurfaceTracing::TRACE_COMPOSITION) ||
-                mTracing.flagIsSet(SurfaceTracing::TRACE_SYNC) ||
-                mTracing.flagIsSet(SurfaceTracing::TRACE_BUFFERS);
-        const bool tracePreComposition = mTracingEnabled && !mTracePostComposition;
-        ConditionalLockGuard<std::mutex> lock(mTracingLock, tracePreComposition);
-
         mFrameTimeline->setSfWakeUp(vsyncId, frameTime, Fps::fromPeriodNsecs(stats.vsyncPeriod));
 
-        mustComposite |= flushAndCommitTransactions();
+        bool needsTraversal = false;
+        if (clearTransactionFlags(eTransactionFlushNeeded)) {
+            needsTraversal = flushTransactionQueues();
+        }
+
+        const bool shouldCommit =
+                (getTransactionFlags() & ~eTransactionFlushNeeded) || needsTraversal;
+        if (shouldCommit) {
+            commitTransactions();
+        }
+
+        if (transactionFlushNeeded()) {
+            setTransactionFlags(eTransactionFlushNeeded);
+        }
+
+        mustComposite |= shouldCommit;
         mustComposite |= latchBuffers();
 
-        updateLayerGeometry();
-
-        if (tracePreComposition) {
-            if (mVisibleRegionsDirty) {
-                mTracing.notifyLocked("visibleRegionsDirty");
-            }
+        // This has to be called after latchBuffers because we want to include the layers that have
+        // been latched in the commit callback
+        if (!needsTraversal) {
+            // Invoke empty transaction callbacks early.
+            mTransactionCallbackInvoker.sendCallbacks(false /* onCommitOnly */);
+        } else {
+            // Invoke OnCommit callbacks.
+            mTransactionCallbackInvoker.sendCallbacks(true /* onCommitOnly */);
         }
+
+        updateLayerGeometry();
     }
 
     // Layers need to get updated (in the previous line) before we can use them for
@@ -1999,34 +2010,6 @@
     return mustComposite && CC_LIKELY(mBootStage != BootStage::BOOTLOADER);
 }
 
-bool SurfaceFlinger::flushAndCommitTransactions() {
-    ATRACE_CALL();
-
-    bool needsTraversal = false;
-    if (clearTransactionFlags(eTransactionFlushNeeded)) {
-        needsTraversal = flushTransactionQueues();
-    }
-
-    const bool shouldCommit = (getTransactionFlags() & ~eTransactionFlushNeeded) || needsTraversal;
-    if (shouldCommit) {
-        commitTransactions();
-    }
-
-    if (!needsTraversal) {
-        // Invoke empty transaction callbacks early.
-        mTransactionCallbackInvoker.sendCallbacks(false /* onCommitOnly */);
-    } else {
-        // Invoke OnCommit callbacks.
-        mTransactionCallbackInvoker.sendCallbacks(true /* onCommitOnly */);
-    }
-
-    if (transactionFlushNeeded()) {
-        setTransactionFlags(eTransactionFlushNeeded);
-    }
-
-    return shouldCommit;
-}
-
 void SurfaceFlinger::composite(nsecs_t frameTime) {
     ATRACE_CALL();
 
@@ -2073,7 +2056,7 @@
     const auto hwcMinWorkDuration = mVsyncConfiguration->getCurrentConfigs().hwcMinWorkDuration;
     refreshArgs.earliestPresentTime = prevVsyncTime - hwcMinWorkDuration;
     refreshArgs.previousPresentFence = mPreviousPresentFences[0].fenceTime;
-    refreshArgs.scheduledFrameTime = mEventQueue->getScheduledFrameTime();
+    refreshArgs.scheduledFrameTime = mScheduler->getScheduledFrameTime();
 
     // Store the present time just before calling to the composition engine so we could notify
     // the scheduler.
@@ -2112,12 +2095,12 @@
     modulateVsync(&VsyncModulator::onDisplayRefresh, usedGpuComposition);
 
     mLayersWithQueuedFrames.clear();
-    if (mTracingEnabled && mTracePostComposition) {
-        // This may block if SurfaceTracing is running in sync mode.
+    if (mTracingEnabled) {
+        // This will block and should only be used for debugging.
         if (mVisibleRegionsDirty) {
-            mTracing.notify("visibleRegionsDirty");
-        } else if (mTracing.flagIsSet(SurfaceTracing::TRACE_BUFFERS)) {
-            mTracing.notify("bufferLatched");
+            mLayerTracing.notify("visibleRegionsDirty");
+        } else if (mLayerTracing.flagIsSet(LayerTracing::TRACE_BUFFERS)) {
+            mLayerTracing.notify("bufferLatched");
         }
     }
 
@@ -2800,7 +2783,7 @@
     mDisplays.erase(displayToken);
 
     if (display && display->isVirtual()) {
-        static_cast<void>(schedule([display = std::move(display)] {
+        static_cast<void>(mScheduler->schedule([display = std::move(display)] {
             // Destroy the display without holding the mStateLock.
             // This is a temporary solution until we can manage transaction queues without
             // holding the mStateLock.
@@ -3043,7 +3026,9 @@
 void SurfaceFlinger::notifyWindowInfos() {
     std::vector<WindowInfo> windowInfos;
     std::vector<DisplayInfo> displayInfos;
-    std::unordered_map<uint32_t /*layerStackId*/, const ui::Transform> displayTransforms;
+    std::unordered_map<uint32_t /*layerStackId*/,
+                       std::pair<bool /* isSecure */, const ui::Transform>>
+            inputDisplayDetails;
 
     for (const auto& [_, display] : ON_MAIN_THREAD(mDisplays)) {
         if (!display->receivesInput()) {
@@ -3051,7 +3036,8 @@
         }
         const uint32_t layerStackId = display->getLayerStack().id;
         const auto& [info, transform] = display->getInputInfo();
-        const auto& [it, emplaced] = displayTransforms.try_emplace(layerStackId, transform);
+        const auto& [it, emplaced] =
+                inputDisplayDetails.try_emplace(layerStackId, display->isSecure(), transform);
         if (!emplaced) {
             ALOGE("Multiple displays claim to accept input for the same layer stack: %u",
                   layerStackId);
@@ -3063,19 +3049,21 @@
     mDrawingState.traverseInReverseZOrder([&](Layer* layer) {
         if (!layer->needsInputInfo()) return;
 
-        const DisplayDevice* display = ON_MAIN_THREAD(getDisplayWithInputByLayer(layer)).get();
+        bool isSecure = true;
         ui::Transform displayTransform = ui::Transform();
 
-        if (display != nullptr) {
-            // When calculating the screen bounds we ignore the transparent region since it may
-            // result in an unwanted offset.
-            const auto it = displayTransforms.find(display->getLayerStack().id);
-            if (it != displayTransforms.end()) {
-                displayTransform = it->second;
-            }
+        const uint32_t layerStackId = layer->getLayerStack().id;
+        const auto it = inputDisplayDetails.find(layerStackId);
+        if (it != inputDisplayDetails.end()) {
+            const auto& [secure, transform] = it->second;
+            isSecure = secure;
+            displayTransform = transform;
+        } else {
+            ALOGE("No input-enabled display found for layer `%s` on layer stack id: %d",
+                  layer->getDebugName(), layerStackId);
         }
-        const bool displayIsSecure = !display || display->isSecure();
-        windowInfos.push_back(layer->fillInputInfo(displayTransform, displayIsSecure));
+
+        windowInfos.push_back(layer->fillInputInfo(displayTransform, isSecure));
     });
     mWindowInfosListenerInvoker->windowInfosChanged(windowInfos, displayInfos,
                                                     mInputWindowCommands.syncInputWindows);
@@ -3140,8 +3128,20 @@
     mVsyncConfiguration = getFactory().createVsyncConfiguration(currRefreshRate);
     mVsyncModulator = sp<VsyncModulator>::make(mVsyncConfiguration->getCurrentConfigs());
 
-    // start the EventThread
-    mScheduler = getFactory().createScheduler(display->holdRefreshRateConfigs(), *this);
+    const Scheduler::Options options = {
+            .useContentDetection = sysprop::use_content_detection_for_refresh_rate(false)};
+
+    mScheduler = std::make_unique<Scheduler>(static_cast<ICompositor&>(*this),
+                                             static_cast<ISchedulerCallback&>(*this), options);
+    {
+        auto configs = display->holdRefreshRateConfigs();
+        mScheduler->createVsyncSchedule(configs->supportsKernelIdleTimer());
+        mScheduler->setRefreshRateConfigs(std::move(configs));
+    }
+
+    setVsyncEnabled(false);
+    mScheduler->startTimers();
+
     const auto configs = mVsyncConfiguration->getCurrentConfigs();
     const nsecs_t vsyncPeriod = currRefreshRate.getPeriodNsecs();
     mAppConnectionHandle =
@@ -3157,8 +3157,8 @@
                                              mInterceptor->saveVSyncEvent(timestamp);
                                          });
 
-    mEventQueue->initVsync(mScheduler->getVsyncDispatch(), *mFrameTimeline->getTokenManager(),
-                           configs.late.sfWorkDuration);
+    mScheduler->initVsync(mScheduler->getVsyncDispatch(), *mFrameTimeline->getTokenManager(),
+                          configs.late.sfWorkDuration);
 
     mRegionSamplingThread =
             new RegionSamplingThread(*this, RegionSamplingThread::EnvironmentTimingTunables());
@@ -3192,7 +3192,7 @@
     mScheduler->setDuration(mSfConnectionHandle,
                             /*workDuration=*/std::chrono::nanoseconds(vsyncPeriod),
                             /*readyDuration=*/config.sfWorkDuration);
-    mEventQueue->setDuration(config.sfWorkDuration);
+    mScheduler->setDuration(config.sfWorkDuration);
 }
 
 void SurfaceFlinger::doCommitTransactions() {
@@ -3557,10 +3557,13 @@
 bool SurfaceFlinger::checkTransactionCanLatchUnsignaled(const TransactionState& transaction) {
     if (transaction.states.size() == 1) {
         const auto& state = transaction.states.begin()->state;
-        return (state.flags & ~layer_state_t::eBufferChanged) == 0 &&
-                state.bufferData.flags.test(BufferData::BufferDataChange::fenceChanged) &&
-                state.bufferData.acquireFence &&
-                state.bufferData.acquireFence->getStatus() == Fence::Status::Unsignaled;
+        if ((state.flags & ~layer_state_t::eBufferChanged) == 0 &&
+            state.bufferData.flags.test(BufferData::BufferDataChange::fenceChanged) &&
+            state.bufferData.acquireFence &&
+            state.bufferData.acquireFence->getStatus() == Fence::Status::Unsignaled) {
+            ATRACE_NAME("transactionCanLatchUnsignaled");
+            return true;
+        }
     }
     return false;
 }
@@ -4463,26 +4466,7 @@
 
 void SurfaceFlinger::initializeDisplays() {
     // Async since we may be called from the main thread.
-    static_cast<void>(schedule([this]() MAIN_THREAD { onInitializeDisplays(); }));
-}
-
-sp<DisplayDevice> SurfaceFlinger::getDisplayWithInputByLayer(Layer* layer) const {
-    const auto filter = layer->getOutputFilter();
-    sp<DisplayDevice> inputDisplay;
-
-    for (const auto& [_, display] : mDisplays) {
-        if (!display->receivesInput() || !display->getCompositionDisplay()->includesLayer(filter)) {
-            continue;
-        }
-        // Don't return immediately so that we can log duplicates.
-        if (inputDisplay) {
-            ALOGE("Multiple displays claim to accept input for the same layer stack: %u",
-                  filter.layerStack.id);
-            continue;
-        }
-        inputDisplay = display;
-    }
-    return inputDisplay;
+    static_cast<void>(mScheduler->schedule([this]() MAIN_THREAD { onInitializeDisplays(); }));
 }
 
 void SurfaceFlinger::setPowerModeInternal(const sp<DisplayDevice>& display, hal::PowerMode mode) {
@@ -4582,7 +4566,7 @@
 }
 
 void SurfaceFlinger::setPowerMode(const sp<IBinder>& displayToken, int mode) {
-    schedule([=]() MAIN_THREAD {
+    auto future = mScheduler->schedule([=]() MAIN_THREAD {
         const auto display = getDisplayDeviceLocked(displayToken);
         if (!display) {
             ALOGE("Attempt to set power mode %d for invalid display token %p", mode,
@@ -4592,7 +4576,9 @@
         } else {
             setPowerModeInternal(display, static_cast<hal::PowerMode>(mode));
         }
-    }).wait();
+    });
+
+    future.wait();
 }
 
 status_t SurfaceFlinger::doDump(int fd, const DumpArgs& args, bool asProto) {
@@ -4642,7 +4628,7 @@
         }
 
         if (dumpLayers) {
-            LayersTraceFileProto traceFileProto = SurfaceTracing::createLayersTraceFileProto();
+            LayersTraceFileProto traceFileProto = mLayerTracing.createTraceFileProto();
             LayersTraceProto* layersTrace = traceFileProto.add_entry();
             LayersProto layersProto = dumpProtoFromMainThread();
             layersTrace->mutable_layers()->Swap(&layersProto);
@@ -4664,8 +4650,8 @@
 }
 
 status_t SurfaceFlinger::dumpCritical(int fd, const DumpArgs&, bool asProto) {
-    if (asProto && mTracing.isEnabled()) {
-        mTracing.writeToFile();
+    if (asProto && mLayerTracing.isEnabled()) {
+        mLayerTracing.writeToFile();
     }
 
     return doDump(fd, DumpArgs(), asProto);
@@ -4914,21 +4900,21 @@
 }
 
 LayersProto SurfaceFlinger::dumpProtoFromMainThread(uint32_t traceFlags) {
-    return schedule([=] { return dumpDrawingStateProto(traceFlags); }).get();
+    return mScheduler->schedule([=] { return dumpDrawingStateProto(traceFlags); }).get();
 }
 
 void SurfaceFlinger::dumpOffscreenLayers(std::string& result) {
+    auto future = mScheduler->schedule([this] {
+        std::string result;
+        for (Layer* offscreenLayer : mOffscreenLayers) {
+            offscreenLayer->traverse(LayerVector::StateSet::Drawing,
+                                     [&](Layer* layer) { layer->dumpCallingUidPid(result); });
+        }
+        return result;
+    });
+
     result.append("Offscreen Layers:\n");
-    result.append(schedule([this] {
-                      std::string result;
-                      for (Layer* offscreenLayer : mOffscreenLayers) {
-                          offscreenLayer->traverse(LayerVector::StateSet::Drawing,
-                                                   [&](Layer* layer) {
-                                                       layer->dumpCallingUidPid(result);
-                                                   });
-                      }
-                      return result;
-                  }).get());
+    result.append(future.get());
 }
 
 void SurfaceFlinger::dumpAllLocked(const DumpArgs& args, std::string& result) const {
@@ -5061,7 +5047,7 @@
     /*
      * Tracing state
      */
-    mTracing.dump(result);
+    mLayerTracing.dump(result);
     result.append("\n");
 
     /*
@@ -5338,6 +5324,7 @@
                 scheduleRepaint();
                 return NO_ERROR;
             case 1004: // Force composite ahead of next VSYNC.
+            case 1006: // Formerly "force composite immediately" (removed below); now handled like 1004.
                 scheduleComposite(FrameHint::kActive);
                 return NO_ERROR;
             case 1005: { // Force commit ahead of next VSYNC.
@@ -5346,9 +5333,6 @@
                                     eTraversalNeeded);
                 return NO_ERROR;
             }
-            case 1006: // Force composite immediately.
-                mEventQueue->scheduleComposite();
-                return NO_ERROR;
             case 1007: // Unused.
                 return NAME_NOT_FOUND;
             case 1008: // Toggle forced GPU composition.
@@ -5459,7 +5443,8 @@
             }
             case 1021: { // Disable HWC virtual displays
                 const bool enable = data.readInt32() != 0;
-                static_cast<void>(schedule([this, enable] { enableHalVirtualDisplays(enable); }));
+                static_cast<void>(
+                        mScheduler->schedule([this, enable] { enableHalVirtualDisplays(enable); }));
                 return NO_ERROR;
             }
             case 1022: { // Set saturation boost
@@ -5488,20 +5473,21 @@
                 bool tracingEnabledChanged;
                 if (n) {
                     ALOGD("LayerTracing enabled");
-                    tracingEnabledChanged = mTracing.enable();
+                    tracingEnabledChanged = mLayerTracing.enable();
                     if (tracingEnabledChanged) {
-                        schedule([&]() MAIN_THREAD { mTracing.notify("start"); }).wait();
+                        mScheduler->schedule([&]() MAIN_THREAD { mLayerTracing.notify("start"); })
+                                .wait();
                     }
                 } else {
                     ALOGD("LayerTracing disabled");
-                    tracingEnabledChanged = mTracing.disable();
+                    tracingEnabledChanged = mLayerTracing.disable();
                 }
                 mTracingEnabledChanged = tracingEnabledChanged;
                 reply->writeInt32(NO_ERROR);
                 return NO_ERROR;
             }
             case 1026: { // Get layer tracing status
-                reply->writeBool(mTracing.isEnabled());
+                reply->writeBool(mLayerTracing.isEnabled());
                 return NO_ERROR;
             }
             // Is a DisplayColorSetting supported?
@@ -5542,7 +5528,7 @@
                 }
 
                 ALOGD("Updating trace buffer to %d KB", n);
-                mTracing.setBufferSize(n * 1024);
+                mLayerTracing.setBufferSize(n * 1024);
                 reply->writeInt32(NO_ERROR);
                 return NO_ERROR;
             }
@@ -5587,12 +5573,12 @@
             case 1033: {
                 n = data.readUint32();
                 ALOGD("Updating trace flags to 0x%x", n);
-                mTracing.setTraceFlags(n);
+                mLayerTracing.setTraceFlags(n);
                 reply->writeInt32(NO_ERROR);
                 return NO_ERROR;
             }
             case 1034: {
-                schedule([&] {
+                auto future = mScheduler->schedule([&] {
                     switch (n = data.readInt32()) {
                         case 0:
                         case 1:
@@ -5602,7 +5588,9 @@
                             reply->writeBool(ON_MAIN_THREAD(isRefreshRateOverlayEnabled()));
                         }
                     }
-                }).get();
+                });
+
+                future.wait();
                 return NO_ERROR;
             }
             case 1035: {
@@ -5632,32 +5620,36 @@
             // rates.
             case 1036: {
                 if (data.readInt32() > 0) { // turn on
-                    return schedule([this] {
-                               const auto display = ON_MAIN_THREAD(getDefaultDisplayDeviceLocked());
+                    return mScheduler
+                            ->schedule([this] {
+                                const auto display =
+                                        ON_MAIN_THREAD(getDefaultDisplayDeviceLocked());
 
-                               // This is a little racy, but not in a way that hurts anything. As we
-                               // grab the defaultMode from the display manager policy, we could be
-                               // setting a new display manager policy, leaving us using a stale
-                               // defaultMode. The defaultMode doesn't matter for the override
-                               // policy though, since we set allowGroupSwitching to true, so it's
-                               // not a problem.
-                               scheduler::RefreshRateConfigs::Policy overridePolicy;
-                               overridePolicy.defaultMode = display->refreshRateConfigs()
-                                                                    .getDisplayManagerPolicy()
-                                                                    .defaultMode;
-                               overridePolicy.allowGroupSwitching = true;
-                               constexpr bool kOverridePolicy = true;
-                               return setDesiredDisplayModeSpecsInternal(display, overridePolicy,
-                                                                         kOverridePolicy);
-                           })
+                                // This is a little racy, but not in a way that hurts anything. As
+                                // we grab the defaultMode from the display manager policy, we could
+                                // be setting a new display manager policy, leaving us using a stale
+                                // defaultMode. The defaultMode doesn't matter for the override
+                                // policy though, since we set allowGroupSwitching to true, so it's
+                                // not a problem.
+                                scheduler::RefreshRateConfigs::Policy overridePolicy;
+                                overridePolicy.defaultMode = display->refreshRateConfigs()
+                                                                     .getDisplayManagerPolicy()
+                                                                     .defaultMode;
+                                overridePolicy.allowGroupSwitching = true;
+                                constexpr bool kOverridePolicy = true;
+                                return setDesiredDisplayModeSpecsInternal(display, overridePolicy,
+                                                                          kOverridePolicy);
+                            })
                             .get();
                 } else { // turn off
-                    return schedule([this] {
-                               const auto display = ON_MAIN_THREAD(getDefaultDisplayDeviceLocked());
-                               constexpr bool kOverridePolicy = true;
-                               return setDesiredDisplayModeSpecsInternal(display, {},
-                                                                         kOverridePolicy);
-                           })
+                    return mScheduler
+                            ->schedule([this] {
+                                const auto display =
+                                        ON_MAIN_THREAD(getDefaultDisplayDeviceLocked());
+                                constexpr bool kOverridePolicy = true;
+                                return setDesiredDisplayModeSpecsInternal(display, {},
+                                                                          kOverridePolicy);
+                            })
                             .get();
                 }
             }
@@ -5702,31 +5694,29 @@
             // Second argument is an optional uint64 - if present, then limits enabling/disabling
             // caching to a particular physical display
             case 1040: {
-                status_t error =
-                        schedule([&] {
-                            n = data.readInt32();
-                            std::optional<PhysicalDisplayId> inputId = std::nullopt;
-                            if (uint64_t inputDisplayId;
-                                data.readUint64(&inputDisplayId) == NO_ERROR) {
-                                inputId = DisplayId::fromValue<PhysicalDisplayId>(inputDisplayId);
-                                if (!inputId || getPhysicalDisplayToken(*inputId)) {
-                                    ALOGE("No display with id: %" PRIu64, inputDisplayId);
-                                    return NAME_NOT_FOUND;
-                                }
+                auto future = mScheduler->schedule([&] {
+                    n = data.readInt32();
+                    std::optional<PhysicalDisplayId> inputId = std::nullopt;
+                    if (uint64_t inputDisplayId; data.readUint64(&inputDisplayId) == NO_ERROR) {
+                        inputId = DisplayId::fromValue<PhysicalDisplayId>(inputDisplayId);
+                        if (!inputId || getPhysicalDisplayToken(*inputId)) {
+                            ALOGE("No display with id: %" PRIu64, inputDisplayId);
+                            return NAME_NOT_FOUND;
+                        }
+                    }
+                    {
+                        Mutex::Autolock lock(mStateLock);
+                        mLayerCachingEnabled = n != 0;
+                        for (const auto& [_, display] : mDisplays) {
+                            if (!inputId || *inputId == display->getPhysicalId()) {
+                                display->enableLayerCaching(mLayerCachingEnabled);
                             }
-                            {
-                                Mutex::Autolock lock(mStateLock);
-                                mLayerCachingEnabled = n != 0;
-                                for (const auto& [_, display] : mDisplays) {
-                                    if (!inputId || *inputId == display->getPhysicalId()) {
-                                        display->enableLayerCaching(mLayerCachingEnabled);
-                                    }
-                                }
-                            }
-                            return OK;
-                        }).get();
+                        }
+                    }
+                    return OK;
+                });
 
-                if (error != OK) {
+                if (const status_t error = future.get(); error != OK) {
                     return error;
                 }
                 scheduleRepaint();
@@ -5745,7 +5735,7 @@
 
     // Update the overlay on the main thread to avoid race conditions with
     // mRefreshRateConfigs->getCurrentRefreshRate()
-    static_cast<void>(schedule([=] {
+    static_cast<void>(mScheduler->schedule([=] {
         const auto display = ON_MAIN_THREAD(getDefaultDisplayDeviceLocked());
         if (!display) {
             ALOGW("%s: default display is null", __func__);
@@ -5760,7 +5750,7 @@
         const bool timerExpired = mKernelIdleTimerEnabled && expired;
 
         if (display->onKernelTimerChanged(desiredModeId, timerExpired)) {
-            mEventQueue->scheduleCommit();
+            mScheduler->scheduleFrame();
         }
     }));
 }
@@ -6152,14 +6142,15 @@
     const bool supportsProtected = getRenderEngine().supportsProtectedContent();
     bool hasProtectedLayer = false;
     if (allowProtected && supportsProtected) {
-        hasProtectedLayer = schedule([=]() {
-                                bool protectedLayerFound = false;
-                                traverseLayers([&](Layer* layer) {
-                                    protectedLayerFound = protectedLayerFound ||
-                                            (layer->isVisible() && layer->isProtected());
-                                });
-                                return protectedLayerFound;
-                            }).get();
+        auto future = mScheduler->schedule([=]() {
+            bool protectedLayerFound = false;
+            traverseLayers([&](Layer* layer) {
+                protectedLayerFound =
+                        protectedLayerFound || (layer->isVisible() && layer->isProtected());
+            });
+            return protectedLayerFound;
+        });
+        hasProtectedLayer = future.get();
     }
 
     const uint32_t usage = GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_RENDER |
@@ -6190,9 +6181,11 @@
 
     bool canCaptureBlackoutContent = hasCaptureBlackoutContentPermission();
 
-    auto scheduleResultFuture = schedule([=,
-                                          renderAreaFuture = std::move(renderAreaFuture)]() mutable
-                                         -> std::shared_future<renderengine::RenderEngineResult> {
+    auto scheduleResultFuture = mScheduler->schedule([=,
+                                                      renderAreaFuture =
+                                                              std::move(renderAreaFuture)]() mutable
+                                                     -> std::shared_future<
+                                                             renderengine::RenderEngineResult> {
         ScreenCaptureResults captureResults;
         std::unique_ptr<RenderArea> renderArea = renderAreaFuture.get();
         if (!renderArea) {
@@ -6485,7 +6478,7 @@
         return BAD_VALUE;
     }
 
-    auto future = schedule([=]() -> status_t {
+    auto future = mScheduler->schedule([=]() -> status_t {
         const auto display = ON_MAIN_THREAD(getDisplayDeviceLocked(displayToken));
         if (!display) {
             ALOGE("Attempt to set desired display modes for invalid display token %p",
@@ -6622,7 +6615,7 @@
         return BAD_VALUE;
     }
 
-    static_cast<void>(schedule([=] {
+    static_cast<void>(mScheduler->schedule([=] {
         Mutex::Autolock lock(mStateLock);
         if (authenticateSurfaceTextureLocked(surface)) {
             sp<Layer> layer = (static_cast<MonitoredProducer*>(surface.get()))->getLayer();
@@ -6814,7 +6807,7 @@
         return;
     }
 
-    mRegionSamplingThread->onCompositionComplete(mEventQueue->getScheduledFrameTime());
+    mRegionSamplingThread->onCompositionComplete(mScheduler->getScheduledFrameTime());
 }
 
 void SurfaceFlinger::onActiveDisplaySizeChanged(const sp<DisplayDevice>& activeDisplay) {
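The SurfaceFlinger.cpp changes above repeat one pattern: the removed SurfaceFlinger::schedule() helper becomes mScheduler->schedule(), which posts the lambda to the main thread and hands back a std::future that the call site either get()s, wait()s on, or explicitly discards. Below is a minimal, self-contained sketch of that pattern; MiniScheduler and drainOnMainThread are hypothetical names used only for illustration, not the real scheduler::Scheduler or MessageQueue API.

// Hypothetical stand-in for the scheduler used above: queue a task for the
// main thread and return a std::future for its result.
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <type_traits>
#include <utility>

class MiniScheduler {
public:
    template <typename F, typename T = std::invoke_result_t<F>>
    [[nodiscard]] std::future<T> schedule(F&& f) {
        // std::packaged_task is move-only, so wrap it in a shared_ptr to store
        // the callable in a copyable std::function.
        auto task = std::make_shared<std::packaged_task<T()>>(std::forward<F>(f));
        std::future<T> future = task->get_future();
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mTasks.push([task] { (*task)(); });
        }
        return future;
    }

    // Run queued tasks; in SurfaceFlinger this would happen on the main thread.
    void drainOnMainThread() {
        std::queue<std::function<void()>> tasks;
        {
            std::lock_guard<std::mutex> lock(mMutex);
            std::swap(tasks, mTasks);
        }
        while (!tasks.empty()) {
            tasks.front()();
            tasks.pop();
        }
    }

private:
    std::mutex mMutex;
    std::queue<std::function<void()>> mTasks;
};

int main() {
    MiniScheduler scheduler;
    auto future = scheduler.schedule([] { return 7; });
    scheduler.drainOnMainThread();
    std::cout << future.get() << "\n";  // blocks until the task has run, like future.get() above
    return 0;
}

Returning the future keeps the blocking decision at the call site, which is why the diff binds it to a local variable before calling wait() or get() rather than chaining the call.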
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index b432f24..6093be9 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -56,14 +56,13 @@
 #include "Fps.h"
 #include "FrameTracker.h"
 #include "LayerVector.h"
-#include "Scheduler/MessageQueue.h"
 #include "Scheduler/RefreshRateConfigs.h"
 #include "Scheduler/RefreshRateStats.h"
 #include "Scheduler/Scheduler.h"
 #include "Scheduler/VsyncModulator.h"
 #include "SurfaceFlingerFactory.h"
-#include "SurfaceTracing.h"
 #include "TracedOrdinal.h"
+#include "Tracing/LayerTracing.h"
 #include "TransactionCallbackInvoker.h"
 #include "TransactionState.h"
 
@@ -265,10 +264,6 @@
     SurfaceFlingerBE& getBE() { return mBE; }
     const SurfaceFlingerBE& getBE() const { return mBE; }
 
-    // Schedule an asynchronous or synchronous task on the main thread.
-    template <typename F, typename T = std::invoke_result_t<F>>
-    [[nodiscard]] std::future<T> schedule(F&&);
-
     // Schedule commit of transactions on the main thread ahead of the next VSYNC.
     void scheduleCommit(FrameHint);
     // As above, but also force composite regardless if transactions were committed.
@@ -353,7 +348,7 @@
     friend class MonitoredProducer;
     friend class RefreshRateOverlay;
     friend class RegionSamplingThread;
-    friend class SurfaceTracing;
+    friend class LayerTracing;
 
     // For unit tests
     friend class TestableSurfaceFlinger;
@@ -684,9 +679,6 @@
             const std::optional<scheduler::RefreshRateConfigs::Policy>& policy, bool overridePolicy)
             EXCLUDES(mStateLock);
 
-    // Returns whether transactions were committed.
-    bool flushAndCommitTransactions() EXCLUDES(mStateLock);
-
     void commitTransactions() EXCLUDES(mStateLock);
     void commitTransactionsLocked(uint32_t transactionFlags) REQUIRES(mStateLock);
     void doCommitTransactions() REQUIRES(mStateLock);
@@ -890,8 +882,6 @@
     // region of all screens presenting this layer stack.
     void invalidateLayerStack(const sp<const Layer>& layer, const Region& dirty);
 
-    sp<DisplayDevice> getDisplayWithInputByLayer(Layer* layer) const REQUIRES(mStateLock);
-
     bool isDisplayActiveLocked(const sp<const DisplayDevice>& display) const REQUIRES(mStateLock) {
         return display->getDisplayToken() == mActiveDisplayToken;
     }
@@ -1042,12 +1032,12 @@
     void dumpWideColorInfo(std::string& result) const REQUIRES(mStateLock);
     LayersProto dumpDrawingStateProto(uint32_t traceFlags) const;
     void dumpOffscreenLayersProto(LayersProto& layersProto,
-                                  uint32_t traceFlags = SurfaceTracing::TRACE_ALL) const;
+                                  uint32_t traceFlags = LayerTracing::TRACE_ALL) const;
     void dumpDisplayProto(LayersTraceProto& layersTraceProto) const;
 
     // Dumps state from HW Composer
     void dumpHwc(std::string& result) const;
-    LayersProto dumpProtoFromMainThread(uint32_t traceFlags = SurfaceTracing::TRACE_ALL)
+    LayersProto dumpProtoFromMainThread(uint32_t traceFlags = LayerTracing::TRACE_ALL)
             EXCLUDES(mStateLock);
     void dumpOffscreenLayers(std::string& result) EXCLUDES(mStateLock);
     void dumpPlannerInfo(const DumpArgs& args, std::string& result) const REQUIRES(mStateLock);
@@ -1190,10 +1180,9 @@
     bool mPropagateBackpressureClientComposition = false;
     sp<SurfaceInterceptor> mInterceptor;
 
-    SurfaceTracing mTracing{*this};
+    LayerTracing mLayerTracing{*this};
     std::mutex mTracingLock;
     bool mTracingEnabled = false;
-    bool mTracePostComposition = false;
     std::atomic<bool> mTracingEnabledChanged = false;
 
     const std::shared_ptr<TimeStats> mTimeStats;
@@ -1210,14 +1199,9 @@
 
     TransactionCallbackInvoker mTransactionCallbackInvoker;
 
-    // these are thread safe
-    std::unique_ptr<MessageQueue> mEventQueue;
+    // Thread-safe.
     FrameTracker mAnimFrameTracker;
 
-    // protected by mDestroyedLayerLock;
-    mutable Mutex mDestroyedLayerLock;
-    Vector<Layer const *> mDestroyedLayers;
-
     // We maintain a pool of pre-generated texture names to hand out to avoid
     // layer creation needing to run on the main thread (which it would
     // otherwise need to do to access RenderEngine).
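The header removes the [[nodiscard]] schedule() member template, which is also why the .cpp changes keep the static_cast<void>(...) idiom around mScheduler->schedule(): a fire-and-forget call must discard the returned future explicitly. A small, self-contained illustration follows; the schedule() stand-in below runs the task eagerly and is an assumption made for demonstration only.

#include <future>
#include <type_traits>
#include <utility>

// Stand-in: a [[nodiscard]] schedule() that runs the task immediately instead of
// posting it to another thread.
template <typename F, typename T = std::invoke_result_t<F>>
[[nodiscard]] std::future<T> schedule(F&& f) {
    std::packaged_task<T()> task(std::forward<F>(f));
    std::future<T> future = task.get_future();
    task();
    return future;
}

int main() {
    // Synchronous use: block on the result.
    const int value = schedule([] { return 42; }).get();

    // Fire-and-forget use: silence the [[nodiscard]] warning, as the call sites above do.
    static_cast<void>(schedule([] { /* side effects only */ }));

    return value == 42 ? 0 : 1;
}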
diff --git a/services/surfaceflinger/SurfaceFlingerDefaultFactory.cpp b/services/surfaceflinger/SurfaceFlingerDefaultFactory.cpp
index 9a2f910..b81b445 100644
--- a/services/surfaceflinger/SurfaceFlingerDefaultFactory.cpp
+++ b/services/surfaceflinger/SurfaceFlingerDefaultFactory.cpp
@@ -38,7 +38,6 @@
 #include "SurfaceInterceptor.h"
 
 #include "DisplayHardware/ComposerHal.h"
-#include "Scheduler/MessageQueue.h"
 #include "Scheduler/Scheduler.h"
 #include "Scheduler/VsyncConfiguration.h"
 #include "Scheduler/VsyncController.h"
@@ -51,10 +50,6 @@
     return std::make_unique<android::impl::HWComposer>(serviceName);
 }
 
-std::unique_ptr<MessageQueue> DefaultFactory::createMessageQueue(ICompositor& compositor) {
-    return std::make_unique<android::impl::MessageQueue>(compositor);
-}
-
 std::unique_ptr<scheduler::VsyncConfiguration> DefaultFactory::createVsyncConfiguration(
         Fps currentRefreshRate) {
     if (property_get_bool("debug.sf.use_phase_offsets_as_durations", false)) {
@@ -64,12 +59,6 @@
     }
 }
 
-std::unique_ptr<Scheduler> DefaultFactory::createScheduler(
-        const std::shared_ptr<scheduler::RefreshRateConfigs>& refreshRateConfigs,
-        ISchedulerCallback& callback) {
-    return std::make_unique<Scheduler>(std::move(refreshRateConfigs), callback);
-}
-
 sp<SurfaceInterceptor> DefaultFactory::createSurfaceInterceptor() {
     return new android::impl::SurfaceInterceptor();
 }
diff --git a/services/surfaceflinger/SurfaceFlingerDefaultFactory.h b/services/surfaceflinger/SurfaceFlingerDefaultFactory.h
index 2be09ee..501629d 100644
--- a/services/surfaceflinger/SurfaceFlingerDefaultFactory.h
+++ b/services/surfaceflinger/SurfaceFlingerDefaultFactory.h
@@ -27,11 +27,8 @@
     virtual ~DefaultFactory();
 
     std::unique_ptr<HWComposer> createHWComposer(const std::string& serviceName) override;
-    std::unique_ptr<MessageQueue> createMessageQueue(ICompositor&) override;
     std::unique_ptr<scheduler::VsyncConfiguration> createVsyncConfiguration(
             Fps currentRefreshRate) override;
-    std::unique_ptr<Scheduler> createScheduler(
-            const std::shared_ptr<scheduler::RefreshRateConfigs>&, ISchedulerCallback&) override;
     sp<SurfaceInterceptor> createSurfaceInterceptor() override;
     sp<StartPropertySetThread> createStartPropertySetThread(bool timestampPropertyValue) override;
     sp<DisplayDevice> createDisplayDevice(DisplayDeviceCreationArgs&) override;
diff --git a/services/surfaceflinger/SurfaceFlingerFactory.h b/services/surfaceflinger/SurfaceFlingerFactory.h
index bca533b..e670f37 100644
--- a/services/surfaceflinger/SurfaceFlingerFactory.h
+++ b/services/surfaceflinger/SurfaceFlingerFactory.h
@@ -77,11 +77,8 @@
 class Factory {
 public:
     virtual std::unique_ptr<HWComposer> createHWComposer(const std::string& serviceName) = 0;
-    virtual std::unique_ptr<MessageQueue> createMessageQueue(ICompositor&) = 0;
     virtual std::unique_ptr<scheduler::VsyncConfiguration> createVsyncConfiguration(
             Fps currentRefreshRate) = 0;
-    virtual std::unique_ptr<Scheduler> createScheduler(
-            const std::shared_ptr<scheduler::RefreshRateConfigs>&, ISchedulerCallback&) = 0;
     virtual sp<SurfaceInterceptor> createSurfaceInterceptor() = 0;
 
     virtual sp<StartPropertySetThread> createStartPropertySetThread(
diff --git a/services/surfaceflinger/SurfaceTracing.cpp b/services/surfaceflinger/SurfaceTracing.cpp
deleted file mode 100644
index 5963737..0000000
--- a/services/surfaceflinger/SurfaceTracing.cpp
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#undef LOG_TAG
-#define LOG_TAG "SurfaceTracing"
-#define ATRACE_TAG ATRACE_TAG_GRAPHICS
-
-#include "SurfaceTracing.h"
-#include <SurfaceFlinger.h>
-
-#include <android-base/file.h>
-#include <android-base/stringprintf.h>
-#include <log/log.h>
-#include <utils/SystemClock.h>
-#include <utils/Trace.h>
-
-namespace android {
-
-SurfaceTracing::SurfaceTracing(SurfaceFlinger& flinger) : mFlinger(flinger) {}
-
-bool SurfaceTracing::enable() {
-    std::scoped_lock lock(mTraceLock);
-    if (mEnabled) {
-        return false;
-    }
-
-    if (flagIsSet(TRACE_SYNC)) {
-        runner = std::make_unique<SurfaceTracing::Runner>(mFlinger, mConfig);
-    } else {
-        runner = std::make_unique<SurfaceTracing::AsyncRunner>(mFlinger, mConfig,
-                                                               mFlinger.mTracingLock);
-    }
-    mEnabled = true;
-    return true;
-}
-
-bool SurfaceTracing::disable() {
-    std::scoped_lock lock(mTraceLock);
-    if (!mEnabled) {
-        return false;
-    }
-    mEnabled = false;
-    runner->stop();
-    return true;
-}
-
-bool SurfaceTracing::isEnabled() const {
-    std::scoped_lock lock(mTraceLock);
-    return mEnabled;
-}
-
-status_t SurfaceTracing::writeToFile() {
-    std::scoped_lock lock(mTraceLock);
-    if (!mEnabled) {
-        return STATUS_OK;
-    }
-    return runner->writeToFile();
-}
-
-void SurfaceTracing::notify(const char* where) {
-    std::scoped_lock lock(mTraceLock);
-    if (mEnabled) {
-        runner->notify(where);
-    }
-}
-
-void SurfaceTracing::notifyLocked(const char* where) {
-    std::scoped_lock lock(mTraceLock);
-    if (mEnabled) {
-        runner->notifyLocked(where);
-    }
-}
-
-void SurfaceTracing::dump(std::string& result) const {
-    std::scoped_lock lock(mTraceLock);
-    base::StringAppendF(&result, "Tracing state: %s\n", mEnabled ? "enabled" : "disabled");
-    if (mEnabled) {
-        runner->dump(result);
-    }
-}
-
-void SurfaceTracing::LayersTraceBuffer::reset(size_t newSize) {
-    // use the swap trick to make sure memory is released
-    std::queue<LayersTraceProto>().swap(mStorage);
-    mSizeInBytes = newSize;
-    mUsedInBytes = 0U;
-}
-
-void SurfaceTracing::LayersTraceBuffer::emplace(LayersTraceProto&& proto) {
-    size_t protoSize = static_cast<size_t>(proto.ByteSize());
-    while (mUsedInBytes + protoSize > mSizeInBytes) {
-        if (mStorage.empty()) {
-            return;
-        }
-        mUsedInBytes -= static_cast<size_t>(mStorage.front().ByteSize());
-        mStorage.pop();
-    }
-    mUsedInBytes += protoSize;
-    mStorage.emplace();
-    mStorage.back().Swap(&proto);
-}
-
-void SurfaceTracing::LayersTraceBuffer::flush(LayersTraceFileProto* fileProto) {
-    fileProto->mutable_entry()->Reserve(static_cast<int>(mStorage.size()));
-
-    while (!mStorage.empty()) {
-        auto entry = fileProto->add_entry();
-        entry->Swap(&mStorage.front());
-        mStorage.pop();
-    }
-}
-
-SurfaceTracing::Runner::Runner(SurfaceFlinger& flinger, SurfaceTracing::Config& config)
-      : mFlinger(flinger), mConfig(config) {
-    mBuffer.setSize(mConfig.bufferSize);
-}
-
-void SurfaceTracing::Runner::notify(const char* where) {
-    LayersTraceProto entry = traceLayers(where);
-    mBuffer.emplace(std::move(entry));
-}
-
-status_t SurfaceTracing::Runner::stop() {
-    return writeToFile();
-}
-
-LayersTraceFileProto SurfaceTracing::createLayersTraceFileProto() {
-    LayersTraceFileProto fileProto;
-    fileProto.set_magic_number(uint64_t(LayersTraceFileProto_MagicNumber_MAGIC_NUMBER_H) << 32 |
-                               LayersTraceFileProto_MagicNumber_MAGIC_NUMBER_L);
-    return fileProto;
-}
-
-status_t SurfaceTracing::Runner::writeToFile() {
-    ATRACE_CALL();
-
-    LayersTraceFileProto fileProto = createLayersTraceFileProto();
-    std::string output;
-
-    mBuffer.flush(&fileProto);
-    mBuffer.reset(mConfig.bufferSize);
-
-    if (!fileProto.SerializeToString(&output)) {
-        ALOGE("Could not save the proto file! Permission denied");
-        return PERMISSION_DENIED;
-    }
-
-    // -rw-r--r--
-    const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
-    if (!android::base::WriteStringToFile(output, DEFAULT_FILE_NAME, mode, getuid(), getgid(),
-                                          true)) {
-        ALOGE("Could not save the proto file! There are missing fields");
-        return PERMISSION_DENIED;
-    }
-
-    return NO_ERROR;
-}
-
-LayersTraceProto SurfaceTracing::Runner::traceLayers(const char* where) {
-    ATRACE_CALL();
-
-    LayersTraceProto entry;
-    entry.set_elapsed_realtime_nanos(elapsedRealtimeNano());
-    entry.set_where(where);
-    LayersProto layers(mFlinger.dumpDrawingStateProto(mConfig.flags));
-
-    if (flagIsSet(SurfaceTracing::TRACE_EXTRA)) {
-        mFlinger.dumpOffscreenLayersProto(layers);
-    }
-    entry.mutable_layers()->Swap(&layers);
-
-    if (flagIsSet(SurfaceTracing::TRACE_HWC)) {
-        std::string hwcDump;
-        mFlinger.dumpHwc(hwcDump);
-        entry.set_hwc_blob(hwcDump);
-    }
-    if (!flagIsSet(SurfaceTracing::TRACE_COMPOSITION)) {
-        entry.set_excludes_composition_state(true);
-    }
-    entry.set_missed_entries(mMissedTraceEntries);
-    mFlinger.dumpDisplayProto(entry);
-    return entry;
-}
-
-void SurfaceTracing::Runner::dump(std::string& result) const {
-    base::StringAppendF(&result, "  number of entries: %zu (%.2fMB / %.2fMB)\n",
-                        mBuffer.frameCount(), float(mBuffer.used()) / float(1_MB),
-                        float(mBuffer.size()) / float(1_MB));
-}
-
-SurfaceTracing::AsyncRunner::AsyncRunner(SurfaceFlinger& flinger, SurfaceTracing::Config& config,
-                                         std::mutex& sfLock)
-      : SurfaceTracing::Runner(flinger, config), mSfLock(sfLock) {
-    mEnabled = true;
-    mThread = std::thread(&AsyncRunner::loop, this);
-}
-
-void SurfaceTracing::AsyncRunner::loop() {
-    while (mEnabled) {
-        LayersTraceProto entry;
-        bool entryAdded = traceWhenNotified(&entry);
-        if (entryAdded) {
-            mBuffer.emplace(std::move(entry));
-        }
-        if (mWriteToFile) {
-            Runner::writeToFile();
-            mWriteToFile = false;
-        }
-    }
-}
-
-bool SurfaceTracing::AsyncRunner::traceWhenNotified(LayersTraceProto* outProto) {
-    std::unique_lock<std::mutex> lock(mSfLock);
-    mCanStartTrace.wait(lock);
-    if (!mAddEntry) {
-        return false;
-    }
-    *outProto = traceLayers(mWhere);
-    mAddEntry = false;
-    mMissedTraceEntries = 0;
-    return true;
-}
-
-void SurfaceTracing::AsyncRunner::notify(const char* where) {
-    std::scoped_lock lock(mSfLock);
-    notifyLocked(where);
-}
-
-void SurfaceTracing::AsyncRunner::notifyLocked(const char* where) {
-    mWhere = where;
-    if (mAddEntry) {
-        mMissedTraceEntries++;
-    }
-    mAddEntry = true;
-    mCanStartTrace.notify_one();
-}
-
-status_t SurfaceTracing::AsyncRunner::writeToFile() {
-    mWriteToFile = true;
-    mCanStartTrace.notify_one();
-    return STATUS_OK;
-}
-
-status_t SurfaceTracing::AsyncRunner::stop() {
-    mEnabled = false;
-    mCanStartTrace.notify_one();
-    mThread.join();
-    return Runner::writeToFile();
-}
-
-} // namespace android
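The deleted LayersTraceBuffer enforced a byte budget rather than an entry count: emplace() evicted the oldest protos until the incoming one fit, and flush() drained everything into the file proto. That responsibility moves to the templated RingBuffer that the new LayerTracing constructs below. A hedged, self-contained sketch of the eviction policy, with hypothetical names (ByteBudgetRingBuffer, and an explicit size argument standing in for proto.ByteSize()):

#include <cstddef>
#include <deque>
#include <string>
#include <utility>

template <typename Entry>
class ByteBudgetRingBuffer {
public:
    explicit ByteBudgetRingBuffer(size_t budgetBytes) : mBudget(budgetBytes) {}

    void emplace(Entry&& entry, size_t entryBytes) {
        // Evict from the front (oldest first) until the new entry fits.
        while (mUsed + entryBytes > mBudget) {
            if (mEntries.empty()) {
                return;  // entry larger than the whole budget: drop it, as the old code did
            }
            mUsed -= mEntries.front().second;
            mEntries.pop_front();
        }
        mUsed += entryBytes;
        mEntries.emplace_back(std::move(entry), entryBytes);
    }

    size_t frameCount() const { return mEntries.size(); }
    size_t used() const { return mUsed; }

private:
    const size_t mBudget;
    size_t mUsed = 0;
    std::deque<std::pair<Entry, size_t>> mEntries;
};

int main() {
    ByteBudgetRingBuffer<std::string> buffer(/*budgetBytes=*/16);
    buffer.emplace("frame-0", 8);
    buffer.emplace("frame-1", 8);
    buffer.emplace("frame-2", 8);  // evicts "frame-0" to stay within the 16-byte budget
    return (buffer.frameCount() == 2 && buffer.used() == 16) ? 0 : 1;
}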
diff --git a/services/surfaceflinger/SurfaceTracing.h b/services/surfaceflinger/SurfaceTracing.h
deleted file mode 100644
index 97adb20..0000000
--- a/services/surfaceflinger/SurfaceTracing.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <android-base/thread_annotations.h>
-#include <layerproto/LayerProtoHeader.h>
-#include <utils/Errors.h>
-#include <utils/StrongPointer.h>
-
-#include <condition_variable>
-#include <memory>
-#include <mutex>
-#include <queue>
-#include <thread>
-
-using namespace android::surfaceflinger;
-
-namespace android {
-
-class SurfaceFlinger;
-constexpr auto operator""_MB(unsigned long long const num) {
-    return num * 1024 * 1024;
-}
-/*
- * SurfaceTracing records layer states during surface flinging. Manages tracing state and
- * configuration.
- */
-class SurfaceTracing {
-public:
-    SurfaceTracing(SurfaceFlinger& flinger);
-    bool enable();
-    bool disable();
-    status_t writeToFile();
-    bool isEnabled() const;
-    /*
-     * Adds a trace entry, must be called from the drawing thread or while holding the
-     * SurfaceFlinger tracing lock.
-     */
-    void notify(const char* where);
-    /*
-     * Adds a trace entry, called while holding the SurfaceFlinger tracing lock.
-     */
-    void notifyLocked(const char* where) /* REQUIRES(mSfLock) */;
-
-    void setBufferSize(size_t bufferSizeInBytes) { mConfig.bufferSize = bufferSizeInBytes; }
-    void dump(std::string& result) const;
-
-    enum : uint32_t {
-        TRACE_CRITICAL = 1 << 0,
-        TRACE_INPUT = 1 << 1,
-        TRACE_COMPOSITION = 1 << 2,
-        TRACE_EXTRA = 1 << 3,
-        TRACE_HWC = 1 << 4,
-        // Add non-geometry composition changes to the trace.
-        TRACE_BUFFERS = 1 << 5,
-        // Add entries from the drawing thread post composition.
-        TRACE_SYNC = 1 << 6,
-        TRACE_ALL = TRACE_CRITICAL | TRACE_INPUT | TRACE_COMPOSITION | TRACE_EXTRA,
-    };
-    void setTraceFlags(uint32_t flags) { mConfig.flags = flags; }
-    bool flagIsSet(uint32_t flags) { return (mConfig.flags & flags) == flags; }
-    static LayersTraceFileProto createLayersTraceFileProto();
-
-private:
-    class Runner;
-    static constexpr auto DEFAULT_BUFFER_SIZE = 20_MB;
-    static constexpr auto DEFAULT_FILE_NAME = "/data/misc/wmtrace/layers_trace.winscope";
-
-    SurfaceFlinger& mFlinger;
-    mutable std::mutex mTraceLock;
-    bool mEnabled GUARDED_BY(mTraceLock) = false;
-    std::unique_ptr<Runner> runner GUARDED_BY(mTraceLock);
-
-    struct Config {
-        uint32_t flags = TRACE_CRITICAL | TRACE_INPUT | TRACE_SYNC;
-        size_t bufferSize = DEFAULT_BUFFER_SIZE;
-    } mConfig;
-
-    /*
-     * ring buffer.
-     */
-    class LayersTraceBuffer {
-    public:
-        size_t size() const { return mSizeInBytes; }
-        size_t used() const { return mUsedInBytes; }
-        size_t frameCount() const { return mStorage.size(); }
-
-        void setSize(size_t newSize) { mSizeInBytes = newSize; }
-        void reset(size_t newSize);
-        void emplace(LayersTraceProto&& proto);
-        void flush(LayersTraceFileProto* fileProto);
-
-    private:
-        size_t mUsedInBytes = 0U;
-        size_t mSizeInBytes = DEFAULT_BUFFER_SIZE;
-        std::queue<LayersTraceProto> mStorage;
-    };
-
-    /*
-     * Implements a synchronous way of adding trace entries. This must be called
-     * from the drawing thread.
-     */
-    class Runner {
-    public:
-        Runner(SurfaceFlinger& flinger, SurfaceTracing::Config& config);
-        virtual ~Runner() = default;
-        virtual status_t stop();
-        virtual status_t writeToFile();
-        virtual void notify(const char* where);
-        /* Cannot be called with a synchronous runner. */
-        virtual void notifyLocked(const char* /* where */) {}
-        void dump(std::string& result) const;
-
-    protected:
-        bool flagIsSet(uint32_t flags) { return (mConfig.flags & flags) == flags; }
-        SurfaceFlinger& mFlinger;
-        SurfaceTracing::Config mConfig;
-        SurfaceTracing::LayersTraceBuffer mBuffer;
-        uint32_t mMissedTraceEntries = 0;
-        LayersTraceProto traceLayers(const char* where);
-    };
-
-    /*
-     * Implements asynchronous way to add trace entries called from a separate thread while holding
-     * the SurfaceFlinger tracing lock. Trace entries may be missed if the tracing thread is not
-     * scheduled in time.
-     */
-    class AsyncRunner : public Runner {
-    public:
-        AsyncRunner(SurfaceFlinger& flinger, SurfaceTracing::Config& config, std::mutex& sfLock);
-        virtual ~AsyncRunner() = default;
-        status_t stop() override;
-        status_t writeToFile() override;
-        void notify(const char* where) override;
-        void notifyLocked(const char* where);
-
-    private:
-        std::mutex& mSfLock;
-        std::condition_variable mCanStartTrace;
-        std::thread mThread;
-        const char* mWhere = "";
-        bool mWriteToFile = false;
-        bool mEnabled = false;
-        bool mAddEntry = false;
-        void loop();
-        bool traceWhenNotified(LayersTraceProto* outProto);
-    };
-};
-
-} // namespace android
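The deleted AsyncRunner moved serialization off the drawing thread: notifyLocked() recorded where, counted a miss if the previous request had not been consumed yet, and signalled a condition variable that the tracing thread waited on. The new LayerTracing drops that machinery and serializes on the calling thread (the updated tracing comment above notes that notify() blocks and should only be used for debugging). A condensed, self-contained sketch of the removed handshake, using hypothetical names and a wait predicate so wake-ups are not lost:

#include <condition_variable>
#include <mutex>
#include <string>
#include <thread>

class AsyncCapture {
public:
    AsyncCapture() : mThread([this] { loop(); }) {}

    ~AsyncCapture() {
        {
            std::lock_guard<std::mutex> lock(mLock);
            mRunning = false;
        }
        mWake.notify_one();
        mThread.join();
    }

    // Producer side (the drawing thread in the removed code): record what to
    // capture and wake the tracing thread.
    void notify(const char* where) {
        std::lock_guard<std::mutex> lock(mLock);
        if (mPending) {
            ++mMissed;  // tracing thread not scheduled in time; count the missed entry
        }
        mWhere = where;
        mPending = true;
        mWake.notify_one();
    }

private:
    void loop() {
        std::unique_lock<std::mutex> lock(mLock);
        while (mRunning) {
            mWake.wait(lock, [this] { return mPending || !mRunning; });
            if (!mPending) {
                continue;
            }
            mPending = false;
            // The removed AsyncRunner captured while holding SurfaceFlinger's tracing
            // lock; here the snapshot is just a stub.
            capture(mWhere);
        }
    }

    void capture(const std::string& /*where*/) {}

    std::mutex mLock;
    std::condition_variable mWake;
    std::string mWhere;
    unsigned mMissed = 0;
    bool mPending = false;
    bool mRunning = true;
    std::thread mThread;  // declared last so it starts after the state above is initialized
};

int main() {
    AsyncCapture capture;
    capture.notify("visibleRegionsDirty");
    return 0;
}

Capturing under the producer's lock is what required the TRACE_SYNC/async split in the removed code; serializing on the main thread removes that coordination at the cost of blocking composition while the entry is built.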
diff --git a/services/surfaceflinger/Tracing/LayerTracing.cpp b/services/surfaceflinger/Tracing/LayerTracing.cpp
new file mode 100644
index 0000000..84890ee
--- /dev/null
+++ b/services/surfaceflinger/Tracing/LayerTracing.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "LayerTracing"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include <SurfaceFlinger.h>
+#include <android-base/stringprintf.h>
+#include <log/log.h>
+#include <utils/SystemClock.h>
+#include <utils/Trace.h>
+
+#include "LayerTracing.h"
+#include "RingBuffer.h"
+
+namespace android {
+
+LayerTracing::LayerTracing(SurfaceFlinger& flinger) : mFlinger(flinger) {
+    mBuffer = std::make_unique<RingBuffer<LayersTraceFileProto, LayersTraceProto>>();
+}
+
+LayerTracing::~LayerTracing() = default;
+
+bool LayerTracing::enable() {
+    std::scoped_lock lock(mTraceLock);
+    if (mEnabled) {
+        return false;
+    }
+    mBuffer->setSize(mBufferSizeInBytes);
+    mEnabled = true;
+    return true;
+}
+
+bool LayerTracing::disable() {
+    std::scoped_lock lock(mTraceLock);
+    if (!mEnabled) {
+        return false;
+    }
+    mEnabled = false;
+    LayersTraceFileProto fileProto = createTraceFileProto();
+    mBuffer->writeToFile(fileProto, FILE_NAME);
+    return true;
+}
+
+bool LayerTracing::isEnabled() const {
+    std::scoped_lock lock(mTraceLock);
+    return mEnabled;
+}
+
+status_t LayerTracing::writeToFile() {
+    std::scoped_lock lock(mTraceLock);
+    if (!mEnabled) {
+        return STATUS_OK;
+    }
+    LayersTraceFileProto fileProto = createTraceFileProto();
+    return mBuffer->writeToFile(fileProto, FILE_NAME);
+}
+
+void LayerTracing::setTraceFlags(uint32_t flags) {
+    std::scoped_lock lock(mTraceLock);
+    mFlags = flags;
+}
+
+void LayerTracing::setBufferSize(size_t bufferSizeInBytes) {
+    std::scoped_lock lock(mTraceLock);
+    mBufferSizeInBytes = bufferSizeInBytes;
+}
+
+bool LayerTracing::flagIsSet(uint32_t flags) const {
+    return (mFlags & flags) == flags;
+}
+
+LayersTraceFileProto LayerTracing::createTraceFileProto() const {
+    LayersTraceFileProto fileProto;
+    fileProto.set_magic_number(uint64_t(LayersTraceFileProto_MagicNumber_MAGIC_NUMBER_H) << 32 |
+                               LayersTraceFileProto_MagicNumber_MAGIC_NUMBER_L);
+    return fileProto;
+}
+
+void LayerTracing::dump(std::string& result) const {
+    std::scoped_lock lock(mTraceLock);
+    base::StringAppendF(&result, "Tracing state: %s\n", mEnabled ? "enabled" : "disabled");
+    mBuffer->dump(result);
+}
+
+void LayerTracing::notify(const char* where) {
+    std::scoped_lock lock(mTraceLock);
+    if (!mEnabled) {
+        return;
+    }
+
+    ATRACE_CALL();
+    LayersTraceProto entry;
+    entry.set_elapsed_realtime_nanos(elapsedRealtimeNano());
+    entry.set_where(where);
+    LayersProto layers(mFlinger.dumpDrawingStateProto(mFlags));
+
+    if (flagIsSet(LayerTracing::TRACE_EXTRA)) {
+        mFlinger.dumpOffscreenLayersProto(layers);
+    }
+    entry.mutable_layers()->Swap(&layers);
+
+    if (flagIsSet(LayerTracing::TRACE_HWC)) {
+        std::string hwcDump;
+        mFlinger.dumpHwc(hwcDump);
+        entry.set_hwc_blob(hwcDump);
+    }
+    if (!flagIsSet(LayerTracing::TRACE_COMPOSITION)) {
+        entry.set_excludes_composition_state(true);
+    }
+    mFlinger.dumpDisplayProto(entry);
+    mBuffer->emplace(std::move(entry));
+}
+
+} // namespace android
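The new LayerTracing class above is a small state machine: enable() sizes the ring buffer and is a no-op if tracing is already on, notify() records one entry per commit only while enabled, and disable() flushes the buffered entries to the trace file. A minimal standalone sketch of that lifecycle, using simplified types rather than the real SurfaceFlinger/proto types:

    // Standalone sketch only: mirrors the enable()/notify()/disable() flow of LayerTracing,
    // with a vector of strings standing in for the proto ring buffer and writeToFile().
    #include <cassert>
    #include <mutex>
    #include <string>
    #include <vector>

    class TracerSketch {
    public:
        bool enable() {
            std::scoped_lock lock(mLock);
            if (mEnabled) return false;   // already tracing: report no state change
            mEntries.clear();
            mEnabled = true;
            return true;
        }
        void notify(std::string entry) {
            std::scoped_lock lock(mLock);
            if (!mEnabled) return;        // entries are dropped while tracing is off
            mEntries.push_back(std::move(entry));
        }
        bool disable(std::vector<std::string>& out) {
            std::scoped_lock lock(mLock);
            if (!mEnabled) return false;
            mEnabled = false;
            out = std::move(mEntries);    // stands in for writing the trace file
            return true;
        }
    private:
        std::mutex mLock;
        bool mEnabled = false;
        std::vector<std::string> mEntries;
    };

    int main() {
        TracerSketch tracer;
        std::vector<std::string> dump;
        tracer.notify("ignored");                          // not enabled yet, dropped
        assert(tracer.enable() && !tracer.enable());       // second enable() is a no-op
        tracer.notify("commit 1");
        assert(tracer.disable(dump) && dump.size() == 1);  // disable flushes the buffer
        return 0;
    }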
diff --git a/services/surfaceflinger/Tracing/LayerTracing.h b/services/surfaceflinger/Tracing/LayerTracing.h
new file mode 100644
index 0000000..8ca3587
--- /dev/null
+++ b/services/surfaceflinger/Tracing/LayerTracing.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <layerproto/LayerProtoHeader.h>
+#include <utils/Errors.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+#include <memory>
+#include <mutex>
+
+using namespace android::surfaceflinger;
+
+namespace android {
+
+template <typename FileProto, typename EntryProto>
+class RingBuffer;
+
+class SurfaceFlinger;
+
+/*
+ * LayerTracing records layer states during surface flinging, and manages tracing state and
+ * configuration.
+ */
+class LayerTracing {
+public:
+    LayerTracing(SurfaceFlinger& flinger);
+    ~LayerTracing();
+    bool enable();
+    bool disable();
+    bool isEnabled() const;
+    status_t writeToFile();
+    LayersTraceFileProto createTraceFileProto() const;
+    void notify(const char* where);
+
+    enum : uint32_t {
+        TRACE_INPUT = 1 << 1,
+        TRACE_COMPOSITION = 1 << 2,
+        TRACE_EXTRA = 1 << 3,
+        TRACE_HWC = 1 << 4,
+        TRACE_BUFFERS = 1 << 5,
+        TRACE_ALL = TRACE_INPUT | TRACE_COMPOSITION | TRACE_EXTRA,
+    };
+    void setTraceFlags(uint32_t flags);
+    bool flagIsSet(uint32_t flags) const;
+    void setBufferSize(size_t bufferSizeInBytes);
+    void dump(std::string&) const;
+
+private:
+    static constexpr auto FILE_NAME = "/data/misc/wmtrace/layers_trace.winscope";
+
+    SurfaceFlinger& mFlinger;
+    uint32_t mFlags = TRACE_INPUT;
+    mutable std::mutex mTraceLock;
+    bool mEnabled GUARDED_BY(mTraceLock) = false;
+    std::unique_ptr<RingBuffer<LayersTraceFileProto, LayersTraceProto>> mBuffer
+            GUARDED_BY(mTraceLock);
+    size_t mBufferSizeInBytes GUARDED_BY(mTraceLock) = 20 * 1024 * 1024;
+};
+
+} // namespace android
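Note the contract of flagIsSet(): (mFlags & flags) == flags succeeds only when every requested bit is present in the active mask, so a combined query such as TRACE_INPUT | TRACE_HWC requires both flags. A self-contained illustration (the constants mirror the enum above; this is not SurfaceFlinger code):

    // Standalone sketch: flagIsSet() treats its argument as a set of required bits.
    #include <cassert>
    #include <cstdint>

    namespace {
    constexpr uint32_t TRACE_INPUT = 1 << 1;
    constexpr uint32_t TRACE_COMPOSITION = 1 << 2;
    constexpr uint32_t TRACE_EXTRA = 1 << 3;

    bool flagIsSet(uint32_t active, uint32_t query) {
        return (active & query) == query;
    }
    } // namespace

    int main() {
        const uint32_t active = TRACE_INPUT | TRACE_EXTRA;
        assert(flagIsSet(active, TRACE_INPUT));                        // single bit present
        assert(flagIsSet(active, TRACE_INPUT | TRACE_EXTRA));          // all requested bits present
        assert(!flagIsSet(active, TRACE_COMPOSITION));                 // missing bit fails
        assert(!flagIsSet(active, TRACE_INPUT | TRACE_COMPOSITION));   // partial match also fails
        return 0;
    }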
diff --git a/services/surfaceflinger/Tracing/RingBuffer.h b/services/surfaceflinger/Tracing/RingBuffer.h
new file mode 100644
index 0000000..d0fb3f2
--- /dev/null
+++ b/services/surfaceflinger/Tracing/RingBuffer.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/file.h>
+#include <android-base/stringprintf.h>
+
+#include <log/log.h>
+#include <utils/Errors.h>
+#include <utils/SystemClock.h>
+#include <utils/Trace.h>
+#include <queue>
+
+namespace android {
+
+class SurfaceFlinger;
+
+template <typename FileProto, typename EntryProto>
+class RingBuffer {
+public:
+    size_t size() const { return mSizeInBytes; }
+    size_t used() const { return mUsedInBytes; }
+    size_t frameCount() const { return mStorage.size(); }
+    void setSize(size_t newSize) { mSizeInBytes = newSize; }
+    EntryProto& front() { return mStorage.front(); }
+    const EntryProto& front() const { return mStorage.front(); }
+
+    void reset(size_t newSize) {
+        // use the swap trick to make sure memory is released
+        std::queue<EntryProto>().swap(mStorage);
+        mSizeInBytes = newSize;
+        mUsedInBytes = 0U;
+    }
+    void flush(FileProto& fileProto) {
+        fileProto.mutable_entry()->Reserve(static_cast<int>(mStorage.size()));
+        while (!mStorage.empty()) {
+            auto entry = fileProto.add_entry();
+            entry->Swap(&mStorage.front());
+            mStorage.pop();
+        }
+    }
+
+    status_t writeToFile(FileProto& fileProto, std::string filename) {
+        ATRACE_CALL();
+        std::string output;
+        flush(fileProto);
+        reset(mSizeInBytes);
+        if (!fileProto.SerializeToString(&output)) {
+            ALOGE("Could not serialize proto.");
+            return UNKNOWN_ERROR;
+        }
+
+        // -rw-r--r--
+        const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+        if (!android::base::WriteStringToFile(output, filename, mode, getuid(), getgid(), true)) {
+            ALOGE("Could not save the proto file.");
+            return PERMISSION_DENIED;
+        }
+        return NO_ERROR;
+    }
+
+    std::vector<EntryProto> emplace(EntryProto&& proto) {
+        std::vector<EntryProto> replacedEntries;
+        size_t protoSize = static_cast<size_t>(proto.ByteSize());
+        while (mUsedInBytes + protoSize > mSizeInBytes) {
+            if (mStorage.empty()) {
+                return {};
+            }
+            mUsedInBytes -= static_cast<size_t>(mStorage.front().ByteSize());
+            replacedEntries.emplace_back(mStorage.front());
+            mStorage.pop();
+        }
+        mUsedInBytes += protoSize;
+        mStorage.emplace();
+        mStorage.back().Swap(&proto);
+        return replacedEntries;
+    }
+
+    void dump(std::string& result) const {
+        std::chrono::milliseconds duration(0);
+        if (frameCount() > 0) {
+            duration = std::chrono::duration_cast<std::chrono::milliseconds>(
+                    std::chrono::nanoseconds(systemTime() - front().elapsed_realtime_nanos()));
+        }
+        const int64_t durationCount = duration.count();
+        base::StringAppendF(&result,
+                            "  number of entries: %zu (%.2fMB / %.2fMB) duration: %" PRIi64 "ms\n",
+                            frameCount(), float(used()) / (1024.f * 1024.f),
+                            float(size()) / (1024.f * 1024.f), durationCount);
+    }
+
+private:
+    size_t mUsedInBytes = 0U;
+    size_t mSizeInBytes = 0U;
+    std::queue<EntryProto> mStorage;
+};
+
+} // namespace android
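RingBuffer::emplace() above enforces a byte budget rather than an entry count: the oldest entries are popped until the incoming entry fits, the evicted entries are returned to the caller, and an entry larger than the whole budget drains the queue and is then dropped. A simplified standalone sketch of that loop, with std::string payloads in place of protos:

    // Standalone sketch of the byte-budget eviction loop in RingBuffer::emplace().
    #include <cassert>
    #include <queue>
    #include <string>
    #include <vector>

    struct ByteBudgetQueue {
        size_t usedBytes = 0;
        size_t capacityBytes = 0;
        std::queue<std::string> storage;

        std::vector<std::string> emplace(std::string&& entry) {
            std::vector<std::string> evicted;
            const size_t entrySize = entry.size();
            while (usedBytes + entrySize > capacityBytes) {
                if (storage.empty()) return {};   // entry exceeds the whole budget: drop it
                usedBytes -= storage.front().size();
                evicted.push_back(std::move(storage.front()));
                storage.pop();
            }
            usedBytes += entrySize;
            storage.push(std::move(entry));
            return evicted;
        }
    };

    int main() {
        ByteBudgetQueue buffer;
        buffer.capacityBytes = 10;
        assert(buffer.emplace(std::string(4, 'a')).empty());     // fits, nothing evicted
        assert(buffer.emplace(std::string(4, 'b')).empty());     // still fits (8/10 bytes)
        assert(buffer.emplace(std::string(4, 'c')).size() == 1); // evicts the oldest entry
        assert(buffer.emplace(std::string(64, 'd')).empty());    // oversized: queue drained, entry dropped
        return 0;
    }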
diff --git a/services/surfaceflinger/tests/LayerCallback_test.cpp b/services/surfaceflinger/tests/LayerCallback_test.cpp
index 91a5b52..5c16fee 100644
--- a/services/surfaceflinger/tests/LayerCallback_test.cpp
+++ b/services/surfaceflinger/tests/LayerCallback_test.cpp
@@ -26,6 +26,7 @@
 namespace android {
 
 using android::hardware::graphics::common::V1_1::BufferUsage;
+using SCHash = SurfaceComposerClient::SCHash;
 
 ::testing::Environment* const binderEnv =
         ::testing::AddGlobalTestEnvironment(new BinderEnvironment());
@@ -102,6 +103,24 @@
         }
     }
 
+    static void waitForCommitCallback(
+            CallbackHelper& helper,
+            const std::unordered_set<sp<SurfaceControl>, SCHash>& committedSc) {
+        CallbackData callbackData;
+        ASSERT_NO_FATAL_FAILURE(helper.getCallbackData(&callbackData));
+
+        const auto& surfaceControlStats = callbackData.surfaceControlStats;
+
+        ASSERT_EQ(surfaceControlStats.size(), committedSc.size()) << "wrong number of surfaces";
+
+        for (const auto& stats : surfaceControlStats) {
+            ASSERT_NE(stats.surfaceControl, nullptr) << "returned null surface control";
+
+            const auto& expectedSc = committedSc.find(stats.surfaceControl);
+            ASSERT_NE(expectedSc, committedSc.end()) << "unexpected surface control";
+        }
+    }
+
     DisplayEventReceiver mDisplayEventReceiver;
     int mEpollFd;
 
@@ -1085,4 +1104,29 @@
     EXPECT_NO_FATAL_FAILURE(waitForCallback(callback, expected, true));
 }
 
+TEST_F(LayerCallbackTest, CommitCallbackOffscreenLayer) {
+    sp<SurfaceControl> layer;
+    ASSERT_NO_FATAL_FAILURE(layer = createBufferStateLayer());
+    sp<SurfaceControl> offscreenLayer =
+            createSurface(mClient, "Offscreen Layer", 0, 0, PIXEL_FORMAT_RGBA_8888,
+                          ISurfaceComposerClient::eFXSurfaceBufferState, layer.get());
+
+    Transaction transaction;
+    CallbackHelper callback;
+    int err = fillTransaction(transaction, &callback, layer, true);
+    err |= fillTransaction(transaction, &callback, offscreenLayer, true);
+    if (err) {
+        GTEST_SUCCEED() << "test not supported";
+        return;
+    }
+
+    transaction.reparent(offscreenLayer, nullptr)
+            .addTransactionCommittedCallback(callback.function, callback.getContext());
+    transaction.apply();
+
+    std::unordered_set<sp<SurfaceControl>, SCHash> committedSc;
+    committedSc.insert(layer);
+    committedSc.insert(offscreenLayer);
+    EXPECT_NO_FATAL_FAILURE(waitForCommitCallback(callback, committedSc));
+}
 } // namespace android
diff --git a/services/surfaceflinger/tests/unittests/Android.bp b/services/surfaceflinger/tests/unittests/Android.bp
index 1ac5680..3dc6d8b 100644
--- a/services/surfaceflinger/tests/unittests/Android.bp
+++ b/services/surfaceflinger/tests/unittests/Android.bp
@@ -107,7 +107,6 @@
         "mock/MockEventThread.cpp",
         "mock/MockFrameTimeline.cpp",
         "mock/MockFrameTracer.cpp",
-        "mock/MockMessageQueue.cpp",
         "mock/MockNativeWindowSurface.cpp",
         "mock/MockSurfaceInterceptor.cpp",
         "mock/MockTimeStats.cpp",
diff --git a/services/surfaceflinger/tests/unittests/CompositionTest.cpp b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
index 52d8c35..8d2c078 100644
--- a/services/surfaceflinger/tests/unittests/CompositionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
@@ -45,7 +45,6 @@
 #include "mock/DisplayHardware/MockComposer.h"
 #include "mock/DisplayHardware/MockPowerAdvisor.h"
 #include "mock/MockEventThread.h"
-#include "mock/MockMessageQueue.h"
 #include "mock/MockTimeStats.h"
 #include "mock/MockVsyncController.h"
 #include "mock/system/window/MockNativeWindow.h"
@@ -95,7 +94,6 @@
                 ::testing::UnitTest::GetInstance()->current_test_info();
         ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
 
-        mFlinger.mutableEventQueue().reset(mMessageQueue);
         setupScheduler();
 
         EXPECT_CALL(*mNativeWindow, query(NATIVE_WINDOW_WIDTH, _))
@@ -145,9 +143,6 @@
         mFlinger.setupScheduler(std::move(vsyncController), std::move(vsyncTracker),
                                 std::move(eventThread), std::move(sfEventThread), kCallback,
                                 kHasMultipleConfigs);
-
-        // Layer history should be created if there are multiple configs.
-        ASSERT_TRUE(mFlinger.scheduler()->hasLayerHistory());
     }
 
     void setupForceGeometryDirty() {
@@ -186,7 +181,6 @@
     Hwc2::mock::Composer* mComposer = nullptr;
     renderengine::mock::RenderEngine* mRenderEngine = new renderengine::mock::RenderEngine();
     mock::TimeStats* mTimeStats = new mock::TimeStats();
-    mock::MessageQueue* mMessageQueue = new mock::MessageQueue();
     Hwc2::mock::PowerAdvisor mPowerAdvisor;
 
     sp<Fence> mClientTargetAcquireFence = Fence::NO_FENCE;
@@ -542,9 +536,9 @@
         ASSERT_EQ(NO_ERROR, err);
         Mock::VerifyAndClear(test->mRenderEngine);
 
-        EXPECT_CALL(*test->mMessageQueue, scheduleCommit()).Times(1);
+        EXPECT_CALL(*test->mFlinger.scheduler(), scheduleFrame()).Times(1);
         enqueueBuffer(test, layer);
-        Mock::VerifyAndClearExpectations(test->mMessageQueue);
+        Mock::VerifyAndClearExpectations(test->mFlinger.scheduler());
 
         bool ignoredRecomputeVisibleRegions;
         layer->latchBuffer(ignoredRecomputeVisibleRegions, 0, 0);
@@ -839,16 +833,16 @@
 struct BaseLayerVariant {
     template <typename L, typename F>
     static sp<L> createLayerWithFactory(CompositionTest* test, F factory) {
-        EXPECT_CALL(*test->mMessageQueue, postMessage(_)).Times(0);
+        EXPECT_CALL(*test->mFlinger.scheduler(), postMessage(_)).Times(0);
 
         sp<L> layer = factory();
 
         // Layer should be registered with scheduler.
-        EXPECT_EQ(1, test->mFlinger.scheduler()->layerHistorySize());
+        EXPECT_EQ(1u, test->mFlinger.scheduler()->layerHistorySize());
 
         Mock::VerifyAndClear(test->mComposer);
         Mock::VerifyAndClear(test->mRenderEngine);
-        Mock::VerifyAndClearExpectations(test->mMessageQueue);
+        Mock::VerifyAndClearExpectations(test->mFlinger.scheduler());
 
         initLayerDrawingStateAndComputeBounds(test, layer);
 
@@ -889,7 +883,7 @@
 
         // Layer should be unregistered with scheduler.
         test->mFlinger.commit();
-        EXPECT_EQ(0, test->mFlinger.scheduler()->layerHistorySize());
+        EXPECT_EQ(0u, test->mFlinger.scheduler()->layerHistorySize());
     }
 };
 
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index 6cb3052..b1f704a 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -50,7 +50,6 @@
     });
 
     injectMockScheduler();
-    mFlinger.mutableEventQueue().reset(mMessageQueue);
     mFlinger.setupRenderEngine(std::unique_ptr<renderengine::RenderEngine>(mRenderEngine));
     mFlinger.mutableInterceptor() = mSurfaceInterceptor;
 
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h b/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
index 7746e73..de5e9df 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTestHelpers.h
@@ -47,7 +47,6 @@
 #include "mock/DisplayHardware/MockComposer.h"
 #include "mock/DisplayHardware/MockPowerAdvisor.h"
 #include "mock/MockEventThread.h"
-#include "mock/MockMessageQueue.h"
 #include "mock/MockNativeWindowSurface.h"
 #include "mock/MockSchedulerCallback.h"
 #include "mock/MockSurfaceInterceptor.h"
@@ -118,7 +117,6 @@
     // to keep a reference to them for use in setting up call expectations.
     renderengine::mock::RenderEngine* mRenderEngine = new renderengine::mock::RenderEngine();
     Hwc2::mock::Composer* mComposer = nullptr;
-    mock::MessageQueue* mMessageQueue = new mock::MessageQueue();
     sp<mock::SurfaceInterceptor> mSurfaceInterceptor = new mock::SurfaceInterceptor;
 
     mock::VsyncController* mVsyncController = new mock::VsyncController;
diff --git a/services/surfaceflinger/tests/unittests/LayerHistoryTest.cpp b/services/surfaceflinger/tests/unittests/LayerHistoryTest.cpp
index e8795fe..4993a2d 100644
--- a/services/surfaceflinger/tests/unittests/LayerHistoryTest.cpp
+++ b/services/surfaceflinger/tests/unittests/LayerHistoryTest.cpp
@@ -59,10 +59,8 @@
 
     LayerHistoryTest() { mFlinger.resetScheduler(mScheduler); }
 
-    void SetUp() override { ASSERT_TRUE(mScheduler->hasLayerHistory()); }
-
-    LayerHistory& history() { return *mScheduler->mutableLayerHistory(); }
-    const LayerHistory& history() const { return *mScheduler->mutableLayerHistory(); }
+    LayerHistory& history() { return mScheduler->mutableLayerHistory(); }
+    const LayerHistory& history() const { return mScheduler->mutableLayerHistory(); }
 
     LayerHistory::Summary summarizeLayerHistory(nsecs_t now) {
         return history().summarize(*mScheduler->refreshRateConfigs(), now);
diff --git a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
index 17d1dd6..bd4dc59 100644
--- a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
@@ -41,7 +41,7 @@
     struct MockHandler : MessageQueue::Handler {
         using MessageQueue::Handler::Handler;
 
-        MOCK_METHOD(void, dispatchCommit, (int64_t, nsecs_t), (override));
+        MOCK_METHOD(void, dispatchFrame, (int64_t, nsecs_t), (override));
     };
 
     explicit TestableMessageQueue(sp<MockHandler> handler)
@@ -96,7 +96,7 @@
     EXPECT_FALSE(mEventQueue.getScheduledFrameTime());
 
     EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
-    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleCommit());
+    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
     EXPECT_EQ(1234, mEventQueue.getScheduledFrameTime()->time_since_epoch().count());
@@ -109,13 +109,13 @@
                                                                  .earliestVsync = 0};
 
     EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
-    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleCommit());
+    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
     EXPECT_EQ(1234, mEventQueue.getScheduledFrameTime()->time_since_epoch().count());
 
     EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(4567));
-    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleCommit());
+    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
     EXPECT_EQ(4567, mEventQueue.getScheduledFrameTime()->time_since_epoch().count());
@@ -128,7 +128,7 @@
                                                                  .earliestVsync = 0};
 
     EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(1234));
-    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleCommit());
+    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 
     ASSERT_TRUE(mEventQueue.getScheduledFrameTime());
     EXPECT_EQ(1234, mEventQueue.getScheduledFrameTime()->time_since_epoch().count());
@@ -141,7 +141,7 @@
                 generateTokenForPredictions(
                         frametimeline::TimelineItem(startTime, endTime, presentTime)))
             .WillOnce(Return(vsyncId));
-    EXPECT_CALL(*mEventQueue.mHandler, dispatchCommit(vsyncId, presentTime)).Times(1);
+    EXPECT_CALL(*mEventQueue.mHandler, dispatchFrame(vsyncId, presentTime)).Times(1);
     EXPECT_NO_FATAL_FAILURE(mEventQueue.vsyncCallback(presentTime, startTime, endTime));
 
     EXPECT_FALSE(mEventQueue.getScheduledFrameTime());
@@ -152,7 +152,7 @@
                                                      .earliestVsync = presentTime};
 
     EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timingAfterCallback)).WillOnce(Return(0));
-    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleCommit());
+    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 }
 
 TEST_F(MessageQueueTest, commitWithDurationChange) {
@@ -164,7 +164,7 @@
                                                      .earliestVsync = 0};
 
     EXPECT_CALL(mVSyncDispatch, schedule(mCallbackToken, timing)).WillOnce(Return(0));
-    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleCommit());
+    EXPECT_NO_FATAL_FAILURE(mEventQueue.scheduleFrame());
 }
 
 } // namespace
diff --git a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
index 599b235..e558f3b 100644
--- a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
@@ -68,14 +68,6 @@
             std::make_shared<scheduler::RefreshRateConfigs>(DisplayModes{mode60}, mode60->getId());
 
     mock::SchedulerCallback mSchedulerCallback;
-
-    // The scheduler should initially disable VSYNC.
-    struct ExpectDisableVsync {
-        ExpectDisableVsync(mock::SchedulerCallback& callback) {
-            EXPECT_CALL(callback, setVsyncEnabled(false)).Times(1);
-        }
-    } mExpectDisableVsync{mSchedulerCallback};
-
     TestableScheduler* mScheduler = new TestableScheduler{mConfigs, mSchedulerCallback};
 
     Scheduler::ConnectionHandle mConnectionHandle;
@@ -166,9 +158,9 @@
     sp<mock::MockLayer> layer = sp<mock::MockLayer>::make(mFlinger.flinger());
 
     // recordLayerHistory should be a noop
-    ASSERT_EQ(static_cast<size_t>(0), mScheduler->getNumActiveLayers());
+    ASSERT_EQ(0u, mScheduler->getNumActiveLayers());
     mScheduler->recordLayerHistory(layer.get(), 0, LayerHistory::LayerUpdateType::Buffer);
-    ASSERT_EQ(static_cast<size_t>(0), mScheduler->getNumActiveLayers());
+    ASSERT_EQ(0u, mScheduler->getNumActiveLayers());
 
     constexpr bool kPowerStateNormal = true;
     mScheduler->setDisplayPowerState(kPowerStateNormal);
@@ -181,17 +173,17 @@
 }
 
 TEST_F(SchedulerTest, updateDisplayModes) {
-    ASSERT_EQ(static_cast<size_t>(0), mScheduler->layerHistorySize());
+    ASSERT_EQ(0u, mScheduler->layerHistorySize());
     sp<mock::MockLayer> layer = sp<mock::MockLayer>::make(mFlinger.flinger());
-    ASSERT_EQ(static_cast<size_t>(1), mScheduler->layerHistorySize());
+    ASSERT_EQ(1u, mScheduler->layerHistorySize());
 
     mScheduler->setRefreshRateConfigs(
             std::make_shared<scheduler::RefreshRateConfigs>(DisplayModes{mode60, mode120},
                                                             mode60->getId()));
 
-    ASSERT_EQ(static_cast<size_t>(0), mScheduler->getNumActiveLayers());
+    ASSERT_EQ(0u, mScheduler->getNumActiveLayers());
     mScheduler->recordLayerHistory(layer.get(), 0, LayerHistory::LayerUpdateType::Buffer);
-    ASSERT_EQ(static_cast<size_t>(1), mScheduler->getNumActiveLayers());
+    ASSERT_EQ(1u, mScheduler->getNumActiveLayers());
 }
 
 TEST_F(SchedulerTest, testDispatchCachedReportedMode) {
diff --git a/services/surfaceflinger/tests/unittests/SetFrameRateTest.cpp b/services/surfaceflinger/tests/unittests/SetFrameRateTest.cpp
index 360f9c6..eed62a7 100644
--- a/services/surfaceflinger/tests/unittests/SetFrameRateTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SetFrameRateTest.cpp
@@ -33,7 +33,6 @@
 #include "TestableSurfaceFlinger.h"
 #include "mock/DisplayHardware/MockComposer.h"
 #include "mock/MockEventThread.h"
-#include "mock/MockMessageQueue.h"
 #include "mock/MockVsyncController.h"
 
 namespace android {
@@ -112,7 +111,6 @@
     void commitTransaction();
 
     TestableSurfaceFlinger mFlinger;
-    mock::MessageQueue* mMessageQueue = new mock::MessageQueue();
 
     std::vector<sp<Layer>> mLayers;
 };
@@ -125,7 +123,6 @@
     setupScheduler();
 
     mFlinger.setupComposer(std::make_unique<Hwc2::mock::Composer>());
-    mFlinger.mutableEventQueue().reset(mMessageQueue);
 }
 
 void SetFrameRateTest::addChild(sp<Layer> layer, sp<Layer> child) {
@@ -172,7 +169,7 @@
 namespace {
 
 TEST_P(SetFrameRateTest, SetAndGet) {
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     const auto& layerFactory = GetParam();
 
@@ -183,7 +180,7 @@
 }
 
 TEST_P(SetFrameRateTest, SetAndGetParent) {
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     const auto& layerFactory = GetParam();
 
@@ -208,7 +205,7 @@
 }
 
 TEST_P(SetFrameRateTest, SetAndGetParentAllVote) {
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     const auto& layerFactory = GetParam();
 
@@ -247,7 +244,7 @@
 }
 
 TEST_P(SetFrameRateTest, SetAndGetChild) {
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     const auto& layerFactory = GetParam();
 
@@ -272,7 +269,7 @@
 }
 
 TEST_P(SetFrameRateTest, SetAndGetChildAllVote) {
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     const auto& layerFactory = GetParam();
 
@@ -311,7 +308,7 @@
 }
 
 TEST_P(SetFrameRateTest, SetAndGetChildAddAfterVote) {
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     const auto& layerFactory = GetParam();
 
@@ -341,7 +338,7 @@
 }
 
 TEST_P(SetFrameRateTest, SetAndGetChildRemoveAfterVote) {
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     const auto& layerFactory = GetParam();
 
@@ -372,7 +369,7 @@
 }
 
 TEST_P(SetFrameRateTest, SetAndGetParentNotInTree) {
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     const auto& layerFactory = GetParam();
 
@@ -454,24 +451,20 @@
     parent->setFrameRate(FRAME_RATE_VOTE1);
     commitTransaction();
 
-    mFlinger.mutableScheduler()
-            .mutableLayerHistory()
-            ->record(parent.get(), 0, 0, LayerHistory::LayerUpdateType::Buffer);
-    mFlinger.mutableScheduler()
-            .mutableLayerHistory()
-            ->record(child.get(), 0, 0, LayerHistory::LayerUpdateType::Buffer);
+    auto& history = mFlinger.mutableScheduler().mutableLayerHistory();
+    history.record(parent.get(), 0, 0, LayerHistory::LayerUpdateType::Buffer);
+    history.record(child.get(), 0, 0, LayerHistory::LayerUpdateType::Buffer);
 
-    const auto layerHistorySummary =
-            mFlinger.mutableScheduler()
-                    .mutableLayerHistory()
-                    ->summarize(*mFlinger.mutableScheduler().refreshRateConfigs(), 0);
-    ASSERT_EQ(2u, layerHistorySummary.size());
-    EXPECT_EQ(FRAME_RATE_VOTE1.rate, layerHistorySummary[0].desiredRefreshRate);
-    EXPECT_EQ(FRAME_RATE_VOTE1.rate, layerHistorySummary[1].desiredRefreshRate);
+    const auto configs = mFlinger.mutableScheduler().refreshRateConfigs();
+    const auto summary = history.summarize(*configs, 0);
+
+    ASSERT_EQ(2u, summary.size());
+    EXPECT_EQ(FRAME_RATE_VOTE1.rate, summary[0].desiredRefreshRate);
+    EXPECT_EQ(FRAME_RATE_VOTE1.rate, summary[1].desiredRefreshRate);
 }
 
 TEST_P(SetFrameRateTest, addChildForParentWithTreeVote) {
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     const auto& layerFactory = GetParam();
 
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_CreateDisplayTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_CreateDisplayTest.cpp
index 8c30341..f7d34ac 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_CreateDisplayTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_CreateDisplayTest.cpp
@@ -52,7 +52,7 @@
     // Cleanup conditions
 
     // Creating the display commits a display transaction.
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 }
 
 TEST_F(CreateDisplayTest, createDisplaySetsCurrentStateForSecureDisplay) {
@@ -87,7 +87,7 @@
     // Cleanup conditions
 
     // Creating the display commits a display transaction.
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 }
 
 } // namespace
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DestroyDisplayTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DestroyDisplayTest.cpp
index 7087fb6..c7e61c9 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DestroyDisplayTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DestroyDisplayTest.cpp
@@ -41,7 +41,7 @@
     EXPECT_CALL(*mSurfaceInterceptor, saveDisplayDeletion(_)).Times(1);
 
     // Destroying the display commits a display transaction.
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     // --------------------------------------------------------------------
     // Invocation
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_HotplugTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_HotplugTest.cpp
index 29ff0cd..c9a2b00 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_HotplugTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_HotplugTest.cpp
@@ -39,7 +39,7 @@
     // Call Expectations
 
     // We expect a scheduled commit for the display transaction.
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     // --------------------------------------------------------------------
     // Invocation
@@ -86,7 +86,7 @@
     // Call Expectations
 
     // We expect a scheduled commit for the display transaction.
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     // --------------------------------------------------------------------
     // Invocation
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
index e1b44cf..37cf05e 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_OnInitializeDisplaysTest.cpp
@@ -47,7 +47,7 @@
     Case::Display::setupHwcGetActiveConfigCallExpectations(this);
 
     // We expect a scheduled commit for the display transaction.
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     EXPECT_CALL(*mVSyncTracker, nextAnticipatedVSyncTimeFrom(_)).WillRepeatedly(Return(0));
 
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
index 6edebd4..b57feff 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_SetPowerModeInternalTest.cpp
@@ -273,7 +273,7 @@
     }
 
     static void setupRepaintEverythingCallExpectations(DisplayTransactionTest* test) {
-        EXPECT_CALL(*test->mMessageQueue, scheduleCommit()).Times(1);
+        EXPECT_CALL(*test->mFlinger.scheduler(), scheduleFrame()).Times(1);
     }
 
     static void setupSurfaceInterceptorCallExpectations(DisplayTransactionTest* test,
diff --git a/services/surfaceflinger/tests/unittests/TestableScheduler.h b/services/surfaceflinger/tests/unittests/TestableScheduler.h
index 1d21bd4..9d1fc98 100644
--- a/services/surfaceflinger/tests/unittests/TestableScheduler.h
+++ b/services/surfaceflinger/tests/unittests/TestableScheduler.h
@@ -30,21 +30,30 @@
 
 namespace android {
 
-class TestableScheduler : public Scheduler {
+class TestableScheduler : public Scheduler, private ICompositor {
 public:
-    TestableScheduler(const std::shared_ptr<scheduler::RefreshRateConfigs>& refreshRateConfigs,
+    TestableScheduler(std::shared_ptr<scheduler::RefreshRateConfigs> configs,
                       ISchedulerCallback& callback)
           : TestableScheduler(std::make_unique<mock::VsyncController>(),
-                              std::make_unique<mock::VSyncTracker>(), refreshRateConfigs,
+                              std::make_unique<mock::VSyncTracker>(), std::move(configs),
                               callback) {}
 
     TestableScheduler(std::unique_ptr<scheduler::VsyncController> vsyncController,
                       std::unique_ptr<scheduler::VSyncTracker> vsyncTracker,
-                      const std::shared_ptr<scheduler::RefreshRateConfigs>& refreshRateConfigs,
+                      std::shared_ptr<scheduler::RefreshRateConfigs> configs,
                       ISchedulerCallback& callback)
-          : Scheduler({std::move(vsyncController), std::move(vsyncTracker), nullptr},
-                      refreshRateConfigs, callback, createLayerHistory(),
-                      {.useContentDetection = true}) {}
+          : Scheduler(*this, callback, {.useContentDetection = true}) {
+        mVsyncSchedule = {std::move(vsyncController), std::move(vsyncTracker), nullptr};
+        setRefreshRateConfigs(std::move(configs));
+
+        ON_CALL(*this, postMessage).WillByDefault([](sp<MessageHandler>&& handler) {
+            // Execute task to prevent broken promise exception on destruction.
+            handler->handleMessage(Message());
+        });
+    }
+
+    MOCK_METHOD(void, scheduleFrame, (), (override));
+    MOCK_METHOD(void, postMessage, (sp<MessageHandler>&&), (override));
 
     // Used to inject mock event thread.
     ConnectionHandle createConnection(std::unique_ptr<EventThread> eventThread) {
@@ -58,22 +67,13 @@
     auto& mutablePrimaryHWVsyncEnabled() { return mPrimaryHWVsyncEnabled; }
     auto& mutableHWVsyncAvailable() { return mHWVsyncAvailable; }
 
-    bool hasLayerHistory() const { return static_cast<bool>(mLayerHistory); }
+    auto& mutableLayerHistory() { return mLayerHistory; }
 
-    auto* mutableLayerHistory() { return mLayerHistory.get(); }
-
-    size_t layerHistorySize() NO_THREAD_SAFETY_ANALYSIS {
-        if (!mLayerHistory) return 0;
-        return mutableLayerHistory()->mLayerInfos.size();
-    }
+    size_t layerHistorySize() NO_THREAD_SAFETY_ANALYSIS { return mLayerHistory.mLayerInfos.size(); }
+    size_t getNumActiveLayers() NO_THREAD_SAFETY_ANALYSIS { return mLayerHistory.mActiveLayersEnd; }
 
     auto refreshRateConfigs() { return holdRefreshRateConfigs(); }
 
-    size_t getNumActiveLayers() NO_THREAD_SAFETY_ANALYSIS {
-        if (!mLayerHistory) return 0;
-        return mutableLayerHistory()->mActiveLayersEnd;
-    }
-
     void replaceTouchTimer(int64_t millis) {
         if (mTouchTimer) {
             mTouchTimer.reset();
@@ -112,6 +112,12 @@
         mVsyncSchedule.controller.reset();
         mConnections.clear();
     }
+
+private:
+    // ICompositor overrides:
+    bool commit(nsecs_t, int64_t, nsecs_t) override { return false; }
+    void composite(nsecs_t) override {}
+    void sample() override {}
 };
 
 } // namespace android
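The ON_CALL in the TestableScheduler constructor above installs a default action that executes posted handlers inline, so work queued during a test cannot be left pending (and break its promise) when the scheduler is destroyed. The same pattern in a self-contained GMock sketch with a toy interface instead of the SurfaceFlinger types:

    // Standalone GMock sketch; build and link against gmock_main, which provides main().
    #include <gmock/gmock.h>
    #include <gtest/gtest.h>
    #include <functional>

    struct Poster {
        virtual ~Poster() = default;
        virtual void postMessage(std::function<void()> task) = 0;
    };

    struct MockPoster : Poster {
        MockPoster() {
            // Default action: run the posted task immediately instead of queueing it.
            ON_CALL(*this, postMessage).WillByDefault([](std::function<void()> task) { task(); });
        }
        MOCK_METHOD(void, postMessage, (std::function<void()>), (override));
    };

    TEST(MockPosterTest, RunsPostedTaskInline) {
        testing::NiceMock<MockPoster> poster;
        bool ran = false;
        poster.postMessage([&] { ran = true; });
        EXPECT_TRUE(ran);   // the default action executed the task synchronously
    }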
diff --git a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
index 8cca6af..4c5789e 100644
--- a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
+++ b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
@@ -73,20 +73,11 @@
         return nullptr;
     }
 
-    std::unique_ptr<MessageQueue> createMessageQueue(ICompositor& compositor) override {
-        return std::make_unique<android::impl::MessageQueue>(compositor);
-    }
-
     std::unique_ptr<scheduler::VsyncConfiguration> createVsyncConfiguration(
             Fps /*currentRefreshRate*/) override {
         return std::make_unique<scheduler::FakePhaseOffsets>();
     }
 
-    std::unique_ptr<Scheduler> createScheduler(
-            const std::shared_ptr<scheduler::RefreshRateConfigs>&, ISchedulerCallback&) override {
-        return nullptr;
-    }
-
     sp<SurfaceInterceptor> createSurfaceInterceptor() override {
         return new android::impl::SurfaceInterceptor();
     }
@@ -431,7 +422,6 @@
     auto& mutableDisplayColorSetting() { return mFlinger->mDisplayColorSetting; }
     auto& mutableDisplays() { return mFlinger->mDisplays; }
     auto& mutableDrawingState() { return mFlinger->mDrawingState; }
-    auto& mutableEventQueue() { return mFlinger->mEventQueue; }
     auto& mutableGeometryDirty() { return mFlinger->mGeometryDirty; }
     auto& mutableInterceptor() { return mFlinger->mInterceptor; }
     auto& mutableMainThreadId() { return mFlinger->mMainThreadId; }
@@ -460,7 +450,6 @@
         mutableDisplays().clear();
         mutableCurrentState().displays.clear();
         mutableDrawingState().displays.clear();
-        mutableEventQueue().reset();
         mutableInterceptor().clear();
         mFlinger->mScheduler.reset();
         mFlinger->mCompositionEngine->setHwComposer(std::unique_ptr<HWComposer>());
diff --git a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
index 8caadfb..ec19100 100644
--- a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
@@ -29,7 +29,6 @@
 #include "TestableScheduler.h"
 #include "TestableSurfaceFlinger.h"
 #include "mock/MockEventThread.h"
-#include "mock/MockMessageQueue.h"
 #include "mock/MockVsyncController.h"
 
 namespace android {
@@ -46,7 +45,6 @@
                 ::testing::UnitTest::GetInstance()->current_test_info();
         ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
 
-        mFlinger.mutableEventQueue().reset(mMessageQueue);
         setupScheduler();
     }
 
@@ -92,7 +90,6 @@
 
     std::unique_ptr<mock::EventThread> mEventThread = std::make_unique<mock::EventThread>();
 
-    mock::MessageQueue* mMessageQueue = new mock::MessageQueue();
     mock::VsyncController* mVsyncController = new mock::VsyncController();
     mock::VSyncTracker* mVSyncTracker = new mock::VSyncTracker();
     mock::MockFence* mFenceUnsignaled = new mock::MockFence();
@@ -146,7 +143,7 @@
 
     void NotPlacedOnTransactionQueue(uint32_t flags, bool syncInputWindows) {
         ASSERT_EQ(0u, mFlinger.getTransactionQueue().size());
-        EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+        EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
         TransactionInfo transaction;
         setupSingle(transaction, flags, syncInputWindows,
                     /*desiredPresentTime*/ systemTime(), /*isAutoTimestamp*/ true,
@@ -176,7 +173,7 @@
 
     void PlaceOnTransactionQueue(uint32_t flags, bool syncInputWindows) {
         ASSERT_EQ(0u, mFlinger.getTransactionQueue().size());
-        EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+        EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
         // first check will see desired present time has not passed,
         // but afterwards it will look like the desired present time has passed
@@ -207,9 +204,9 @@
         ASSERT_EQ(0u, mFlinger.getTransactionQueue().size());
         nsecs_t time = systemTime();
         if (!syncInputWindows) {
-            EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(2);
+            EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(2);
         } else {
-            EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+            EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
         }
         // transaction that should go on the pending thread
         TransactionInfo transactionA;
@@ -454,7 +451,7 @@
 
 TEST_F(TransactionApplicationTest, Flush_RemovesFromQueue) {
     ASSERT_EQ(0u, mFlinger.getTransactionQueue().size());
-    EXPECT_CALL(*mMessageQueue, scheduleCommit()).Times(1);
+    EXPECT_CALL(*mFlinger.scheduler(), scheduleFrame()).Times(1);
 
     TransactionInfo transactionA; // transaction to go on pending queue
     setupSingle(transactionA, /*flags*/ 0, /*syncInputWindows*/ false,
diff --git a/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp b/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
index ade4fbb..15fea9c 100644
--- a/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TunnelModeEnabledReporterTest.cpp
@@ -27,7 +27,6 @@
 #include "TunnelModeEnabledReporter.h"
 #include "mock/DisplayHardware/MockComposer.h"
 #include "mock/MockEventThread.h"
-#include "mock/MockMessageQueue.h"
 
 namespace android {
 
@@ -69,12 +68,12 @@
 
     TestableSurfaceFlinger mFlinger;
     Hwc2::mock::Composer* mComposer = nullptr;
-    sp<TestableTunnelModeEnabledListener> mTunnelModeEnabledListener =
-            new TestableTunnelModeEnabledListener();
-    sp<TunnelModeEnabledReporter> mTunnelModeEnabledReporter =
-            new TunnelModeEnabledReporter();
 
-    mock::MessageQueue* mMessageQueue = new mock::MessageQueue();
+    sp<TestableTunnelModeEnabledListener> mTunnelModeEnabledListener =
+            sp<TestableTunnelModeEnabledListener>::make();
+
+    sp<TunnelModeEnabledReporter> mTunnelModeEnabledReporter =
+            sp<TunnelModeEnabledReporter>::make();
 };
 
 TunnelModeEnabledReporterTest::TunnelModeEnabledReporterTest() {
@@ -82,7 +81,6 @@
             ::testing::UnitTest::GetInstance()->current_test_info();
     ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
 
-    mFlinger.mutableEventQueue().reset(mMessageQueue);
     setupScheduler();
     mFlinger.setupComposer(std::make_unique<Hwc2::mock::Composer>());
     mFlinger.flinger()->mTunnelModeEnabledReporter = mTunnelModeEnabledReporter;
diff --git a/services/surfaceflinger/tests/unittests/mock/MockMessageQueue.cpp b/services/surfaceflinger/tests/unittests/mock/MockMessageQueue.cpp
deleted file mode 100644
index 5fb06fd..0000000
--- a/services/surfaceflinger/tests/unittests/mock/MockMessageQueue.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mock/MockMessageQueue.h"
-
-namespace android::mock {
-
-MessageQueue::MessageQueue() {
-    ON_CALL(*this, postMessage).WillByDefault([](sp<MessageHandler>&& handler) {
-        // Execute task to prevent broken promise exception on destruction.
-        handler->handleMessage(Message());
-    });
-}
-
-MessageQueue::~MessageQueue() = default;
-
-} // namespace android::mock
diff --git a/services/surfaceflinger/tests/unittests/mock/MockMessageQueue.h b/services/surfaceflinger/tests/unittests/mock/MockMessageQueue.h
deleted file mode 100644
index d684337..0000000
--- a/services/surfaceflinger/tests/unittests/mock/MockMessageQueue.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <gmock/gmock.h>
-
-#include "FrameTimeline.h"
-#include "Scheduler/EventThread.h"
-#include "Scheduler/MessageQueue.h"
-
-namespace android::mock {
-
-class MessageQueue : public android::MessageQueue {
-public:
-    MessageQueue();
-    ~MessageQueue() override;
-
-    MOCK_METHOD1(setInjector, void(sp<EventThreadConnection>));
-    MOCK_METHOD0(waitMessage, void());
-    MOCK_METHOD1(postMessage, void(sp<MessageHandler>&&));
-    MOCK_METHOD3(initVsync,
-                 void(scheduler::VSyncDispatch&, frametimeline::TokenManager&,
-                      std::chrono::nanoseconds));
-    MOCK_METHOD1(setDuration, void(std::chrono::nanoseconds workDuration));
-
-    MOCK_METHOD(void, scheduleCommit, (), (override));
-    MOCK_METHOD(void, scheduleComposite, (), (override));
-
-    MOCK_METHOD(std::optional<Clock::time_point>, getScheduledFrameTime, (), (const, override));
-};
-
-} // namespace android::mock