Merge "Remove unused update_out_oat_access_times."
diff --git a/cmds/atrace/atrace.rc b/cmds/atrace/atrace.rc
index 994375b..db3a314 100644
--- a/cmds/atrace/atrace.rc
+++ b/cmds/atrace/atrace.rc
@@ -220,6 +220,67 @@
chmod 0666 /sys/kernel/debug/tracing/per_cpu/cpu15/trace
chmod 0666 /sys/kernel/tracing/per_cpu/cpu15/trace
+on post-fs-data
+# Create MM Events Tracing Instance for Kmem Activity Trigger
+ mkdir /sys/kernel/debug/tracing/instances/mm_events 0755 system system
+ mkdir /sys/kernel/tracing/instances/mm_events 0755 system system
+
+# Read and set per CPU buffer size
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/buffer_size_kb
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/buffer_size_kb
+
+# Read and enable tracing
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/tracing_on
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/tracing_on
+
+# Read and truncate kernel trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/trace
+
+# Enable trace events
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/events/vmscan/mm_vmscan_direct_reclaim_begin/enable
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/events/vmscan/mm_vmscan_direct_reclaim_begin/enable
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/events/vmscan/mm_vmscan_kswapd_wake/enable
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/events/vmscan/mm_vmscan_kswapd_wake/enable
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/events/compaction/mm_compaction_begin/enable
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/events/compaction/mm_compaction_begin/enable
+
+# Read and clear per-CPU raw kernel trace
+# Cannot use wildcards in .rc files. Update this if there is a phone with
+# more CPUs.
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu0/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu0/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu1/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu1/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu2/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu2/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu3/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu3/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu4/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu4/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu5/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu5/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu6/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu6/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu7/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu7/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu8/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu8/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu9/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu9/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu10/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu10/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu11/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu11/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu12/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu12/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu13/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu13/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu14/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu14/trace
+ chmod 0666 /sys/kernel/debug/tracing/instances/mm_events/per_cpu/cpu15/trace
+ chmod 0666 /sys/kernel/tracing/instances/mm_events/per_cpu/cpu15/trace
+
on property:persist.debug.atrace.boottrace=1
start boottrace
diff --git a/cmds/dumpstate/binder/android/os/IDumpstateListener.aidl b/cmds/dumpstate/binder/android/os/IDumpstateListener.aidl
index 85e6969..50c1624 100644
--- a/cmds/dumpstate/binder/android/os/IDumpstateListener.aidl
+++ b/cmds/dumpstate/binder/android/os/IDumpstateListener.aidl
@@ -21,8 +21,6 @@
*
* <p>When bugreport creation is complete one of {@code onError} or {@code onFinished} is called.
*
- * <p>These methods are synchronous by design in order to make dumpstate's lifecycle simpler
- * to handle.
*
* {@hide}
*/
@@ -54,10 +52,8 @@
/**
* Called on an error condition with one of the error codes listed above.
- * This is not an asynchronous method since it can race with dumpstate exiting, thus triggering
- * death recipient.
*/
- void onError(int errorCode);
+ oneway void onError(int errorCode);
/**
* Called when taking bugreport finishes successfully.
diff --git a/cmds/dumpstate/tests/dumpstate_smoke_test.cpp b/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
index 0e366cb..c62d302 100644
--- a/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
+++ b/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
@@ -310,8 +310,12 @@
// FS/proc/*/mountinfo size > 0
FileExists("FS/proc/1/mountinfo", 0U, 100000U);
- // FS/data/misc/profiles/cur/0/*/primary.prof size > 0
- FileExists("FS/data/misc/profiles/cur/0/com.android.phone/primary.prof", 0U, 100000U);
+ // FS/data/misc/profiles/cur/0/*/primary.prof should exist. Also, since dumpstate only adds
+ // profiles to the zip on non-user builds, a build check is necessary here.
+ if (!PropertiesHelper::IsUserBuild()) {
+ ZipEntry entry;
+ GetEntry(handle, "FS/data/misc/profiles/cur/0/com.android.phone/primary.prof", &entry);
+ }
}
/**
diff --git a/cmds/servicemanager/ServiceManager.cpp b/cmds/servicemanager/ServiceManager.cpp
index b21010d..0dbab4e 100644
--- a/cmds/servicemanager/ServiceManager.cpp
+++ b/cmds/servicemanager/ServiceManager.cpp
@@ -623,4 +623,21 @@
return Status::ok();
}
+Status ServiceManager::getServiceDebugInfo(std::vector<ServiceDebugInfo>* outReturn) {
+ if (!mAccess->canList(mAccess->getCallingContext())) {
+ return Status::fromExceptionCode(Status::EX_SECURITY);
+ }
+
+ outReturn->reserve(mNameToService.size());
+ for (auto const& [name, service] : mNameToService) {
+ ServiceDebugInfo info;
+ info.name = name;
+ info.debugPid = service.debugPid;
+
+ outReturn->push_back(std::move(info));
+ }
+
+ return Status::ok();
+}
+
} // namespace android
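
Note: getServiceDebugInfo() above is surfaced through the android.os.IServiceManager AIDL interface extended later in this change. A minimal C++ sketch of a caller, assuming the standard AIDL-generated headers and that the caller already holds an android::os::IServiceManager proxy (illustrative only, not part of this change):

    #include <android/os/IServiceManager.h>   // AIDL-generated proxy header (assumed path)
    #include <android/os/ServiceDebugInfo.h>
    #include <cstdio>
    #include <vector>

    // Hypothetical helper: print every registered service with its registration-time PID.
    void dumpServiceDebugInfo(const android::sp<android::os::IServiceManager>& sm) {
        std::vector<android::os::ServiceDebugInfo> infos;
        android::binder::Status status = sm->getServiceDebugInfo(&infos);
        if (!status.isOk()) {
            // e.g. EX_SECURITY when the calling context is not allowed to list services
            return;
        }
        for (const auto& info : infos) {
            std::printf("%s (debug pid %d)\n", info.name.c_str(), info.debugPid);
        }
    }
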
diff --git a/cmds/servicemanager/ServiceManager.h b/cmds/servicemanager/ServiceManager.h
index 9f43eb4..c089115 100644
--- a/cmds/servicemanager/ServiceManager.h
+++ b/cmds/servicemanager/ServiceManager.h
@@ -26,6 +26,7 @@
using os::IClientCallback;
using os::IServiceCallback;
+using os::ServiceDebugInfo;
class ServiceManager : public os::BnServiceManager, public IBinder::DeathRecipient {
public:
@@ -48,6 +49,7 @@
binder::Status registerClientCallback(const std::string& name, const sp<IBinder>& service,
const sp<IClientCallback>& cb) override;
binder::Status tryUnregisterService(const std::string& name, const sp<IBinder>& binder) override;
+ binder::Status getServiceDebugInfo(std::vector<ServiceDebugInfo>* outReturn) override;
void binderDied(const wp<IBinder>& who) override;
void handleClientCallbacks();
diff --git a/cmds/servicemanager/main.cpp b/cmds/servicemanager/main.cpp
index b1bc6dc..627dfe6 100644
--- a/cmds/servicemanager/main.cpp
+++ b/cmds/servicemanager/main.cpp
@@ -45,10 +45,6 @@
IPCThreadState::self()->setupPolling(&binder_fd);
LOG_ALWAYS_FATAL_IF(binder_fd < 0, "Failed to setupPolling: %d", binder_fd);
- // Flush after setupPolling(), to make sure the binder driver
- // knows about this thread handling commands.
- IPCThreadState::self()->flushCommands();
-
int ret = looper->addFd(binder_fd,
Looper::POLL_CALLBACK,
Looper::EVENT_INPUT,
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index e754d74..ed079db 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -214,6 +214,7 @@
"aidl/android/os/IClientCallback.aidl",
"aidl/android/os/IServiceCallback.aidl",
"aidl/android/os/IServiceManager.aidl",
+ "aidl/android/os/ServiceDebugInfo.aidl",
],
path: "aidl",
}
diff --git a/libs/binder/IPCThreadState.cpp b/libs/binder/IPCThreadState.cpp
index 7d01e0b..5c34069 100644
--- a/libs/binder/IPCThreadState.cpp
+++ b/libs/binder/IPCThreadState.cpp
@@ -486,15 +486,30 @@
}
}
+bool IPCThreadState::flushIfNeeded()
+{
+ if (mIsLooper || mServingStackPointer != nullptr) {
+ return false;
+ }
+ // In case this thread is not a looper and is not currently serving a binder transaction,
+ // there's no guarantee that this thread will call back into the kernel driver any time
+ // soon. Therefore, flush pending commands such as BC_FREE_BUFFER, to prevent them from getting
+ // stuck in this thread's out buffer.
+ flushCommands();
+ return true;
+}
+
void IPCThreadState::blockUntilThreadAvailable()
{
pthread_mutex_lock(&mProcess->mThreadCountLock);
+ mProcess->mWaitingForThreads++;
while (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads) {
ALOGW("Waiting for thread to be free. mExecutingThreadsCount=%lu mMaxThreads=%lu\n",
static_cast<unsigned long>(mProcess->mExecutingThreadsCount),
static_cast<unsigned long>(mProcess->mMaxThreads));
pthread_cond_wait(&mProcess->mThreadCountDecrement, &mProcess->mThreadCountLock);
}
+ mProcess->mWaitingForThreads--;
pthread_mutex_unlock(&mProcess->mThreadCountLock);
}
@@ -534,7 +549,12 @@
}
mProcess->mStarvationStartTimeMs = 0;
}
- pthread_cond_broadcast(&mProcess->mThreadCountDecrement);
+
+ // Cond broadcast can be expensive, so don't send it every time a binder
+ // call is processed. b/168806193
+ if (mProcess->mWaitingForThreads > 0) {
+ pthread_cond_broadcast(&mProcess->mThreadCountDecrement);
+ }
pthread_mutex_unlock(&mProcess->mThreadCountLock);
}
@@ -597,6 +617,7 @@
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
+ mIsLooper = true;
status_t result;
do {
processPendingDerefs();
@@ -619,6 +640,7 @@
(void*)pthread_self(), getpid(), result);
mOut.writeInt32(BC_EXIT_LOOPER);
+ mIsLooper = false;
talkWithDriver(false);
}
@@ -629,6 +651,7 @@
}
mOut.writeInt32(BC_ENTER_LOOPER);
+ flushCommands();
*fd = mProcess->mDriverFD;
return 0;
}
@@ -731,9 +754,11 @@
LOG_REMOTEREFS("IPCThreadState::incStrongHandle(%d)\n", handle);
mOut.writeInt32(BC_ACQUIRE);
mOut.writeInt32(handle);
- // Create a temp reference until the driver has handled this command.
- proxy->incStrong(mProcess.get());
- mPostWriteStrongDerefs.push(proxy);
+ if (!flushIfNeeded()) {
+ // Create a temp reference until the driver has handled this command.
+ proxy->incStrong(mProcess.get());
+ mPostWriteStrongDerefs.push(proxy);
+ }
}
void IPCThreadState::decStrongHandle(int32_t handle)
@@ -741,6 +766,7 @@
LOG_REMOTEREFS("IPCThreadState::decStrongHandle(%d)\n", handle);
mOut.writeInt32(BC_RELEASE);
mOut.writeInt32(handle);
+ flushIfNeeded();
}
void IPCThreadState::incWeakHandle(int32_t handle, BpBinder *proxy)
@@ -748,9 +774,11 @@
LOG_REMOTEREFS("IPCThreadState::incWeakHandle(%d)\n", handle);
mOut.writeInt32(BC_INCREFS);
mOut.writeInt32(handle);
- // Create a temp reference until the driver has handled this command.
- proxy->getWeakRefs()->incWeak(mProcess.get());
- mPostWriteWeakDerefs.push(proxy->getWeakRefs());
+ if (!flushIfNeeded()) {
+ // Create a temp reference until the driver has handled this command.
+ proxy->getWeakRefs()->incWeak(mProcess.get());
+ mPostWriteWeakDerefs.push(proxy->getWeakRefs());
+ }
}
void IPCThreadState::decWeakHandle(int32_t handle)
@@ -758,6 +786,7 @@
LOG_REMOTEREFS("IPCThreadState::decWeakHandle(%d)\n", handle);
mOut.writeInt32(BC_DECREFS);
mOut.writeInt32(handle);
+ flushIfNeeded();
}
status_t IPCThreadState::attemptIncStrongHandle(int32_t handle)
@@ -813,6 +842,7 @@
mServingStackPointer(nullptr),
mWorkSource(kUnsetWorkSource),
mPropagateWorkSource(false),
+ mIsLooper(false),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0),
mCallRestriction(mProcess->mCallRestriction)
@@ -1393,6 +1423,7 @@
IPCThreadState* state = self();
state->mOut.writeInt32(BC_FREE_BUFFER);
state->mOut.writePointer((uintptr_t)data);
+ state->flushIfNeeded();
}
} // namespace android
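
The mWaitingForThreads counter introduced above lets IPCThreadState skip the condition-variable broadcast unless a thread is actually blocked in blockUntilThreadAvailable(). Reduced to a standalone sketch with made-up names (illustrative only, not part of this change):

    #include <pthread.h>
    #include <cstddef>

    struct ThreadGate {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        pthread_cond_t decremented = PTHREAD_COND_INITIALIZER;
        size_t executing = 0;   // mirrors mExecutingThreadsCount
        size_t waiting = 0;     // mirrors mWaitingForThreads
        size_t maxThreads = 15; // mirrors mMaxThreads

        void blockUntilThreadAvailable() {
            pthread_mutex_lock(&lock);
            waiting++;
            while (executing >= maxThreads) {
                pthread_cond_wait(&decremented, &lock);
            }
            waiting--;
            pthread_mutex_unlock(&lock);
        }

        void onTransactionFinished() {
            pthread_mutex_lock(&lock);
            executing--;
            // Broadcast is comparatively expensive, so only signal when a thread
            // is actually waiting (see b/168806193 in the hunk above).
            if (waiting > 0) {
                pthread_cond_broadcast(&decremented);
            }
            pthread_mutex_unlock(&lock);
        }
    };
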
diff --git a/libs/binder/LazyServiceRegistrar.cpp b/libs/binder/LazyServiceRegistrar.cpp
index ff1fbec..21a9251 100644
--- a/libs/binder/LazyServiceRegistrar.cpp
+++ b/libs/binder/LazyServiceRegistrar.cpp
@@ -42,8 +42,7 @@
*/
void forcePersist(bool persist);
- void setActiveServicesCountCallback(const std::function<bool(int)>&
- activeServicesCountCallback);
+ void setActiveServicesCallback(const std::function<bool(bool)>& activeServicesCallback);
bool tryUnregister();
@@ -86,13 +85,16 @@
// count of services with clients
size_t mNumConnectedServices;
+ // previous value passed to the active services callback
+ std::optional<bool> mPreviousHasClients;
+
// map of registered names and services
std::map<std::string, Service> mRegisteredServices;
bool mForcePersist;
- // Callback used to report the number of services with clients
- std::function<bool(int)> mActiveServicesCountCallback;
+ // Callback used to report if there are services with clients
+ std::function<bool(bool)> mActiveServicesCallback;
};
bool ClientCounterCallback::registerService(const sp<IBinder>& service, const std::string& name,
@@ -139,7 +141,7 @@
void ClientCounterCallback::forcePersist(bool persist) {
mForcePersist = persist;
- if (!mForcePersist && mNumConnectedServices == 0) {
+ if (!mForcePersist) {
// Attempt a shutdown in case the number of clients hit 0 while the flag was on
maybeTryShutdown();
}
@@ -185,8 +187,12 @@
}
bool handledInCallback = false;
- if (mActiveServicesCountCallback != nullptr) {
- handledInCallback = mActiveServicesCountCallback(mNumConnectedServices);
+ if (mActiveServicesCallback != nullptr) {
+ bool hasClients = mNumConnectedServices != 0;
+ if (hasClients != mPreviousHasClients) {
+ handledInCallback = mActiveServicesCallback(hasClients);
+ mPreviousHasClients = hasClients;
+ }
}
// If there is no callback defined or the callback did not handle this
@@ -238,9 +244,9 @@
reRegister();
}
-void ClientCounterCallback::setActiveServicesCountCallback(const std::function<bool(int)>&
- activeServicesCountCallback) {
- mActiveServicesCountCallback = activeServicesCountCallback;
+void ClientCounterCallback::setActiveServicesCallback(const std::function<bool(bool)>&
+ activeServicesCallback) {
+ mActiveServicesCallback = activeServicesCallback;
}
} // namespace internal
@@ -266,9 +272,9 @@
mClientCC->forcePersist(persist);
}
-void LazyServiceRegistrar::setActiveServicesCountCallback(const std::function<bool(int)>&
- activeServicesCountCallback) {
- mClientCC->setActiveServicesCountCallback(activeServicesCountCallback);
+void LazyServiceRegistrar::setActiveServicesCallback(const std::function<bool(bool)>&
+ activeServicesCallback) {
+ mClientCC->setActiveServicesCallback(activeServicesCallback);
}
bool LazyServiceRegistrar::tryUnregister() {
diff --git a/libs/binder/ProcessState.cpp b/libs/binder/ProcessState.cpp
index b5e4dfe..c38249e 100644
--- a/libs/binder/ProcessState.cpp
+++ b/libs/binder/ProcessState.cpp
@@ -399,6 +399,7 @@
, mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
, mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
, mExecutingThreadsCount(0)
+ , mWaitingForThreads(0)
, mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
, mStarvationStartTimeMs(0)
, mThreadPoolStarted(false)
diff --git a/libs/binder/TEST_MAPPING b/libs/binder/TEST_MAPPING
index 00a14f4..2e90142 100644
--- a/libs/binder/TEST_MAPPING
+++ b/libs/binder/TEST_MAPPING
@@ -65,6 +65,9 @@
},
{
"name": "binderRustNdkInteropTest"
+ },
+ {
+ "name": "rustBinderSerializationTest"
}
]
}
diff --git a/libs/binder/aidl/android/os/IServiceManager.aidl b/libs/binder/aidl/android/os/IServiceManager.aidl
index 2b1e492..ce30050 100644
--- a/libs/binder/aidl/android/os/IServiceManager.aidl
+++ b/libs/binder/aidl/android/os/IServiceManager.aidl
@@ -18,6 +18,7 @@
import android.os.IClientCallback;
import android.os.IServiceCallback;
+import android.os.ServiceDebugInfo;
/**
* Basic interface for finding and publishing system services.
@@ -116,4 +117,9 @@
* Attempt to unregister and remove a service. Will fail if the service is still in use.
*/
void tryUnregisterService(@utf8InCpp String name, IBinder service);
+
+ /**
+ * Get debug information for all currently registered services.
+ */
+ ServiceDebugInfo[] getServiceDebugInfo();
}
diff --git a/libs/binder/aidl/android/os/ServiceDebugInfo.aidl b/libs/binder/aidl/android/os/ServiceDebugInfo.aidl
new file mode 100644
index 0000000..b95d222
--- /dev/null
+++ b/libs/binder/aidl/android/os/ServiceDebugInfo.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+/**
+ * Debug information associated with a registered service
+ * @hide
+ */
+parcelable ServiceDebugInfo {
+ /**
+ * Service name (see IServiceManager.addService/checkService/getService)
+ */
+ @utf8InCpp String name;
+ /**
+ * PID of service at the time of registration (may no longer be valid).
+ */
+ int debugPid;
+}
diff --git a/libs/binder/include/binder/IPCThreadState.h b/libs/binder/include/binder/IPCThreadState.h
index 4da8aa1..0183324 100644
--- a/libs/binder/include/binder/IPCThreadState.h
+++ b/libs/binder/include/binder/IPCThreadState.h
@@ -110,6 +110,7 @@
status_t setupPolling(int* fd);
status_t handlePolledCommands();
void flushCommands();
+ bool flushIfNeeded();
void joinThreadPool(bool isMain = true);
@@ -204,6 +205,7 @@
int32_t mWorkSource;
// Whether the work source should be propagated.
bool mPropagateWorkSource;
+ bool mIsLooper;
int32_t mStrictModePolicy;
int32_t mLastTransactionBinderFlags;
CallRestriction mCallRestriction;
diff --git a/libs/binder/include/binder/LazyServiceRegistrar.h b/libs/binder/include/binder/LazyServiceRegistrar.h
index 73893c7..9659732 100644
--- a/libs/binder/include/binder/LazyServiceRegistrar.h
+++ b/libs/binder/include/binder/LazyServiceRegistrar.h
@@ -56,10 +56,10 @@
void forcePersist(bool persist);
/**
- * Set a callback that is executed when the total number of services with
- * clients changes.
- * The callback takes an argument, which is the number of registered
- * lazy services for this process which have clients.
+ * Set a callback that is invoked when the number of services with clients (the active
+ * service count) registered with this process drops to zero or becomes nonzero.
+ * The callback takes a boolean argument, which is 'true' if there is
+ * at least one service with clients.
*
* Callback return value:
* - false: Default behavior for lazy services (shut down the process if there
@@ -73,8 +73,7 @@
*
* This method should be called before 'registerService' to avoid races.
*/
- void setActiveServicesCountCallback(const std::function<bool(int)>&
- activeServicesCountCallback);
+ void setActiveServicesCallback(const std::function<bool(bool)>& activeServicesCallback);
/**
* Try to unregister all services previously registered with 'registerService'.
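
A minimal sketch of how a lazy service could adopt the renamed callback, assuming the usual LazyServiceRegistrar::getInstance() singleton accessor (illustrative only; the service name below is made up):

    #include <binder/LazyServiceRegistrar.h>

    void registerLazyService(const android::sp<android::IBinder>& service) {
        auto& registrar = android::binder::LazyServiceRegistrar::getInstance();  // assumed accessor
        // Invoked only when the "has clients" state actually changes
        // (see mPreviousHasClients in LazyServiceRegistrar.cpp above).
        registrar.setActiveServicesCallback([](bool hasClients) -> bool {
            if (!hasClients) {
                // Perform custom teardown instead of the default lazy shutdown.
                return true;   // handled here, suppress the default behavior
            }
            return false;      // fall back to the default lazy-service behavior
        });
        registrar.registerService(service, "example.service.name");
    }
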
diff --git a/libs/binder/include/binder/ProcessState.h b/libs/binder/include/binder/ProcessState.h
index bab6469..2405ab6 100644
--- a/libs/binder/include/binder/ProcessState.h
+++ b/libs/binder/include/binder/ProcessState.h
@@ -107,11 +107,14 @@
int mDriverFD;
void* mVMStart;
- // Protects thread count variable below.
+ // Protects thread count and wait variables below.
pthread_mutex_t mThreadCountLock;
+ // Broadcast whenever mWaitingForThreads > 0
pthread_cond_t mThreadCountDecrement;
// Number of binder threads current executing a command.
size_t mExecutingThreadsCount;
+ // Number of threads calling IPCThreadState::blockUntilThreadAvailable()
+ size_t mWaitingForThreads;
// Maximum number for binder threads allowed for this process.
size_t mMaxThreads;
// Time when thread pool was emptied
diff --git a/libs/binder/ndk/ibinder.cpp b/libs/binder/ndk/ibinder.cpp
index 454fbd0..3c90681 100644
--- a/libs/binder/ndk/ibinder.cpp
+++ b/libs/binder/ndk/ibinder.cpp
@@ -597,13 +597,6 @@
return STATUS_INVALID_OPERATION;
}
- if (!binder->isRemote()) {
- LOG(WARNING) << "A binder object at " << binder
- << " is being transacted on, however, this object is in the same process as "
- "its proxy. Transacting with this binder is expensive compared to just "
- "calling the corresponding functionality in the same process.";
- }
-
*in = new AParcel(binder);
status_t status = (*in)->get()->writeInterfaceToken(clazz->getInterfaceDescriptor());
binder_status_t ret = PruneStatusT(status);
diff --git a/libs/binder/ndk/include_cpp/android/binder_auto_utils.h b/libs/binder/ndk/include_cpp/android/binder_auto_utils.h
index 6b52c0a..0ad400b 100644
--- a/libs/binder/ndk/include_cpp/android/binder_auto_utils.h
+++ b/libs/binder/ndk/include_cpp/android/binder_auto_utils.h
@@ -265,7 +265,18 @@
AStatus_deleteDescription(cStr);
return ret;
}
- return "(not available)";
+ binder_exception_t exception = getExceptionCode();
+ std::string desc = std::to_string(exception);
+ if (exception == EX_SERVICE_SPECIFIC) {
+ desc += " (" + std::to_string(getServiceSpecificError()) + ")";
+ } else if (exception == EX_TRANSACTION_FAILED) {
+ desc += " (" + std::to_string(getStatus()) + ")";
+ }
+ if (const char* msg = getMessage(); msg != nullptr) {
+ desc += ": ";
+ desc += msg;
+ }
+ return desc;
}
/**
diff --git a/libs/binder/ndk/include_ndk/android/binder_ibinder.h b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
index 0ca3a07..bb70588 100644
--- a/libs/binder/ndk/include_ndk/android/binder_ibinder.h
+++ b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
@@ -660,13 +660,15 @@
/**
* Whether AIBinder is less than another.
*
- * This provides a per-process-unique total ordering of binders determined by
- * an underlying allocation address where a null AIBinder* is considered to be
- * ordered before all other binders.
+ * This provides a per-process-unique total ordering of binders where a null
+ * AIBinder* object is considered to be before all other binder objects.
+ * For instance, two binders refer to the same object in a local or remote
+ * process when both AIBinder_lt(a, b) and AIBinder_lt(b, a) are false. This API
+ * might be used to insert and lookup binders in binary search trees.
*
* AIBinder* pointers themselves actually also create a per-process-unique total
* ordering. However, this ordering is inconsistent with AIBinder_Weak_lt for
- * remote binders.
+ * remote binders. So, in general, this function should be preferred.
*
* Available since API level 31.
*
@@ -698,14 +700,21 @@
* the same as AIBinder_lt. Similarly, a null AIBinder_Weak* is considered to be
* ordered before all other weak references.
*
- * If you have many AIBinder_Weak* objects which are all references to distinct
- * binder objects which happen to have the same underlying address (as ordered
- * by AIBinder_lt), these AIBinder_Weak* objects will retain the same order with
- * respect to all other AIBinder_Weak* pointers with different underlying
- * addresses and are also guaranteed to have a per-process-unique ordering. That
- * is, even though multiple AIBinder* instances may happen to be allocated at
- * the same underlying address, this function will still correctly distinguish
- * that these are weak pointers to different binder objects.
+ * This function correctly distinguishes binders even if one is deallocated. So,
+ * for instance, an AIBinder_Weak* entry representing a deleted binder will
+ * never compare as equal to an AIBinder_Weak* entry which represents a
+ * different allocation of a binder, even if the two binders were originally
+ * allocated at the same address. That is:
+ *
+ * AIBinder* a = ...; // imagine this has address 0x8
+ * AIBinder_Weak* aWeak = AIBinder_Weak_new(a);
+ * AIBinder_decStrong(a); // a may be deleted, if this is the last reference
+ * AIBinder* b = ...; // imagine this has address 0x8 (same address as a)
+ * AIBinder_Weak* bWeak = AIBinder_Weak_new(b);
+ *
+ * Then when a/b are compared with other binders, their order will be preserved,
+ * and it will either be the case that AIBinder_Weak_lt(aWeak, bWeak) OR
+ * AIBinder_Weak_lt(bWeak, aWeak), but not both.
*
* Unlike AIBinder*, the AIBinder_Weak* addresses themselves have nothing to do
* with the underlying binder.
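
As the updated AIBinder_lt documentation suggests, the ordering is suitable for tree-based containers keyed on binder identity. A small illustrative sketch (not part of this change; the caller must keep strong references alive for as long as pointers stay in the set):

    #include <android/binder_ibinder.h>
    #include <set>

    // Orders AIBinder* by the per-process-unique total ordering described above,
    // rather than by raw pointer value.
    struct BinderLess {
        bool operator()(AIBinder* lhs, AIBinder* rhs) const {
            return AIBinder_lt(lhs, rhs);
        }
    };

    // Two entries compare equivalent exactly when neither AIBinder_lt(a, b) nor
    // AIBinder_lt(b, a) holds, i.e. when both refer to the same binder object,
    // so insertion de-duplicates by binder identity.
    using BinderSet = std::set<AIBinder*, BinderLess>;
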
diff --git a/libs/binder/ndk/tests/libbinder_ndk_unit_test.cpp b/libs/binder/ndk/tests/libbinder_ndk_unit_test.cpp
index b7df115..0d1989e 100644
--- a/libs/binder/ndk/tests/libbinder_ndk_unit_test.cpp
+++ b/libs/binder/ndk/tests/libbinder_ndk_unit_test.cpp
@@ -375,8 +375,7 @@
AIBinder_decStrong(binder);
- // assert because would need to decStrong if non-null and we shouldn't need to add a no-op here
- ASSERT_NE(nullptr, AIBinder_Weak_promote(wBinder));
+ ASSERT_EQ(nullptr, AIBinder_Weak_promote(wBinder));
AIBinder_Weak_delete(wBinder);
}
diff --git a/libs/binder/rust/src/binder.rs b/libs/binder/rust/src/binder.rs
index ed3b9ec..42c1e0a 100644
--- a/libs/binder/rust/src/binder.rs
+++ b/libs/binder/rust/src/binder.rs
@@ -613,7 +613,7 @@
impl $crate::parcel::Serialize for dyn $interface + '_
where
- $interface: $crate::Interface
+ dyn $interface: $crate::Interface
{
fn serialize(&self, parcel: &mut $crate::parcel::Parcel) -> $crate::Result<()> {
let binder = $crate::Interface::as_binder(self);
diff --git a/libs/binder/rust/src/parcel/parcelable.rs b/libs/binder/rust/src/parcel/parcelable.rs
index 138b360..8d18fb4 100644
--- a/libs/binder/rust/src/parcel/parcelable.rs
+++ b/libs/binder/rust/src/parcel/parcelable.rs
@@ -15,15 +15,17 @@
*/
use crate::binder::{AsNative, FromIBinder};
-use crate::error::{status_result, Result, Status, StatusCode};
+use crate::error::{status_result, status_t, Result, Status, StatusCode};
use crate::parcel::Parcel;
use crate::proxy::SpIBinder;
use crate::sys;
use std::convert::TryInto;
use std::ffi::c_void;
-use std::os::raw::c_char;
+use std::os::raw::{c_char, c_ulong};
+use std::mem::{self, MaybeUninit};
use std::ptr;
+use std::slice;
/// A struct whose instances can be written to a [`Parcel`].
// Might be able to hook this up as a serde backend in the future?
@@ -49,38 +51,109 @@
pub trait SerializeArray: Serialize + Sized {
/// Serialize an array of this type into the given [`Parcel`].
fn serialize_array(slice: &[Self], parcel: &mut Parcel) -> Result<()> {
- parcel.write_slice_size(Some(slice))?;
-
- for item in slice {
- parcel.write(item)?;
- }
-
- Ok(())
+ let res = unsafe {
+ // Safety: Safe FFI, slice will always be a safe pointer to pass.
+ sys::AParcel_writeParcelableArray(
+ parcel.as_native_mut(),
+ slice.as_ptr() as *const c_void,
+ slice.len().try_into().or(Err(StatusCode::BAD_VALUE))?,
+ Some(serialize_element::<Self>),
+ )
+ };
+ status_result(res)
}
}
+/// Callback to serialize an element of a generic parcelable array.
+///
+/// Safety: We are relying on binder_ndk to not overrun our slice. As long as it
+/// doesn't provide an index larger than the length of the original slice in
+/// serialize_array, this operation is safe. The index provided is zero-based.
+unsafe extern "C" fn serialize_element<T: Serialize>(
+ parcel: *mut sys::AParcel,
+ array: *const c_void,
+ index: c_ulong,
+) -> status_t {
+ // c_ulong and usize are the same, but we need the explicitly sized version
+ // so the function signature matches what bindgen generates.
+ let index = index as usize;
+
+ let slice: &[T] = slice::from_raw_parts(array.cast(), index+1);
+
+ let mut parcel = match Parcel::borrowed(parcel) {
+ None => return StatusCode::UNEXPECTED_NULL as status_t,
+ Some(p) => p,
+ };
+
+ slice[index].serialize(&mut parcel)
+ .err()
+ .unwrap_or(StatusCode::OK)
+ as status_t
+}
+
/// Helper trait for types that can be deserialized as arrays.
/// Defaults to calling Deserialize::deserialize() manually for every element,
/// but can be overridden for custom implementations like `readByteArray`.
pub trait DeserializeArray: Deserialize {
/// Deserialize an array of type from the given [`Parcel`].
fn deserialize_array(parcel: &Parcel) -> Result<Option<Vec<Self>>> {
- let len: i32 = parcel.read()?;
- if len < 0 {
- return Ok(None);
- }
-
- // TODO: Assumes that usize is at least 32 bits
- let mut vec = Vec::with_capacity(len as usize);
-
- for _ in 0..len {
- vec.push(parcel.read()?);
- }
-
- Ok(Some(vec))
+ let mut vec: Option<Vec<MaybeUninit<Self>>> = None;
+ let res = unsafe {
+ // Safety: Safe FFI, vec is the correct opaque type expected by
+ // allocate_vec and deserialize_element.
+ sys::AParcel_readParcelableArray(
+ parcel.as_native(),
+ &mut vec as *mut _ as *mut c_void,
+ Some(allocate_vec::<Self>),
+ Some(deserialize_element::<Self>),
+ )
+ };
+ status_result(res)?;
+ let vec: Option<Vec<Self>> = unsafe {
+ // Safety: We are assuming that the NDK correctly initialized every
+ // element of the vector by now, so we know that all the
+ // MaybeUninits are now properly initialized. We can transmute from
+ // Vec<MaybeUninit<T>> to Vec<T> because MaybeUninit<T> has the same
+ // alignment and size as T, so the pointer to the vector allocation
+ // will be compatible.
+ mem::transmute(vec)
+ };
+ Ok(vec)
}
}
+/// Callback to deserialize a parcelable element.
+///
+/// The opaque array data pointer must be a mutable pointer to an
+/// `Option<Vec<MaybeUninit<T>>>` with at least enough elements for `index` to be valid
+/// (zero-based).
+unsafe extern "C" fn deserialize_element<T: Deserialize>(
+ parcel: *const sys::AParcel,
+ array: *mut c_void,
+ index: c_ulong,
+) -> status_t {
+ // c_ulong and usize are the same, but we need the explicitly sized version
+ // so the function signature matches what bindgen generates.
+ let index = index as usize;
+
+ let vec = &mut *(array as *mut Option<Vec<MaybeUninit<T>>>);
+ let vec = match vec {
+ Some(v) => v,
+ None => return StatusCode::BAD_INDEX as status_t,
+ };
+
+ let parcel = match Parcel::borrowed(parcel as *mut _) {
+ None => return StatusCode::UNEXPECTED_NULL as status_t,
+ Some(p) => p,
+ };
+ let element = match parcel.read() {
+ Ok(e) => e,
+ Err(code) => return code as status_t,
+ };
+ ptr::write(vec[index].as_mut_ptr(), element);
+ StatusCode::OK as status_t
+}
+
/// Helper trait for types that can be nullable when serialized.
// We really need this trait instead of implementing `Serialize for Option<T>`
// because of the Rust orphan rule which prevents us from doing
@@ -115,28 +188,54 @@
/// Callback to allocate a vector for parcel array read functions.
///
+/// This variant is for APIs which use an out buffer pointer.
+///
/// # Safety
///
/// The opaque data pointer passed to the array read function must be a mutable
-/// pointer to an `Option<Vec<T>>`. `buffer` will be assigned a mutable pointer
+/// pointer to an `Option<Vec<MaybeUninit<T>>>`. `buffer` will be assigned a mutable pointer
/// to the allocated vector data if this function returns true.
-unsafe extern "C" fn allocate_vec<T: Clone + Default>(
+unsafe extern "C" fn allocate_vec_with_buffer<T>(
data: *mut c_void,
len: i32,
buffer: *mut *mut T,
) -> bool {
- let vec = &mut *(data as *mut Option<Vec<T>>);
+ let res = allocate_vec::<T>(data, len);
+ let vec = &mut *(data as *mut Option<Vec<MaybeUninit<T>>>);
+ if let Some(new_vec) = vec {
+ *buffer = new_vec.as_mut_ptr() as *mut T;
+ }
+ res
+}
+
+/// Callback to allocate a vector for parcel array read functions.
+///
+/// # Safety
+///
+/// The opaque data pointer passed to the array read function must be a mutable
+/// pointer to an `Option<Vec<MaybeUninit<T>>>`.
+unsafe extern "C" fn allocate_vec<T>(
+ data: *mut c_void,
+ len: i32,
+) -> bool {
+ let vec = &mut *(data as *mut Option<Vec<MaybeUninit<T>>>);
if len < 0 {
*vec = None;
return true;
}
- let mut new_vec: Vec<T> = Vec::with_capacity(len as usize);
- new_vec.resize_with(len as usize, Default::default);
- *buffer = new_vec.as_mut_ptr();
- *vec = Some(new_vec);
+ let mut new_vec: Vec<MaybeUninit<T>> = Vec::with_capacity(len as usize);
+
+ // Safety: We are filling the vector with uninitialized data here, but this
+ // is safe because the vector contains MaybeUninit elements which can be
+ // uninitialized. We're putting off the actual unsafe bit, transmuting the
+ // vector to a Vec<T> until the contents are initialized.
+ new_vec.set_len(len as usize);
+
+ ptr::write(vec, Some(new_vec));
true
}
+
macro_rules! parcelable_primitives {
{
$(
@@ -204,19 +303,29 @@
{DeserializeArray, $ty:ty, $read_array_fn:path} => {
impl DeserializeArray for $ty {
fn deserialize_array(parcel: &Parcel) -> Result<Option<Vec<Self>>> {
- let mut vec: Option<Vec<Self>> = None;
+ let mut vec: Option<Vec<MaybeUninit<Self>>> = None;
let status = unsafe {
// Safety: `Parcel` always contains a valid pointer to an
// `AParcel`. `allocate_vec<T>` expects the opaque pointer to
- // be of type `*mut Option<Vec<T>>`, so `&mut vec` is
+ // be of type `*mut Option<Vec<MaybeUninit<T>>>`, so `&mut vec` is
// correct for it.
$read_array_fn(
parcel.as_native(),
&mut vec as *mut _ as *mut c_void,
- Some(allocate_vec),
+ Some(allocate_vec_with_buffer),
)
};
status_result(status)?;
+ let vec: Option<Vec<Self>> = unsafe {
+ // Safety: We are assuming that the NDK correctly
+ // initialized every element of the vector by now, so we
+ // know that all the MaybeUninits are now properly
+ // initialized. We can transmute from Vec<MaybeUninit<T>> to
+ // Vec<T> because MaybeUninit<T> has the same alignment and
+ // size as T, so the pointer to the vector allocation will
+ // be compatible.
+ mem::transmute(vec)
+ };
Ok(vec)
}
}
@@ -414,7 +523,7 @@
sys::AParcel_readString(
parcel.as_native(),
&mut vec as *mut _ as *mut c_void,
- Some(allocate_vec),
+ Some(allocate_vec_with_buffer),
)
};
diff --git a/libs/binder/rust/src/proxy.rs b/libs/binder/rust/src/proxy.rs
index 17af099..e9e74c0 100644
--- a/libs/binder/rust/src/proxy.rs
+++ b/libs/binder/rust/src/proxy.rs
@@ -70,6 +70,20 @@
ptr.as_mut().map(|p| Self(p))
}
+ /// Extract a raw `AIBinder` pointer from this wrapper.
+ ///
+ /// This method should _only_ be used for testing. Do not try to use the NDK
+ /// interface directly for anything else.
+ ///
+ /// # Safety
+ ///
+ /// The resulting pointer is valid only as long as the SpIBinder is alive.
+ /// The SpIBinder object retains ownership of the AIBinder and the caller
+ /// should not attempt to free the returned pointer.
+ pub unsafe fn as_raw(&self) -> *mut sys::AIBinder {
+ self.0
+ }
+
/// Return true if this binder object is hosted in a different process than
/// the current one.
pub fn is_remote(&self) -> bool {
diff --git a/libs/binder/rust/tests/Android.bp b/libs/binder/rust/tests/Android.bp
index 5ae9c53..8810b5d 100644
--- a/libs/binder/rust/tests/Android.bp
+++ b/libs/binder/rust/tests/Android.bp
@@ -79,3 +79,50 @@
"IBinderRustNdkInteropTest-rust",
],
}
+
+cc_test {
+ name: "rustBinderSerializationTest",
+ shared_libs: [
+ "libbinder",
+ "libbinder_ndk",
+ "libutils",
+ "libbase",
+ ],
+ static_libs: [
+ "libbinder_rs_serialization_test"
+ ],
+ srcs: [
+ "serialization.cpp",
+ ],
+ auto_gen_config: true,
+ test_suites: ["general-tests"],
+}
+
+rust_bindgen {
+ name: "libbinder_rs_serialization_bindgen",
+ crate_name: "binder_rs_serialization_bindgen",
+ wrapper_src: "serialization.hpp",
+ source_stem: "bindings",
+ cpp_std: "gnu++17",
+ bindgen_flags: [
+ "--whitelist-type", "Transaction",
+ "--whitelist-var", "TESTDATA_.*",
+ ],
+
+ shared_libs: [
+ "libbinder",
+ "libc++",
+ ],
+}
+
+rust_ffi_static {
+ name: "libbinder_rs_serialization_test",
+ crate_name: "binder_rs_serialization_test",
+ srcs: [
+ "serialization.rs",
+ ":libbinder_rs_serialization_bindgen",
+ ],
+ rustlibs: [
+ "libbinder_rs",
+ ],
+}
diff --git a/libs/binder/rust/tests/serialization.cpp b/libs/binder/rust/tests/serialization.cpp
new file mode 100644
index 0000000..ec780f2
--- /dev/null
+++ b/libs/binder/rust/tests/serialization.cpp
@@ -0,0 +1,454 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/binder_ibinder_platform.h>
+#include <android/binder_libbinder.h>
+#include <binder/IServiceManager.h>
+#include <binder/Parcel.h>
+#include <binder/ParcelFileDescriptor.h>
+#include <binder/ProcessState.h>
+#include <binder/Status.h>
+#include <gtest/gtest.h>
+#include <utils/Errors.h>
+#include <utils/String16.h>
+#include "android-base/file.h"
+#include "serialization.hpp"
+
+#include <cmath>
+#include <cstdint>
+#include <iostream>
+#include <optional>
+
+using namespace std;
+using namespace android;
+using android::base::unique_fd;
+using android::os::ParcelFileDescriptor;
+
+// defined in Rust
+extern "C" AIBinder *rust_service();
+
+
+const int8_t TESTDATA_I8[4] = {-128, 0, 117, 127};
+const uint8_t TESTDATA_U8[4] = {0, 42, 117, 255};
+const char16_t TESTDATA_CHARS[4] = {0, 42, 117, numeric_limits<char16_t>::max()};
+const int32_t TESTDATA_I32[4] = {numeric_limits<int32_t>::min(), 0, 117, numeric_limits<int32_t>::max()};
+const int64_t TESTDATA_I64[4] = {numeric_limits<int64_t>::min(), 0, 117, numeric_limits<int64_t>::max()};
+const uint64_t TESTDATA_U64[4] = {0, 42, 117, numeric_limits<uint64_t>::max()};
+const float TESTDATA_FLOAT[4] = {
+ numeric_limits<float>::quiet_NaN(),
+ -numeric_limits<float>::infinity(),
+ 117.0,
+ numeric_limits<float>::infinity(),
+};
+const double TESTDATA_DOUBLE[4] = {
+ numeric_limits<double>::quiet_NaN(),
+ -numeric_limits<double>::infinity(),
+ 117.0,
+ numeric_limits<double>::infinity(),
+};
+const bool TESTDATA_BOOL[4] = {true, false, false, true};
+const char* const TESTDATA_STRS[4] = {"", nullptr, "test", ""};
+
+static ::testing::Environment* gEnvironment;
+
+class SerializationEnvironment : public ::testing::Environment {
+public:
+ void SetUp() override {
+ m_server = AIBinder_toPlatformBinder(rust_service());
+ }
+
+ sp<IBinder> getServer(void) { return m_server; }
+
+private:
+ sp<IBinder> m_server;
+};
+
+
+class SerializationTest : public ::testing::Test {
+protected:
+ void SetUp() override {
+ ASSERT_NE(gEnvironment, nullptr);
+ m_server = static_cast<SerializationEnvironment *>(gEnvironment)->getServer();
+ }
+
+ sp<IBinder> m_server;
+};
+
+
+TEST_F(SerializationTest, SerializeBool) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ vector<bool> bools(begin(TESTDATA_BOOL), end(TESTDATA_BOOL));
+ ASSERT_EQ(data.writeBool(true), OK);
+ ASSERT_EQ(data.writeBool(false), OK);
+ ASSERT_EQ(data.writeBoolVector(bools), OK);
+ ASSERT_EQ(data.writeBoolVector(nullopt), OK);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_BOOL, data, &reply), OK);
+
+ vector<bool> read_bools;
+ optional<vector<bool>> maybe_bools;
+ ASSERT_EQ(reply.readBool(), true);
+ ASSERT_EQ(reply.readBool(), false);
+ ASSERT_EQ(reply.readBoolVector(&read_bools), OK);
+ ASSERT_EQ(read_bools, bools);
+ ASSERT_EQ(reply.readBoolVector(&maybe_bools), OK);
+ ASSERT_EQ(maybe_bools, nullopt);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeByte) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ vector<int8_t> i8s(begin(TESTDATA_I8), end(TESTDATA_I8));
+ vector<uint8_t> u8s(begin(TESTDATA_U8), end(TESTDATA_U8));
+ data.writeByte(0);
+ data.writeByte(1);
+ data.writeByte(numeric_limits<int8_t>::max());
+ data.writeByteVector(i8s);
+ data.writeByteVector(u8s);
+ data.writeByteVector(optional<vector<int8_t>>({}));
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_BYTE, data, &reply), OK);
+
+ vector<int8_t> read_i8s;
+ vector<uint8_t> read_u8s;
+ optional<vector<int8_t>> maybe_i8s;
+ ASSERT_EQ(reply.readByte(), 0);
+ ASSERT_EQ(reply.readByte(), 1);
+ ASSERT_EQ(reply.readByte(), numeric_limits<int8_t>::max());
+ ASSERT_EQ(reply.readByteVector(&read_i8s), OK);
+ ASSERT_EQ(read_i8s, i8s);
+ ASSERT_EQ(reply.readByteVector(&read_u8s), OK);
+ ASSERT_EQ(read_u8s, u8s);
+ ASSERT_EQ(reply.readByteVector(&maybe_i8s), OK);
+ ASSERT_EQ(maybe_i8s, nullopt);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeU16) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ vector<char16_t> chars(begin(TESTDATA_CHARS), end(TESTDATA_CHARS));
+ data.writeChar(0);
+ data.writeChar(1);
+ data.writeChar(numeric_limits<char16_t>::max());
+ data.writeCharVector(chars);
+ data.writeCharVector(nullopt);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_U16, data, &reply), OK);
+
+ vector<char16_t> read_chars;
+ optional<vector<char16_t>> maybe_chars;
+ ASSERT_EQ(reply.readChar(), 0);
+ ASSERT_EQ(reply.readChar(), 1);
+ ASSERT_EQ(reply.readChar(), numeric_limits<char16_t>::max());
+ ASSERT_EQ(reply.readCharVector(&read_chars), OK);
+ ASSERT_EQ(read_chars, chars);
+ ASSERT_EQ(reply.readCharVector(&maybe_chars), OK);
+ ASSERT_EQ(maybe_chars, nullopt);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeI32) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ vector<int32_t> i32s(begin(TESTDATA_I32), end(TESTDATA_I32));
+ data.writeInt32(0);
+ data.writeInt32(1);
+ data.writeInt32(numeric_limits<int32_t>::max());
+ data.writeInt32Vector(i32s);
+ data.writeInt32Vector(nullopt);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_I32, data, &reply), OK);
+
+ vector<int32_t> read_i32s;
+ optional<vector<int32_t>> maybe_i32s;
+ ASSERT_EQ(reply.readInt32(), 0);
+ ASSERT_EQ(reply.readInt32(), 1);
+ ASSERT_EQ(reply.readInt32(), numeric_limits<int32_t>::max());
+ ASSERT_EQ(reply.readInt32Vector(&read_i32s), OK);
+ ASSERT_EQ(read_i32s, i32s);
+ ASSERT_EQ(reply.readInt32Vector(&maybe_i32s), OK);
+ ASSERT_EQ(maybe_i32s, nullopt);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeI64) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ vector<int64_t> i64s(begin(TESTDATA_I64), end(TESTDATA_I64));
+ data.writeInt64(0);
+ data.writeInt64(1);
+ data.writeInt64(numeric_limits<int64_t>::max());
+ data.writeInt64Vector(i64s);
+ data.writeInt64Vector(nullopt);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_I64, data, &reply), OK);
+
+ vector<int64_t> read_i64s;
+ optional<vector<int64_t>> maybe_i64s;
+ ASSERT_EQ(reply.readInt64(), 0);
+ ASSERT_EQ(reply.readInt64(), 1);
+ ASSERT_EQ(reply.readInt64(), numeric_limits<int64_t>::max());
+ ASSERT_EQ(reply.readInt64Vector(&read_i64s), OK);
+ ASSERT_EQ(read_i64s, i64s);
+ ASSERT_EQ(reply.readInt64Vector(&maybe_i64s), OK);
+ ASSERT_EQ(maybe_i64s, nullopt);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeU64) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ vector<uint64_t> u64s(begin(TESTDATA_U64), end(TESTDATA_U64));
+ data.writeUint64(0);
+ data.writeUint64(1);
+ data.writeUint64(numeric_limits<uint64_t>::max());
+ data.writeUint64Vector(u64s);
+ data.writeUint64Vector(nullopt);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_U64, data, &reply), OK);
+
+ vector<uint64_t> read_u64s;
+ optional<vector<uint64_t>> maybe_u64s;
+ ASSERT_EQ(reply.readUint64(), 0);
+ ASSERT_EQ(reply.readUint64(), 1);
+ ASSERT_EQ(reply.readUint64(), numeric_limits<uint64_t>::max());
+ ASSERT_EQ(reply.readUint64Vector(&read_u64s), OK);
+ ASSERT_EQ(read_u64s, u64s);
+ ASSERT_EQ(reply.readUint64Vector(&maybe_u64s), OK);
+ ASSERT_EQ(maybe_u64s, nullopt);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeF32) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ vector<float> floats(begin(TESTDATA_FLOAT), end(TESTDATA_FLOAT));
+ data.writeFloat(0);
+ data.writeFloatVector(floats);
+ data.writeFloatVector(nullopt);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_F32, data, &reply), OK);
+
+ vector<float> read_floats;
+ optional<vector<float>> maybe_floats;
+ ASSERT_EQ(reply.readFloat(), 0);
+ ASSERT_EQ(reply.readFloatVector(&read_floats), OK);
+ ASSERT_TRUE(isnan(read_floats[0]));
+ ASSERT_EQ(read_floats[1], floats[1]);
+ ASSERT_EQ(read_floats[2], floats[2]);
+ ASSERT_EQ(read_floats[3], floats[3]);
+ ASSERT_EQ(reply.readFloatVector(&maybe_floats), OK);
+ ASSERT_EQ(maybe_floats, nullopt);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeF64) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ vector<double> doubles(begin(TESTDATA_DOUBLE), end(TESTDATA_DOUBLE));
+ data.writeDouble(0);
+ data.writeDoubleVector(doubles);
+ data.writeDoubleVector(nullopt);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_F64, data, &reply), OK);
+
+ vector<double> read_doubles;
+ optional<vector<double>> maybe_doubles;
+ ASSERT_EQ(reply.readDouble(), 0);
+ ASSERT_EQ(reply.readDoubleVector(&read_doubles), OK);
+ ASSERT_TRUE(isnan(read_doubles[0]));
+ ASSERT_EQ(read_doubles[1], doubles[1]);
+ ASSERT_EQ(read_doubles[2], doubles[2]);
+ ASSERT_EQ(read_doubles[3], doubles[3]);
+ ASSERT_EQ(reply.readDoubleVector(&maybe_doubles), OK);
+ ASSERT_EQ(maybe_doubles, nullopt);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeString) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ vector<optional<String16>> strings;
+ for (auto I = begin(TESTDATA_STRS), E = end(TESTDATA_STRS); I != E; ++I) {
+ if (*I == nullptr) {
+ strings.push_back(optional<String16>());
+ } else {
+ strings.emplace_back(*I);
+ }
+ }
+ data.writeUtf8AsUtf16(string("testing"));
+ data.writeString16(nullopt);
+ data.writeString16Vector(strings);
+ data.writeString16Vector(nullopt);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_STRING, data, &reply), OK);
+
+ optional<String16> maybe_string;
+ optional<vector<optional<String16>>> read_strings;
+ ASSERT_EQ(reply.readString16(), String16("testing"));
+ ASSERT_EQ(reply.readString16(&maybe_string), OK);
+ ASSERT_EQ(maybe_string, nullopt);
+ ASSERT_EQ(reply.readString16Vector(&read_strings), OK);
+ ASSERT_EQ(read_strings, strings);
+ ASSERT_EQ(reply.readString16Vector(&read_strings), OK);
+ ASSERT_EQ(read_strings, nullopt);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeFileDescriptor) {
+ unique_fd out_file, in_file;
+ ASSERT_TRUE(base::Pipe(&out_file, &in_file));
+
+ vector<ParcelFileDescriptor> file_descriptors;
+ file_descriptors.push_back(ParcelFileDescriptor(std::move(out_file)));
+ file_descriptors.push_back(ParcelFileDescriptor(std::move(in_file)));
+
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ data.writeParcelable(file_descriptors[0]);
+ data.writeParcelable(file_descriptors[1]);
+ data.writeParcelableVector(file_descriptors);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_FILE_DESCRIPTOR, data, &reply), OK);
+
+ ParcelFileDescriptor returned_fd1, returned_fd2;
+ vector<ParcelFileDescriptor> returned_file_descriptors;
+ ASSERT_EQ(reply.readParcelable(&returned_fd1), OK);
+ ASSERT_EQ(reply.readParcelable(&returned_fd2), OK);
+ ASSERT_EQ(reply.readParcelableVector(&returned_file_descriptors), OK);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+
+ base::WriteStringToFd("Testing", returned_fd2.get());
+ base::WriteStringToFd("File", returned_file_descriptors[1].get());
+ base::WriteStringToFd("Descriptors", file_descriptors[1].get());
+
+ string expected = "TestingFileDescriptors";
+ vector<char> buf(expected.length());
+ base::ReadFully(file_descriptors[0].release(), buf.data(), buf.size());
+ ASSERT_EQ(expected, string(buf.data()));
+}
+
+TEST_F(SerializationTest, SerializeIBinder) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ data.writeStrongBinder(m_server);
+ data.writeStrongBinder(nullptr);
+ data.writeStrongBinderVector({m_server, nullptr});
+ data.writeStrongBinderVector(nullopt);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_IBINDER, data, &reply), OK);
+
+ optional<vector<sp<IBinder>>> binders;
+ ASSERT_TRUE(reply.readStrongBinder());
+ ASSERT_FALSE(reply.readStrongBinder());
+ ASSERT_EQ(reply.readStrongBinderVector(&binders), OK);
+ ASSERT_EQ(binders->size(), 2);
+ ASSERT_TRUE((*binders)[0]);
+ ASSERT_FALSE((*binders)[1]);
+ ASSERT_EQ(reply.readStrongBinderVector(&binders), OK);
+ ASSERT_FALSE(binders);
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+TEST_F(SerializationTest, SerializeStatus) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+
+ binder::Status::ok().writeToParcel(&data);
+ binder::Status::fromExceptionCode(binder::Status::EX_NULL_POINTER, "a status message")
+ .writeToParcel(&data);
+ binder::Status::fromServiceSpecificError(42, "a service-specific error").writeToParcel(&data);
+
+ android::Parcel reply;
+ ASSERT_EQ(m_server->transact(TEST_STATUS, data, &reply), OK);
+
+ binder::Status status;
+
+ ASSERT_EQ(status.readFromParcel(reply), OK);
+ ASSERT_TRUE(status.isOk());
+
+ ASSERT_EQ(status.readFromParcel(reply), OK);
+ ASSERT_EQ(status.exceptionCode(), binder::Status::EX_NULL_POINTER);
+ ASSERT_EQ(status.exceptionMessage(), "a status message");
+
+ ASSERT_EQ(status.readFromParcel(reply), OK);
+ ASSERT_EQ(status.serviceSpecificErrorCode(), 42);
+ ASSERT_EQ(status.exceptionMessage(), "a service-specific error");
+
+ int32_t end;
+ ASSERT_EQ(reply.readInt32(&end), NOT_ENOUGH_DATA);
+}
+
+// Test that failures from Rust properly propagate to C++
+TEST_F(SerializationTest, SerializeRustFail) {
+ android::Parcel data;
+ data.writeInterfaceToken(String16("read_parcel_test"));
+ ASSERT_EQ(m_server->transact(TEST_FAIL, data, nullptr), FAILED_TRANSACTION);
+}
+
+int main(int argc, char **argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ gEnvironment = AddGlobalTestEnvironment(new SerializationEnvironment());
+ ProcessState::self()->startThreadPool();
+ return RUN_ALL_TESTS();
+}
diff --git a/libs/binder/rust/tests/serialization.hpp b/libs/binder/rust/tests/serialization.hpp
new file mode 100644
index 0000000..0041608
--- /dev/null
+++ b/libs/binder/rust/tests/serialization.hpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <binder/IBinder.h>
+
+using namespace android;
+
+enum Transaction {
+ TEST_BOOL = IBinder::FIRST_CALL_TRANSACTION,
+ TEST_BYTE,
+ TEST_U16,
+ TEST_I32,
+ TEST_I64,
+ TEST_U64,
+ TEST_F32,
+ TEST_F64,
+ TEST_STRING,
+ TEST_FILE_DESCRIPTOR,
+ TEST_IBINDER,
+ TEST_STATUS,
+ TEST_FAIL,
+};
+
+extern const int8_t TESTDATA_I8[4];
+extern const uint8_t TESTDATA_U8[4];
+extern const char16_t TESTDATA_CHARS[4];
+extern const int32_t TESTDATA_I32[4];
+extern const int64_t TESTDATA_I64[4];
+extern const uint64_t TESTDATA_U64[4];
+extern const float TESTDATA_FLOAT[4];
+extern const double TESTDATA_DOUBLE[4];
+extern const bool TESTDATA_BOOL[4];
+extern const char* const TESTDATA_STRS[4];
diff --git a/libs/binder/rust/tests/serialization.rs b/libs/binder/rust/tests/serialization.rs
new file mode 100644
index 0000000..2bf3d03
--- /dev/null
+++ b/libs/binder/rust/tests/serialization.rs
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Included as a module in the binder crate internal tests for internal API
+//! access.
+
+use binder::declare_binder_interface;
+use binder::{
+ Binder, ExceptionCode, Interface, Parcel, Result, SpIBinder, Status,
+ StatusCode, TransactionCode,
+};
+use binder::parcel::ParcelFileDescriptor;
+
+use std::ffi::{c_void, CStr, CString};
+use std::sync::Once;
+
+#[allow(
+ non_camel_case_types,
+ non_snake_case,
+ non_upper_case_globals,
+ unused,
+ improper_ctypes,
+ missing_docs,
+ clippy::all
+)]
+mod bindings {
+ include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
+}
+
+static SERVICE_ONCE: Once = Once::new();
+static mut SERVICE: Option<SpIBinder> = None;
+
+/// Start binder service and return a raw AIBinder pointer to it.
+///
+/// Safe to call multiple times, only creates the service once.
+#[no_mangle]
+pub extern "C" fn rust_service() -> *mut c_void {
+ unsafe {
+ SERVICE_ONCE.call_once(|| {
+ SERVICE = Some(BnReadParcelTest::new_binder(()).as_binder());
+ });
+ SERVICE.as_ref().unwrap().as_raw().cast()
+ }
+}
+
+/// Empty interface just to use the declare_binder_interface macro
+pub trait ReadParcelTest: Interface {}
+
+declare_binder_interface! {
+ ReadParcelTest["read_parcel_test"] {
+ native: BnReadParcelTest(on_transact),
+ proxy: BpReadParcelTest,
+ }
+}
+
+impl ReadParcelTest for Binder<BnReadParcelTest> {}
+
+impl ReadParcelTest for BpReadParcelTest {}
+
+impl ReadParcelTest for () {}
+
+#[allow(clippy::float_cmp)]
+fn on_transact(_service: &dyn ReadParcelTest, code: TransactionCode,
+ parcel: &Parcel, reply: &mut Parcel) -> Result<()> {
+ match code {
+ bindings::Transaction_TEST_BOOL => {
+ assert_eq!(parcel.read::<bool>()?, true);
+ assert_eq!(parcel.read::<bool>()?, false);
+ assert_eq!(parcel.read::<Vec<bool>>()?, unsafe {
+ bindings::TESTDATA_BOOL
+ });
+ assert_eq!(parcel.read::<Option<Vec<bool>>>()?, None);
+
+ reply.write(&true)?;
+ reply.write(&false)?;
+ reply.write(&unsafe { bindings::TESTDATA_BOOL }[..])?;
+ reply.write(&(None as Option<Vec<bool>>))?;
+ }
+ bindings::Transaction_TEST_BYTE => {
+ assert_eq!(parcel.read::<i8>()?, 0);
+ assert_eq!(parcel.read::<i8>()?, 1);
+ assert_eq!(parcel.read::<i8>()?, i8::max_value());
+ assert_eq!(parcel.read::<Vec<i8>>()?, unsafe { bindings::TESTDATA_I8 });
+ assert_eq!(parcel.read::<Vec<u8>>()?, unsafe { bindings::TESTDATA_U8 });
+ assert_eq!(parcel.read::<Option<Vec<i8>>>()?, None);
+
+ reply.write(&0i8)?;
+ reply.write(&1i8)?;
+ reply.write(&i8::max_value())?;
+ reply.write(&unsafe { bindings::TESTDATA_I8 }[..])?;
+ reply.write(&unsafe { bindings::TESTDATA_U8 }[..])?;
+ reply.write(&(None as Option<Vec<i8>>))?;
+ }
+ bindings::Transaction_TEST_U16 => {
+ assert_eq!(parcel.read::<u16>()?, 0);
+ assert_eq!(parcel.read::<u16>()?, 1);
+ assert_eq!(parcel.read::<u16>()?, u16::max_value());
+ assert_eq!(parcel.read::<Vec<u16>>()?, unsafe {
+ bindings::TESTDATA_CHARS
+ });
+ assert_eq!(parcel.read::<Option<Vec<u16>>>()?, None);
+
+ reply.write(&0u16)?;
+ reply.write(&1u16)?;
+ reply.write(&u16::max_value())?;
+ reply.write(&unsafe { bindings::TESTDATA_CHARS }[..])?;
+ reply.write(&(None as Option<Vec<u16>>))?;
+ }
+ bindings::Transaction_TEST_I32 => {
+ assert_eq!(parcel.read::<i32>()?, 0);
+ assert_eq!(parcel.read::<i32>()?, 1);
+ assert_eq!(parcel.read::<i32>()?, i32::max_value());
+ assert_eq!(parcel.read::<Vec<i32>>()?, unsafe {
+ bindings::TESTDATA_I32
+ });
+ assert_eq!(parcel.read::<Option<Vec<i32>>>()?, None);
+
+ reply.write(&0i32)?;
+ reply.write(&1i32)?;
+ reply.write(&i32::max_value())?;
+ reply.write(&unsafe { bindings::TESTDATA_I32 }[..])?;
+ reply.write(&(None as Option<Vec<i32>>))?;
+ }
+ bindings::Transaction_TEST_I64 => {
+ assert_eq!(parcel.read::<i64>()?, 0);
+ assert_eq!(parcel.read::<i64>()?, 1);
+ assert_eq!(parcel.read::<i64>()?, i64::max_value());
+ assert_eq!(parcel.read::<Vec<i64>>()?, unsafe {
+ bindings::TESTDATA_I64
+ });
+ assert_eq!(parcel.read::<Option<Vec<i64>>>()?, None);
+
+ reply.write(&0i64)?;
+ reply.write(&1i64)?;
+ reply.write(&i64::max_value())?;
+ reply.write(&unsafe { bindings::TESTDATA_I64 }[..])?;
+ reply.write(&(None as Option<Vec<i64>>))?;
+ }
+ bindings::Transaction_TEST_U64 => {
+ assert_eq!(parcel.read::<u64>()?, 0);
+ assert_eq!(parcel.read::<u64>()?, 1);
+ assert_eq!(parcel.read::<u64>()?, u64::max_value());
+ assert_eq!(parcel.read::<Vec<u64>>()?, unsafe {
+ bindings::TESTDATA_U64
+ });
+ assert_eq!(parcel.read::<Option<Vec<u64>>>()?, None);
+
+ reply.write(&0u64)?;
+ reply.write(&1u64)?;
+ reply.write(&u64::max_value())?;
+ reply.write(&unsafe { bindings::TESTDATA_U64 }[..])?;
+ reply.write(&(None as Option<Vec<u64>>))?;
+ }
+ bindings::Transaction_TEST_F32 => {
+ assert_eq!(parcel.read::<f32>()?, 0f32);
+ let floats = parcel.read::<Vec<f32>>()?;
+ assert!(floats[0].is_nan());
+ assert_eq!(floats[1..], unsafe { bindings::TESTDATA_FLOAT }[1..]);
+ assert_eq!(parcel.read::<Option<Vec<f32>>>()?, None);
+
+ reply.write(&0f32)?;
+ reply.write(&unsafe { bindings::TESTDATA_FLOAT }[..])?;
+ reply.write(&(None as Option<Vec<f32>>))?;
+ }
+ bindings::Transaction_TEST_F64 => {
+ assert_eq!(parcel.read::<f64>()?, 0f64);
+ let doubles = parcel.read::<Vec<f64>>()?;
+ assert!(doubles[0].is_nan());
+ assert_eq!(doubles[1..], unsafe { bindings::TESTDATA_DOUBLE }[1..]);
+ assert_eq!(parcel.read::<Option<Vec<f64>>>()?, None);
+
+ reply.write(&0f64)?;
+ reply.write(&unsafe { bindings::TESTDATA_DOUBLE }[..])?;
+ reply.write(&(None as Option<Vec<f64>>))?;
+ }
+ bindings::Transaction_TEST_STRING => {
+ let s: Option<String> = parcel.read()?;
+ assert_eq!(s.as_deref(), Some("testing"));
+ let s: Option<String> = parcel.read()?;
+ assert_eq!(s, None);
+ let s: Option<Vec<Option<String>>> = parcel.read()?;
+ for (s, expected) in s
+ .unwrap()
+ .iter()
+ .zip(unsafe { bindings::TESTDATA_STRS }.iter())
+ {
+ let expected = unsafe {
+ expected
+ .as_ref()
+ .and_then(|e| CStr::from_ptr(e).to_str().ok())
+ };
+ assert_eq!(s.as_deref(), expected);
+ }
+ let s: Option<Vec<Option<String>>> = parcel.read()?;
+ assert_eq!(s, None);
+
+ let strings: Vec<Option<String>> = unsafe {
+ bindings::TESTDATA_STRS
+ .iter()
+ .map(|s| {
+ s.as_ref().map(|s| {
+ CStr::from_ptr(s)
+ .to_str()
+ .expect("String was not UTF-8")
+ .to_owned()
+ })
+ })
+ .collect()
+ };
+
+ reply.write("testing")?;
+ reply.write(&(None as Option<String>))?;
+ reply.write(&strings)?;
+ reply.write(&(None as Option<Vec<String>>))?;
+ }
+ bindings::Transaction_TEST_FILE_DESCRIPTOR => {
+ let file1 = parcel.read::<ParcelFileDescriptor>()?;
+ let file2 = parcel.read::<ParcelFileDescriptor>()?;
+ let files = parcel.read::<Vec<Option<ParcelFileDescriptor>>>()?;
+
+ reply.write(&file1)?;
+ reply.write(&file2)?;
+ reply.write(&files)?;
+ }
+ bindings::Transaction_TEST_IBINDER => {
+ assert!(parcel.read::<Option<SpIBinder>>()?.is_some());
+ assert!(parcel.read::<Option<SpIBinder>>()?.is_none());
+ let ibinders = parcel.read::<Option<Vec<Option<SpIBinder>>>>()?.unwrap();
+ assert_eq!(ibinders.len(), 2);
+ assert!(ibinders[0].is_some());
+ assert!(ibinders[1].is_none());
+ assert!(parcel.read::<Option<Vec<Option<SpIBinder>>>>()?.is_none());
+
+ let service = unsafe {
+ SERVICE
+ .as_ref()
+ .expect("Global binder service not initialized")
+ .clone()
+ };
+ reply.write(&service)?;
+ reply.write(&(None as Option<&SpIBinder>))?;
+ reply.write(&[Some(&service), None][..])?;
+ reply.write(&(None as Option<Vec<Option<&SpIBinder>>>))?;
+ }
+ bindings::Transaction_TEST_STATUS => {
+ let status: Status = parcel.read()?;
+ assert!(status.is_ok());
+ let status: Status = parcel.read()?;
+ assert_eq!(status.exception_code(), ExceptionCode::NULL_POINTER);
+ assert_eq!(
+ status.get_description(),
+ "Status(-4, EX_NULL_POINTER): 'a status message'"
+ );
+ let status: Status = parcel.read()?;
+ assert_eq!(status.service_specific_error(), 42);
+ assert_eq!(
+ status.get_description(),
+ "Status(-8, EX_SERVICE_SPECIFIC): '42: a service-specific error'"
+ );
+
+ reply.write(&Status::ok())?;
+ reply.write(&Status::new_exception(
+ ExceptionCode::NULL_POINTER,
+ Some(&CString::new("a status message").unwrap()),
+ ))?;
+ reply.write(&Status::new_service_specific_error(
+ 42,
+ Some(&CString::new("a service-specific error").unwrap()),
+ ))?;
+ }
+ bindings::Transaction_TEST_FAIL => {
+ return Err(StatusCode::FAILED_TRANSACTION)
+ }
+ _ => return Err(StatusCode::UNKNOWN_TRANSACTION),
+ }
+
+ assert_eq!(parcel.read::<i32>(), Err(StatusCode::NOT_ENOUGH_DATA));
+ Ok(())
+}
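The service above is consumed from the C++ half of the internal tests through the raw AIBinder pointer returned by rust_service(). As a rough sketch only (the real C++ harness and the shared transaction-code constants are not part of this hunk; kTestFail below is a placeholder value), a caller using the NDK AIBinder C API might drive the TEST_FAIL transaction, which takes no arguments and is expected to surface as a failed-transaction status:

#include <android/binder_ibinder.h>
#include <android/binder_parcel.h>
#include <android/binder_status.h>

extern "C" void* rust_service();  // exported above with #[no_mangle]

bool exerciseTestFail() {
    auto* binder = static_cast<AIBinder*>(rust_service());

    AParcel* in = nullptr;
    AParcel* out = nullptr;
    if (AIBinder_prepareTransaction(binder, &in) != STATUS_OK) return false;

    // Placeholder for the TEST_FAIL code shared with the Rust bindings.
    const transaction_code_t kTestFail = FIRST_CALL_TRANSACTION + 11;
    binder_status_t status = AIBinder_transact(binder, kTestFail, &in, &out, /*flags=*/0);

    if (out != nullptr) AParcel_delete(out);
    // The Rust handler returns StatusCode::FAILED_TRANSACTION for this code.
    return status == STATUS_FAILED_TRANSACTION;
}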
diff --git a/libs/binder/tests/binderClearBufTest.cpp b/libs/binder/tests/binderClearBufTest.cpp
index a565e72..2d30c8d 100644
--- a/libs/binder/tests/binderClearBufTest.cpp
+++ b/libs/binder/tests/binderClearBufTest.cpp
@@ -83,7 +83,6 @@
lastReply = reply.data();
lastReplySize = reply.dataSize();
}
- IPCThreadState::self()->flushCommands();
*outBuffer = hexString(lastReply, lastReplySize);
return result;
}
diff --git a/libs/cputimeinstate/cputimeinstate.cpp b/libs/cputimeinstate/cputimeinstate.cpp
index 2e72cc4..462f0db 100644
--- a/libs/cputimeinstate/cputimeinstate.cpp
+++ b/libs/cputimeinstate/cputimeinstate.cpp
@@ -99,7 +99,7 @@
struct dirent **dirlist;
const char basepath[] = "/sys/devices/system/cpu/cpufreq";
int ret = scandir(basepath, &dirlist, isPolicyFile, comparePolicyFiles);
- if (ret == -1) return false;
+ if (ret == -1 || ret == 0) return false;
gNPolicies = ret;
std::vector<std::string> policyFileNames;
diff --git a/libs/dumputils/dump_utils.cpp b/libs/dumputils/dump_utils.cpp
index 3faf792..8b3c3ad 100644
--- a/libs/dumputils/dump_utils.cpp
+++ b/libs/dumputils/dump_utils.cpp
@@ -52,7 +52,6 @@
/* list of hal interface to dump containing process during native dumps */
static const char* hal_interfaces_to_dump[] {
- "android.hardware.audio@2.0::IDevicesFactory",
"android.hardware.audio@4.0::IDevicesFactory",
"android.hardware.audio@5.0::IDevicesFactory",
"android.hardware.audio@6.0::IDevicesFactory",
diff --git a/services/gpuservice/tests/unittests/Android.bp b/services/gpuservice/tests/unittests/Android.bp
index 940a26b..9606daa 100644
--- a/services/gpuservice/tests/unittests/Android.bp
+++ b/services/gpuservice/tests/unittests/Android.bp
@@ -20,6 +20,7 @@
},
srcs: [
"GpuMemTest.cpp",
+ "GpuMemTracerTest.cpp",
"GpuStatsTest.cpp",
],
shared_libs: [
@@ -29,14 +30,19 @@
"libcutils",
"libgfxstats",
"libgpumem",
+ "libgpumemtracer",
"libgraphicsenv",
"liblog",
+ "libprotobuf-cpp-lite",
+ "libprotoutil",
"libstatslog",
"libstatspull",
"libutils",
],
static_libs: [
"libgmock",
+ "libperfetto_client_experimental",
+ "perfetto_trace_protos",
],
require_root: true,
}
diff --git a/services/gpuservice/tests/unittests/GpuMemTracerTest.cpp b/services/gpuservice/tests/unittests/GpuMemTracerTest.cpp
new file mode 100644
index 0000000..d76f039
--- /dev/null
+++ b/services/gpuservice/tests/unittests/GpuMemTracerTest.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "gpuservice_unittest"
+
+#include <bpf/BpfMap.h>
+#include <gpumem/GpuMem.h>
+#include <gtest/gtest.h>
+#include <perfetto/trace/trace.pb.h>
+#include <tracing/GpuMemTracer.h>
+
+#include "TestableGpuMem.h"
+
+namespace android {
+
+constexpr uint32_t TEST_MAP_SIZE = 10;
+constexpr uint64_t TEST_GLOBAL_KEY = 0;
+constexpr uint32_t TEST_GLOBAL_PID = 0;
+constexpr uint64_t TEST_GLOBAL_VAL = 123;
+constexpr uint32_t TEST_GLOBAL_GPU_ID = 0;
+constexpr uint64_t TEST_PROC_KEY_1 = 1;
+constexpr uint32_t TEST_PROC_PID_1 = 1;
+constexpr uint64_t TEST_PROC_VAL_1 = 234;
+constexpr uint32_t TEST_PROC_1_GPU_ID = 0;
+constexpr uint64_t TEST_PROC_KEY_2 = 4294967298; // (1 << 32) + 2
+constexpr uint32_t TEST_PROC_PID_2 = 2;
+constexpr uint64_t TEST_PROC_VAL_2 = 345;
+constexpr uint32_t TEST_PROC_2_GPU_ID = 1;
+
+class GpuMemTracerTest : public testing::Test {
+public:
+ GpuMemTracerTest() {
+ const ::testing::TestInfo* const test_info =
+ ::testing::UnitTest::GetInstance()->current_test_info();
+ ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
+ }
+
+ ~GpuMemTracerTest() {
+ const ::testing::TestInfo* const test_info =
+ ::testing::UnitTest::GetInstance()->current_test_info();
+ ALOGD("**** Tearing down after %s.%s\n", test_info->test_case_name(), test_info->name());
+ }
+
+ void SetUp() override {
+ bpf::setrlimitForTest();
+
+ mGpuMem = std::make_shared<GpuMem>();
+ mGpuMemTracer = std::make_unique<GpuMemTracer>();
+ mGpuMemTracer->initializeForTest(mGpuMem);
+ mTestableGpuMem = TestableGpuMem(mGpuMem.get());
+
+ errno = 0;
+ mTestMap = bpf::BpfMap<uint64_t, uint64_t>(BPF_MAP_TYPE_HASH, TEST_MAP_SIZE,
+ BPF_F_NO_PREALLOC);
+
+ EXPECT_EQ(0, errno);
+ EXPECT_LE(0, mTestMap.getMap().get());
+ EXPECT_TRUE(mTestMap.isValid());
+ }
+
+ int getTracerThreadCount() { return mGpuMemTracer->tracerThreadCount; }
+
+ std::vector<perfetto::protos::TracePacket> readGpuMemTotalPacketsBlocking(
+ perfetto::TracingSession* tracingSession) {
+ std::vector<char> raw_trace = tracingSession->ReadTraceBlocking();
+ perfetto::protos::Trace trace;
+ trace.ParseFromArray(raw_trace.data(), int(raw_trace.size()));
+
+ std::vector<perfetto::protos::TracePacket> packets;
+ for (const auto& packet : trace.packet()) {
+ if (!packet.has_gpu_mem_total_event()) {
+ continue;
+ }
+ packets.emplace_back(packet);
+ }
+ return packets;
+ }
+
+ std::shared_ptr<GpuMem> mGpuMem;
+ TestableGpuMem mTestableGpuMem;
+ std::unique_ptr<GpuMemTracer> mGpuMemTracer;
+ bpf::BpfMap<uint64_t, uint64_t> mTestMap;
+};
+
+static constexpr uint64_t getSizeForPid(uint32_t pid) {
+ switch (pid) {
+ case TEST_GLOBAL_PID:
+ return TEST_GLOBAL_VAL;
+ case TEST_PROC_PID_1:
+ return TEST_PROC_VAL_1;
+ case TEST_PROC_PID_2:
+ return TEST_PROC_VAL_2;
+ }
+ return 0;
+}
+
+static constexpr uint32_t getGpuIdForPid(uint32_t pid) {
+ switch (pid) {
+ case TEST_GLOBAL_PID:
+ return TEST_GLOBAL_GPU_ID;
+ case TEST_PROC_PID_1:
+ return TEST_PROC_1_GPU_ID;
+ case TEST_PROC_PID_2:
+ return TEST_PROC_2_GPU_ID;
+ }
+ return 0;
+}
+
+TEST_F(GpuMemTracerTest, traceInitialCountersAfterGpuMemInitialize) {
+ ASSERT_RESULT_OK(mTestMap.writeValue(TEST_GLOBAL_KEY, TEST_GLOBAL_VAL, BPF_ANY));
+ ASSERT_RESULT_OK(mTestMap.writeValue(TEST_PROC_KEY_1, TEST_PROC_VAL_1, BPF_ANY));
+ ASSERT_RESULT_OK(mTestMap.writeValue(TEST_PROC_KEY_2, TEST_PROC_VAL_2, BPF_ANY));
+ mTestableGpuMem.setGpuMemTotalMap(mTestMap);
+ mTestableGpuMem.setInitialized();
+
+ // Only one tracer thread should exist for the test.
+ EXPECT_EQ(getTracerThreadCount(), 1);
+ auto tracingSession = mGpuMemTracer->getTracingSessionForTest();
+
+ tracingSession->StartBlocking();
+ // Sleep for a short time to let the tracer thread finish its work
+ sleep(1);
+ tracingSession->StopBlocking();
+
+ // The test tracer thread should have finished its execution by now.
+ EXPECT_EQ(getTracerThreadCount(), 0);
+
+ auto packets = readGpuMemTotalPacketsBlocking(tracingSession.get());
+ EXPECT_EQ(packets.size(), 3);
+
+ const auto& packet0 = packets[0];
+ ASSERT_TRUE(packet0.has_timestamp());
+ ASSERT_TRUE(packet0.has_gpu_mem_total_event());
+ const auto& gpuMemEvent0 = packet0.gpu_mem_total_event();
+ ASSERT_TRUE(gpuMemEvent0.has_pid());
+ const auto& pid0 = gpuMemEvent0.pid();
+ ASSERT_TRUE(gpuMemEvent0.has_size());
+ EXPECT_EQ(gpuMemEvent0.size(), getSizeForPid(pid0));
+ ASSERT_TRUE(gpuMemEvent0.has_gpu_id());
+ EXPECT_EQ(gpuMemEvent0.gpu_id(), getGpuIdForPid(pid0));
+
+ const auto& packet1 = packets[1];
+ ASSERT_TRUE(packet1.has_timestamp());
+ ASSERT_TRUE(packet1.has_gpu_mem_total_event());
+ const auto& gpuMemEvent1 = packet1.gpu_mem_total_event();
+ ASSERT_TRUE(gpuMemEvent1.has_pid());
+ const auto& pid1 = gpuMemEvent1.pid();
+ ASSERT_TRUE(gpuMemEvent1.has_size());
+ EXPECT_EQ(gpuMemEvent1.size(), getSizeForPid(pid1));
+ ASSERT_TRUE(gpuMemEvent1.has_gpu_id());
+ EXPECT_EQ(gpuMemEvent1.gpu_id(), getGpuIdForPid(pid1));
+
+ const auto& packet2 = packets[2];
+ ASSERT_TRUE(packet2.has_timestamp());
+ ASSERT_TRUE(packet2.has_gpu_mem_total_event());
+ const auto& gpuMemEvent2 = packet2.gpu_mem_total_event();
+ ASSERT_TRUE(gpuMemEvent2.has_pid());
+ const auto& pid2 = gpuMemEvent2.pid();
+ ASSERT_TRUE(gpuMemEvent2.has_size());
+ EXPECT_EQ(gpuMemEvent2.size(), getSizeForPid(pid2));
+ ASSERT_TRUE(gpuMemEvent2.has_gpu_id());
+ EXPECT_EQ(gpuMemEvent2.gpu_id(), getGpuIdForPid(pid2));
+}
+
+TEST_F(GpuMemTracerTest, noTracingWithoutGpuMemInitialize) {
+ // Only one tracer thread should exist for the test.
+ EXPECT_EQ(getTracerThreadCount(), 1);
+
+ auto tracingSession = mGpuMemTracer->getTracingSessionForTest();
+
+ tracingSession->StartBlocking();
+ // Sleep for a short time to let the tracer thread finish its work
+ sleep(1);
+ tracingSession->StopBlocking();
+
+ // The test tracer thread should have finished its execution by now.
+ EXPECT_EQ(getTracerThreadCount(), 0);
+
+ auto packets = readGpuMemTotalPacketsBlocking(tracingSession.get());
+ EXPECT_EQ(packets.size(), 0);
+}
+} // namespace android
diff --git a/services/gpuservice/tracing/GpuMemTracer.cpp b/services/gpuservice/tracing/GpuMemTracer.cpp
index 6366e1d..44a30ea 100644
--- a/services/gpuservice/tracing/GpuMemTracer.cpp
+++ b/services/gpuservice/tracing/GpuMemTracer.cpp
@@ -44,9 +44,35 @@
args.backends = perfetto::kSystemBackend;
perfetto::Tracing::Initialize(args);
registerDataSource();
- std::thread tracerThread(&GpuMemTracer::threadLoop, this);
+ std::thread tracerThread(&GpuMemTracer::threadLoop, this, true);
pthread_setname_np(tracerThread.native_handle(), "GpuMemTracerThread");
tracerThread.detach();
+ tracerThreadCount++;
+}
+
+void GpuMemTracer::initializeForTest(std::shared_ptr<GpuMem> gpuMem) {
+ mGpuMem = gpuMem;
+ perfetto::TracingInitArgs args;
+ args.backends = perfetto::kInProcessBackend;
+ perfetto::Tracing::Initialize(args);
+ registerDataSource();
+ std::thread tracerThread(&GpuMemTracer::threadLoop, this, false);
+ pthread_setname_np(tracerThread.native_handle(), "GpuMemTracerThreadForTest");
+ tracerThread.detach();
+ tracerThreadCount++;
+}
+
+// Each tracing session can be used for a single block of Start -> Stop.
+std::unique_ptr<perfetto::TracingSession> GpuMemTracer::getTracingSessionForTest() {
+ perfetto::TraceConfig cfg;
+ cfg.set_duration_ms(500);
+ cfg.add_buffers()->set_size_kb(1024);
+ auto* ds_cfg = cfg.add_data_sources()->mutable_config();
+ ds_cfg->set_name(GpuMemTracer::kGpuMemDataSource);
+
+ auto tracingSession = perfetto::Tracing::NewTrace(perfetto::kInProcessBackend);
+ tracingSession->Setup(cfg);
+ return tracingSession;
}
void GpuMemTracer::registerDataSource() {
@@ -55,8 +81,8 @@
GpuMemDataSource::Register(dsd);
}
-void GpuMemTracer::threadLoop() {
- while (true) {
+void GpuMemTracer::threadLoop(bool infiniteLoop) {
+ do {
{
std::unique_lock<std::mutex> lock(GpuMemTracer::sTraceMutex);
while (!sTraceStarted) {
@@ -68,7 +94,11 @@
std::lock_guard<std::mutex> lock(GpuMemTracer::sTraceMutex);
sTraceStarted = false;
}
- }
+ } while (infiniteLoop);
+
+ // The thread loop is exiting. Decrement tracerThreadCount to reflect the number of threads
+ // still active in the wait loop.
+ tracerThreadCount--;
}
void GpuMemTracer::traceInitialCounters() {
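As the comment above getTracingSessionForTest() notes, each in-process session supports exactly one Start -> Stop block. A minimal sketch of the intended call sequence, mirroring the unit test added above (error handling and the test fixture are omitted; everything shown uses only calls introduced in this change):

#include <memory>
#include <vector>
#include <unistd.h>

#include <gpumem/GpuMem.h>
#include <perfetto/trace/trace.pb.h>
#include <tracing/GpuMemTracer.h>

void collectGpuMemTotalsOnce(std::shared_ptr<android::GpuMem> gpuMem) {
    android::GpuMemTracer tracer;
    tracer.initializeForTest(gpuMem);  // registers the in-process data source

    auto session = tracer.getTracingSessionForTest();  // one Start -> Stop per session
    session->StartBlocking();
    sleep(1);  // give the detached tracer thread time to emit the initial counters
    session->StopBlocking();

    std::vector<char> raw = session->ReadTraceBlocking();
    perfetto::protos::Trace trace;
    trace.ParseFromArray(raw.data(), static_cast<int>(raw.size()));
    for (const auto& packet : trace.packet()) {
        if (!packet.has_gpu_mem_total_event()) continue;
        const auto& event = packet.gpu_mem_total_event();
        // event.pid(), event.size() and event.gpu_id() carry the per-process totals.
    }
}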
diff --git a/services/gpuservice/tracing/include/tracing/GpuMemTracer.h b/services/gpuservice/tracing/include/tracing/GpuMemTracer.h
index 40deb4c..ae871f1 100644
--- a/services/gpuservice/tracing/include/tracing/GpuMemTracer.h
+++ b/services/gpuservice/tracing/include/tracing/GpuMemTracer.h
@@ -20,6 +20,10 @@
#include <mutex>
+namespace perfetto::protos {
+class TracePacket;
+}
+
namespace android {
class GpuMem;
@@ -45,16 +49,37 @@
// perfetto::kInProcessBackend in tests.
void registerDataSource();
+ // TODO(b/175904796): Refactor gpuservice lib to include perfetto lib and move the test
+ // functions into the unittests.
+ // Functions used only for testing with the in-process backend. They require the static
+ // perfetto library to be linked in. If the test binary links its own copy of perfetto while
+ // libgpumemtracer.so links another, each copy maintains separate static state inside perfetto.
+ // Because that state is not shared, tracing sessions created in the unit test are not
+ // recognized by GpuMemTracer, so the test cannot call any of the perfetto functions on
+ // this class, which defeats the purpose of the unit test. To solve this, all tracing
+ // functionality is kept in this class, and the unit test only validates the resulting data.
+ // Sets up the perfetto in-process backend and calls into registerDataSource.
+ void initializeForTest(std::shared_ptr<GpuMem>);
+ // Creates a tracing session with in process backend, for testing.
+ std::unique_ptr<perfetto::TracingSession> getTracingSessionForTest();
+ // Read and filter the gpu memory packets from the created trace.
+ std::vector<perfetto::protos::TracePacket> readGpuMemTotalPacketsForTestBlocking(
+ perfetto::TracingSession* tracingSession);
+
static constexpr char kGpuMemDataSource[] = "android.gpu.memory";
static std::condition_variable sCondition;
static std::mutex sTraceMutex;
static bool sTraceStarted;
private:
- void traceInitialCounters();
- void threadLoop();
+ // Friend class for testing
+ friend class GpuMemTracerTest;
+ void threadLoop(bool infiniteLoop);
+ void traceInitialCounters();
std::shared_ptr<GpuMem> mGpuMem;
+ // Count of how many tracer threads are currently active. Useful for testing.
+ std::atomic<int32_t> tracerThreadCount = 0;
};
} // namespace android
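For context on the comment above: keeping the emit path inside GpuMemTracer means the tracer thread writes gpu_mem_total_event packets through the data source registered by registerDataSource(). A hypothetical sketch of that emit path follows; the GpuMemDataSource::Trace lambda, systemTime(), and the protozero setters are assumptions based on the perfetto client API, and the actual traceInitialCounters() body is not part of this change:

// Illustration only; names other than kGpuMemDataSource and registerDataSource()
// are assumed rather than taken from this diff.
GpuMemDataSource::Trace([&](GpuMemDataSource::TraceContext ctx) {
    auto packet = ctx.NewTracePacket();
    packet->set_timestamp(systemTime());
    auto* event = packet->set_gpu_mem_total_event();
    event->set_gpu_id(gpuId);
    event->set_pid(pid);
    event->set_size(size);
});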
diff --git a/services/surfaceflinger/BufferLayer.cpp b/services/surfaceflinger/BufferLayer.cpp
index f0b0200..df26a3d 100644
--- a/services/surfaceflinger/BufferLayer.cpp
+++ b/services/surfaceflinger/BufferLayer.cpp
@@ -176,7 +176,14 @@
if (!holes.isEmpty()) {
targetSettings.clearRegion.orSelf(holes);
}
- return std::nullopt;
+
+ if (mSidebandStream != nullptr) {
+ // For a SurfaceView backed by a TV sideband stream there is no activeBuffer
+ // in the BufferQueue, so we need to return the LayerSettings.
+ return result;
+ } else {
+ return std::nullopt;
+ }
}
bool blackOutLayer = (isProtected() && !targetSettings.supportsProtectedContent) ||
(isSecure() && !targetSettings.isSecure);
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
index 61f3fbb..708a5b8 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -78,8 +78,12 @@
if (!validate(timestamp)) {
// VSR could elect to ignore the incongruent timestamp or resetModel(). If ts is ignored,
- // don't insert this ts into mTimestamps ringbuffer.
- if (!mTimestamps.empty()) {
+ // don't insert this ts into mTimestamps ringbuffer. If we are still
+ // in the learning phase, we should just clear all timestamps and start
+ // over.
+ if (mTimestamps.size() < kMinimumSamplesForPrediction) {
+ clearTimestamps();
+ } else if (!mTimestamps.empty()) {
mKnownTimestamp =
std::max(timestamp, *std::max_element(mTimestamps.begin(), mTimestamps.end()));
} else {
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 261722d..b15663e 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1725,8 +1725,14 @@
// calculate the expected present time once and use the cached
// value throughout this frame to make sure all layers are
// seeing this same value.
- const nsecs_t lastExpectedPresentTime = mExpectedPresentTime.load();
- mExpectedPresentTime = expectedVSyncTime;
+ if (expectedVSyncTime >= frameStart) {
+ mExpectedPresentTime = expectedVSyncTime;
+ } else {
+ mExpectedPresentTime = mScheduler->getDispSyncExpectedPresentTime(frameStart);
+ }
+
+ const nsecs_t lastScheduledPresentTime = mScheduledPresentTime;
+ mScheduledPresentTime = expectedVSyncTime;
// When Backpressure propagation is enabled we want to give a small grace period
// for the present fence to fire instead of just giving up on this frame to handle cases
@@ -1756,7 +1762,7 @@
const TracedOrdinal<bool> frameMissed = {"PrevFrameMissed",
framePending ||
(previousPresentTime >= 0 &&
- (lastExpectedPresentTime <
+ (lastScheduledPresentTime <
previousPresentTime - frameMissedSlop))};
const TracedOrdinal<bool> hwcFrameMissed = {"PrevHwcFrameMissed",
mHadDeviceComposition && frameMissed};
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 61bd020..90ac856 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -1199,6 +1199,7 @@
std::unique_ptr<scheduler::RefreshRateStats> mRefreshRateStats;
std::atomic<nsecs_t> mExpectedPresentTime = 0;
+ nsecs_t mScheduledPresentTime = 0;
hal::Vsync mHWCVsyncPendingState = hal::Vsync::DISABLE;
/* ------------------------------------------------------------------------
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
index d4cd11d..5e5d51c 100644
--- a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -450,6 +450,20 @@
EXPECT_THAT(intercept, Eq(0));
}
+TEST_F(VSyncPredictorTest, InconsistentVsyncValueIsFlushedEventually) {
+ EXPECT_TRUE(tracker.addVsyncTimestamp(600));
+ EXPECT_TRUE(tracker.needsMoreSamples());
+
+ EXPECT_FALSE(tracker.addVsyncTimestamp(mNow += mPeriod));
+
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_TRUE(tracker.needsMoreSamples());
+ EXPECT_TRUE(tracker.addVsyncTimestamp(mNow += mPeriod));
+ }
+
+ EXPECT_FALSE(tracker.needsMoreSamples());
+}
+
} // namespace android::scheduler
// TODO(b/129481165): remove the #pragma below and fix conversion issues