Merge "Revert "Revert "libbinder_rs: Treat previously associated Binder as remote"""
diff --git a/libs/binder/Binder.cpp b/libs/binder/Binder.cpp
index f2d223d..d964d25 100644
--- a/libs/binder/Binder.cpp
+++ b/libs/binder/Binder.cpp
@@ -133,6 +133,7 @@
public:
// unlocked objects
bool mRequestingSid = false;
+ bool mInheritRt = false;
sp<IBinder> mExtension;
int mPolicy = SCHED_NORMAL;
int mPriority = 0;
@@ -327,6 +328,27 @@
return e->mPriority;
}
+bool BBinder::isInheritRt() {
+ Extras* e = mExtras.load(std::memory_order_acquire);
+
+ return e && e->mInheritRt;
+}
+
+void BBinder::setInheritRt(bool inheritRt) {
+ Extras* e = mExtras.load(std::memory_order_acquire);
+
+ if (!e) {
+ if (!inheritRt) {
+ return;
+ }
+
+ e = getOrCreateExtras();
+ if (!e) return; // out of memory
+ }
+
+ e->mInheritRt = inheritRt;
+}
+
pid_t BBinder::getDebugPid() {
return getpid();
}
diff --git a/libs/binder/BufferedTextOutput.cpp b/libs/binder/BufferedTextOutput.cpp
index 8cf6097..88c85bf 100644
--- a/libs/binder/BufferedTextOutput.cpp
+++ b/libs/binder/BufferedTextOutput.cpp
@@ -18,7 +18,6 @@
#include <binder/Debug.h>
#include <cutils/atomic.h>
-#include <cutils/threads.h>
#include <utils/Log.h>
#include <utils/RefBase.h>
#include <utils/Vector.h>
@@ -91,22 +90,6 @@
static pthread_mutex_t gMutex = PTHREAD_MUTEX_INITIALIZER;
-static thread_store_t tls;
-
-BufferedTextOutput::ThreadState* BufferedTextOutput::getThreadState()
-{
- ThreadState* ts = (ThreadState*) thread_store_get( &tls );
- if (ts) return ts;
- ts = new ThreadState;
- thread_store_set( &tls, ts, threadDestructor );
- return ts;
-}
-
-void BufferedTextOutput::threadDestructor(void *st)
-{
- delete ((ThreadState*)st);
-}
-
static volatile int32_t gSequence = 0;
static volatile int32_t gFreeBufferIndex = -1;
@@ -266,16 +249,14 @@
BufferedTextOutput::BufferState* BufferedTextOutput::getBuffer() const
{
if ((mFlags&MULTITHREADED) != 0) {
- ThreadState* ts = getThreadState();
- if (ts) {
- while (ts->states.size() <= (size_t)mIndex) ts->states.add(nullptr);
- BufferState* bs = ts->states[mIndex].get();
- if (bs != nullptr && bs->seq == mSeq) return bs;
-
- ts->states.editItemAt(mIndex) = new BufferState(mIndex);
- bs = ts->states[mIndex].get();
- if (bs != nullptr) return bs;
- }
+ thread_local ThreadState ts;
+ while (ts.states.size() <= (size_t)mIndex) ts.states.add(nullptr);
+ BufferState* bs = ts.states[mIndex].get();
+ if (bs != nullptr && bs->seq == mSeq) return bs;
+
+ ts.states.editItemAt(mIndex) = new BufferState(mIndex);
+ bs = ts.states[mIndex].get();
+ if (bs != nullptr) return bs;
}
return mGlobalState;
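
Note: the BufferedTextOutput change above drops the manual thread_store_t TLS plumbing in favor of a C++11 thread_local, which handles both lazy construction and per-thread destruction. A standalone sketch of the same pattern (illustrative names, not libbinder code):

    #include <cstdio>
    #include <thread>
    #include <vector>

    struct PerThreadState {
        std::vector<int> states;                   // per-thread scratch data (hypothetical)
        ~PerThreadState() { std::puts("state destroyed"); }
    };

    static PerThreadState& threadState() {
        thread_local PerThreadState ts;            // constructed on first use in each thread
        return ts;                                 // destroyed automatically at thread exit
    }

    int main() {
        std::thread t([] { threadState().states.push_back(1); });
        t.join();                                  // prints "state destroyed" for t's copy
        threadState().states.push_back(2);
        return 0;                                  // main thread's copy destroyed at process exit
    }
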
diff --git a/libs/binder/BufferedTextOutput.h b/libs/binder/BufferedTextOutput.h
index 1b27bb2..fdd532a 100644
--- a/libs/binder/BufferedTextOutput.h
+++ b/libs/binder/BufferedTextOutput.h
@@ -47,10 +47,7 @@
private:
struct BufferState;
struct ThreadState;
-
- static ThreadState*getThreadState();
- static void threadDestructor(void *st);
-
+
BufferState*getBuffer() const;
uint32_t mFlags;
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index a7d8df2..3330a2e 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -222,6 +222,9 @@
if (local->isRequestingSid()) {
obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
}
+ if (local->isInheritRt()) {
+ obj.flags |= FLAT_BINDER_FLAG_INHERIT_RT;
+ }
obj.hdr.type = BINDER_TYPE_BINDER;
obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
obj.cookie = reinterpret_cast<uintptr_t>(local);
diff --git a/libs/binder/include/binder/Binder.h b/libs/binder/include/binder/Binder.h
index d6da397..7079544 100644
--- a/libs/binder/include/binder/Binder.h
+++ b/libs/binder/include/binder/Binder.h
@@ -87,6 +87,11 @@
int getMinSchedulerPolicy();
int getMinSchedulerPriority();
+ // Whether realtime scheduling policies are inherited.
+ bool isInheritRt();
+ // This must be called before the object is sent to another process. Not thread safe.
+ void setInheritRt(bool inheritRt);
+
pid_t getDebugPid();
protected:
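
Note: the comments above state the contract for the new pair of methods: setInheritRt() only takes effect if it runs before the binder object is first sent to another process. A hypothetical service-side usage sketch (service name and class are illustrative, linkage against libbinder assumed):

    #include <binder/Binder.h>
    #include <binder/IPCThreadState.h>
    #include <binder/IServiceManager.h>
    #include <utils/String16.h>

    using namespace android;

    class MyService : public BBinder {};           // hypothetical local service

    int main() {
        sp<MyService> service = new MyService();
        service->setInheritRt(true);               // must precede sending the binder elsewhere
        defaultServiceManager()->addService(String16("my.hypothetical.service"), service);
        IPCThreadState::self()->joinThreadPool();  // serve incoming transactions
        return 0;
    }
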
diff --git a/libs/binder/tests/binderLibTest.cpp b/libs/binder/tests/binderLibTest.cpp
index ad4729d..0f7d159 100644
--- a/libs/binder/tests/binderLibTest.cpp
+++ b/libs/binder/tests/binderLibTest.cpp
@@ -30,6 +30,7 @@
#include <binder/IServiceManager.h>
#include <private/binder/binder_module.h>
+#include <linux/sched.h>
#include <sys/epoll.h>
#include <sys/prctl.h>
@@ -53,6 +54,7 @@
static constexpr int kSchedPolicy = SCHED_RR;
static constexpr int kSchedPriority = 7;
+static constexpr int kSchedPriorityMore = 8;
static String16 binderLibTestServiceName = String16("test.binderLib");
@@ -1088,6 +1090,25 @@
EXPECT_EQ(kSchedPriority, priority);
}
+TEST_F(BinderLibTest, InheritRt) {
+ sp<IBinder> server = addServer();
+ ASSERT_TRUE(server != nullptr);
+
+ const struct sched_param param {
+ .sched_priority = kSchedPriorityMore,
+ };
+ EXPECT_EQ(0, sched_setscheduler(getpid(), SCHED_RR, &param));
+
+ Parcel data, reply;
+ status_t ret = server->transact(BINDER_LIB_TEST_GET_SCHEDULING_POLICY, data, &reply);
+ EXPECT_EQ(NO_ERROR, ret);
+
+ int policy = reply.readInt32();
+ int priority = reply.readInt32();
+
+ EXPECT_EQ(kSchedPolicy, policy & (~SCHED_RESET_ON_FORK));
+ EXPECT_EQ(kSchedPriorityMore, priority);
+}
TEST_F(BinderLibTest, VectorSent) {
Parcel data, reply;
@@ -1460,6 +1481,8 @@
testService->setMinSchedulerPolicy(kSchedPolicy, kSchedPriority);
+ testService->setInheritRt(true);
+
/*
* Normally would also contain functionality as well, but we are only
* testing the extension mechanism.
diff --git a/libs/binder/tests/fuzzers/BinderFuzzFunctions.h b/libs/binder/tests/fuzzers/BinderFuzzFunctions.h
index 9ac65bb..69f1b9d 100644
--- a/libs/binder/tests/fuzzers/BinderFuzzFunctions.h
+++ b/libs/binder/tests/fuzzers/BinderFuzzFunctions.h
@@ -37,8 +37,8 @@
bbinder->isRequestingSid();
},
[](FuzzedDataProvider* fdp, const sp<BBinder>& bbinder) -> void {
- bool request_sid = fdp->ConsumeBool();
- bbinder->setRequestingSid(request_sid);
+ bool requestSid = fdp->ConsumeBool();
+ bbinder->setRequestingSid(requestSid);
},
[](FuzzedDataProvider*, const sp<BBinder>& bbinder) -> void {
bbinder->getExtension();
@@ -63,6 +63,13 @@
[](FuzzedDataProvider*, const sp<BBinder>& bbinder) -> void {
bbinder->getMinSchedulerPriority();
},
+ [](FuzzedDataProvider* fdp, const sp<BBinder>& bbinder) -> void {
+ bool inheritRt = fdp->ConsumeBool();
+ bbinder->setInheritRt(inheritRt);
+ },
+ [](FuzzedDataProvider*, const sp<BBinder>& bbinder) -> void {
+ bbinder->isInheritRt();
+ },
[](FuzzedDataProvider*, const sp<BBinder>& bbinder) -> void {
bbinder->getDebugPid();
}};
diff --git a/libs/cputimeinstate/Android.bp b/libs/cputimeinstate/Android.bp
index b1943a4..e3cd085 100644
--- a/libs/cputimeinstate/Android.bp
+++ b/libs/cputimeinstate/Android.bp
@@ -33,5 +33,6 @@
"-Wall",
"-Wextra",
],
+ require_root: true,
}
diff --git a/libs/cputimeinstate/cputimeinstate.cpp b/libs/cputimeinstate/cputimeinstate.cpp
index 6058430..4209dc5 100644
--- a/libs/cputimeinstate/cputimeinstate.cpp
+++ b/libs/cputimeinstate/cputimeinstate.cpp
@@ -59,6 +59,7 @@
static unique_fd gTisMapFd;
static unique_fd gConcurrentMapFd;
static unique_fd gUidLastUpdateMapFd;
+static unique_fd gPidTisMapFd;
static std::optional<std::vector<uint32_t>> readNumbersFromFile(const std::string &path) {
std::string data;
@@ -139,6 +140,12 @@
unique_fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_last_update_map")};
if (gUidLastUpdateMapFd < 0) return false;
+ gPidTisMapFd = unique_fd{mapRetrieveRO(BPF_FS_PATH "map_time_in_state_pid_time_in_state_map")};
+ if (gPidTisMapFd < 0) return false;
+
+ unique_fd trackedPidMapFd(mapRetrieveWO(BPF_FS_PATH "map_time_in_state_pid_tracked_map"));
+ if (trackedPidMapFd < 0) return false;
+
gInitialized = true;
return true;
}
@@ -222,7 +229,8 @@
}
gTracking = attachTracepointProgram("sched", "sched_switch") &&
- attachTracepointProgram("power", "cpu_frequency");
+ attachTracepointProgram("power", "cpu_frequency") &&
+ attachTracepointProgram("sched", "sched_process_free");
return gTracking;
}
@@ -502,5 +510,106 @@
return true;
}
+bool startTrackingProcessCpuTimes(pid_t pid) {
+ if (!gInitialized && !initGlobals()) return false;
+
+ unique_fd trackedPidHashMapFd(
+ mapRetrieveWO(BPF_FS_PATH "map_time_in_state_pid_tracked_hash_map"));
+ if (trackedPidHashMapFd < 0) return false;
+
+ unique_fd trackedPidMapFd(mapRetrieveWO(BPF_FS_PATH "map_time_in_state_pid_tracked_map"));
+ if (trackedPidMapFd < 0) return false;
+
+ for (uint32_t index = 0; index < MAX_TRACKED_PIDS; index++) {
+ // Find first available [index, pid] entry in the pid_tracked_hash_map map
+ if (writeToMapEntry(trackedPidHashMapFd, &index, &pid, BPF_NOEXIST) != 0) {
+ if (errno != EEXIST) {
+ return false;
+ }
+ continue; // This index is already taken
+ }
+
+ tracked_pid_t tracked_pid = {.pid = pid, .state = TRACKED_PID_STATE_ACTIVE};
+ if (writeToMapEntry(trackedPidMapFd, &index, &tracked_pid, BPF_ANY) != 0) {
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+// Marks the specified task identified by its PID (aka TID) for CPU time-in-state tracking
+// aggregated with other tasks sharing the same TGID and aggregation key.
+bool startAggregatingTaskCpuTimes(pid_t pid, uint16_t aggregationKey) {
+ if (!gInitialized && !initGlobals()) return false;
+
+ unique_fd taskAggregationMapFd(
+ mapRetrieveWO(BPF_FS_PATH "map_time_in_state_pid_task_aggregation_map"));
+ if (taskAggregationMapFd < 0) return false;
+
+ return writeToMapEntry(taskAggregationMapFd, &pid, &aggregationKey, BPF_ANY) == 0;
+}
+
+// Retrieves the times in ns that each thread spent running at each CPU freq, aggregated by
+// aggregation key.
+// Return contains no value on error, otherwise it contains a map from aggregation keys
+// to vectors of vectors using the format:
+// { aggKey0 -> [[t0_0_0, t0_0_1, ...], [t0_1_0, t0_1_1, ...], ...],
+// aggKey1 -> [[t1_0_0, t1_0_1, ...], [t1_1_0, t1_1_1, ...], ...], ... }
+// where ti_j_k is the ns that tasks with aggregation key i spent running on the jth cluster at the cluster's kth lowest freq.
+std::optional<std::unordered_map<uint16_t, std::vector<std::vector<uint64_t>>>>
+getAggregatedTaskCpuFreqTimes(pid_t tgid, const std::vector<uint16_t> &aggregationKeys) {
+ if (!gInitialized && !initGlobals()) return {};
+
+ uint32_t maxFreqCount = 0;
+ std::vector<std::vector<uint64_t>> mapFormat;
+ for (const auto &freqList : gPolicyFreqs) {
+ if (freqList.size() > maxFreqCount) maxFreqCount = freqList.size();
+ mapFormat.emplace_back(freqList.size(), 0);
+ }
+
+ bool dataCollected = false;
+ std::unordered_map<uint16_t, std::vector<std::vector<uint64_t>>> map;
+ std::vector<tis_val_t> vals(gNCpus);
+ for (uint16_t aggregationKey : aggregationKeys) {
+ map.emplace(aggregationKey, mapFormat);
+
+ aggregated_task_tis_key_t key{.tgid = tgid, .aggregation_key = aggregationKey};
+ for (key.bucket = 0; key.bucket <= (maxFreqCount - 1) / FREQS_PER_ENTRY; ++key.bucket) {
+ if (findMapEntry(gPidTisMapFd, &key, vals.data()) != 0) {
+ if (errno != ENOENT) {
+ return {};
+ }
+ continue;
+ } else {
+ dataCollected = true;
+ }
+
+ // Combine data by aggregating time-in-state data grouped by CPU cluster aka policy.
+ uint32_t offset = key.bucket * FREQS_PER_ENTRY;
+ uint32_t nextOffset = offset + FREQS_PER_ENTRY;
+ for (uint32_t j = 0; j < gNPolicies; ++j) {
+ if (offset >= gPolicyFreqs[j].size()) continue;
+ auto begin = map[key.aggregation_key][j].begin() + offset;
+ auto end = nextOffset < gPolicyFreqs[j].size() ? begin + FREQS_PER_ENTRY
+ : map[key.aggregation_key][j].end();
+ for (const auto &cpu : gPolicyCpus[j]) {
+ std::transform(begin, end, std::begin(vals[cpu].ar), begin,
+ std::plus<uint64_t>());
+ }
+ }
+ }
+ }
+
+ if (!dataCollected) {
+ // Check if eBPF is supported on this device. If it is, gTisMap should not be empty.
+ time_key_t key;
+ if (getFirstMapKey(gTisMapFd, &key) != 0) {
+ return {};
+ }
+ }
+ return map;
+}
+
} // namespace bpf
} // namespace android
diff --git a/libs/cputimeinstate/cputimeinstate.h b/libs/cputimeinstate/cputimeinstate.h
index b7600f5..87a328a 100644
--- a/libs/cputimeinstate/cputimeinstate.h
+++ b/libs/cputimeinstate/cputimeinstate.h
@@ -41,5 +41,10 @@
getUidsUpdatedConcurrentTimes(uint64_t *lastUpdate);
bool clearUidTimes(unsigned int uid);
+bool startTrackingProcessCpuTimes(pid_t pid);
+bool startAggregatingTaskCpuTimes(pid_t pid, uint16_t aggregationKey);
+std::optional<std::unordered_map<uint16_t, std::vector<std::vector<uint64_t>>>>
+getAggregatedTaskCpuFreqTimes(pid_t pid, const std::vector<uint16_t> &aggregationKeys);
+
} // namespace bpf
} // namespace android
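
Note: these three declarations are the new public surface: mark a process for tracking, tag individual tasks with a 16-bit aggregation key, then read back per-key time-in-state. A condensed usage sketch under the same assumptions as the test below (root, time_in_state eBPF maps loaded; header path and gettid() availability assumed):

    #include <unistd.h>

    #include <cstdint>
    #include <cstdio>

    #include <cputimeinstate.h>                    // header path as used by clients (assumed)

    int main() {
        pid_t tgid = getpid();
        if (!android::bpf::startTrackingProcessCpuTimes(tgid)) return 1;

        constexpr uint16_t kKey = 42;              // arbitrary aggregation key
        android::bpf::startAggregatingTaskCpuTimes(gettid(), kKey);

        // ... run the workload of interest ...

        auto times = android::bpf::getAggregatedTaskCpuFreqTimes(tgid, {kKey});
        if (!times) return 1;
        for (const auto &[key, perPolicy] : *times) {  // one vector of freq buckets per policy
            uint64_t total = 0;
            for (const auto &freqs : perPolicy)
                for (uint64_t ns : freqs) total += ns;
            std::printf("key %u: %llu ns\n", (unsigned)key, (unsigned long long)total);
        }
        return 0;
    }
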
diff --git a/libs/cputimeinstate/testtimeinstate.cpp b/libs/cputimeinstate/testtimeinstate.cpp
index 0d5f412..519689b 100644
--- a/libs/cputimeinstate/testtimeinstate.cpp
+++ b/libs/cputimeinstate/testtimeinstate.cpp
@@ -19,6 +19,8 @@
#include <sys/sysinfo.h>
+#include <pthread.h>
+#include <semaphore.h>
#include <numeric>
#include <unordered_map>
#include <vector>
@@ -504,5 +506,85 @@
for (size_t i = 0; i < freqs->size(); ++i) EXPECT_EQ((*freqs)[i].size(), (*times)[i].size());
}
+uint64_t timeNanos() {
+ struct timespec spec;
+ clock_gettime(CLOCK_MONOTONIC, &spec);
+ return spec.tv_sec * 1000000000 + spec.tv_nsec;
+}
+
+// Keeps CPU busy with some number crunching
+void useCpu() {
+ long sum = 0;
+ for (int i = 0; i < 100000; i++) {
+ sum *= i;
+ }
+}
+
+sem_t pingsem, pongsem;
+
+void *testThread(void *) {
+ for (int i = 0; i < 10; i++) {
+ sem_wait(&pingsem);
+ useCpu();
+ sem_post(&pongsem);
+ }
+ return nullptr;
+}
+
+TEST(TimeInStateTest, GetAggregatedTaskCpuFreqTimes) {
+ uint64_t startTimeNs = timeNanos();
+
+ sem_init(&pingsem, 0, 1);
+ sem_init(&pongsem, 0, 0);
+
+ pthread_t thread;
+ ASSERT_EQ(pthread_create(&thread, NULL, &testThread, NULL), 0);
+
+ // This process may have been running for some time, so when we start tracking
+ // CPU time, the very first switch may include the accumulated time.
+ // Yield the remainder of this timeslice to the newly created thread.
+ sem_wait(&pongsem);
+ sem_post(&pingsem);
+
+ pid_t tgid = getpid();
+ startTrackingProcessCpuTimes(tgid);
+
+ pid_t tid = pthread_gettid_np(thread);
+ startAggregatingTaskCpuTimes(tid, 42);
+
+ // Play ping-pong with the other thread to ensure that both threads get
+ // some CPU time.
+ for (int i = 0; i < 9; i++) {
+ sem_wait(&pongsem);
+ useCpu();
+ sem_post(&pingsem);
+ }
+
+ pthread_join(thread, NULL);
+
+ std::optional<std::unordered_map<uint16_t, std::vector<std::vector<uint64_t>>>> optionalMap =
+ getAggregatedTaskCpuFreqTimes(tgid, {0, 42});
+ ASSERT_TRUE(optionalMap);
+
+ std::unordered_map<uint16_t, std::vector<std::vector<uint64_t>>> map = *optionalMap;
+ ASSERT_EQ(map.size(), 2u);
+
+ uint64_t testDurationNs = timeNanos() - startTimeNs;
+ for (auto pair : map) {
+ uint16_t aggregationKey = pair.first;
+ ASSERT_TRUE(aggregationKey == 0 || aggregationKey == 42);
+
+ std::vector<std::vector<uint64_t>> timesInState = pair.second;
+ uint64_t totalCpuTime = 0;
+ for (size_t i = 0; i < timesInState.size(); i++) {
+ for (size_t j = 0; j < timesInState[i].size(); j++) {
+ totalCpuTime += timesInState[i][j];
+ }
+ }
+ ASSERT_GT(totalCpuTime, 0ul);
+ ASSERT_LE(totalCpuTime, testDurationNs);
+ }
+}
+
} // namespace bpf
} // namespace android