Merge "Make PersistableBundle a stable parcelable."
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 36a53bb..db297a0 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -13,10 +13,12 @@
libs/renderengine/
libs/ui/
libs/vr/
+ opengl/libs/
services/bufferhub/
services/inputflinger/
services/surfaceflinger/
services/vr/
+ vulkan/
[Hook Scripts]
owners_hook = ${REPO_ROOT}/frameworks/base/tools/aosp/aosp_sha.sh ${PREUPLOAD_COMMIT} "OWNERS$"
diff --git a/cmds/atrace/atrace.cpp b/cmds/atrace/atrace.cpp
index 5186ad3..cf75bba 100644
--- a/cmds/atrace/atrace.cpp
+++ b/cmds/atrace/atrace.cpp
@@ -854,7 +854,6 @@
tags |= c.tags;
}
}
- ok &= setTagsProperty(tags);
bool coreServicesTagEnabled = false;
for (size_t i = 0; i < arraysize(k_categories); i++) {
@@ -876,9 +875,11 @@
packageList += android::base::GetProperty(k_coreServicesProp, "");
}
ok &= setAppCmdlineProperty(&packageList[0]);
+ ok &= setTagsProperty(tags);
+#if !ATRACE_SHMEM
ok &= pokeBinderServices();
pokeHalServices();
-
+#endif
if (g_tracePdx) {
ok &= ServiceUtility::PokeServices();
}
diff --git a/cmds/idlcli/Android.bp b/cmds/idlcli/Android.bp
index 08a31c1..402767a 100644
--- a/cmds/idlcli/Android.bp
+++ b/cmds/idlcli/Android.bp
@@ -15,6 +15,7 @@
cc_defaults {
name: "idlcli-defaults",
shared_libs: [
+ "android.hardware.vibrator-ndk_platform",
"android.hardware.vibrator@1.0",
"android.hardware.vibrator@1.1",
"android.hardware.vibrator@1.2",
@@ -24,7 +25,6 @@
"libhidlbase",
"liblog",
"libutils",
- "vintf-vibrator-ndk_platform",
],
cflags: [
"-DLOG_TAG=\"idlcli\"",
diff --git a/cmds/installd/InstalldNativeService.cpp b/cmds/installd/InstalldNativeService.cpp
index 737c6c9..0fde31a 100644
--- a/cmds/installd/InstalldNativeService.cpp
+++ b/cmds/installd/InstalldNativeService.cpp
@@ -103,6 +103,9 @@
static constexpr const char* kPropApkVerityMode = "ro.apk_verity.mode";
static constexpr const char* kFuseProp = "persist.sys.fuse";
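+// Prefixes of the storage mounts recorded in mStorageMounts on sdcardfs and FUSE devices.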
+static constexpr const char* kMntSdcardfs = "/mnt/runtime/default/";
+static constexpr const char* kMntFuse = "/mnt/pass_through/0/";
+
namespace {
constexpr const char* kDump = "android.permission.DUMP";
@@ -626,10 +629,8 @@
if (delete_dir_contents(path, true) != 0) {
res = error("Failed to delete contents of " + path);
}
- path = StringPrintf("%s/Android/obb/%s", extPath.c_str(), pkgname);
- if (delete_dir_contents(path, true) != 0) {
- res = error("Failed to delete contents of " + path);
- }
+ // Note that we explicitly don't delete OBBs - those are only removed on
+ // app uninstall.
}
}
}
@@ -2799,15 +2800,13 @@
std::getline(in, ignored);
if (android::base::GetBoolProperty(kFuseProp, false)) {
- // TODO(b/146139106): Use sdcardfs mounts on devices running sdcardfs so we don't bypass
- // it's VFS cache
- if (target.compare(0, 17, "/mnt/pass_through") == 0) {
+ if (target.find(kMntFuse) == 0) {
LOG(DEBUG) << "Found storage mount " << source << " at " << target;
mStorageMounts[source] = target;
}
} else {
#if !BYPASS_SDCARDFS
- if (target.compare(0, 21, "/mnt/runtime/default/") == 0) {
+ if (target.find(kMntSdcardfs) == 0) {
LOG(DEBUG) << "Found storage mount " << source << " at " << target;
mStorageMounts[source] = target;
}
@@ -2822,17 +2821,6 @@
std::lock_guard<std::recursive_mutex> lock(mMountsLock);
const char* uuid_ = uuid ? uuid->c_str() : nullptr;
auto path = StringPrintf("%s/media", create_data_path(uuid_).c_str());
- if (android::base::GetBoolProperty(kFuseProp, false)) {
- // TODO(b/146139106): This is only safe on devices not running sdcardfs where there is no
- // risk of bypassing the sdcardfs VFS cache
-
- // Always use the lower filesystem path on FUSE enabled devices not running sdcardfs
- // The upper filesystem path, /mnt/pass_through/<userid>/<vol>/ which was a bind mount
- // to the lower filesytem may have been unmounted already when a user is
- // removed and the path will now be pointing to a tmpfs without content
- return StringPrintf("%s/%u", path.c_str(), userid);
- }
-
auto resolved = mStorageMounts[path];
if (resolved.empty()) {
LOG(WARNING) << "Failed to find storage mount for " << path;
diff --git a/cmds/installd/QuotaUtils.cpp b/cmds/installd/QuotaUtils.cpp
index b238dd3..f2abf3a 100644
--- a/cmds/installd/QuotaUtils.cpp
+++ b/cmds/installd/QuotaUtils.cpp
@@ -97,6 +97,26 @@
}
}
+int64_t GetOccupiedSpaceForProjectId(const std::string& uuid, int projectId) {
+ const std::string device = FindQuotaDeviceForUuid(uuid);
+ if (device == "") {
+ return -1;
+ }
+ struct dqblk dq;
+ if (quotactl(QCMD(Q_GETQUOTA, PRJQUOTA), device.c_str(), projectId,
+ reinterpret_cast<char*>(&dq)) != 0) {
+ if (errno != ESRCH) {
+ PLOG(ERROR) << "Failed to quotactl " << device << " for Project ID " << projectId;
+ }
+ return -1;
+ } else {
+#if MEASURE_DEBUG
+ LOG(DEBUG) << "quotactl() for Project ID " << projectId << " " << dq.dqb_curspace;
+#endif
+ return dq.dqb_curspace;
+ }
+}
+
int64_t GetOccupiedSpaceForGid(const std::string& uuid, gid_t gid) {
const std::string device = FindQuotaDeviceForUuid(uuid);
if (device == "") {
diff --git a/cmds/installd/QuotaUtils.h b/cmds/installd/QuotaUtils.h
index 9ad170f..96aca04 100644
--- a/cmds/installd/QuotaUtils.h
+++ b/cmds/installd/QuotaUtils.h
@@ -35,6 +35,8 @@
/* Get the current occupied space in bytes for a gid or -1 if fails */
int64_t GetOccupiedSpaceForGid(const std::string& uuid, gid_t gid);
+/* Get the current occupied space in bytes for a project id or -1 if fails */
+int64_t GetOccupiedSpaceForProjectId(const std::string& uuid, int projectId);
} // namespace installd
} // namespace android
diff --git a/libs/adbd_auth/adbd_auth.cpp b/libs/adbd_auth/adbd_auth.cpp
index 6479109..a9c2311 100644
--- a/libs/adbd_auth/adbd_auth.cpp
+++ b/libs/adbd_auth/adbd_auth.cpp
@@ -178,6 +178,10 @@
this->callbacks_.key_authorized(arg, id);
this->dispatched_prompt_ = std::nullopt;
+
+ // We need to dispatch pending prompts here upon success as well,
+ // since we might have multiple queued prompts.
+ DispatchPendingPrompt();
} else if (packet[0] == 'N' && packet[1] == 'O') {
CHECK_EQ(2UL, packet.length());
// TODO: Do we want a callback if the key is denied?
diff --git a/libs/android_runtime_lazy/android_runtime_lazy.cpp b/libs/android_runtime_lazy/android_runtime_lazy.cpp
index 98d8e8a..8062be6 100644
--- a/libs/android_runtime_lazy/android_runtime_lazy.cpp
+++ b/libs/android_runtime_lazy/android_runtime_lazy.cpp
@@ -15,6 +15,7 @@
*/
#define LOG_TAG "ANDROID_RUNTIME_LAZY"
#include "android_runtime/AndroidRuntime.h"
+#include "android_os_Parcel.h"
#include "android_util_Binder.h"
#include <dlfcn.h>
@@ -28,12 +29,18 @@
std::once_flag loadFlag;
typedef JNIEnv* (*getJNIEnv_t)();
+
+// android_util_Binder.h
typedef sp<IBinder> (*ibinderForJavaObject_t)(JNIEnv* env, jobject obj);
typedef jobject (*javaObjectForIBinder_t)(JNIEnv* env, const sp<IBinder>& val);
+// android_os_Parcel.h
+typedef Parcel* (*parcelForJavaObject_t)(JNIEnv* env, jobject obj);
+
getJNIEnv_t _getJNIEnv;
ibinderForJavaObject_t _ibinderForJavaObject;
javaObjectForIBinder_t _javaObjectForIBinder;
+parcelForJavaObject_t _parcelForJavaObject;
void load() {
std::call_once(loadFlag, []() {
@@ -64,6 +71,13 @@
ALOGW("Could not find javaObjectForIBinder.");
// no return
}
+
+ _parcelForJavaObject = reinterpret_cast<parcelForJavaObject_t>(
+ dlsym(handle, "_ZN7android19parcelForJavaObjectEP7_JNIEnvP8_jobject"));
+ if (_parcelForJavaObject == nullptr) {
+ ALOGW("Could not find parcelForJavaObject.");
+ // no return
+ }
});
}
@@ -95,4 +109,12 @@
return _javaObjectForIBinder(env, val);
}
+Parcel* parcelForJavaObject(JNIEnv* env, jobject obj) {
+ load();
+ if (_parcelForJavaObject == nullptr) {
+ return nullptr;
+ }
+ return _parcelForJavaObject(env, obj);
+}
+
} // namespace android
diff --git a/libs/android_runtime_lazy/include/android_os_Parcel.h b/libs/android_runtime_lazy/include/android_os_Parcel.h
new file mode 100644
index 0000000..19b094d
--- /dev/null
+++ b/libs/android_runtime_lazy/include/android_os_Parcel.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <binder/Parcel.h>
+#include "jni.h"
+
+namespace android {
+
+// This file intentionally has the same name as the file in frameworks/base/core/jni/
+// so that clients can use these exported functions in the same way as the
+// originals.
+
+Parcel* parcelForJavaObject(JNIEnv* env, jobject obj);
+
+} // namespace android
diff --git a/libs/binder/ndk/Android.bp b/libs/binder/ndk/Android.bp
index c0ea6d7..e66e425 100644
--- a/libs/binder/ndk/Android.bp
+++ b/libs/binder/ndk/Android.bp
@@ -50,6 +50,7 @@
"ibinder.cpp",
"ibinder_jni.cpp",
"parcel.cpp",
+ "parcel_jni.cpp",
"process.cpp",
"stability.cpp",
"status.cpp",
diff --git a/libs/binder/ndk/include_ndk/android/binder_ibinder_jni.h b/libs/binder/ndk/include_ndk/android/binder_ibinder_jni.h
index be3029c..cd1ff1f 100644
--- a/libs/binder/ndk/include_ndk/android/binder_ibinder_jni.h
+++ b/libs/binder/ndk/include_ndk/android/binder_ibinder_jni.h
@@ -36,13 +36,13 @@
/**
* Converts an android.os.IBinder object into an AIBinder* object.
*
- * If either env or the binder is null, null is returned. If this binder object was originally an
+ * If the binder is null, null is returned. If this binder object was originally an
* AIBinder object, the original object is returned. The returned object has one refcount
* associated with it, and so this should be accompanied with an AIBinder_decStrong call.
*
* Available since API level 29.
*
- * \param env Java environment.
+ * \param env Java environment. Must not be null.
* \param binder android.os.IBinder java object.
*
* \return an AIBinder object representing the Java binder object. If either parameter is null, or
@@ -54,12 +54,12 @@
/**
* Converts an AIBinder* object into an android.os.IBinder object.
*
- * If either env or the binder is null, null is returned. If this binder object was originally an
- * IBinder object, the original java object will be returned.
+ * If the binder is null, null is returned. If this binder object was originally an IBinder object,
+ * the original java object will be returned.
*
* Available since API level 29.
*
- * \param env Java environment.
+ * \param env Java environment. Must not be null.
* \param binder the object to convert.
*
* \return an android.os.IBinder object or null if the parameters were null.
diff --git a/libs/binder/ndk/include_ndk/android/binder_parcel_jni.h b/libs/binder/ndk/include_ndk/android/binder_parcel_jni.h
new file mode 100644
index 0000000..65e1704
--- /dev/null
+++ b/libs/binder/ndk/include_ndk/android/binder_parcel_jni.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup NdkBinder
+ * @{
+ */
+
+/**
+ * @file binder_parcel_jni.h
+ * @brief Conversions between AParcel and android.os.Parcel
+ */
+
+#pragma once
+
+#include <android/binder_parcel.h>
+
+#include <jni.h>
+
+__BEGIN_DECLS
+#if __ANDROID_API__ >= 30
+
+/**
+ * Converts an android.os.Parcel object into an AParcel* object.
+ *
+ * If the parcel is null, null is returned.
+ *
+ * Available since API level 30.
+ *
+ * \param env Java environment. Must not be null.
+ * \param parcel android.os.Parcel java object.
+ *
+ * \return an AParcel object representing the Java parcel object. If either parameter is null, this
+ * will return null. This must be deleted with AParcel_delete. This does not take ownership of the
+ * jobject and is only good for as long as the jobject is alive.
+ */
+__attribute__((warn_unused_result)) AParcel* AParcel_fromJavaParcel(JNIEnv* env, jobject parcel)
+ __INTRODUCED_IN(30);
+
+#endif //__ANDROID_API__ >= 30
+__END_DECLS
+
+/** @} */
diff --git a/libs/binder/ndk/libbinder_ndk.map.txt b/libs/binder/ndk/libbinder_ndk.map.txt
index 71d8103..f3158d7 100644
--- a/libs/binder/ndk/libbinder_ndk.map.txt
+++ b/libs/binder/ndk/libbinder_ndk.map.txt
@@ -105,6 +105,7 @@
AIBinder_setExtension;
AStatus_getDescription;
AStatus_deleteDescription;
+ AParcel_fromJavaParcel;
AIBinder_markSystemStability; # apex
AIBinder_markVendorStability; # llndk
diff --git a/libs/binder/ndk/parcel_jni.cpp b/libs/binder/ndk/parcel_jni.cpp
new file mode 100644
index 0000000..53b2d7c
--- /dev/null
+++ b/libs/binder/ndk/parcel_jni.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/binder_parcel_jni.h>
+#include "parcel_internal.h"
+
+#include <android_os_Parcel.h>
+
+using ::android::Parcel;
+using ::android::parcelForJavaObject;
+
+AParcel* AParcel_fromJavaParcel(JNIEnv* env, jobject jparcel) {
+    if (jparcel == nullptr) {
+        return nullptr;
+    }
+
+    Parcel* parcel = parcelForJavaObject(env, jparcel);
+
+ if (parcel == nullptr) {
+ return nullptr;
+ }
+
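+    // shouldOwn is false: the Java object keeps ownership of the Parcel, so AParcel_delete
+    // frees only the wrapper.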
+ return new AParcel(nullptr /*binder*/, parcel, false /*shouldOwn*/);
+}
diff --git a/libs/cputimeinstate/Android.bp b/libs/cputimeinstate/Android.bp
index 28cb138..b1943a4 100644
--- a/libs/cputimeinstate/Android.bp
+++ b/libs/cputimeinstate/Android.bp
@@ -8,19 +8,26 @@
"liblog",
"libnetdutils"
],
+ header_libs: ["bpf_prog_headers"],
cflags: [
"-Werror",
"-Wall",
"-Wextra",
],
+ export_include_dirs: ["."],
}
cc_test {
name: "libtimeinstate_test",
srcs: ["testtimeinstate.cpp"],
shared_libs: [
+ "libbase",
+ "libbpf",
+ "libbpf_android",
"libtimeinstate",
+ "libnetdutils",
],
+ header_libs: ["bpf_prog_headers"],
cflags: [
"-Werror",
"-Wall",
diff --git a/libs/cputimeinstate/cputimeinstate.cpp b/libs/cputimeinstate/cputimeinstate.cpp
index 5fd4a95..037846b 100644
--- a/libs/cputimeinstate/cputimeinstate.cpp
+++ b/libs/cputimeinstate/cputimeinstate.cpp
@@ -17,12 +17,16 @@
#define LOG_TAG "libtimeinstate"
#include "cputimeinstate.h"
+#include <bpf_timeinstate.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
+#include <sys/sysinfo.h>
#include <mutex>
+#include <numeric>
+#include <optional>
#include <set>
#include <string>
#include <unordered_map>
@@ -37,44 +41,36 @@
#include <libbpf.h>
#include <log/log.h>
-#define BPF_FS_PATH "/sys/fs/bpf/"
-
using android::base::StringPrintf;
using android::base::unique_fd;
namespace android {
namespace bpf {
-struct time_key_t {
- uint32_t uid;
- uint32_t freq;
-};
-
-struct val_t {
- uint64_t ar[100];
-};
-
static std::mutex gInitializedMutex;
static bool gInitialized = false;
static uint32_t gNPolicies = 0;
+static uint32_t gNCpus = 0;
static std::vector<std::vector<uint32_t>> gPolicyFreqs;
static std::vector<std::vector<uint32_t>> gPolicyCpus;
static std::set<uint32_t> gAllFreqs;
-static unique_fd gMapFd;
+static unique_fd gTisMapFd;
+static unique_fd gConcurrentMapFd;
-static bool readNumbersFromFile(const std::string &path, std::vector<uint32_t> *out) {
+static std::optional<std::vector<uint32_t>> readNumbersFromFile(const std::string &path) {
std::string data;
- if (!android::base::ReadFileToString(path, &data)) return false;
+ if (!android::base::ReadFileToString(path, &data)) return {};
auto strings = android::base::Split(data, " \n");
+ std::vector<uint32_t> ret;
for (const auto &s : strings) {
if (s.empty()) continue;
uint32_t n;
- if (!android::base::ParseUint(s, &n)) return false;
- out->emplace_back(n);
+ if (!android::base::ParseUint(s, &n)) return {};
+ ret.emplace_back(n);
}
- return true;
+ return ret;
}
static int isPolicyFile(const struct dirent *d) {
@@ -89,10 +85,22 @@
return policyN1 - policyN2;
}
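+// Like bpf_obj_get(), but opens the pinned map write-only (BPF_F_WRONLY) so callers that
+// only update entries do not need read access to the pinned file.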
+static int bpf_obj_get_wronly(const char *pathname) {
+ union bpf_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.pathname = ptr_to_u64((void *)pathname);
+ attr.file_flags = BPF_F_WRONLY;
+
+ return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
+}
+
static bool initGlobals() {
std::lock_guard<std::mutex> guard(gInitializedMutex);
if (gInitialized) return true;
+ gNCpus = get_nprocs_conf();
+
struct dirent **dirlist;
const char basepath[] = "/sys/devices/system/cpu/cpufreq";
int ret = scandir(basepath, &dirlist, isPolicyFile, comparePolicyFiles);
@@ -111,21 +119,28 @@
for (const auto &name : {"available", "boost"}) {
std::string path =
StringPrintf("%s/%s/scaling_%s_frequencies", basepath, policy.c_str(), name);
- if (!readNumbersFromFile(path, &freqs)) return false;
+ auto nums = readNumbersFromFile(path);
+ if (!nums) continue;
+ freqs.insert(freqs.end(), nums->begin(), nums->end());
}
+ if (freqs.empty()) return false;
std::sort(freqs.begin(), freqs.end());
gPolicyFreqs.emplace_back(freqs);
for (auto freq : freqs) gAllFreqs.insert(freq);
- std::vector<uint32_t> cpus;
std::string path = StringPrintf("%s/%s/%s", basepath, policy.c_str(), "related_cpus");
- if (!readNumbersFromFile(path, &cpus)) return false;
- gPolicyCpus.emplace_back(cpus);
+ auto cpus = readNumbersFromFile(path);
+ if (!cpus) return false;
+ gPolicyCpus.emplace_back(*cpus);
}
- gMapFd = unique_fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_times")};
- if (gMapFd < 0) return false;
+ gTisMapFd = unique_fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_time_in_state_map")};
+ if (gTisMapFd < 0) return false;
+
+ gConcurrentMapFd =
+ unique_fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_concurrent_times_map")};
+ if (gConcurrentMapFd < 0) return false;
gInitialized = true;
return true;
@@ -145,97 +160,286 @@
// process dies then it must be called again to resume tracking.
// This function should *not* be called while tracking is already active; doing so is unnecessary
// and can lead to accounting errors.
-bool startTrackingUidCpuFreqTimes() {
+bool startTrackingUidTimes() {
+ if (!initGlobals()) return false;
+
+ unique_fd cpuPolicyFd(bpf_obj_get_wronly(BPF_FS_PATH "map_time_in_state_cpu_policy_map"));
+ if (cpuPolicyFd < 0) return false;
+
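+    // Record which policy (cluster) each CPU belongs to for the BPF programs.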
+ for (uint32_t i = 0; i < gPolicyCpus.size(); ++i) {
+ for (auto &cpu : gPolicyCpus[i]) {
+ if (writeToMapEntry(cpuPolicyFd, &cpu, &i, BPF_ANY)) return false;
+ }
+ }
+
+ unique_fd freqToIdxFd(bpf_obj_get_wronly(BPF_FS_PATH "map_time_in_state_freq_to_idx_map"));
+ if (freqToIdxFd < 0) return false;
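+    // Populate the frequency -> index table used to locate each frequency's slot.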
+ freq_idx_key_t key;
+ for (uint32_t i = 0; i < gNPolicies; ++i) {
+ key.policy = i;
+ for (uint32_t j = 0; j < gPolicyFreqs[i].size(); ++j) {
+ key.freq = gPolicyFreqs[i][j];
+ // Start indexes at 1 so that uninitialized state is distinguishable from lowest freq.
+ // The uid_times map still uses 0-based indexes, and the sched_switch program handles
+ // conversion between them, so this does not affect our map reading code.
+ uint32_t idx = j + 1;
+ if (writeToMapEntry(freqToIdxFd, &key, &idx, BPF_ANY)) return false;
+ }
+ }
+
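+    // Zero-initialize the remaining bookkeeping maps: per-CPU last-update times, active-CPU
+    // counts, and each policy's current frequency index.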
+ unique_fd cpuLastUpdateFd(bpf_obj_get_wronly(BPF_FS_PATH "map_time_in_state_cpu_last_update_map"));
+ if (cpuLastUpdateFd < 0) return false;
+ std::vector<uint64_t> zeros(get_nprocs_conf(), 0);
+ uint32_t zero = 0;
+ if (writeToMapEntry(cpuLastUpdateFd, &zero, zeros.data(), BPF_ANY)) return false;
+
+ unique_fd nrActiveFd(bpf_obj_get_wronly(BPF_FS_PATH "map_time_in_state_nr_active_map"));
+ if (nrActiveFd < 0) return false;
+ if (writeToMapEntry(nrActiveFd, &zero, &zero, BPF_ANY)) return false;
+
+ unique_fd policyNrActiveFd(bpf_obj_get_wronly(BPF_FS_PATH "map_time_in_state_policy_nr_active_map"));
+ if (policyNrActiveFd < 0) return false;
+ for (uint32_t i = 0; i < gNPolicies; ++i) {
+ if (writeToMapEntry(policyNrActiveFd, &i, &zero, BPF_ANY)) return false;
+ }
+
+ unique_fd policyFreqIdxFd(bpf_obj_get_wronly(BPF_FS_PATH "map_time_in_state_policy_freq_idx_map"));
+ if (policyFreqIdxFd < 0) return false;
+ for (uint32_t i = 0; i < gNPolicies; ++i) {
+ if (writeToMapEntry(policyFreqIdxFd, &i, &zero, BPF_ANY)) return false;
+ }
+
return attachTracepointProgram("sched", "sched_switch") &&
attachTracepointProgram("power", "cpu_frequency");
}
-// Retrieve the times in ns that uid spent running at each CPU frequency and store in freqTimes.
-// Returns false on error. Otherwise, returns true and populates freqTimes with a vector of vectors
-// using the format:
+std::optional<std::vector<std::vector<uint32_t>>> getCpuFreqs() {
+ if (!gInitialized && !initGlobals()) return {};
+ return gPolicyFreqs;
+}
+
+// Retrieve the times in ns that uid spent running at each CPU frequency.
+// Return contains no value on error, otherwise it contains a vector of vectors using the format:
// [[t0_0, t0_1, ...],
// [t1_0, t1_1, ...], ...]
// where ti_j is the ns that uid spent running on the ith cluster at that cluster's jth lowest freq.
-bool getUidCpuFreqTimes(uint32_t uid, std::vector<std::vector<uint64_t>> *freqTimes) {
- if (!gInitialized && !initGlobals()) return false;
- time_key_t key = {.uid = uid, .freq = 0};
+std::optional<std::vector<std::vector<uint64_t>>> getUidCpuFreqTimes(uint32_t uid) {
+ if (!gInitialized && !initGlobals()) return {};
- freqTimes->clear();
- freqTimes->resize(gNPolicies);
- std::vector<uint32_t> idxs(gNPolicies, 0);
+ std::vector<std::vector<uint64_t>> out;
+ uint32_t maxFreqCount = 0;
+ for (const auto &freqList : gPolicyFreqs) {
+ if (freqList.size() > maxFreqCount) maxFreqCount = freqList.size();
+ out.emplace_back(freqList.size(), 0);
+ }
- val_t value;
- for (uint32_t freq : gAllFreqs) {
- key.freq = freq;
- int ret = findMapEntry(gMapFd, &key, &value);
- if (ret) {
- if (errno == ENOENT)
- memset(&value.ar, 0, sizeof(value.ar));
- else
- return false;
+ std::vector<tis_val_t> vals(gNCpus);
+ time_key_t key = {.uid = uid};
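+    // Entries are bucketed with FREQS_PER_ENTRY frequencies per CPU; walk each bucket and
+    // accumulate the per-CPU values into per-policy totals.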
+ for (uint32_t i = 0; i <= (maxFreqCount - 1) / FREQS_PER_ENTRY; ++i) {
+ key.bucket = i;
+ if (findMapEntry(gTisMapFd, &key, vals.data())) {
+ if (errno != ENOENT) return {};
+ continue;
}
- for (uint32_t i = 0; i < gNPolicies; ++i) {
- if (idxs[i] == gPolicyFreqs[i].size() || freq != gPolicyFreqs[i][idxs[i]]) continue;
- uint64_t time = 0;
- for (uint32_t cpu : gPolicyCpus[i]) time += value.ar[cpu];
- idxs[i] += 1;
- (*freqTimes)[i].emplace_back(time);
+
+ auto offset = i * FREQS_PER_ENTRY;
+ auto nextOffset = (i + 1) * FREQS_PER_ENTRY;
+ for (uint32_t j = 0; j < gNPolicies; ++j) {
+ if (offset >= gPolicyFreqs[j].size()) continue;
+ auto begin = out[j].begin() + offset;
+ auto end = nextOffset < gPolicyFreqs[j].size() ? begin + FREQS_PER_ENTRY : out[j].end();
+
+ for (const auto &cpu : gPolicyCpus[j]) {
+ std::transform(begin, end, std::begin(vals[cpu].ar), begin, std::plus<uint64_t>());
+ }
}
}
- return true;
+ return out;
}
-// Retrieve the times in ns that each uid spent running at each CPU freq and store in freqTimeMap.
-// Returns false on error. Otherwise, returns true and populates freqTimeMap with a map from uids to
-// vectors of vectors using the format:
+// Retrieve the times in ns that each uid spent running at each CPU freq.
+// Return contains no value on error, otherwise it contains a map from uids to vectors of vectors
+// using the format:
// { uid0 -> [[t0_0_0, t0_0_1, ...], [t0_1_0, t0_1_1, ...], ...],
// uid1 -> [[t1_0_0, t1_0_1, ...], [t1_1_0, t1_1_1, ...], ...], ... }
// where ti_j_k is the ns uid i spent running on the jth cluster at the cluster's kth lowest freq.
-bool getUidsCpuFreqTimes(
- std::unordered_map<uint32_t, std::vector<std::vector<uint64_t>>> *freqTimeMap) {
- if (!gInitialized && !initGlobals()) return false;
-
- int fd = bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_times");
- if (fd < 0) return false;
- BpfMap<time_key_t, val_t> m(fd);
-
- std::vector<std::unordered_map<uint32_t, uint32_t>> policyFreqIdxs;
- for (uint32_t i = 0; i < gNPolicies; ++i) {
- std::unordered_map<uint32_t, uint32_t> freqIdxs;
- for (size_t j = 0; j < gPolicyFreqs[i].size(); ++j) freqIdxs[gPolicyFreqs[i][j]] = j;
- policyFreqIdxs.emplace_back(freqIdxs);
+std::optional<std::unordered_map<uint32_t, std::vector<std::vector<uint64_t>>>>
+getUidsCpuFreqTimes() {
+ if (!gInitialized && !initGlobals()) return {};
+ time_key_t key, prevKey;
+ std::unordered_map<uint32_t, std::vector<std::vector<uint64_t>>> map;
+ if (getFirstMapKey(gTisMapFd, &key)) {
+ if (errno == ENOENT) return map;
+ return std::nullopt;
}
- auto fn = [freqTimeMap, &policyFreqIdxs](const time_key_t &key, const val_t &val,
- const BpfMap<time_key_t, val_t> &) {
- if (freqTimeMap->find(key.uid) == freqTimeMap->end()) {
- (*freqTimeMap)[key.uid].resize(gNPolicies);
- for (uint32_t i = 0; i < gNPolicies; ++i) {
- (*freqTimeMap)[key.uid][i].resize(gPolicyFreqs[i].size(), 0);
+ std::vector<std::vector<uint64_t>> mapFormat;
+ for (const auto &freqList : gPolicyFreqs) mapFormat.emplace_back(freqList.size(), 0);
+
+ std::vector<tis_val_t> vals(gNCpus);
+ do {
+ if (findMapEntry(gTisMapFd, &key, vals.data())) return {};
+ if (map.find(key.uid) == map.end()) map.emplace(key.uid, mapFormat);
+
+ auto offset = key.bucket * FREQS_PER_ENTRY;
+ auto nextOffset = (key.bucket + 1) * FREQS_PER_ENTRY;
+ for (uint32_t i = 0; i < gNPolicies; ++i) {
+ if (offset >= gPolicyFreqs[i].size()) continue;
+ auto begin = map[key.uid][i].begin() + offset;
+ auto end = nextOffset < gPolicyFreqs[i].size() ? begin + FREQS_PER_ENTRY :
+ map[key.uid][i].end();
+ for (const auto &cpu : gPolicyCpus[i]) {
+ std::transform(begin, end, std::begin(vals[cpu].ar), begin, std::plus<uint64_t>());
}
}
+ prevKey = key;
+ } while (!getNextMapKey(gTisMapFd, &prevKey, &key));
+ if (errno != ENOENT) return {};
+ return map;
+}
+
+static bool verifyConcurrentTimes(const concurrent_time_t &ct) {
+ uint64_t activeSum = std::accumulate(ct.active.begin(), ct.active.end(), (uint64_t)0);
+ uint64_t policySum = 0;
+ for (const auto &vec : ct.policy) {
+ policySum += std::accumulate(vec.begin(), vec.end(), (uint64_t)0);
+ }
+ return activeSum == policySum;
+}
+
+// Retrieve the times in ns that uid spent running concurrently with each possible number of other
+// tasks on each cluster (policy times) and overall (active times).
+// Return contains no value on error, otherwise it contains a concurrent_time_t with the format:
+// {.active = [a0, a1, ...], .policy = [[p0_0, p0_1, ...], [p1_0, p1_1, ...], ...]}
+// where ai is the ns spent running concurrently with tasks on i other cpus and pi_j is the ns spent
+// running on the ith cluster, concurrently with tasks on j other cpus in the same cluster
+std::optional<concurrent_time_t> getUidConcurrentTimes(uint32_t uid, bool retry) {
+ if (!gInitialized && !initGlobals()) return {};
+ concurrent_time_t ret = {.active = std::vector<uint64_t>(gNCpus, 0)};
+ for (const auto &cpuList : gPolicyCpus) ret.policy.emplace_back(cpuList.size(), 0);
+ std::vector<concurrent_val_t> vals(gNCpus);
+ time_key_t key = {.uid = uid};
+ for (key.bucket = 0; key.bucket <= (gNCpus - 1) / CPUS_PER_ENTRY; ++key.bucket) {
+ if (findMapEntry(gConcurrentMapFd, &key, vals.data())) {
+ if (errno != ENOENT) return {};
+ continue;
+ }
+ auto offset = key.bucket * CPUS_PER_ENTRY;
+ auto nextOffset = (key.bucket + 1) * CPUS_PER_ENTRY;
+
+ auto activeBegin = ret.active.begin() + offset;
+ auto activeEnd = nextOffset < gNCpus ? activeBegin + CPUS_PER_ENTRY : ret.active.end();
+
+ for (uint32_t cpu = 0; cpu < gNCpus; ++cpu) {
+ std::transform(activeBegin, activeEnd, std::begin(vals[cpu].active), activeBegin,
+ std::plus<uint64_t>());
+ }
- for (size_t policy = 0; policy < gNPolicies; ++policy) {
+ for (uint32_t policy = 0; policy < gNPolicies; ++policy) {
+ if (offset >= gPolicyCpus[policy].size()) continue;
+ auto policyBegin = ret.policy[policy].begin() + offset;
+ auto policyEnd = nextOffset < gPolicyCpus[policy].size() ? policyBegin + CPUS_PER_ENTRY
+ : ret.policy[policy].end();
+
for (const auto &cpu : gPolicyCpus[policy]) {
- auto freqIdx = policyFreqIdxs[policy][key.freq];
- (*freqTimeMap)[key.uid][policy][freqIdx] += val.ar[cpu];
+ std::transform(policyBegin, policyEnd, std::begin(vals[cpu].policy), policyBegin,
+ std::plus<uint64_t>());
}
}
- return android::netdutils::status::ok;
- };
- return isOk(m.iterateWithValue(fn));
+ }
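+    // Mismatched active and policy totals usually mean a mid-update read, so retry once.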
+ if (!verifyConcurrentTimes(ret) && retry) return getUidConcurrentTimes(uid, false);
+ return ret;
+}
+
+// Retrieve the times in ns that each uid spent running concurrently with each possible number of
+// other tasks on each cluster (policy times) and overall (active times).
+// Return contains no value on error, otherwise it contains a map from uids to concurrent_time_t's
+// using the format:
+// { uid0 -> {.active = [a0, a1, ...], .policy = [[p0_0, p0_1, ...], [p1_0, p1_1, ...], ...] }, ...}
+// where ai is the ns spent running concurrently with tasks on i other cpus and pi_j is the ns spent
+// running on the ith cluster, concurrently with tasks on j other cpus in the same cluster.
+std::optional<std::unordered_map<uint32_t, concurrent_time_t>> getUidsConcurrentTimes() {
+ if (!gInitialized && !initGlobals()) return {};
+ time_key_t key, prevKey;
+ std::unordered_map<uint32_t, concurrent_time_t> ret;
+ if (getFirstMapKey(gConcurrentMapFd, &key)) {
+ if (errno == ENOENT) return ret;
+ return {};
+ }
+
+ concurrent_time_t retFormat = {.active = std::vector<uint64_t>(gNCpus, 0)};
+ for (const auto &cpuList : gPolicyCpus) retFormat.policy.emplace_back(cpuList.size(), 0);
+
+ std::vector<concurrent_val_t> vals(gNCpus);
+ std::vector<uint64_t>::iterator activeBegin, activeEnd, policyBegin, policyEnd;
+
+ do {
+ if (findMapEntry(gConcurrentMapFd, &key, vals.data())) return {};
+ if (ret.find(key.uid) == ret.end()) ret.emplace(key.uid, retFormat);
+
+ auto offset = key.bucket * CPUS_PER_ENTRY;
+ auto nextOffset = (key.bucket + 1) * CPUS_PER_ENTRY;
+
+ activeBegin = ret[key.uid].active.begin();
+ activeEnd = nextOffset < gNCpus ? activeBegin + CPUS_PER_ENTRY : ret[key.uid].active.end();
+
+ for (uint32_t cpu = 0; cpu < gNCpus; ++cpu) {
+ std::transform(activeBegin, activeEnd, std::begin(vals[cpu].active), activeBegin,
+ std::plus<uint64_t>());
+ }
+
+ for (uint32_t policy = 0; policy < gNPolicies; ++policy) {
+ if (offset >= gPolicyCpus[policy].size()) continue;
+ policyBegin = ret[key.uid].policy[policy].begin() + offset;
+ policyEnd = nextOffset < gPolicyCpus[policy].size() ? policyBegin + CPUS_PER_ENTRY
+ : ret[key.uid].policy[policy].end();
+
+ for (const auto &cpu : gPolicyCpus[policy]) {
+ std::transform(policyBegin, policyEnd, std::begin(vals[cpu].policy), policyBegin,
+ std::plus<uint64_t>());
+ }
+ }
+ prevKey = key;
+ } while (!getNextMapKey(gConcurrentMapFd, &prevKey, &key));
+ if (errno != ENOENT) return {};
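+    // Re-read any UID whose totals disagree; this usually means a mid-update read.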
+ for (const auto &[key, value] : ret) {
+ if (!verifyConcurrentTimes(value)) {
+ auto val = getUidConcurrentTimes(key, false);
+ if (val.has_value()) ret[key] = val.value();
+ }
+ }
+ return ret;
}
// Clear all time in state data for a given uid. Returns false on error, true otherwise.
-bool clearUidCpuFreqTimes(uint32_t uid) {
+// This is only suitable for clearing data when an app is uninstalled; if called on a UID with
+// running tasks it will cause time in state vs. concurrent time totals to be inconsistent for that
+// UID.
+bool clearUidTimes(uint32_t uid) {
if (!gInitialized && !initGlobals()) return false;
- time_key_t key = {.uid = uid, .freq = 0};
- std::vector<uint32_t> idxs(gNPolicies, 0);
- for (auto freq : gAllFreqs) {
- key.freq = freq;
- if (deleteMapEntry(gMapFd, &key) && errno != ENOENT) return false;
+ time_key_t key = {.uid = uid};
+
+ uint32_t maxFreqCount = 0;
+ for (const auto &freqList : gPolicyFreqs) {
+ if (freqList.size() > maxFreqCount) maxFreqCount = freqList.size();
+ }
+
+ tis_val_t zeros = {0};
+ std::vector<tis_val_t> vals(gNCpus, zeros);
+ for (key.bucket = 0; key.bucket <= (maxFreqCount - 1) / FREQS_PER_ENTRY; ++key.bucket) {
+ if (writeToMapEntry(gTisMapFd, &key, vals.data(), BPF_EXIST) && errno != ENOENT)
+ return false;
+ if (deleteMapEntry(gTisMapFd, &key) && errno != ENOENT) return false;
+ }
+
+ concurrent_val_t czeros = {.policy = {0}, .active = {0}};
+ std::vector<concurrent_val_t> cvals(gNCpus, czeros);
+ for (key.bucket = 0; key.bucket <= (gNCpus - 1) / CPUS_PER_ENTRY; ++key.bucket) {
+ if (writeToMapEntry(gConcurrentMapFd, &key, cvals.data(), BPF_EXIST) && errno != ENOENT)
+ return false;
+ if (deleteMapEntry(gConcurrentMapFd, &key) && errno != ENOENT) return false;
}
return true;
}
diff --git a/libs/cputimeinstate/cputimeinstate.h b/libs/cputimeinstate/cputimeinstate.h
index 9f6103e..49469d8 100644
--- a/libs/cputimeinstate/cputimeinstate.h
+++ b/libs/cputimeinstate/cputimeinstate.h
@@ -22,10 +22,20 @@
namespace android {
namespace bpf {
-bool startTrackingUidCpuFreqTimes();
-bool getUidCpuFreqTimes(unsigned int uid, std::vector<std::vector<uint64_t>> *freqTimes);
-bool getUidsCpuFreqTimes(std::unordered_map<uint32_t, std::vector<std::vector<uint64_t>>> *tisMap);
-bool clearUidCpuFreqTimes(unsigned int uid);
+bool startTrackingUidTimes();
+std::optional<std::vector<std::vector<uint64_t>>> getUidCpuFreqTimes(uint32_t uid);
+std::optional<std::unordered_map<uint32_t, std::vector<std::vector<uint64_t>>>>
+ getUidsCpuFreqTimes();
+std::optional<std::vector<std::vector<uint32_t>>> getCpuFreqs();
+
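+// active[i]: ns spent running concurrently with tasks on i other CPUs.
+// policy[p][j]: ns spent on cluster p concurrently with tasks on j other CPUs in that cluster.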
+struct concurrent_time_t {
+ std::vector<uint64_t> active;
+ std::vector<std::vector<uint64_t>> policy;
+};
+
+std::optional<concurrent_time_t> getUidConcurrentTimes(uint32_t uid, bool retry = true);
+std::optional<std::unordered_map<uint32_t, concurrent_time_t>> getUidsConcurrentTimes();
+bool clearUidTimes(unsigned int uid);
} // namespace bpf
} // namespace android
diff --git a/libs/cputimeinstate/testtimeinstate.cpp b/libs/cputimeinstate/testtimeinstate.cpp
index 9837865..23d87fd 100644
--- a/libs/cputimeinstate/testtimeinstate.cpp
+++ b/libs/cputimeinstate/testtimeinstate.cpp
@@ -1,57 +1,381 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <bpf_timeinstate.h>
+
+#include <sys/sysinfo.h>
+
+#include <numeric>
#include <unordered_map>
#include <vector>
#include <gtest/gtest.h>
+#include <android-base/unique_fd.h>
+#include <bpf/BpfMap.h>
#include <cputimeinstate.h>
+#include <libbpf.h>
namespace android {
namespace bpf {
+static constexpr uint64_t NSEC_PER_SEC = 1000000000;
+static constexpr uint64_t NSEC_PER_YEAR = NSEC_PER_SEC * 60 * 60 * 24 * 365;
+
using std::vector;
-TEST(TimeInStateTest, SingleUid) {
- vector<vector<uint64_t>> times;
- ASSERT_TRUE(getUidCpuFreqTimes(0, ×));
- EXPECT_FALSE(times.empty());
+TEST(TimeInStateTest, SingleUidTimeInState) {
+ auto times = getUidCpuFreqTimes(0);
+ ASSERT_TRUE(times.has_value());
+ EXPECT_FALSE(times->empty());
}
-TEST(TimeInStateTest, AllUid) {
+TEST(TimeInStateTest, SingleUidConcurrentTimes) {
+ auto concurrentTimes = getUidConcurrentTimes(0);
+ ASSERT_TRUE(concurrentTimes.has_value());
+ ASSERT_FALSE(concurrentTimes->active.empty());
+ ASSERT_FALSE(concurrentTimes->policy.empty());
+
+ uint64_t policyEntries = 0;
+ for (const auto &policyTimeVec : concurrentTimes->policy) policyEntries += policyTimeVec.size();
+ ASSERT_EQ(concurrentTimes->active.size(), policyEntries);
+}
+
+static void TestConcurrentTimesConsistent(const struct concurrent_time_t &concurrentTime) {
+ size_t maxPolicyCpus = 0;
+ for (const auto &vec : concurrentTime.policy) {
+ maxPolicyCpus = std::max(maxPolicyCpus, vec.size());
+ }
+ uint64_t policySum = 0;
+ for (size_t i = 0; i < maxPolicyCpus; ++i) {
+ for (const auto &vec : concurrentTime.policy) {
+ if (i < vec.size()) policySum += vec[i];
+ }
+ ASSERT_LE(concurrentTime.active[i], policySum);
+ policySum -= concurrentTime.active[i];
+ }
+ policySum = 0;
+ for (size_t i = 0; i < concurrentTime.active.size(); ++i) {
+ for (const auto &vec : concurrentTime.policy) {
+ if (i < vec.size()) policySum += vec[vec.size() - 1 - i];
+ }
+ auto activeSum = concurrentTime.active[concurrentTime.active.size() - 1 - i];
+ // This check is slightly flaky because we may read a map entry in the middle of an update
+ // when active times have been updated but policy times have not. This happens infrequently
+ // and can be distinguished from more serious bugs by re-running the test: if the underlying
+ // data itself is inconsistent, the test will fail every time.
+ ASSERT_LE(activeSum, policySum);
+ policySum -= activeSum;
+ }
+}
+
+static void TestUidTimesConsistent(const std::vector<std::vector<uint64_t>> &timeInState,
+ const struct concurrent_time_t &concurrentTime) {
+ ASSERT_NO_FATAL_FAILURE(TestConcurrentTimesConsistent(concurrentTime));
+ ASSERT_EQ(timeInState.size(), concurrentTime.policy.size());
+ uint64_t policySum = 0;
+ for (uint32_t i = 0; i < timeInState.size(); ++i) {
+ uint64_t tisSum =
+ std::accumulate(timeInState[i].begin(), timeInState[i].end(), (uint64_t)0);
+ uint64_t concurrentSum = std::accumulate(concurrentTime.policy[i].begin(),
+ concurrentTime.policy[i].end(), (uint64_t)0);
+ if (tisSum < concurrentSum)
+ ASSERT_LE(concurrentSum - tisSum, NSEC_PER_SEC);
+ else
+ ASSERT_LE(tisSum - concurrentSum, NSEC_PER_SEC);
+ policySum += concurrentSum;
+ }
+ uint64_t activeSum = std::accumulate(concurrentTime.active.begin(), concurrentTime.active.end(),
+ (uint64_t)0);
+ EXPECT_EQ(activeSum, policySum);
+}
+
+TEST(TimeInStateTest, SingleUidTimesConsistent) {
+ auto times = getUidCpuFreqTimes(0);
+ ASSERT_TRUE(times.has_value());
+
+ auto concurrentTimes = getUidConcurrentTimes(0);
+ ASSERT_TRUE(concurrentTimes.has_value());
+
+ ASSERT_NO_FATAL_FAILURE(TestUidTimesConsistent(*times, *concurrentTimes));
+}
+
+TEST(TimeInStateTest, AllUidTimeInState) {
vector<size_t> sizes;
- std::unordered_map<uint32_t, vector<vector<uint64_t>>> map;
- ASSERT_TRUE(getUidsCpuFreqTimes(&map));
+ auto map = getUidsCpuFreqTimes();
+ ASSERT_TRUE(map.has_value());
- ASSERT_FALSE(map.empty());
+ ASSERT_FALSE(map->empty());
- auto firstEntry = map.begin()->second;
+ auto firstEntry = map->begin()->second;
for (const auto &subEntry : firstEntry) sizes.emplace_back(subEntry.size());
- for (const auto &vec : map) {
+ for (const auto &vec : *map) {
ASSERT_EQ(vec.second.size(), sizes.size());
for (size_t i = 0; i < vec.second.size(); ++i) ASSERT_EQ(vec.second[i].size(), sizes[i]);
}
}
+TEST(TimeInStateTest, SingleAndAllUidTimeInStateConsistent) {
+ auto map = getUidsCpuFreqTimes();
+ ASSERT_TRUE(map.has_value());
+ ASSERT_FALSE(map->empty());
+
+ for (const auto &kv : *map) {
+ uint32_t uid = kv.first;
+ auto times1 = kv.second;
+ auto times2 = getUidCpuFreqTimes(uid);
+ ASSERT_TRUE(times2.has_value());
+
+ ASSERT_EQ(times1.size(), times2->size());
+ for (uint32_t i = 0; i < times1.size(); ++i) {
+ ASSERT_EQ(times1[i].size(), (*times2)[i].size());
+ for (uint32_t j = 0; j < times1[i].size(); ++j) {
+ ASSERT_LE((*times2)[i][j] - times1[i][j], NSEC_PER_SEC);
+ }
+ }
+ }
+}
+
+TEST(TimeInStateTest, AllUidConcurrentTimes) {
+ auto map = getUidsConcurrentTimes();
+ ASSERT_TRUE(map.has_value());
+ ASSERT_FALSE(map->empty());
+
+ auto firstEntry = map->begin()->second;
+ for (const auto &kv : *map) {
+ ASSERT_EQ(kv.second.active.size(), firstEntry.active.size());
+ ASSERT_EQ(kv.second.policy.size(), firstEntry.policy.size());
+ for (size_t i = 0; i < kv.second.policy.size(); ++i) {
+ ASSERT_EQ(kv.second.policy[i].size(), firstEntry.policy[i].size());
+ }
+ }
+}
+
+TEST(TimeInStateTest, SingleAndAllUidConcurrentTimesConsistent) {
+ auto map = getUidsConcurrentTimes();
+ ASSERT_TRUE(map.has_value());
+ for (const auto &kv : *map) {
+ uint32_t uid = kv.first;
+ auto times1 = kv.second;
+ auto times2 = getUidConcurrentTimes(uid);
+ ASSERT_TRUE(times2.has_value());
+ for (uint32_t i = 0; i < times1.active.size(); ++i) {
+ ASSERT_LE(times2->active[i] - times1.active[i], NSEC_PER_SEC);
+ }
+ for (uint32_t i = 0; i < times1.policy.size(); ++i) {
+ for (uint32_t j = 0; j < times1.policy[i].size(); ++j) {
+ ASSERT_LE(times2->policy[i][j] - times1.policy[i][j], NSEC_PER_SEC);
+ }
+ }
+ }
+}
+
+void TestCheckDelta(uint64_t before, uint64_t after) {
+ // Times should never decrease
+ ASSERT_LE(before, after);
+ // UID can't have run for more than ~1s on each CPU
+ ASSERT_LE(after - before, NSEC_PER_SEC * 2 * get_nprocs_conf());
+}
+
+TEST(TimeInStateTest, AllUidTimeInStateMonotonic) {
+ auto map1 = getUidsCpuFreqTimes();
+ ASSERT_TRUE(map1.has_value());
+ sleep(1);
+ auto map2 = getUidsCpuFreqTimes();
+ ASSERT_TRUE(map2.has_value());
+
+ for (const auto &kv : *map1) {
+ uint32_t uid = kv.first;
+ auto times = kv.second;
+ ASSERT_NE(map2->find(uid), map2->end());
+ for (uint32_t policy = 0; policy < times.size(); ++policy) {
+ for (uint32_t freqIdx = 0; freqIdx < times[policy].size(); ++freqIdx) {
+ auto before = times[policy][freqIdx];
+ auto after = (*map2)[uid][policy][freqIdx];
+ ASSERT_NO_FATAL_FAILURE(TestCheckDelta(before, after));
+ }
+ }
+ }
+}
+
+TEST(TimeInStateTest, AllUidConcurrentTimesMonotonic) {
+ auto map1 = getUidsConcurrentTimes();
+ ASSERT_TRUE(map1.has_value());
+ ASSERT_FALSE(map1->empty());
+ sleep(1);
+ auto map2 = getUidsConcurrentTimes();
+ ASSERT_TRUE(map2.has_value());
+ ASSERT_FALSE(map2->empty());
+
+ for (const auto &kv : *map1) {
+ uint32_t uid = kv.first;
+ auto times = kv.second;
+ ASSERT_NE(map2->find(uid), map2->end());
+ for (uint32_t i = 0; i < times.active.size(); ++i) {
+ auto before = times.active[i];
+ auto after = (*map2)[uid].active[i];
+ ASSERT_NO_FATAL_FAILURE(TestCheckDelta(before, after));
+ }
+ for (uint32_t policy = 0; policy < times.policy.size(); ++policy) {
+ for (uint32_t idx = 0; idx < times.policy[policy].size(); ++idx) {
+ auto before = times.policy[policy][idx];
+ auto after = (*map2)[uid].policy[policy][idx];
+ ASSERT_NO_FATAL_FAILURE(TestCheckDelta(before, after));
+ }
+ }
+ }
+}
+
+TEST(TimeInStateTest, AllUidTimeInStateSanityCheck) {
+ auto map = getUidsCpuFreqTimes();
+ ASSERT_TRUE(map.has_value());
+
+ bool foundLargeValue = false;
+ for (const auto &kv : *map) {
+ for (const auto &timeVec : kv.second) {
+ for (const auto &time : timeVec) {
+ ASSERT_LE(time, NSEC_PER_YEAR);
+ if (time > UINT32_MAX) foundLargeValue = true;
+ }
+ }
+ }
+ // UINT32_MAX nanoseconds is less than 5 seconds, so if every part of our pipeline is using
+ // uint64_t as expected, we should have some times higher than that.
+ ASSERT_TRUE(foundLargeValue);
+}
+
+TEST(TimeInStateTest, AllUidConcurrentTimesSanityCheck) {
+ auto concurrentMap = getUidsConcurrentTimes();
+ ASSERT_TRUE(concurrentMap);
+
+ bool activeFoundLargeValue = false;
+ bool policyFoundLargeValue = false;
+ for (const auto &kv : *concurrentMap) {
+ for (const auto &time : kv.second.active) {
+ ASSERT_LE(time, NSEC_PER_YEAR);
+ if (time > UINT32_MAX) activeFoundLargeValue = true;
+ }
+ for (const auto &policyTimeVec : kv.second.policy) {
+ for (const auto &time : policyTimeVec) {
+ ASSERT_LE(time, NSEC_PER_YEAR);
+ if (time > UINT32_MAX) policyFoundLargeValue = true;
+ }
+ }
+ }
+ // UINT32_MAX nanoseconds is less than 5 seconds, so if every part of our pipeline is using
+ // uint64_t as expected, we should have some times higher than that.
+ ASSERT_TRUE(activeFoundLargeValue);
+ ASSERT_TRUE(policyFoundLargeValue);
+}
+
+TEST(TimeInStateTest, AllUidTimesConsistent) {
+ auto tisMap = getUidsCpuFreqTimes();
+ ASSERT_TRUE(tisMap.has_value());
+
+ auto concurrentMap = getUidsConcurrentTimes();
+ ASSERT_TRUE(concurrentMap.has_value());
+
+ ASSERT_EQ(tisMap->size(), concurrentMap->size());
+ for (const auto &kv : *tisMap) {
+ uint32_t uid = kv.first;
+ auto times = kv.second;
+ ASSERT_NE(concurrentMap->find(uid), concurrentMap->end());
+
+ auto concurrentTimes = (*concurrentMap)[uid];
+ ASSERT_NO_FATAL_FAILURE(TestUidTimesConsistent(times, concurrentTimes));
+ }
+}
+
TEST(TimeInStateTest, RemoveUid) {
- vector<vector<uint64_t>> times, times2;
- ASSERT_TRUE(getUidCpuFreqTimes(0, ×));
- ASSERT_FALSE(times.empty());
+ uint32_t uid = 0;
+ {
+ // Find an unused UID
+ auto times = getUidsCpuFreqTimes();
+ ASSERT_TRUE(times.has_value());
+ ASSERT_FALSE(times->empty());
+ for (const auto &kv : *times) uid = std::max(uid, kv.first);
+ ++uid;
+ }
+ {
+ // Add a map entry for our fake UID by copying a real map entry
+ android::base::unique_fd fd{
+ bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_time_in_state_map")};
+ ASSERT_GE(fd, 0);
+ time_key_t k;
+ ASSERT_FALSE(getFirstMapKey(fd, &k));
+ std::vector<tis_val_t> vals(get_nprocs_conf());
+ ASSERT_FALSE(findMapEntry(fd, &k, vals.data()));
+ uint32_t copiedUid = k.uid;
+ k.uid = uid;
+ ASSERT_FALSE(writeToMapEntry(fd, &k, vals.data(), BPF_NOEXIST));
+
+ android::base::unique_fd fd2{
+ bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_concurrent_times_map")};
+ k.uid = copiedUid;
+ k.bucket = 0;
+ std::vector<concurrent_val_t> cvals(get_nprocs_conf());
+ ASSERT_FALSE(findMapEntry(fd2, &k, cvals.data()));
+ k.uid = uid;
+ ASSERT_FALSE(writeToMapEntry(fd2, &k, cvals.data(), BPF_NOEXIST));
+ }
+ auto times = getUidCpuFreqTimes(uid);
+ ASSERT_TRUE(times.has_value());
+ ASSERT_FALSE(times->empty());
+
+    auto concurrentTimes = getUidConcurrentTimes(uid);
+ ASSERT_TRUE(concurrentTimes.has_value());
+ ASSERT_FALSE(concurrentTimes->active.empty());
+ ASSERT_FALSE(concurrentTimes->policy.empty());
uint64_t sum = 0;
- for (size_t i = 0; i < times.size(); ++i) {
- for (auto x : times[i]) sum += x;
+ for (size_t i = 0; i < times->size(); ++i) {
+ for (auto x : (*times)[i]) sum += x;
}
ASSERT_GT(sum, (uint64_t)0);
- ASSERT_TRUE(clearUidCpuFreqTimes(0));
-
- ASSERT_TRUE(getUidCpuFreqTimes(0, ×2));
- ASSERT_EQ(times2.size(), times.size());
- for (size_t i = 0; i < times.size(); ++i) {
- ASSERT_EQ(times2[i].size(), times[i].size());
- for (size_t j = 0; j < times[i].size(); ++j) ASSERT_LE(times2[i][j], times[i][j]);
+ uint64_t activeSum = 0;
+ for (size_t i = 0; i < concurrentTimes->active.size(); ++i) {
+ activeSum += concurrentTimes->active[i];
}
+ ASSERT_GT(activeSum, (uint64_t)0);
+
+ ASSERT_TRUE(clearUidTimes(uid));
+
+ auto allTimes = getUidsCpuFreqTimes();
+ ASSERT_TRUE(allTimes.has_value());
+ ASSERT_FALSE(allTimes->empty());
+ ASSERT_EQ(allTimes->find(uid), allTimes->end());
+
+ auto allConcurrentTimes = getUidsConcurrentTimes();
+ ASSERT_TRUE(allConcurrentTimes.has_value());
+ ASSERT_FALSE(allConcurrentTimes->empty());
+ ASSERT_EQ(allConcurrentTimes->find(uid), allConcurrentTimes->end());
+}
+
+TEST(TimeInStateTest, GetCpuFreqs) {
+ auto freqs = getCpuFreqs();
+ ASSERT_TRUE(freqs.has_value());
+
+ auto times = getUidCpuFreqTimes(0);
+ ASSERT_TRUE(times.has_value());
+
+ ASSERT_EQ(freqs->size(), times->size());
+ for (size_t i = 0; i < freqs->size(); ++i) EXPECT_EQ((*freqs)[i].size(), (*times)[i].size());
}
} // namespace bpf
diff --git a/libs/dumputils/dump_utils.cpp b/libs/dumputils/dump_utils.cpp
index 250f902..a772e57 100644
--- a/libs/dumputils/dump_utils.cpp
+++ b/libs/dumputils/dump_utils.cpp
@@ -16,7 +16,9 @@
#include <set>
#include <android-base/file.h>
+#include <android-base/properties.h>
#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
#include <android/hidl/manager/1.0/IServiceManager.h>
#include <dumputils/dump_utils.h>
#include <log/log.h>
@@ -32,7 +34,6 @@
"/system/bin/mediametrics", // media.metrics
"/system/bin/mediaserver",
"/system/bin/netd",
- "/system/bin/vold",
"/system/bin/sdcard",
"/system/bin/statsd",
"/system/bin/surfaceflinger",
@@ -42,6 +43,13 @@
NULL,
};
+
+// Native processes to dump on debuggable builds.
+static const char* debuggable_native_processes_to_dump[] = {
+ "/system/bin/vold",
+ NULL,
+};
+
/* list of hal interface to dump containing process during native dumps */
static const char* hal_interfaces_to_dump[] {
"android.hardware.audio@2.0::IDevicesFactory",
@@ -65,13 +73,34 @@
NULL,
};
-bool should_dump_hal_interface(const char* interface) {
+/* list of extra HAL interfaces whose hosting processes to dump during native dumps */
+// This is filled in when dumpstate is invoked.
+static std::set<std::string> extra_hal_interfaces_to_dump;
+
+static void read_extra_hals_to_dump_from_property() {
+    // The extra HALs to dump have already been read from the property.
+    if (!extra_hal_interfaces_to_dump.empty()) {
+ return;
+ }
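+    // ro.dump.hals.extra holds a comma-separated list of additional HAL interfaces to dump.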
+ std::string value = android::base::GetProperty("ro.dump.hals.extra", "");
+ std::vector<std::string> tokens = android::base::Split(value, ",");
+ for (const auto &token : tokens) {
+ std::string trimmed_token = android::base::Trim(token);
+ if (trimmed_token.length() == 0) {
+ continue;
+ }
+ extra_hal_interfaces_to_dump.insert(trimmed_token);
+ }
+}
+
+// Check whether the interface is in either the default HAL list or the extra HAL list.
+bool should_dump_hal_interface(const std::string& interface) {
for (const char** i = hal_interfaces_to_dump; *i; i++) {
- if (!strcmp(*i, interface)) {
+ if (interface == *i) {
return true;
}
}
- return false;
+ return extra_hal_interfaces_to_dump.find(interface) != extra_hal_interfaces_to_dump.end();
}
bool should_dump_native_traces(const char* path) {
@@ -80,6 +109,15 @@
return true;
}
}
+
+ if (android::base::GetBoolProperty("ro.debuggable", false)) {
+ for (const char** p = debuggable_native_processes_to_dump; *p; p++) {
+ if (!strcmp(*p, path)) {
+ return true;
+ }
+ }
+ }
+
return false;
}
@@ -91,13 +129,15 @@
sp<IServiceManager> manager = IServiceManager::getService();
std::set<int> pids;
+ read_extra_hals_to_dump_from_property();
+
Return<void> ret = manager->debugDump([&](auto& hals) {
for (const auto &info : hals) {
if (info.pid == static_cast<int>(IServiceManager::PidConstant::NO_PID)) {
continue;
}
- if (!should_dump_hal_interface(info.interfaceName.c_str())) {
+ if (!should_dump_hal_interface(info.interfaceName)) {
continue;
}
diff --git a/libs/sensor/Sensor.cpp b/libs/sensor/Sensor.cpp
index 139987e..abc9103 100644
--- a/libs/sensor/Sensor.cpp
+++ b/libs/sensor/Sensor.cpp
@@ -577,7 +577,8 @@
uint32_t len = static_cast<uint32_t>(string8.length());
FlattenableUtils::write(buffer, size, len);
memcpy(static_cast<char*>(buffer), string8.string(), len);
- FlattenableUtils::advance(buffer, size, FlattenableUtils::align<4>(len));
+ FlattenableUtils::advance(buffer, size, len);
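+    // align<4>() rounds the buffer pointer up to a 4-byte boundary and returns the padding,
+    // which must also come out of the remaining size.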
+ size -= FlattenableUtils::align<4>(buffer);
}
bool Sensor::unflattenString8(void const*& buffer, size_t& size, String8& outputString8) {
diff --git a/services/surfaceflinger/OWNERS b/services/surfaceflinger/OWNERS
index 69d8c89..f2bc65d 100644
--- a/services/surfaceflinger/OWNERS
+++ b/services/surfaceflinger/OWNERS
@@ -5,4 +5,6 @@
lpy@google.com
marissaw@google.com
racarr@google.com
-stoza@google.com
\ No newline at end of file
+steventhomas@google.com
+stoza@google.com
+vhau@google.com
diff --git a/services/surfaceflinger/surfaceflinger.rc b/services/surfaceflinger/surfaceflinger.rc
index aea602b..d3942e8 100644
--- a/services/surfaceflinger/surfaceflinger.rc
+++ b/services/surfaceflinger/surfaceflinger.rc
@@ -2,6 +2,7 @@
class core animation
user system
group graphics drmrpc readproc
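+    # CAP_SYS_NICE lets SurfaceFlinger raise thread priorities and use real-time scheduling.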
+ capabilities SYS_NICE
onrestart restart zygote
writepid /dev/stune/foreground/tasks
socket pdx/system/vr/display/client stream 0666 system graphics u:object_r:pdx_display_client_endpoint_socket:s0