Merge "Remove VrFlinger usages from SF."
diff --git a/cmds/dumpstate/DumpstateUtil.cpp b/cmds/dumpstate/DumpstateUtil.cpp
index eeaa5a3..c833d0e 100644
--- a/cmds/dumpstate/DumpstateUtil.cpp
+++ b/cmds/dumpstate/DumpstateUtil.cpp
@@ -124,6 +124,12 @@
return *this;
}
+CommandOptions::CommandOptionsBuilder&
+CommandOptions::CommandOptionsBuilder::CloseAllFileDescriptorsOnExec() {
+ values.close_all_fds_on_exec_ = true;
+ return *this;
+}
+
CommandOptions::CommandOptionsBuilder& CommandOptions::CommandOptionsBuilder::Log(
const std::string& message) {
values.logging_message_ = message;
@@ -137,6 +143,7 @@
CommandOptions::CommandOptionsValues::CommandOptionsValues(int64_t timeout_ms)
: timeout_ms_(timeout_ms),
always_(false),
+ close_all_fds_on_exec_(false),
account_mode_(DONT_DROP_ROOT),
output_mode_(NORMAL_OUTPUT),
logging_message_("") {
@@ -157,6 +164,10 @@
return values.always_;
}
+bool CommandOptions::ShouldCloseAllFileDescriptorsOnExec() const {
+ return values.close_all_fds_on_exec_;
+}
+
PrivilegeMode CommandOptions::PrivilegeMode() const {
return values.account_mode_;
}
@@ -277,7 +288,8 @@
MYLOGI(logging_message.c_str(), command_string.c_str());
}
- bool silent = (options.OutputMode() == REDIRECT_TO_STDERR);
+ bool silent = (options.OutputMode() == REDIRECT_TO_STDERR ||
+ options.ShouldCloseAllFileDescriptorsOnExec());
bool redirecting_to_fd = STDOUT_FILENO != fd;
if (PropertiesHelper::IsDryRun() && !options.Always()) {
@@ -314,7 +326,27 @@
return -1;
}
- if (silent) {
+ if (options.ShouldCloseAllFileDescriptorsOnExec()) {
+ int devnull_fd = TEMP_FAILURE_RETRY(open("/dev/null", O_RDONLY));
+ TEMP_FAILURE_RETRY(dup2(devnull_fd, STDIN_FILENO));
+ close(devnull_fd);
+ devnull_fd = TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY));
+ TEMP_FAILURE_RETRY(dup2(devnull_fd, STDOUT_FILENO));
+ TEMP_FAILURE_RETRY(dup2(devnull_fd, STDERR_FILENO));
+ close(devnull_fd);
+ // This is to avoid leaking FDs that have accidentally not been
+ // marked as O_CLOEXEC. Leaking FDs across exec can cause failures
+ // when exec-ing a process that has a SELinux auto_trans rule.
+ // Here we assume that the dumpstate process didn't open more than
+ // 1000 FDs. In theory we could iterate through /proc/self/fd/, but
+ // doing that in a fork-safe way is too complex and not worth it
+ // (opendir()/readdir() do heap allocations and take locks).
+ for (int i = 0; i < 1000; i++) {
+ if (i != STDIN_FILENO && i != STDOUT_FILENO && i != STDERR_FILENO) {
+ close(i);
+ }
+ }
+ } else if (silent) {
// Redirects stdout to stderr
TEMP_FAILURE_RETRY(dup2(STDERR_FILENO, STDOUT_FILENO));
} else if (redirecting_to_fd) {
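The brute-force close loop above is only needed for descriptors that were opened without O_CLOEXEC. A minimal complementary sketch, not part of this change and using only standard POSIX flags (the helper name is illustrative), of opening files so they never leak across exec in the first place:

#include <fcntl.h>
#include <unistd.h>

// O_CLOEXEC makes the kernel close the descriptor atomically on execve(), so
// it never reaches the exec'd child. Descriptors opened without this flag are
// exactly the ones the loop above has to close by hand.
static int OpenLogFileCloexec(const char* path) {
    return TEMP_FAILURE_RETRY(open(path, O_WRONLY | O_CREAT | O_APPEND | O_CLOEXEC, 0640));
}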
diff --git a/cmds/dumpstate/DumpstateUtil.h b/cmds/dumpstate/DumpstateUtil.h
index b099443..b00c46e 100644
--- a/cmds/dumpstate/DumpstateUtil.h
+++ b/cmds/dumpstate/DumpstateUtil.h
@@ -80,6 +80,7 @@
int64_t timeout_ms_;
bool always_;
+ bool close_all_fds_on_exec_;
PrivilegeMode account_mode_;
OutputMode output_mode_;
std::string logging_message_;
@@ -112,6 +113,13 @@
CommandOptionsBuilder& DropRoot();
/* Sets the command's OutputMode as `REDIRECT_TO_STDERR` */
CommandOptionsBuilder& RedirectStderr();
+ /* Closes all file descriptors before exec-ing the target process. This
+ * also includes the stdio pipes, which are dup-ed onto /dev/null. It
+ * prevents leaking open FDs to the target process, which in turn can hit
+ * SELinux denials in the presence of auto_trans rules.
+ */
+ CommandOptionsBuilder& CloseAllFileDescriptorsOnExec();
+
/* When not empty, logs a message before executing the command.
* Must contain a `%s`, which will be replaced by the full command line, and end on `\n`. */
CommandOptionsBuilder& Log(const std::string& message);
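A short usage sketch of the new builder option, with names unqualified (this mirrors how dumpstate.cpp invokes it further down in this change):

// Build CommandOptions that drop root and close every inherited FD before
// exec-ing the child, with a 10-second timeout.
CommandOptions options = CommandOptions::WithTimeout(10)
                                 .DropRoot()
                                 .CloseAllFileDescriptorsOnExec()
                                 .Build();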
@@ -130,6 +138,8 @@
int64_t TimeoutInMs() const;
/* Checks whether the command should always be run, even on dry-run mode. */
bool Always() const;
+ /* Checks whether all FDs should be closed prior to the exec() calls. */
+ bool ShouldCloseAllFileDescriptorsOnExec() const;
/** Gets the PrivilegeMode of the command. */
PrivilegeMode PrivilegeMode() const;
/** Gets the OutputMode of the command. */
diff --git a/cmds/dumpstate/dumpstate.cpp b/cmds/dumpstate/dumpstate.cpp
index 9c5b883..fbb0a18 100644
--- a/cmds/dumpstate/dumpstate.cpp
+++ b/cmds/dumpstate/dumpstate.cpp
@@ -28,6 +28,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/mount.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/resource.h>
@@ -174,6 +175,7 @@
#define SNAPSHOTCTL_LOG_DIR "/data/misc/snapshotctl_log"
#define LINKERCONFIG_DIR "/linkerconfig"
#define PACKAGE_DEX_USE_LIST "/data/system/package-dex-usage.list"
+#define SYSTEM_TRACE_SNAPSHOT "/data/misc/perfetto-traces/bugreport/systrace.pftrace"
// TODO(narayan): Since this information has to be kept in sync
// with tombstoned, we should just put it in a common header.
@@ -1052,6 +1054,24 @@
}
}
+static void MaybeAddSystemTraceToZip() {
+ // This function copies the system trace that was snapshotted by the early
+ // call to MaybeSnapshotSystemTrace() into the .zip, if any background
+ // tracing was happening.
+ if (!ds.IsZipping()) {
+ MYLOGD("Not dumping system trace because it's not a zipped bugreport\n");
+ return;
+ }
+ if (!ds.has_system_trace_) {
+ // No background trace was happening at the time dumpstate was invoked.
+ return;
+ }
+ ds.AddZipEntry(
+ ZIP_ROOT_DIR + SYSTEM_TRACE_SNAPSHOT,
+ SYSTEM_TRACE_SNAPSHOT);
+ android::os::UnlinkAndLogOnError(SYSTEM_TRACE_SNAPSHOT);
+}
+
static void DumpVisibleWindowViews() {
if (!ds.IsZipping()) {
MYLOGD("Not dumping visible views because it's not a zipped bugreport\n");
@@ -1548,7 +1568,7 @@
dprintf(out_fd, "========================================================\n");
RunDumpsys("APP PROVIDERS PLATFORM", {"activity", "provider", "all-platform"},
- DUMPSYS_COMPONENTS_OPTIONS, out_fd);
+ DUMPSYS_COMPONENTS_OPTIONS, 0, out_fd);
dprintf(out_fd, "========================================================\n");
dprintf(out_fd, "== Running Application Providers (non-platform)\n");
@@ -1648,6 +1668,8 @@
AddAnrTraceFiles();
+ MaybeAddSystemTraceToZip();
+
// NOTE: tombstones are always added as separate entries in the zip archive
// and are not interspersed with the main report.
const bool tombstones_dumped = AddDumps(ds.tombstone_data_.begin(), ds.tombstone_data_.end(),
@@ -2156,6 +2178,22 @@
return;
}
+ /*
+ * Mount debugfs for non-user builds that launch with Android S (API level
+ * 31) and unmount it after invoking the dumpstateBoard_* methods. This
+ * allows debug builds to avoid keeping debugfs mounted at runtime and also
+ * ensures that debugfs is only accessed by the dumpstate HAL.
+ */
+ auto api_level = android::base::GetIntProperty("ro.product.first_api_level", 0);
+ bool mount_debugfs = !PropertiesHelper::IsUserBuild() && api_level >= 31;
+
+ if (mount_debugfs) {
+ RunCommand("mount debugfs", {"mount", "-t", "debugfs", "debugfs", "/sys/kernel/debug"},
+ AS_ROOT_20);
+ RunCommand("chmod debugfs", {"chmod", "0755", "/sys/kernel/debug"},
+ AS_ROOT_20);
+ }
+
std::vector<std::string> paths;
std::vector<android::base::ScopeGuard<std::function<void()>>> remover;
for (int i = 0; i < NUM_OF_DUMPS; i++) {
@@ -2255,6 +2293,10 @@
"there might be racing in content\n", killing_timeout_sec);
}
+ if (mount_debugfs) {
+ RunCommand("unmount debugfs", {"umount", "/sys/kernel/debug"}, AS_ROOT_20);
+ }
+
auto file_sizes = std::make_unique<ssize_t[]>(paths.size());
for (size_t i = 0; i < paths.size(); i++) {
struct stat s;
@@ -2865,6 +2907,13 @@
RunDumpsysCritical();
}
MaybeTakeEarlyScreenshot();
+
+ if (!is_dumpstate_restricted) {
+ // Snapshot the system trace now (if one is running) so that dumpstate's
+ // own activity does not push interesting data out of the trace ring buffer.
+ // The trace file is added to the zip by MaybeAddSystemTraceToZip().
+ MaybeSnapshotSystemTrace();
+ }
onUiIntensiveBugreportDumpsFinished(calling_uid);
MaybeCheckUserConsent(calling_uid, calling_package);
if (options_->telephony_only) {
@@ -2955,6 +3004,26 @@
TakeScreenshot();
}
+void Dumpstate::MaybeSnapshotSystemTrace() {
+ // If a background system trace is happening and is marked as "suitable for
+ // bugreport" (i.e. bugreport_score > 0 in the trace config), this command
+ // will stop it and serialize it into SYSTEM_TRACE_SNAPSHOT. In the (likely)
+ // case that no trace is ongoing, this command is a no-op.
+ // Note: this should not be enqueued, as we need to freeze the trace before
+ // dumpstate starts. Otherwise the trace ring buffers will mostly contain
+ // dumpstate's own activity, which is irrelevant.
+ int res = RunCommand(
+ "SERIALIZE PERFETTO TRACE",
+ {"perfetto", "--save-for-bugreport"},
+ CommandOptions::WithTimeout(10)
+ .DropRoot()
+ .CloseAllFileDescriptorsOnExec()
+ .Build());
+ has_system_trace_ = res == 0;
+ // MaybeAddSystemTraceToZip() will take care of copying the trace in the zip
+ // file in the later stages.
+}
+
void Dumpstate::onUiIntensiveBugreportDumpsFinished(int32_t calling_uid) {
if (calling_uid == AID_SHELL || !CalledByApi()) {
return;
diff --git a/cmds/dumpstate/dumpstate.h b/cmds/dumpstate/dumpstate.h
index 255243f..f83968b 100644
--- a/cmds/dumpstate/dumpstate.h
+++ b/cmds/dumpstate/dumpstate.h
@@ -458,6 +458,11 @@
// Whether it should take a screenshot earlier in the process.
bool do_early_screenshot_ = false;
+ // This is set to true when the trace snapshot request succeeds in the
+ // early call to MaybeSnapshotSystemTrace(). When this is true, the later
+ // stages of dumpstate will append the trace to the zip archive.
+ bool has_system_trace_ = false;
+
std::unique_ptr<Progress> progress_;
// When set, defines a socket file-descriptor use to report progress to bugreportz
@@ -543,6 +548,7 @@
RunStatus DumpstateDefaultAfterCritical();
void MaybeTakeEarlyScreenshot();
+ void MaybeSnapshotSystemTrace();
void onUiIntensiveBugreportDumpsFinished(int32_t calling_uid);
diff --git a/cmds/dumpstate/tests/dumpstate_smoke_test.cpp b/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
index fe6a34a..0e366cb 100644
--- a/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
+++ b/cmds/dumpstate/tests/dumpstate_smoke_test.cpp
@@ -319,6 +319,16 @@
*/
class BugreportSectionTest : public Test {
public:
+ ZipArchiveHandle handle;
+
+ void SetUp() {
+ ASSERT_EQ(OpenArchive(ZippedBugreportGenerationTest::getZipFilePath().c_str(), &handle), 0);
+ }
+
+ void TearDown() {
+ CloseArchive(handle);
+ }
+
static void SetUpTestCase() {
ParseSections(ZippedBugreportGenerationTest::getZipFilePath().c_str(),
ZippedBugreportGenerationTest::sections.get());
@@ -343,6 +353,19 @@
}
FAIL() << sectionName << " not found.";
}
+
+ /**
+ * Returns true if the given line looks like content injected by another command.
+ */
+ bool IsContentInjectedByOthers(const std::string& line) {
+ // Command header such as `------ APP ACTIVITIES (/system/bin/dumpsys activity -v) ------`.
+ static const std::regex kCommandHeader = std::regex{"------ .+ \\(.+\\) ------"};
+ std::smatch match;
+ if (std::regex_match(line, match, kCommandHeader)) {
+ return true;
+ }
+ return false;
+ }
};
TEST_F(BugreportSectionTest, Atleast3CriticalDumpsysSectionsGenerated) {
@@ -400,6 +423,28 @@
SectionExists("wifi", /* bytes= */ 100000);
}
+TEST_F(BugreportSectionTest, NoInjectedContentByOtherCommand) {
+ // Extract the main entry to a temp file
+ TemporaryFile tmp_binary;
+ ASSERT_NE(-1, tmp_binary.fd);
+ ExtractBugreport(&handle, tmp_binary.fd);
+
+ // Read line by line and identify sections
+ std::ifstream ifs(tmp_binary.path, std::ifstream::in);
+ std::string line;
+ std::string current_section_name;
+ while (std::getline(ifs, line)) {
+ std::string section_name;
+ if (IsSectionStart(line, &section_name)) {
+ current_section_name = section_name;
+ } else if (IsSectionEnd(line)) {
+ current_section_name = "";
+ } else if (!current_section_name.empty()) {
+ EXPECT_FALSE(IsContentInjectedByOthers(line));
+ }
+ }
+}
+
class DumpstateBinderTest : public Test {
protected:
void SetUp() override {
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index 08b984e..e754d74 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -181,15 +181,26 @@
"--header-filter=^.*frameworks/native/libs/binder/.*.h$",
],
tidy_checks_as_errors: [
- "*",
+ // Explicitly list the checks that should not occur in this module.
+ "abseil-*",
+ "android-*",
+ "bugprone-*",
+ "cert-*",
+ "clang-analyzer-*",
"-clang-analyzer-core.CallAndMessage",
"-clang-analyzer-core.uninitialized.Assign",
- "-clang-analyzer-unix.Malloc,",
+ "-clang-analyzer-unix.Malloc",
"-clang-analyzer-deadcode.DeadStores",
"-clang-analyzer-optin.cplusplus.UninitializedObject",
+ "google-*",
+ "-google-readability-*",
+ "-google-runtime-references",
+ "misc-*",
"-misc-no-recursion",
"-misc-redundant-expression",
"-misc-unused-using-decls",
+ "performance*",
+ "portability*",
],
}
diff --git a/libs/binder/LazyServiceRegistrar.cpp b/libs/binder/LazyServiceRegistrar.cpp
index 325e204..ff1fbec 100644
--- a/libs/binder/LazyServiceRegistrar.cpp
+++ b/libs/binder/LazyServiceRegistrar.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "log/log_main.h"
#define LOG_TAG "AidlLazyServiceRegistrar"
#include <binder/LazyServiceRegistrar.h>
@@ -41,6 +42,13 @@
*/
void forcePersist(bool persist);
+ void setActiveServicesCountCallback(const std::function<bool(int)>&
+ activeServicesCountCallback);
+
+ bool tryUnregister();
+
+ void reRegister();
+
protected:
Status onClients(const sp<IBinder>& service, bool clients) override;
@@ -53,6 +61,7 @@
// whether, based on onClients calls, we know we have a client for this
// service or not
bool clients = false;
+ bool registered = true;
};
/**
@@ -66,6 +75,14 @@
*/
void tryShutdown();
+ /**
+ * Try to shut down the process, unless:
+ * - 'forcePersist' is 'true', or
+ * - The active services count callback returns 'true', or
+ * - Some services have clients.
+ */
+ void maybeTryShutdown();
+
// count of services with clients
size_t mNumConnectedServices;
@@ -73,6 +90,9 @@
std::map<std::string, Service> mRegisteredServices;
bool mForcePersist;
+
+ // Callback used to report the number of services with clients
+ std::function<bool(int)> mActiveServicesCountCallback;
};
bool ClientCounterCallback::registerService(const sp<IBinder>& service, const std::string& name,
@@ -119,8 +139,60 @@
void ClientCounterCallback::forcePersist(bool persist) {
mForcePersist = persist;
- if(!mForcePersist) {
+ if (!mForcePersist && mNumConnectedServices == 0) {
// Attempt a shutdown in case the number of clients hit 0 while the flag was on
+ maybeTryShutdown();
+ }
+}
+
+bool ClientCounterCallback::tryUnregister() {
+ auto manager = interface_cast<AidlServiceManager>(asBinder(defaultServiceManager()));
+
+ for (auto& [name, entry] : mRegisteredServices) {
+ bool success = manager->tryUnregisterService(name, entry.service).isOk();
+
+ if (!success) {
+ ALOGI("Failed to unregister service %s", name.c_str());
+ return false;
+ }
+ entry.registered = false;
+ }
+
+ return true;
+}
+
+void ClientCounterCallback::reRegister() {
+ for (auto& [name, entry] : mRegisteredServices) {
+ // re-register entry if not already registered
+ if (entry.registered) {
+ continue;
+ }
+
+ if (!registerService(entry.service, name, entry.allowIsolated,
+ entry.dumpFlags)) {
+ // Must restart. Otherwise, clients will never be able to get a hold of this service.
+ LOG_ALWAYS_FATAL("Bad state: could not re-register services");
+ }
+
+ entry.registered = true;
+ }
+}
+
+void ClientCounterCallback::maybeTryShutdown() {
+ if (mForcePersist) {
+ ALOGI("Shutdown prevented by forcePersist override flag.");
+ return;
+ }
+
+ bool handledInCallback = false;
+ if (mActiveServicesCountCallback != nullptr) {
+ handledInCallback = mActiveServicesCountCallback(mNumConnectedServices);
+ }
+
+ // If no callback is defined, or the callback did not handle this client
+ // count change event, try to shut down the process if its services have
+ // no clients.
+ if (!handledInCallback && mNumConnectedServices == 0) {
tryShutdown();
}
}
@@ -150,54 +222,25 @@
ALOGI("Process has %zu (of %zu available) client(s) in use after notification %s has clients: %d",
mNumConnectedServices, mRegisteredServices.size(), name.c_str(), clients);
- tryShutdown();
+ maybeTryShutdown();
+
return Status::ok();
}
void ClientCounterCallback::tryShutdown() {
- if(mNumConnectedServices > 0) {
- // Should only shut down if there are no clients
- return;
- }
-
- if(mForcePersist) {
- ALOGI("Shutdown prevented by forcePersist override flag.");
- return;
- }
-
ALOGI("Trying to shut down the service. No clients in use for any service in process.");
- auto manager = interface_cast<AidlServiceManager>(asBinder(defaultServiceManager()));
-
- auto unRegisterIt = mRegisteredServices.begin();
- for (; unRegisterIt != mRegisteredServices.end(); ++unRegisterIt) {
- auto& entry = (*unRegisterIt);
-
- bool success = manager->tryUnregisterService(entry.first, entry.second.service).isOk();
-
-
- if (!success) {
- ALOGI("Failed to unregister service %s", entry.first.c_str());
- break;
- }
- }
-
- if (unRegisterIt == mRegisteredServices.end()) {
+ if (tryUnregister()) {
ALOGI("Unregistered all clients and exiting");
exit(EXIT_SUCCESS);
}
- for (auto reRegisterIt = mRegisteredServices.begin(); reRegisterIt != unRegisterIt;
- reRegisterIt++) {
- auto& entry = (*reRegisterIt);
+ reRegister();
+}
- // re-register entry
- if (!registerService(entry.second.service, entry.first, entry.second.allowIsolated,
- entry.second.dumpFlags)) {
- // Must restart. Otherwise, clients will never be able to get a hold of this service.
- ALOGE("Bad state: could not re-register services");
- }
- }
+void ClientCounterCallback::setActiveServicesCountCallback(const std::function<bool(int)>&
+ activeServicesCountCallback) {
+ mActiveServicesCountCallback = activeServicesCountCallback;
}
} // namespace internal
@@ -223,5 +266,18 @@
mClientCC->forcePersist(persist);
}
+void LazyServiceRegistrar::setActiveServicesCountCallback(const std::function<bool(int)>&
+ activeServicesCountCallback) {
+ mClientCC->setActiveServicesCountCallback(activeServicesCountCallback);
+}
+
+bool LazyServiceRegistrar::tryUnregister() {
+ return mClientCC->tryUnregister();
+}
+
+void LazyServiceRegistrar::reRegister() {
+ mClientCC->reRegister();
+}
+
} // namespace hardware
} // namespace android
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index 9bba369..8087443 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -1477,6 +1477,31 @@
goto data_sorted;
}
+status_t Parcel::readVectorSizeWithCoarseBoundCheck(int32_t *size) const {
+ int32_t requestedSize;
+ const status_t status = readInt32(&requestedSize);
+ if (status != NO_ERROR) return status;
+
+ // We permit negative sizes, which indicate presence of a nullable vector,
+ // i.e. a vector embedded in std::optional, std::unique_ptr, or std::shared_ptr.
+ if (requestedSize > 0) {
+ // Check if there are fewer bytes than vector elements.
+ // A lower bound is 1 byte per element, which holds for int8_t, uint8_t, and byte-sized enums.
+ const size_t availableBytes = dataAvail();
+ if (static_cast<size_t>(requestedSize) > availableBytes) {
+ // We have a size that is greater than the number of bytes available.
+ // On a bounds failure we do not 'rewind' the position by the 4 bytes of the size already read.
+ ALOGW("%s: rejecting out of bounds vector size (requestedSize):%d "
+ "Parcel{dataAvail:%zu mDataSize:%zu mDataPos:%zu mDataCapacity:%zu}",
+ __func__, requestedSize, availableBytes, mDataSize, mDataPos, mDataCapacity);
+ return BAD_VALUE;
+ }
+ }
+
+ *size = requestedSize;
+ return NO_ERROR;
+}
+
status_t Parcel::read(void* outData, size_t len) const
{
if (len > INT32_MAX) {
@@ -1699,7 +1724,7 @@
status_t Parcel::readBoolVector(std::optional<std::vector<bool>>* val) const {
const int32_t start = dataPosition();
int32_t size;
- status_t status = readInt32(&size);
+ status_t status = readVectorSizeWithCoarseBoundCheck(&size);
val->reset();
if (status != OK || size < 0) {
@@ -1721,7 +1746,7 @@
status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
const int32_t start = dataPosition();
int32_t size;
- status_t status = readInt32(&size);
+ status_t status = readVectorSizeWithCoarseBoundCheck(&size);
val->reset();
if (status != OK || size < 0) {
@@ -1742,7 +1767,7 @@
status_t Parcel::readBoolVector(std::vector<bool>* val) const {
int32_t size;
- status_t status = readInt32(&size);
+ status_t status = readVectorSizeWithCoarseBoundCheck(&size);
if (status != OK) {
return status;
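A minimal sketch, not part of the patch and with an illustrative helper name, of the malformed input that readVectorSizeWithCoarseBoundCheck() above rejects: a parcel that claims a huge element count but carries no payload bytes behind it.

#include <binder/Parcel.h>
#include <vector>

static android::status_t ReadOversizedVector() {
    android::Parcel p;
    p.writeInt32(1 << 20);          // claimed size: ~1M elements, no element data
    p.setDataPosition(0);           // rewind so the read starts at the size field
    std::vector<bool> out;
    // After the size is consumed, dataAvail() is 0, which is less than the
    // requested size, so the coarse bound check fails the read with BAD_VALUE
    // instead of attempting a huge allocation.
    return p.readBoolVector(&out);
}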
diff --git a/libs/binder/ParcelableHolder.cpp b/libs/binder/ParcelableHolder.cpp
index b2b8671..2e86b74 100644
--- a/libs/binder/ParcelableHolder.cpp
+++ b/libs/binder/ParcelableHolder.cpp
@@ -37,7 +37,7 @@
size_t sizePos = p->dataPosition();
RETURN_ON_FAILURE(p->writeInt32(0));
size_t dataStartPos = p->dataPosition();
- RETURN_ON_FAILURE(p->writeUtf8AsUtf16(this->mParcelableName));
+ RETURN_ON_FAILURE(p->writeString16(this->mParcelableName));
this->mParcelable->writeToParcel(p);
size_t dataSize = p->dataPosition() - dataStartPos;
diff --git a/libs/binder/TEST_MAPPING b/libs/binder/TEST_MAPPING
index 97e282e..00a14f4 100644
--- a/libs/binder/TEST_MAPPING
+++ b/libs/binder/TEST_MAPPING
@@ -10,6 +10,9 @@
"name": "binderAllocationLimits"
},
{
+ "name": "binderClearBufTest"
+ },
+ {
"name": "binderDriverInterfaceTest"
},
{
diff --git a/libs/binder/include/binder/LazyServiceRegistrar.h b/libs/binder/include/binder/LazyServiceRegistrar.h
index d18c88e..73893c7 100644
--- a/libs/binder/include/binder/LazyServiceRegistrar.h
+++ b/libs/binder/include/binder/LazyServiceRegistrar.h
@@ -16,6 +16,8 @@
#pragma once
+#include <functional>
+
#include <binder/IServiceManager.h>
#include <binder/Status.h>
#include <utils/StrongPointer.h>
@@ -53,6 +55,40 @@
*/
void forcePersist(bool persist);
+ /**
+ * Set a callback that is executed when the total number of services with
+ * clients changes.
+ * The callback takes a single argument: the number of registered lazy
+ * services in this process that currently have clients.
+ *
+ * Callback return value:
+ * - false: Default behavior for lazy services (shut down the process if there
+ * are no clients).
+ * - true: Don't shut down the process even if there are no clients.
+ *
+ * This callback gives a chance to:
+ * 1 - Perform some additional operations before exiting;
+ * 2 - Prevent the process from exiting by returning "true" from the
+ * callback.
+ *
+ * This method should be called before 'registerService' to avoid races.
+ */
+ void setActiveServicesCountCallback(const std::function<bool(int)>&
+ activeServicesCountCallback);
+
+ /**
+ * Try to unregister all services previously registered with 'registerService'.
+ * Returns 'true' if successful.
+ */
+ bool tryUnregister();
+
+ /**
+ * Re-register services that were unregistered by 'tryUnregister'.
+ * This method should be called if 'tryUnregister' fails
+ * (and it should be called on the same thread).
+ */
+ void reRegister();
+
private:
std::shared_ptr<internal::ClientCounterCallback> mClientCC;
LazyServiceRegistrar();
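A hypothetical usage sketch of the callback hook declared above. The 'service' variable and the instance name are illustrative; getInstance() and registerService() are the registrar's existing entry points.

auto& registrar = LazyServiceRegistrar::getInstance();
registrar.setActiveServicesCountCallback([](int activeServices) -> bool {
    if (activeServices == 0) {
        // Last client went away: flush any pending state here, then return
        // false to let the default lazy shutdown run (return true to veto it).
        return false;
    }
    return false;
});
registrar.registerService(service, "example.IFoo/default");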
diff --git a/libs/binder/include/binder/Parcel.h b/libs/binder/include/binder/Parcel.h
index b49951b..54c49e4 100644
--- a/libs/binder/include/binder/Parcel.h
+++ b/libs/binder/include/binder/Parcel.h
@@ -517,6 +517,11 @@
void initState();
void scanForFds() const;
status_t validateReadData(size_t len) const;
+
+ // Reads an int32 size and does a coarse bounds check against the number
+ // of available bytes in the Parcel.
+ status_t readVectorSizeWithCoarseBoundCheck(int32_t *size) const;
+
void updateWorkSourceRequestHeaderPosition() const;
status_t finishFlattenBinder(const sp<IBinder>& binder);
@@ -787,6 +792,7 @@
template<typename T>
status_t Parcel::resizeOutVector(std::vector<T>* val) const {
int32_t size;
+ // used for allocating 'out' vector args, do not use readVectorSizeWithCoarseBoundCheck() here
status_t err = readInt32(&size);
if (err != NO_ERROR) {
return err;
@@ -802,6 +808,7 @@
template<typename T>
status_t Parcel::resizeOutVector(std::optional<std::vector<T>>* val) const {
int32_t size;
+ // used for allocating 'out' vector args, do not use readVectorSizeWithCoarseBoundCheck() here
status_t err = readInt32(&size);
if (err != NO_ERROR) {
return err;
@@ -818,6 +825,7 @@
template<typename T>
status_t Parcel::resizeOutVector(std::unique_ptr<std::vector<T>>* val) const {
int32_t size;
+ // used for allocating 'out' vector args, do not use readVectorSizeWithCoarseBoundCheck() here
status_t err = readInt32(&size);
if (err != NO_ERROR) {
return err;
@@ -834,7 +842,7 @@
template<typename T>
status_t Parcel::reserveOutVector(std::vector<T>* val, size_t* size) const {
int32_t read_size;
- status_t err = readInt32(&read_size);
+ status_t err = readVectorSizeWithCoarseBoundCheck(&read_size);
if (err != NO_ERROR) {
return err;
}
@@ -850,7 +858,7 @@
template<typename T>
status_t Parcel::reserveOutVector(std::optional<std::vector<T>>* val, size_t* size) const {
int32_t read_size;
- status_t err = readInt32(&read_size);
+ status_t err = readVectorSizeWithCoarseBoundCheck(&read_size);
if (err != NO_ERROR) {
return err;
}
@@ -870,7 +878,7 @@
status_t Parcel::reserveOutVector(std::unique_ptr<std::vector<T>>* val,
size_t* size) const {
int32_t read_size;
- status_t err = readInt32(&read_size);
+ status_t err = readVectorSizeWithCoarseBoundCheck(&read_size);
if (err != NO_ERROR) {
return err;
}
@@ -923,7 +931,7 @@
std::vector<T>* val,
status_t(Parcel::*read_func)(U*) const) const {
int32_t size;
- status_t status = this->readInt32(&size);
+ status_t status = this->readVectorSizeWithCoarseBoundCheck(&size);
if (status != OK) {
return status;
@@ -965,7 +973,7 @@
status_t(Parcel::*read_func)(T*) const) const {
const size_t start = dataPosition();
int32_t size;
- status_t status = readInt32(&size);
+ status_t status = readVectorSizeWithCoarseBoundCheck(&size);
val->reset();
if (status != OK || size < 0) {
@@ -989,7 +997,7 @@
status_t(Parcel::*read_func)(T*) const) const {
const size_t start = dataPosition();
int32_t size;
- status_t status = readInt32(&size);
+ status_t status = readVectorSizeWithCoarseBoundCheck(&size);
val->reset();
if (status != OK || size < 0) {
@@ -1093,7 +1101,7 @@
status_t Parcel::readParcelableVector(std::optional<std::vector<std::optional<T>>>* val) const {
const size_t start = dataPosition();
int32_t size;
- status_t status = readInt32(&size);
+ status_t status = readVectorSizeWithCoarseBoundCheck(&size);
val->reset();
if (status != OK || size < 0) {
@@ -1117,7 +1125,7 @@
status_t Parcel::readParcelableVector(std::unique_ptr<std::vector<std::unique_ptr<T>>>* val) const {
const size_t start = dataPosition();
int32_t size;
- status_t status = readInt32(&size);
+ status_t status = readVectorSizeWithCoarseBoundCheck(&size);
val->reset();
if (status != OK || size < 0) {
diff --git a/libs/binder/include/binder/ParcelableHolder.h b/libs/binder/include/binder/ParcelableHolder.h
index ff0a686..9e4475c 100644
--- a/libs/binder/include/binder/ParcelableHolder.h
+++ b/libs/binder/include/binder/ParcelableHolder.h
@@ -18,6 +18,7 @@
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
+#include <utils/String16.h>
#include <mutex>
#include <optional>
#include <tuple>
@@ -72,7 +73,7 @@
template <typename T>
status_t getParcelable(std::shared_ptr<T>* ret) const {
static_assert(std::is_base_of<Parcelable, T>::value, "T must be derived from Parcelable");
- const std::string& parcelableDesc = T::getParcelableDescriptor();
+ const String16& parcelableDesc = T::getParcelableDescriptor();
if (!this->mParcelPtr) {
if (!this->mParcelable || !this->mParcelableName) {
ALOGD("empty ParcelableHolder");
@@ -80,7 +81,7 @@
return android::OK;
} else if (parcelableDesc != *mParcelableName) {
ALOGD("extension class name mismatch expected:%s actual:%s",
- mParcelableName->c_str(), parcelableDesc.c_str());
+ String8(*mParcelableName).c_str(), String8(parcelableDesc).c_str());
*ret = nullptr;
return android::BAD_VALUE;
}
@@ -88,7 +89,7 @@
return android::OK;
}
this->mParcelPtr->setDataPosition(0);
- status_t status = this->mParcelPtr->readUtf8FromUtf16(&this->mParcelableName);
+ status_t status = this->mParcelPtr->readString16(&this->mParcelableName);
if (status != android::OK || parcelableDesc != this->mParcelableName) {
this->mParcelableName = std::nullopt;
*ret = nullptr;
@@ -130,7 +131,7 @@
private:
mutable std::shared_ptr<Parcelable> mParcelable;
- mutable std::optional<std::string> mParcelableName;
+ mutable std::optional<String16> mParcelableName;
mutable std::unique_ptr<Parcel> mParcelPtr;
Stability mStability;
};
diff --git a/libs/binder/ndk/Android.bp b/libs/binder/ndk/Android.bp
index bdb74dc..82f3882 100644
--- a/libs/binder/ndk/Android.bp
+++ b/libs/binder/ndk/Android.bp
@@ -104,15 +104,28 @@
"--header-filter=^.*frameworks/native/libs/binder/.*.h$",
],
tidy_checks_as_errors: [
- "*",
+ // Explicitly list the checks that should not occur in this module.
+ "abseil-*",
+ "android-*",
+ "bugprone-*",
+ "cert-*",
+ "clang-analyzer-*",
"-clang-analyzer-core.CallAndMessage",
"-clang-analyzer-core.uninitialized.Assign",
- "-clang-analyzer-unix.Malloc,",
+ "-clang-analyzer-unix.Malloc",
"-clang-analyzer-deadcode.DeadStores",
"-clang-analyzer-optin.cplusplus.UninitializedObject",
+ "google-*",
+ "-google-readability-*",
+ "-google-runtime-references",
+ "misc-*",
"-misc-no-recursion",
+ "-misc-non-private-member-variables-in-classes",
"-misc-redundant-expression",
+ "-misc-unused-parameters",
"-misc-unused-using-decls",
+ "performance*",
+ "portability*",
],
}
diff --git a/libs/binder/ndk/ibinder.cpp b/libs/binder/ndk/ibinder.cpp
index 350c658..454fbd0 100644
--- a/libs/binder/ndk/ibinder.cpp
+++ b/libs/binder/ndk/ibinder.cpp
@@ -301,6 +301,26 @@
return binder.get();
}
+AIBinder_Weak* AIBinder_Weak_clone(const AIBinder_Weak* weak) {
+ if (weak == nullptr) {
+ return nullptr;
+ }
+
+ return new AIBinder_Weak{weak->binder};
+}
+
+bool AIBinder_lt(const AIBinder* lhs, const AIBinder* rhs) {
+ if (lhs == nullptr || rhs == nullptr) return lhs < rhs;
+
+ return const_cast<AIBinder*>(lhs)->getBinder() < const_cast<AIBinder*>(rhs)->getBinder();
+}
+
+bool AIBinder_Weak_lt(const AIBinder_Weak* lhs, const AIBinder_Weak* rhs) {
+ if (lhs == nullptr || rhs == nullptr) return lhs < rhs;
+
+ return lhs->binder < rhs->binder;
+}
+
AIBinder_Class::AIBinder_Class(const char* interfaceDescriptor, AIBinder_Class_onCreate onCreate,
AIBinder_Class_onDestroy onDestroy,
AIBinder_Class_onTransact onTransact)
diff --git a/libs/binder/ndk/include_cpp/android/binder_interface_utils.h b/libs/binder/ndk/include_cpp/android/binder_interface_utils.h
index a4f4441..a1102e2 100644
--- a/libs/binder/ndk/include_cpp/android/binder_interface_utils.h
+++ b/libs/binder/ndk/include_cpp/android/binder_interface_utils.h
@@ -83,7 +83,8 @@
template <class T, class... Args>
static std::shared_ptr<T> make(Args&&... args) {
T* t = new T(std::forward<Args>(args)...);
- return t->template ref<T>();
+ // warning: Potential leak of memory pointed to by 't' [clang-analyzer-unix.Malloc]
+ return t->template ref<T>(); // NOLINT(clang-analyzer-unix.Malloc)
}
static void operator delete(void* p) { std::free(p); }
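For reference, the factory guarded by the NOLINT above is typically used as follows; 'MyService' stands in for any class that ultimately derives from ndk::SharedRefBase.

// Ownership of the raw 't' is adopted by the returned std::shared_ptr via
// ref<T>(), which is the transfer the analyzer cannot see.
std::shared_ptr<MyService> service = ndk::SharedRefBase::make<MyService>();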
diff --git a/libs/binder/ndk/include_ndk/android/binder_ibinder.h b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
index 5e1ed46..0ca3a07 100644
--- a/libs/binder/ndk/include_ndk/android/binder_ibinder.h
+++ b/libs/binder/ndk/include_ndk/android/binder_ibinder.h
@@ -657,6 +657,68 @@
*/
const char* AIBinder_Class_getDescriptor(const AIBinder_Class* clazz) __INTRODUCED_IN(31);
+/**
+ * Whether AIBinder is less than another.
+ *
+ * This provides a per-process-unique total ordering of binders determined by
+ * an underlying allocation address where a null AIBinder* is considered to be
+ * ordered before all other binders.
+ *
+ * AIBinder* pointers themselves actually also create a per-process-unique total
+ * ordering. However, this ordering is inconsistent with AIBinder_Weak_lt for
+ * remote binders.
+ *
+ * Available since API level 31.
+ *
+ * \param lhs comparison object
+ * \param rhs comparison object
+ *
+ * \return whether "lhs < rhs" is true
+ */
+bool AIBinder_lt(const AIBinder* lhs, const AIBinder* rhs);
+
+/**
+ * Clone an AIBinder_Weak. Useful because even if a weak binder promotes to a
+ * null value, after further binder transactions, it may no longer promote to a
+ * null value.
+ *
+ * Available since API level 31.
+ *
+ * \param weak Object to clone
+ *
+ * \return clone of the input parameter. This must be deleted with
+ * AIBinder_Weak_delete. Null if weak input parameter is also null.
+ */
+AIBinder_Weak* AIBinder_Weak_clone(const AIBinder_Weak* weak);
+
+/**
+ * Whether AIBinder_Weak is less than another.
+ *
+ * This provides a per-process-unique total ordering of binders which is exactly
+ * the same as AIBinder_lt. Similarly, a null AIBinder_Weak* is considered to be
+ * ordered before all other weak references.
+ *
+ * If you have many AIBinder_Weak* objects which are all references to distinct
+ * binder objects which happen to have the same underlying address (as ordered
+ * by AIBinder_lt), these AIBinder_Weak* objects will retain the same order with
+ * respect to all other AIBinder_Weak* pointers with different underlying
+ * addresses and are also guaranteed to have a per-process-unique ordering. That
+ * is, even though multiple AIBinder* instances may happen to be allocated at
+ * the same underlying address, this function will still correctly distinguish
+ * that these are weak pointers to different binder objects.
+ *
+ * Unlike AIBinder*, the AIBinder_Weak* addresses themselves have nothing to do
+ * with the underlying binder.
+ *
+ * Available since API level 31.
+ *
+ * \param lhs comparison object
+ * \param rhs comparison object
+ *
+ * \return whether "lhs < rhs" is true
+ */
+bool AIBinder_Weak_lt(const AIBinder_Weak* lhs, const AIBinder_Weak* rhs);
+
#endif //__ANDROID_API__ >= 31
__END_DECLS
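A small sketch of the intended use of the comparators declared above: keying an ordered container on binder identity rather than on the wrapper pointer's address (the container choice and struct name are illustrative).

#include <android/binder_ibinder.h>
#include <map>

struct AIBinderLess {
    bool operator()(const AIBinder* lhs, const AIBinder* rhs) const {
        return AIBinder_lt(lhs, rhs);   // orders by the underlying binder
    }
};

// Entries whose AIBinder* wrap the same underlying binder compare equivalent.
std::map<const AIBinder*, int, AIBinderLess> perBinderState;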
diff --git a/libs/binder/ndk/libbinder_ndk.map.txt b/libs/binder/ndk/libbinder_ndk.map.txt
index e233ffd..9a93bf3 100644
--- a/libs/binder/ndk/libbinder_ndk.map.txt
+++ b/libs/binder/ndk/libbinder_ndk.map.txt
@@ -122,6 +122,9 @@
AServiceManager_waitForService; # apex llndk
AIBinder_Class_getDescriptor;
+ AIBinder_Weak_clone;
+ AIBinder_Weak_lt;
+ AIBinder_lt;
AParcel_appendFrom;
AParcel_create;
AParcel_getDataSize;
diff --git a/libs/binder/tests/Android.bp b/libs/binder/tests/Android.bp
index 87f1d45..988f7f3 100644
--- a/libs/binder/tests/Android.bp
+++ b/libs/binder/tests/Android.bp
@@ -157,6 +157,24 @@
require_root: true,
}
+cc_test {
+ name: "binderClearBufTest",
+ defaults: ["binder_test_defaults"],
+ srcs: [
+ "binderClearBufTest.cpp",
+ ],
+
+ shared_libs: [
+ "libbase",
+ "libbinder",
+ "liblog",
+ "libutils",
+ ],
+
+ test_suites: ["general-tests"],
+ require_root: true,
+}
+
aidl_interface {
name: "binderStabilityTestIface",
unstable: true,
diff --git a/libs/binder/tests/binderClearBufTest.cpp b/libs/binder/tests/binderClearBufTest.cpp
new file mode 100644
index 0000000..a565e72
--- /dev/null
+++ b/libs/binder/tests/binderClearBufTest.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+#include <binder/Binder.h>
+#include <binder/IBinder.h>
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <binder/Parcel.h>
+#include <binder/Stability.h>
+#include <gtest/gtest.h>
+
+#include <sys/prctl.h>
+#include <thread>
+
+using namespace android;
+
+const String16 kServerName = String16("binderClearBuf");
+
+std::string hexString(const void* bytes, size_t len) {
+ if (bytes == nullptr) return "<null>";
+
+ const uint8_t* bytes8 = static_cast<const uint8_t*>(bytes);
+ char chars[] = "0123456789abcdef";
+ std::string result;
+ result.resize(len * 2);
+
+ for (size_t i = 0; i < len; i++) {
+ result[2 * i] = chars[bytes8[i] >> 4];
+ result[2 * i + 1] = chars[bytes8[i] & 0xf];
+ }
+
+ return result;
+}
+
+class FooBar : public BBinder {
+ public:
+ enum {
+ TRANSACTION_REPEAT_STRING = IBinder::FIRST_CALL_TRANSACTION,
+ };
+
+ std::mutex foo;
+ std::string last;
+
+ status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
+ // not checking the data parcel, since at the time this test was written
+ // there is no hook to check that values there are set to zero. Instead,
+ // we only check the reply parcel.
+
+ switch (code) {
+ case TRANSACTION_REPEAT_STRING: {
+ const char* str = data.readCString();
+ return reply->writeCString(str == nullptr ? "<null>" : str);
+ }
+ }
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+ static std::string RepeatString(const sp<IBinder> binder,
+ const std::string& repeat,
+ std::string* outBuffer) {
+ Parcel data;
+ data.writeCString(repeat.c_str());
+ std::string result;
+ const uint8_t* lastReply;
+ size_t lastReplySize;
+ {
+ Parcel reply;
+ binder->transact(TRANSACTION_REPEAT_STRING, data, &reply, FLAG_CLEAR_BUF);
+ result = reply.readCString();
+ lastReply = reply.data();
+ lastReplySize = reply.dataSize();
+ }
+ IPCThreadState::self()->flushCommands();
+ *outBuffer = hexString(lastReply, lastReplySize);
+ return result;
+ }
+};
+
+TEST(BinderClearBuf, ClearKernelBuffer) {
+ sp<IBinder> binder = defaultServiceManager()->getService(kServerName);
+ ASSERT_NE(nullptr, binder);
+
+ std::string replyBuffer;
+ std::string result = FooBar::RepeatString(binder, "foo", &replyBuffer);
+ EXPECT_EQ("foo", result);
+
+ // the buffer must be at least long enough for the string, but we only
+ // check that it has some length, to avoid assuming anything about the
+ // format
+ EXPECT_GT(replyBuffer.size(), 0);
+
+ for (size_t i = 0; i < replyBuffer.size(); i++) {
+ EXPECT_EQ(replyBuffer[i], '0') << "reply buffer at " << i;
+ }
+}
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+
+ if (fork() == 0) {
+ prctl(PR_SET_PDEATHSIG, SIGHUP);
+
+ sp<IBinder> server = new FooBar;
+ android::defaultServiceManager()->addService(kServerName, server);
+
+ IPCThreadState::self()->joinThreadPool(true);
+ exit(1); // should not reach
+ }
+
+ // This is not racy. We are just giving these services some time to register before we
+ // call getService, which sleeps for much longer. One alternative would be to
+ // start a threadpool + use waitForService, but we want to have as few
+ // binder things going on in this test as possible, since we are checking
+ // that memory is zeroed, which the kernel has a right to change.
+ usleep(100000);
+
+ return RUN_ALL_TESTS();
+}
diff --git a/libs/binder/tests/fuzzers/Android.bp b/libs/binder/tests/fuzzers/Android.bp
index c465bed..5531296 100644
--- a/libs/binder/tests/fuzzers/Android.bp
+++ b/libs/binder/tests/fuzzers/Android.bp
@@ -69,3 +69,18 @@
defaults: ["binder_fuzz_defaults"],
srcs: ["TextOutputFuzz.cpp"],
}
+
+cc_fuzz {
+ name: "binder_bufferedTextOutputFuzz",
+ include_dirs: [
+ "frameworks/native/libs/binder",
+ ],
+ defaults: ["binder_fuzz_defaults"],
+ srcs: ["BufferedTextOutputFuzz.cpp"],
+}
+
+cc_fuzz {
+ name: "binder_memoryDealerFuzz",
+ defaults: ["binder_fuzz_defaults"],
+ srcs: ["MemoryDealerFuzz.cpp"],
+}
diff --git a/libs/binder/tests/fuzzers/BufferedTextOutputFuzz.cpp b/libs/binder/tests/fuzzers/BufferedTextOutputFuzz.cpp
new file mode 100644
index 0000000..09cb216
--- /dev/null
+++ b/libs/binder/tests/fuzzers/BufferedTextOutputFuzz.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <commonFuzzHelpers.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <string>
+#include <vector>
+#include "BufferedTextOutput.h"
+
+namespace android {
+
+class FuzzBufferedTextOutput : public BufferedTextOutput {
+public:
+ FuzzBufferedTextOutput(uint32_t flags) : BufferedTextOutput(flags) {}
+ virtual status_t writeLines(const struct iovec& buf, size_t) {
+ size_t len = buf.iov_len;
+ void* tmp_buf = malloc(len);
+
+ if (tmp_buf == NULL) {
+ return status_t();
+ }
+
+ // This will attempt to read data from iov_base to ensure valid params were passed.
+ memcpy(tmp_buf, buf.iov_base, len);
+ free(tmp_buf);
+ return status_t();
+ }
+};
+
+// Fuzzer entry point.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ FuzzedDataProvider fdp(data, size);
+ uint32_t flags = fdp.ConsumeIntegral<uint32_t>();
+ size_t push_count = 0;
+ std::shared_ptr<BufferedTextOutput> bTextOutput(new FuzzBufferedTextOutput(flags));
+
+ while (fdp.remaining_bytes() > 0) {
+ fdp.PickValueInArray<std::function<void()>>({
+ [&]() -> void {
+ bTextOutput->pushBundle();
+ push_count++;
+ },
+ [&]() -> void {
+ std::string txt = fdp.ConsumeRandomLengthString(fdp.remaining_bytes());
+ size_t len = fdp.ConsumeIntegralInRange<size_t>(0, txt.length());
+ bTextOutput->print(txt.c_str(), len);
+ },
+ [&]() -> void {
+ if (push_count == 0) return;
+
+ bTextOutput->popBundle();
+ push_count--;
+ },
+ })();
+ }
+
+ return 0;
+}
+} // namespace android
diff --git a/libs/binder/tests/fuzzers/MemoryDealerFuzz.cpp b/libs/binder/tests/fuzzers/MemoryDealerFuzz.cpp
new file mode 100644
index 0000000..f9dda8c
--- /dev/null
+++ b/libs/binder/tests/fuzzers/MemoryDealerFuzz.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binder/MemoryDealer.h>
+#include <commonFuzzHelpers.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <string>
+#include <unordered_set>
+
+namespace android {
+
+static constexpr size_t kMaxBufferSize = 10000;
+static constexpr size_t kMaxDealerSize = 1024 * 512;
+static constexpr size_t kMaxAllocSize = 1024;
+
+// Fuzzer entry point.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ if (size > kMaxBufferSize) {
+ return 0;
+ }
+
+ FuzzedDataProvider fdp(data, size);
+ size_t dSize = fdp.ConsumeIntegralInRange<size_t>(0, kMaxDealerSize);
+ std::string name = fdp.ConsumeRandomLengthString(fdp.remaining_bytes());
+ uint32_t flags = fdp.ConsumeIntegral<uint32_t>();
+ sp<MemoryDealer> dealer = new MemoryDealer(dSize, name.c_str(), flags);
+
+ // This is used to track offsets that have been freed already to avoid an expected fatal log.
+ std::unordered_set<size_t> free_list;
+
+ while (fdp.remaining_bytes() > 0) {
+ fdp.PickValueInArray<std::function<void()>>({
+ [&]() -> void { dealer->getAllocationAlignment(); },
+ [&]() -> void { dealer->getMemoryHeap(); },
+ [&]() -> void {
+ size_t offset = fdp.ConsumeIntegral<size_t>();
+
+ // Offset has already been freed, so return instead.
+ if (free_list.find(offset) != free_list.end()) return;
+
+ dealer->deallocate(offset);
+ free_list.insert(offset);
+ },
+ [&]() -> void {
+ std::string randString = fdp.ConsumeRandomLengthString(fdp.remaining_bytes());
+ dealer->dump(randString.c_str());
+ },
+ [&]() -> void {
+ size_t allocSize = fdp.ConsumeIntegralInRange<size_t>(0, kMaxAllocSize);
+ sp<IMemory> allocated = dealer->allocate(allocSize);
+ // If the allocation was successful, try to write to it
+ if (allocated != nullptr && allocated->unsecurePointer() != nullptr) {
+ memset(allocated->unsecurePointer(), 1, allocated->size());
+
+ // Clear the address from freelist since it has been allocated over again.
+ free_list.erase(allocated->offset());
+ }
+ },
+ })();
+ }
+
+ return 0;
+}
+} // namespace android
diff --git a/libs/cputimeinstate/cputimeinstate.cpp b/libs/cputimeinstate/cputimeinstate.cpp
index 4209dc5..2e72cc4 100644
--- a/libs/cputimeinstate/cputimeinstate.cpp
+++ b/libs/cputimeinstate/cputimeinstate.cpp
@@ -56,6 +56,7 @@
static std::vector<std::vector<uint32_t>> gPolicyFreqs;
static std::vector<std::vector<uint32_t>> gPolicyCpus;
static std::set<uint32_t> gAllFreqs;
+static unique_fd gTisTotalMapFd;
static unique_fd gTisMapFd;
static unique_fd gConcurrentMapFd;
static unique_fd gUidLastUpdateMapFd;
@@ -129,6 +130,10 @@
gPolicyCpus.emplace_back(*cpus);
}
+ gTisTotalMapFd =
+ unique_fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_total_time_in_state_map")};
+ if (gTisTotalMapFd < 0) return false;
+
gTisMapFd = unique_fd{bpf_obj_get(BPF_FS_PATH "map_time_in_state_uid_time_in_state_map")};
if (gTisMapFd < 0) return false;
@@ -239,6 +244,31 @@
return gPolicyFreqs;
}
+std::optional<std::vector<std::vector<uint64_t>>> getTotalCpuFreqTimes() {
+ if (!gInitialized && !initGlobals()) return {};
+
+ std::vector<std::vector<uint64_t>> out;
+ uint32_t maxFreqCount = 0;
+ for (const auto &freqList : gPolicyFreqs) {
+ if (freqList.size() > maxFreqCount) maxFreqCount = freqList.size();
+ out.emplace_back(freqList.size(), 0);
+ }
+
+ std::vector<uint64_t> vals(gNCpus);
+ const uint32_t freqCount = maxFreqCount <= MAX_FREQS_FOR_TOTAL ? maxFreqCount :
+ MAX_FREQS_FOR_TOTAL;
+ for (uint32_t freqIdx = 0; freqIdx < freqCount; ++freqIdx) {
+ if (findMapEntry(gTisTotalMapFd, &freqIdx, vals.data())) return {};
+ for (uint32_t policyIdx = 0; policyIdx < gNPolicies; ++policyIdx) {
+ if (freqIdx >= gPolicyFreqs[policyIdx].size()) continue;
+ for (const auto &cpu : gPolicyCpus[policyIdx]) {
+ out[policyIdx][freqIdx] += vals[cpu];
+ }
+ }
+ }
+
+ return out;
+}
// Retrieve the times in ns that uid spent running at each CPU frequency.
// Return contains no value on error, otherwise it contains a vector of vectors using the format:
// [[t0_0, t0_1, ...],
diff --git a/libs/cputimeinstate/cputimeinstate.h b/libs/cputimeinstate/cputimeinstate.h
index 87a328a..46de669 100644
--- a/libs/cputimeinstate/cputimeinstate.h
+++ b/libs/cputimeinstate/cputimeinstate.h
@@ -23,6 +23,7 @@
namespace bpf {
bool startTrackingUidTimes();
+std::optional<std::vector<std::vector<uint64_t>>> getTotalCpuFreqTimes();
std::optional<std::vector<std::vector<uint64_t>>> getUidCpuFreqTimes(uint32_t uid);
std::optional<std::unordered_map<uint32_t, std::vector<std::vector<uint64_t>>>>
getUidsCpuFreqTimes();
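A minimal sketch of consuming the new API, assuming the android::bpf namespace and the library's exported header shown above (the wrapper function name is illustrative). Times are in nanoseconds, as with the per-UID APIs.

#include <cputimeinstate.h>
#include <cstdint>

// Sums the total CPU time spent at every frequency of every cpufreq policy,
// or returns 0 if the eBPF maps are unavailable.
static uint64_t TotalCpuTimeNs() {
    uint64_t totalNs = 0;
    if (auto times = android::bpf::getTotalCpuFreqTimes()) {
        for (const auto& policyTimes : *times) {
            for (uint64_t t : policyTimes) totalNs += t;
        }
    }
    return totalNs;
}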
diff --git a/libs/cputimeinstate/testtimeinstate.cpp b/libs/cputimeinstate/testtimeinstate.cpp
index 519689b..d25b2e9 100644
--- a/libs/cputimeinstate/testtimeinstate.cpp
+++ b/libs/cputimeinstate/testtimeinstate.cpp
@@ -40,6 +40,12 @@
using std::vector;
+TEST(TimeInStateTest, TotalTimeInState) {
+ auto times = getTotalCpuFreqTimes();
+ ASSERT_TRUE(times.has_value());
+ EXPECT_FALSE(times->empty());
+}
+
TEST(TimeInStateTest, SingleUidTimeInState) {
auto times = getUidCpuFreqTimes(0);
ASSERT_TRUE(times.has_value());
@@ -186,6 +192,31 @@
}
}
+TEST(TimeInStateTest, TotalAndAllUidTimeInStateConsistent) {
+ auto allUid = getUidsCpuFreqTimes();
+ auto total = getTotalCpuFreqTimes();
+
+ ASSERT_TRUE(allUid.has_value() && total.has_value());
+
+ // Check the number of policies.
+ ASSERT_EQ(allUid->at(0).size(), total->size());
+
+ for (uint32_t policyIdx = 0; policyIdx < total->size(); ++policyIdx) {
+ std::vector<uint64_t> totalTimes = total->at(policyIdx);
+ uint32_t totalFreqsCount = totalTimes.size();
+ std::vector<uint64_t> allUidTimes(totalFreqsCount, 0);
+ for (auto const &[uid, uidTimes]: *allUid) {
+ for (uint32_t freqIdx = 0; freqIdx < uidTimes[policyIdx].size(); ++freqIdx) {
+ allUidTimes[std::min(freqIdx, totalFreqsCount - 1)] += uidTimes[policyIdx][freqIdx];
+ }
+ }
+
+ for (uint32_t freqIdx = 0; freqIdx < totalFreqsCount; ++freqIdx) {
+ ASSERT_LE(allUidTimes[freqIdx], totalTimes[freqIdx]);
+ }
+ }
+}
+
TEST(TimeInStateTest, SingleAndAllUidTimeInStateConsistent) {
uint64_t zero = 0;
auto maps = {getUidsCpuFreqTimes(), getUidsUpdatedCpuFreqTimes(&zero)};
@@ -292,6 +323,22 @@
ASSERT_LE(after - before, NSEC_PER_SEC * 2 * get_nprocs_conf());
}
+TEST(TimeInStateTest, TotalTimeInStateMonotonic) {
+ auto before = getTotalCpuFreqTimes();
+ ASSERT_TRUE(before.has_value());
+ sleep(1);
+ auto after = getTotalCpuFreqTimes();
+ ASSERT_TRUE(after.has_value());
+
+ for (uint32_t policyIdx = 0; policyIdx < after->size(); ++policyIdx) {
+ auto timesBefore = before->at(policyIdx);
+ auto timesAfter = after->at(policyIdx);
+ for (uint32_t freqIdx = 0; freqIdx < timesAfter.size(); ++freqIdx) {
+ ASSERT_NO_FATAL_FAILURE(TestCheckDelta(timesBefore[freqIdx], timesAfter[freqIdx]));
+ }
+ }
+}
+
TEST(TimeInStateTest, AllUidTimeInStateMonotonic) {
auto map1 = getUidsCpuFreqTimes();
ASSERT_TRUE(map1.has_value());
diff --git a/services/gpuservice/tests/unittests/GpuMemTest.cpp b/services/gpuservice/tests/unittests/GpuMemTest.cpp
index c5f8859..e916221 100644
--- a/services/gpuservice/tests/unittests/GpuMemTest.cpp
+++ b/services/gpuservice/tests/unittests/GpuMemTest.cpp
@@ -59,7 +59,6 @@
}
void SetUp() override {
- SKIP_IF_BPF_NOT_SUPPORTED;
bpf::setrlimitForTest();
mGpuMem = std::make_unique<GpuMem>();
@@ -87,8 +86,6 @@
};
TEST_F(GpuMemTest, validGpuMemTotalBpfPaths) {
- SKIP_IF_BPF_NOT_SUPPORTED;
-
EXPECT_EQ(mTestableGpuMem.getGpuMemTraceGroup(), "gpu_mem");
EXPECT_EQ(mTestableGpuMem.getGpuMemTotalTracepoint(), "gpu_mem_total");
EXPECT_EQ(mTestableGpuMem.getGpuMemTotalProgPath(),
@@ -97,20 +94,16 @@
}
TEST_F(GpuMemTest, bpfInitializationFailed) {
- SKIP_IF_BPF_NOT_SUPPORTED;
-
EXPECT_EQ(dumpsys(), "Failed to initialize GPU memory eBPF\n");
}
TEST_F(GpuMemTest, gpuMemTotalMapEmpty) {
- SKIP_IF_BPF_NOT_SUPPORTED;
mTestableGpuMem.setGpuMemTotalMap(mTestMap);
EXPECT_EQ(dumpsys(), "GPU memory total usage map is empty\n");
}
TEST_F(GpuMemTest, globalMemTotal) {
- SKIP_IF_BPF_NOT_SUPPORTED;
ASSERT_RESULT_OK(mTestMap.writeValue(TEST_GLOBAL_KEY, TEST_GLOBAL_VAL, BPF_ANY));
mTestableGpuMem.setGpuMemTotalMap(mTestMap);
@@ -118,7 +111,6 @@
}
TEST_F(GpuMemTest, missingGlobalMemTotal) {
- SKIP_IF_BPF_NOT_SUPPORTED;
ASSERT_RESULT_OK(mTestMap.writeValue(TEST_PROC_KEY_1, TEST_PROC_VAL_1, BPF_ANY));
mTestableGpuMem.setGpuMemTotalMap(mTestMap);
@@ -126,7 +118,6 @@
}
TEST_F(GpuMemTest, procMemTotal) {
- SKIP_IF_BPF_NOT_SUPPORTED;
ASSERT_RESULT_OK(mTestMap.writeValue(TEST_PROC_KEY_1, TEST_PROC_VAL_1, BPF_ANY));
ASSERT_RESULT_OK(mTestMap.writeValue(TEST_PROC_KEY_2, TEST_PROC_VAL_2, BPF_ANY));
mTestableGpuMem.setGpuMemTotalMap(mTestMap);
@@ -146,7 +137,6 @@
}
TEST_F(GpuMemTest, traverseGpuMemTotals) {
- SKIP_IF_BPF_NOT_SUPPORTED;
ASSERT_RESULT_OK(mTestMap.writeValue(TEST_GLOBAL_KEY, TEST_GLOBAL_VAL, BPF_ANY));
ASSERT_RESULT_OK(mTestMap.writeValue(TEST_PROC_KEY_1, TEST_PROC_VAL_1, BPF_ANY));
ASSERT_RESULT_OK(mTestMap.writeValue(TEST_PROC_KEY_2, TEST_PROC_VAL_2, BPF_ANY));
diff --git a/services/inputflinger/reader/mapper/TouchInputMapper.cpp b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
index 0b541fb..cbdb1d0 100644
--- a/services/inputflinger/reader/mapper/TouchInputMapper.cpp
+++ b/services/inputflinger/reader/mapper/TouchInputMapper.cpp
@@ -3673,6 +3673,9 @@
const float xScaled = float(x - mRawPointerAxes.x.minValue) * mXScale;
const float yScaled = float(y - mRawPointerAxes.y.minValue) * mYScale;
+ const float xScaledMax = float(mRawPointerAxes.x.maxValue - x) * mXScale;
+ const float yScaledMax = float(mRawPointerAxes.y.maxValue - y) * mYScale;
+
// Rotate to surface coordinate.
// 0 - no swap and reverse.
// 90 - swap x/y and reverse y.
@@ -3684,16 +3687,16 @@
y = yScaled + mYTranslate;
break;
case DISPLAY_ORIENTATION_90:
- y = mSurfaceRight - xScaled;
+ y = xScaledMax - (mRawSurfaceWidth - mSurfaceRight);
x = yScaled + mYTranslate;
break;
case DISPLAY_ORIENTATION_180:
- x = mSurfaceRight - xScaled;
- y = mSurfaceBottom - yScaled;
+ x = xScaledMax - (mRawSurfaceWidth - mSurfaceRight);
+ y = yScaledMax - (mRawSurfaceHeight - mSurfaceBottom);
break;
case DISPLAY_ORIENTATION_270:
y = xScaled + mXTranslate;
- x = mSurfaceBottom - yScaled;
+ x = yScaledMax - (mRawSurfaceHeight - mSurfaceBottom);
break;
default:
assert(false);
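A numeric sketch of the corner case this rewrite fixes. The constants below are illustrative, not from the patch: an identity scale, a raw X axis of [0, 479], and a surface whose right edge coincides with its 480-pixel raw width. The rotated-corner test added further down expects the same DISPLAY_WIDTH - 1 result.

constexpr float kXScale = 1.0f;          // mXScale
constexpr float kRawXMax = 479.0f;       // mRawPointerAxes.x.maxValue
constexpr float kSurfaceRight = 480.0f;  // == mRawSurfaceWidth here
constexpr float kRawX = 0.0f;            // touch at the minimum raw X

// Old ORIENTATION_90 mapping: y = mSurfaceRight - xScaled
constexpr float yOld = kSurfaceRight - kRawX * kXScale;                          // 480: one past the last row
// New ORIENTATION_90 mapping: y = xScaledMax - (mRawSurfaceWidth - mSurfaceRight)
constexpr float yNew = (kRawXMax - kRawX) * kXScale - (480.0f - kSurfaceRight);  // 479: last valid row
static_assert(yNew == 479.0f && yOld == 480.0f, "new mapping stays on the surface");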
diff --git a/services/inputflinger/tests/InputReader_test.cpp b/services/inputflinger/tests/InputReader_test.cpp
index 58f83b5..e9bb169 100644
--- a/services/inputflinger/tests/InputReader_test.cpp
+++ b/services/inputflinger/tests/InputReader_test.cpp
@@ -7449,8 +7449,8 @@
configureDevice(InputReaderConfiguration::CHANGE_DISPLAY_INFO);
}
- void processPositionAndVerify(MultiTouchInputMapper& mapper, int32_t xInside, int32_t yInside,
- int32_t xOutside, int32_t yOutside, int32_t xExpected,
+ void processPositionAndVerify(MultiTouchInputMapper& mapper, int32_t xOutside, int32_t yOutside,
+ int32_t xInside, int32_t yInside, int32_t xExpected,
int32_t yExpected) {
// touch on outside area should not work.
processPosition(mapper, toRawX(xOutside), toRawY(yOutside));
@@ -7532,6 +7532,34 @@
processPositionAndVerify(mapper, x - 1, y, x + 1, y, xExpected, yExpected);
}
+TEST_F(MultiTouchInputMapperTest_SurfaceRange, Viewports_SurfaceRange_Corner) {
+ addConfigurationProperty("touch.deviceType", "touchScreen");
+ prepareDisplay(DISPLAY_ORIENTATION_0);
+ prepareAxes(POSITION);
+ MultiTouchInputMapper& mapper = addMapperAndConfigure<MultiTouchInputMapper>();
+
+ const int32_t x = 0;
+ const int32_t y = 0;
+
+ const int32_t xExpected = x;
+ const int32_t yExpected = y;
+ processPositionAndVerify(mapper, x - 1, y, x, y, xExpected, yExpected);
+
+ clearViewports();
+ prepareDisplay(DISPLAY_ORIENTATION_90);
+ // expect x/y = swap x/y then reverse y.
+ const int32_t xExpected90 = y;
+ const int32_t yExpected90 = DISPLAY_WIDTH - 1;
+ processPositionAndVerify(mapper, x - 1, y, x, y, xExpected90, yExpected90);
+
+ clearViewports();
+ prepareDisplay(DISPLAY_ORIENTATION_270);
+ // expect x/y = swap x/y then reverse x.
+ const int32_t xExpected270 = DISPLAY_HEIGHT - 1;
+ const int32_t yExpected270 = x;
+ processPositionAndVerify(mapper, x - 1, y, x, y, xExpected270, yExpected270);
+}
+
TEST_F(MultiTouchInputMapperTest, Process_TouchpadCapture) {
// we need a pointer controller for mouse mode of touchpad (start pointer at 0,0)
std::shared_ptr<FakePointerController> fakePointerController =
diff --git a/services/surfaceflinger/CompositionEngine/src/Output.cpp b/services/surfaceflinger/CompositionEngine/src/Output.cpp
index 34dc536..d3247e1 100644
--- a/services/surfaceflinger/CompositionEngine/src/Output.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/Output.cpp
@@ -835,6 +835,8 @@
needsProtected == renderEngine.isProtected()) {
mRenderSurface->setProtected(needsProtected);
}
+ } else if (!outputState.isSecure && renderEngine.isProtected()) {
+ renderEngine.useProtectedContext(false);
}
base::unique_fd fd;
diff --git a/services/surfaceflinger/CompositionEngine/tests/DisplayTest.cpp b/services/surfaceflinger/CompositionEngine/tests/DisplayTest.cpp
index 09f37fb..29fd4b9 100644
--- a/services/surfaceflinger/CompositionEngine/tests/DisplayTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/DisplayTest.cpp
@@ -159,6 +159,7 @@
EXPECT_CALL(mCompositionEngine, getHwComposer()).WillRepeatedly(ReturnRef(mHwComposer));
EXPECT_CALL(mCompositionEngine, getRenderEngine()).WillRepeatedly(ReturnRef(mRenderEngine));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
}
DisplayCreationArgs getDisplayCreationArgsForPhysicalHWCDisplay() {
diff --git a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
index 7a06400..8f76afa 100644
--- a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
@@ -2871,6 +2871,7 @@
mOutput.mState.usesClientComposition = false;
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, setExpensiveRenderingExpected(false));
@@ -2883,6 +2884,7 @@
mOutput.mState.flipClientTarget = true;
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(*mRenderSurface, dequeueBuffer(_)).WillOnce(Return(mOutputBuffer));
EXPECT_CALL(mOutput, setExpensiveRenderingExpected(false));
@@ -2892,6 +2894,7 @@
TEST_F(OutputComposeSurfacesTest, doesMinimalWorkIfDequeueBufferFailsForClientComposition) {
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(*mRenderSurface, dequeueBuffer(_)).WillOnce(Return(nullptr));
@@ -2904,6 +2907,7 @@
mOutput.mState.flipClientTarget = true;
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(*mRenderSurface, dequeueBuffer(_)).WillOnce(Return(nullptr));
@@ -2914,6 +2918,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -2936,6 +2941,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{r1}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -2963,6 +2969,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{r1, r2}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -2991,6 +2998,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{r1, r2}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -3019,6 +3027,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{r1, r2}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -3050,6 +3059,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillOnce(Return(std::vector<LayerFE::LayerSettings>{r1, r2}))
.WillOnce(Return(std::vector<LayerFE::LayerSettings>{r1, r3}));
@@ -3072,6 +3082,7 @@
struct OutputComposeSurfacesTest_UsesExpectedDisplaySettings : public OutputComposeSurfacesTest {
OutputComposeSurfacesTest_UsesExpectedDisplaySettings() {
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -3219,6 +3230,8 @@
mOutput.mState.isSecure = false;
mLayer2.mLayerFEState.hasProtectedContent = true;
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(true));
+ EXPECT_CALL(mRenderEngine, isProtected).WillOnce(Return(true));
+ EXPECT_CALL(mRenderEngine, useProtectedContext(false)).WillOnce(Return(true));
mOutput.composeSurfaces(kDebugRegion, kDefaultRefreshArgs);
}
@@ -3311,6 +3324,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
.WillRepeatedly(Return());
EXPECT_CALL(*mRenderSurface, dequeueBuffer(_)).WillRepeatedly(Return(mOutputBuffer));
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index c4c3626..261722d 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -4368,12 +4368,9 @@
void SurfaceFlinger::dumpFrameEventsLocked(std::string& result) {
result.append("Layer frame timestamps:\n");
-
- const LayerVector& currentLayers = mCurrentState.layersSortedByZ;
- const size_t count = currentLayers.size();
- for (size_t i=0 ; i<count ; i++) {
- currentLayers[i]->dumpFrameEvents(result);
- }
+ // Traverse all layers to dump the frame events of each layer
+ mCurrentState.traverseInZOrder(
+ [&] (Layer* layer) { layer->dumpFrameEvents(result); });
}
void SurfaceFlinger::dumpBufferingStats(std::string& result) const {