Merge "Check if the window is partially obscured for slippery enters" into rvc-dev am: b27497c4bf
Original change: https://googleplex-android-review.googlesource.com/c/platform/frameworks/native/+/16194632
Change-Id: I64d40d85f97fcc59f9061f73044dead69a58fdaa
diff --git a/cmds/installd/QuotaUtils.cpp b/cmds/installd/QuotaUtils.cpp
index e080291..6027139 100644
--- a/cmds/installd/QuotaUtils.cpp
+++ b/cmds/installd/QuotaUtils.cpp
@@ -35,7 +35,7 @@
/* Map of all quota mounts from target to source */
std::unordered_map<std::string, std::string> mQuotaReverseMounts;
-std::string& FindQuotaDeviceForUuid(const std::string& uuid) {
+std::string FindQuotaDeviceForUuid(const std::string& uuid) {
std::lock_guard<std::recursive_mutex> lock(mMountsLock);
auto path = create_data_path(uuid.empty() ? nullptr : uuid.c_str());
return mQuotaReverseMounts[path];
diff --git a/cmds/servicemanager/ServiceManager.cpp b/cmds/servicemanager/ServiceManager.cpp
index 1f9892a..e80c321 100644
--- a/cmds/servicemanager/ServiceManager.cpp
+++ b/cmds/servicemanager/ServiceManager.cpp
@@ -213,17 +213,18 @@
return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
}
- auto entry = mNameToService.emplace(name, Service {
+ // Overwrite the old service if it exists
+ mNameToService[name] = Service {
.binder = binder,
.allowIsolated = allowIsolated,
.dumpPriority = dumpPriority,
.debugPid = ctx.debugPid,
- });
+ };
auto it = mNameToRegistrationCallback.find(name);
if (it != mNameToRegistrationCallback.end()) {
for (const sp<IServiceCallback>& cb : it->second) {
- entry.first->second.guaranteeClient = true;
+ mNameToService[name].guaranteeClient = true;
// permission checked in registerForNotifications
cb->onRegistration(name, binder);
}
diff --git a/cmds/servicemanager/test_sm.cpp b/cmds/servicemanager/test_sm.cpp
index 25245be..fb9f9df 100644
--- a/cmds/servicemanager/test_sm.cpp
+++ b/cmds/servicemanager/test_sm.cpp
@@ -135,6 +135,26 @@
IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT).isOk());
}
+TEST(AddService, OverwriteExistingService) {
+ auto sm = getPermissiveServiceManager();
+ sp<IBinder> serviceA = getBinder();
+ EXPECT_TRUE(sm->addService("foo", serviceA, false /*allowIsolated*/,
+ IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT).isOk());
+
+ sp<IBinder> outA;
+ EXPECT_TRUE(sm->getService("foo", &outA).isOk());
+ EXPECT_EQ(serviceA, outA);
+
+ // serviceA should be overwritten by serviceB
+ sp<IBinder> serviceB = getBinder();
+ EXPECT_TRUE(sm->addService("foo", serviceB, false /*allowIsolated*/,
+ IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT).isOk());
+
+ sp<IBinder> outB;
+ EXPECT_TRUE(sm->getService("foo", &outB).isOk());
+ EXPECT_EQ(serviceB, outB);
+}
+
TEST(AddService, NoPermissions) {
std::unique_ptr<MockAccess> access = std::make_unique<NiceMock<MockAccess>>();
diff --git a/data/etc/car_core_hardware.xml b/data/etc/car_core_hardware.xml
index 50f117d..6ffb947 100644
--- a/data/etc/car_core_hardware.xml
+++ b/data/etc/car_core_hardware.xml
@@ -38,7 +38,6 @@
<!-- basic system services -->
<feature name="android.software.connectionservice" />
<feature name="android.software.voice_recognizers" notLowRam="true" />
- <feature name="android.software.backup" />
<feature name="android.software.home_screen" />
<feature name="android.software.companion_device_setup" />
<feature name="android.software.autofill" />
diff --git a/libs/binder/Binder.cpp b/libs/binder/Binder.cpp
index e0fb543..6ca3b16 100644
--- a/libs/binder/Binder.cpp
+++ b/libs/binder/Binder.cpp
@@ -24,6 +24,7 @@
#include <binder/IShellCallback.h>
#include <binder/Parcel.h>
+#include <linux/sched.h>
#include <stdio.h>
namespace android {
@@ -133,6 +134,8 @@
// unlocked objects
bool mRequestingSid = false;
sp<IBinder> mExtension;
+ int mPolicy = SCHED_NORMAL;
+ int mPriority = 0;
// for below objects
Mutex mLock;
@@ -279,6 +282,47 @@
return e->mExtension;
}
+void BBinder::setMinSchedulerPolicy(int policy, int priority) {
+ switch (policy) {
+ case SCHED_NORMAL:
+ LOG_ALWAYS_FATAL_IF(priority < -20 || priority > 19, "Invalid priority for SCHED_NORMAL: %d", priority);
+ break;
+ case SCHED_RR:
+ case SCHED_FIFO:
+ LOG_ALWAYS_FATAL_IF(priority < 1 || priority > 99, "Invalid priority for sched %d: %d", policy, priority);
+ break;
+ default:
+ LOG_ALWAYS_FATAL("Unrecognized scheduling policy: %d", policy);
+ }
+
+ Extras* e = mExtras.load(std::memory_order_acquire);
+
+ if (e == nullptr) {
+ // Avoid allocations if called with default.
+ if (policy == SCHED_NORMAL && priority == 0) {
+ return;
+ }
+
+ e = getOrCreateExtras();
+ if (!e) return; // out of memory
+ }
+
+ e->mPolicy = policy;
+ e->mPriority = priority;
+}
+
+int BBinder::getMinSchedulerPolicy() {
+ Extras* e = mExtras.load(std::memory_order_acquire);
+ if (e == nullptr) return SCHED_NORMAL;
+ return e->mPolicy;
+}
+
+int BBinder::getMinSchedulerPriority() {
+ Extras* e = mExtras.load(std::memory_order_acquire);
+ if (e == nullptr) return 0;
+ return e->mPriority;
+}
+
pid_t BBinder::getDebugPid() {
return getpid();
}
diff --git a/libs/binder/IPCThreadState.cpp b/libs/binder/IPCThreadState.cpp
index d67ce15..847b73a 100644
--- a/libs/binder/IPCThreadState.cpp
+++ b/libs/binder/IPCThreadState.cpp
@@ -860,6 +860,10 @@
err = FAILED_TRANSACTION;
goto finish;
+ case BR_FROZEN_REPLY:
+ err = FAILED_TRANSACTION;
+ goto finish;
+
case BR_ACQUIRE_RESULT:
{
ALOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
@@ -1316,6 +1320,42 @@
}
}
+status_t IPCThreadState::getProcessFreezeInfo(pid_t pid, bool *sync_received, bool *async_received)
+{
+ int ret = 0;
+ binder_frozen_status_info info;
+ info.pid = pid;
+
+#if defined(__ANDROID__)
+ if (ioctl(self()->mProcess->mDriverFD, BINDER_GET_FROZEN_INFO, &info) < 0)
+ ret = -errno;
+#endif
+ *sync_received = info.sync_recv;
+ *async_received = info.async_recv;
+
+ return ret;
+}
+
+status_t IPCThreadState::freeze(pid_t pid, bool enable, uint32_t timeout_ms) {
+ struct binder_freeze_info info;
+ int ret = 0;
+
+ info.pid = pid;
+ info.enable = enable;
+ info.timeout_ms = timeout_ms;
+
+
+#if defined(__ANDROID__)
+ if (ioctl(self()->mProcess->mDriverFD, BINDER_FREEZE, &info) < 0)
+ ret = -errno;
+#endif
+
+ //
+ // ret==-EAGAIN indicates that transactions have not drained.
+ // Call again to poll for completion.
+ //
+ return ret;
+}
void IPCThreadState::freeBuffer(Parcel* parcel, const uint8_t* data,
size_t /*dataSize*/,
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index efcc042..f790b88 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -20,6 +20,7 @@
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
+#include <linux/sched.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
@@ -188,16 +189,18 @@
return OK;
}
+static constexpr inline int schedPolicyMask(int policy, int priority) {
+ return (priority & FLAT_BINDER_FLAG_PRIORITY_MASK) | ((policy & 3) << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT);
+}
+
status_t Parcel::flattenBinder(const sp<IBinder>& binder)
{
flat_binder_object obj;
+ obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
- if (IPCThreadState::self()->backgroundSchedulingDisabled()) {
- /* minimum priority for all nodes is nice 0 */
- obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
- } else {
- /* minimum priority for all nodes is MAX_NICE(19) */
- obj.flags = 0x13 | FLAT_BINDER_FLAG_ACCEPTS_FDS;
+ int schedBits = 0;
+ if (!IPCThreadState::self()->backgroundSchedulingDisabled()) {
+ schedBits = schedPolicyMask(SCHED_NORMAL, 19);
}
if (binder != nullptr) {
@@ -213,6 +216,13 @@
obj.handle = handle;
obj.cookie = 0;
} else {
+ int policy = local->getMinSchedulerPolicy();
+ int priority = local->getMinSchedulerPriority();
+
+ if (policy != 0 || priority != 0) {
+ // override value, since it is set explicitly
+ schedBits = schedPolicyMask(policy, priority);
+ }
if (local->isRequestingSid()) {
obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
}
@@ -226,6 +236,8 @@
obj.cookie = 0;
}
+ obj.flags |= schedBits;
+
return finishFlattenBinder(binder, obj);
}
@@ -2468,7 +2480,7 @@
releaseObjects();
- if (data) {
+ if (data || desired == 0) {
LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
gParcelGlobalAllocSize += desired;
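
Note on the new helper above: schedPolicyMask() packs the minimum priority into the low flag bits and the policy into the bits above FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT, so schedPolicyMask(SCHED_NORMAL, 19) reproduces the previously hard-coded 0x13. A minimal standalone sketch of that packing, assuming the Android UAPI values FLAT_BINDER_FLAG_PRIORITY_MASK == 0xff and FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT == 9 (not restated in this diff):

    // Sketch only; mirrors Parcel.cpp's schedPolicyMask() with assumed constant values.
    #include <cassert>
    #include <linux/sched.h> // SCHED_NORMAL (Linux UAPI)

    constexpr int kPriorityMask = 0xff; // assumed FLAT_BINDER_FLAG_PRIORITY_MASK
    constexpr int kPolicyShift = 9;     // assumed FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT

    constexpr int schedPolicyMaskSketch(int policy, int priority) {
        return (priority & kPriorityMask) | ((policy & 3) << kPolicyShift);
    }

    int main() {
        // SCHED_NORMAL == 0, so only the nice value (19 == 0x13) survives, matching
        // the old "0x13 | FLAT_BINDER_FLAG_ACCEPTS_FDS" path that this change replaces.
        assert(schedPolicyMaskSketch(SCHED_NORMAL, 19) == 0x13);
        return 0;
    }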
diff --git a/libs/binder/include/binder/AppOpsManager.h b/libs/binder/include/binder/AppOpsManager.h
index 6afcd77..d93935a 100644
--- a/libs/binder/include/binder/AppOpsManager.h
+++ b/libs/binder/include/binder/AppOpsManager.h
@@ -131,7 +131,11 @@
OP_DEPRECATED_1 = 96,
OP_AUTO_REVOKE_PERMISSIONS_IF_UNUSED = 97,
OP_AUTO_REVOKE_MANAGED_BY_INSTALLER = 98,
- _NUM_OP = 99
+ OP_NO_ISOLATED_STORAGE = 99,
+ OP_PHONE_CALL_MICROPHONE = 100,
+ OP_PHONE_CALL_CAMERA = 101,
+ OP_RECORD_AUDIO_HOTWORD = 102,
+ _NUM_OP = 103
};
AppOpsManager();
diff --git a/libs/binder/include/binder/Binder.h b/libs/binder/include/binder/Binder.h
index 74e52db..4cf0515 100644
--- a/libs/binder/include/binder/Binder.h
+++ b/libs/binder/include/binder/Binder.h
@@ -72,6 +72,25 @@
// This must be called before the object is sent to another process. Not thread safe.
void setExtension(const sp<IBinder>& extension);
+ // This must be called before the object is sent to another process. Not thread safe.
+ //
+ // This function will abort if improper parameters are set. This is like
+ // sched_setscheduler. However, it sets the minimum scheduling policy
+ // only for the duration that this specific binder object is handling the
+ // call in a threadpool. By default, this API is set to SCHED_NORMAL/0. In
+ // this case, the scheduling priority will not actually be modified from
+ // binder defaults. See also IPCThreadState::disableBackgroundScheduling.
+ //
+ // Appropriate values are:
+ // SCHED_NORMAL: -20 <= priority <= 19
+ // SCHED_RR/SCHED_FIFO: 1 <= priority <= 99
+ __attribute__((weak))
+ void setMinSchedulerPolicy(int policy, int priority);
+ __attribute__((weak))
+ int getMinSchedulerPolicy();
+ __attribute__((weak))
+ int getMinSchedulerPriority();
+
pid_t getDebugPid();
protected:
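
As a usage sketch for the setMinSchedulerPolicy() API documented above (hypothetical service name and priority values, not part of this change), a server would set the minimum policy on its BBinder before publishing it:

    // Hedged illustration: "my.service" and SCHED_FIFO/2 are made-up example values.
    #include <binder/Binder.h>
    #include <binder/IServiceManager.h>
    #include <linux/sched.h>   // SCHED_FIFO
    #include <utils/String16.h>

    using android::BBinder;
    using android::String16;
    using android::defaultServiceManager;
    using android::sp;

    void publishWithMinPolicy() {
        sp<BBinder> binder = new BBinder(); // stand-in for a real BBinder-derived service
        // Threads handling transactions on this node run at least SCHED_FIFO, priority 2.
        binder->setMinSchedulerPolicy(SCHED_FIFO, 2);
        defaultServiceManager()->addService(String16("my.service"), binder);
    }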
diff --git a/libs/binder/include/binder/IPCThreadState.h b/libs/binder/include/binder/IPCThreadState.h
index 4818889..8ac7165 100644
--- a/libs/binder/include/binder/IPCThreadState.h
+++ b/libs/binder/include/binder/IPCThreadState.h
@@ -34,7 +34,26 @@
public:
static IPCThreadState* self();
static IPCThreadState* selfOrNull(); // self(), but won't instantiate
-
+
+ // Freeze or unfreeze the binder interface to a specific process. When freezing, this method
+ // will block up to timeout_ms to process pending transactions directed to pid. Unfreeze
+ // is immediate. Transactions to processes frozen via this method won't be delivered and the
+ // driver will return BR_FROZEN_REPLY to the client sending them. After unfreeze,
+ // transactions will be delivered normally.
+ //
+ // pid: id for the process for which the binder interface is to be frozen
+ // enable: freeze (true) or unfreeze (false)
+ // timeout_ms: maximum time this function is allowed to block the caller waiting for pending
+ // binder transactions to be processed.
+ //
+ // returns: 0 in case of success, a value < 0 in case of error
+ __attribute__((weak))
+ static status_t freeze(pid_t pid, bool enable, uint32_t timeout_ms);
+
+ // Provide information about the state of a frozen process
+ __attribute__((weak))
+ static status_t getProcessFreezeInfo(pid_t pid, bool *sync_received,
+ bool *async_received);
sp<ProcessState> process();
status_t clearLastError();
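
A hedged caller-side sketch of the freeze()/getProcessFreezeInfo() flow described above (the target pid and the 100ms retry are illustrative; the binderLibTest Freeze test in this change exercises the same sequence):

    // Sketch: freeze a target process, retrying once if transactions have not drained.
    #include <binder/IPCThreadState.h>
    #include <errno.h>
    #include <sys/types.h>

    using android::IPCThreadState;
    using android::status_t;

    status_t freezeProcessSketch(pid_t targetPid) {
        // A zero timeout returns -EAGAIN while pending transactions are still draining.
        status_t err = IPCThreadState::self()->freeze(targetPid, true /*enable*/, 0 /*timeout_ms*/);
        if (err == -EAGAIN) {
            // Allow up to 100ms for the driver to drain pending transactions.
            err = IPCThreadState::self()->freeze(targetPid, true /*enable*/, 100 /*timeout_ms*/);
        }
        if (err == android::NO_ERROR) {
            bool syncReceived = false, asyncReceived = false;
            // Optional: query whether the process saw any calls since the last freeze.
            IPCThreadState::self()->getProcessFreezeInfo(targetPid, &syncReceived, &asyncReceived);
        }
        return err;
    }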
diff --git a/libs/binder/include/private/binder/binder_module.h b/libs/binder/include/private/binder/binder_module.h
index c22be9f..7be8f7b 100644
--- a/libs/binder/include/private/binder/binder_module.h
+++ b/libs/binder/include/private/binder/binder_module.h
@@ -36,6 +36,60 @@
#include <sys/ioctl.h>
#include <linux/android/binder.h>
+#ifndef BR_FROZEN_REPLY
+// Temporary definition of BR_FROZEN_REPLY. For production
+// this will come from UAPI binder.h
+#define BR_FROZEN_REPLY _IO('r', 18)
+#endif //BR_FROZEN_REPLY
+
+#ifndef BINDER_FREEZE
+/*
+ * Temporary definitions for freeze support. For the final version
+ * these will be defined in the UAPI binder.h file from upstream kernel.
+ */
+#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
+
+struct binder_freeze_info {
+ //
+ // Group-leader PID of process to be frozen
+ //
+ uint32_t pid;
+ //
+ // Enable(1) / Disable(0) freeze for given PID
+ //
+ uint32_t enable;
+ //
+ // Timeout to wait for transactions to drain.
+ // 0: don't wait (ioctl will return EAGAIN if not drained)
+ // N: number of ms to wait
+ uint32_t timeout_ms;
+};
+#endif //BINDER_FREEZE
+
+#ifndef BINDER_GET_FROZEN_INFO
+
+#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
+
+struct binder_frozen_status_info {
+ //
+ // Group-leader PID of process to be queried
+ //
+ __u32 pid;
+ //
+ // Indicates whether the process has received any sync calls since last
+ // freeze (cleared at freeze/unfreeze)
+ //
+ __u32 sync_recv;
+ //
+ // Indicates whether the process has received any async calls since last
+ // freeze (cleared at freeze/unfreeze)
+ //
+ __u32 async_recv;
+};
+#endif //BINDER_GET_FROZEN_INFO
+
+
+
#ifdef __cplusplus
} // namespace android
#endif
diff --git a/libs/binder/tests/binderLibTest.cpp b/libs/binder/tests/binderLibTest.cpp
index b37b688..9caad45 100644
--- a/libs/binder/tests/binderLibTest.cpp
+++ b/libs/binder/tests/binderLibTest.cpp
@@ -16,6 +16,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <fstream>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
@@ -50,6 +51,9 @@
static char *binderserversuffix;
static char binderserverarg[] = "--binderserver";
+static constexpr int kSchedPolicy = SCHED_RR;
+static constexpr int kSchedPriority = 7;
+
static String16 binderLibTestServiceName = String16("test.binderLib");
enum BinderLibTestTranscationCode {
@@ -75,6 +79,9 @@
BINDER_LIB_TEST_GET_PTR_SIZE_TRANSACTION,
BINDER_LIB_TEST_CREATE_BINDER_TRANSACTION,
BINDER_LIB_TEST_GET_WORK_SOURCE_TRANSACTION,
+ BINDER_LIB_TEST_GET_SCHEDULING_POLICY,
+ BINDER_LIB_TEST_NOP_TRANSACTION_WAIT,
+ BINDER_LIB_TEST_GETPID,
BINDER_LIB_TEST_ECHO_VECTOR,
BINDER_LIB_TEST_REJECT_OBJECTS,
};
@@ -395,6 +402,49 @@
EXPECT_EQ(NO_ERROR, ret);
}
+TEST_F(BinderLibTest, Freeze) {
+ status_t ret;
+ Parcel data, reply, replypid;
+ std::ifstream freezer_file("/sys/fs/cgroup/freezer/cgroup.freeze");
+
+ //Pass test on devices where the freezer is not supported
+ if (freezer_file.fail()) {
+ GTEST_SKIP();
+ return;
+ }
+
+ std::string freezer_enabled;
+ std::getline(freezer_file, freezer_enabled);
+
+ //Pass test on devices where the freezer is disabled
+ if (freezer_enabled != "1") {
+ GTEST_SKIP();
+ return;
+ }
+
+ ret = m_server->transact(BINDER_LIB_TEST_GETPID, data, &replypid);
+ int32_t pid = replypid.readInt32();
+ EXPECT_EQ(NO_ERROR, ret);
+ for (int i = 0; i < 10; i++) {
+ EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION_WAIT, data, &reply, TF_ONE_WAY));
+ }
+ EXPECT_EQ(-EAGAIN, IPCThreadState::self()->freeze(pid, 1, 0));
+ EXPECT_EQ(-EAGAIN, IPCThreadState::self()->freeze(pid, 1, 0));
+ EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, 1, 1000));
+ EXPECT_EQ(FAILED_TRANSACTION, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));
+
+ bool sync_received, async_received;
+
+ EXPECT_EQ(NO_ERROR, IPCThreadState::self()->getProcessFreezeInfo(pid, &sync_received,
+ &async_received));
+
+ EXPECT_EQ(sync_received, 1);
+ EXPECT_EQ(async_received, 0);
+
+ EXPECT_EQ(NO_ERROR, IPCThreadState::self()->freeze(pid, 0, 0));
+ EXPECT_EQ(NO_ERROR, m_server->transact(BINDER_LIB_TEST_NOP_TRANSACTION, data, &reply));
+}
+
TEST_F(BinderLibTest, SetError) {
int32_t testValue[] = { 0, -123, 123 };
for (size_t i = 0; i < ARRAY_SIZE(testValue); i++) {
@@ -1015,6 +1065,22 @@
EXPECT_EQ(NO_ERROR, ret2);
}
+TEST_F(BinderLibTest, SchedPolicySet) {
+ sp<IBinder> server = addServer();
+ ASSERT_TRUE(server != nullptr);
+
+ Parcel data, reply;
+ status_t ret = server->transact(BINDER_LIB_TEST_GET_SCHEDULING_POLICY, data, &reply);
+ EXPECT_EQ(NO_ERROR, ret);
+
+ int policy = reply.readInt32();
+ int priority = reply.readInt32();
+
+ EXPECT_EQ(kSchedPolicy, policy & (~SCHED_RESET_ON_FORK));
+ EXPECT_EQ(kSchedPriority, priority);
+}
+
+
TEST_F(BinderLibTest, VectorSent) {
Parcel data, reply;
sp<IBinder> server = addServer();
@@ -1197,6 +1263,12 @@
pthread_mutex_unlock(&m_serverWaitMutex);
return ret;
}
+ case BINDER_LIB_TEST_GETPID:
+ reply->writeInt32(getpid());
+ return NO_ERROR;
+ case BINDER_LIB_TEST_NOP_TRANSACTION_WAIT:
+ usleep(5000);
+ return NO_ERROR;
case BINDER_LIB_TEST_NOP_TRANSACTION:
return NO_ERROR;
case BINDER_LIB_TEST_DELAYED_CALL_BACK: {
@@ -1371,6 +1443,16 @@
reply->writeInt32(IPCThreadState::self()->getCallingWorkSourceUid());
return NO_ERROR;
}
+ case BINDER_LIB_TEST_GET_SCHEDULING_POLICY: {
+ int policy = 0;
+ sched_param param;
+ if (0 != pthread_getschedparam(pthread_self(), &policy, &param)) {
+ return UNKNOWN_ERROR;
+ }
+ reply->writeInt32(policy);
+ reply->writeInt32(param.sched_priority);
+ return NO_ERROR;
+ }
case BINDER_LIB_TEST_ECHO_VECTOR: {
std::vector<uint64_t> vector;
auto err = data.readUint64Vector(&vector);
@@ -1407,6 +1489,8 @@
{
sp<BinderLibTestService> testService = new BinderLibTestService(index);
+ testService->setMinSchedulerPolicy(kSchedPolicy, kSchedPriority);
+
/*
* Normally would also contain functionality as well, but we are only
* testing the extension mechanism.
diff --git a/libs/gui/BitTube.cpp b/libs/gui/BitTube.cpp
index ef7a6f5..351af65 100644
--- a/libs/gui/BitTube.cpp
+++ b/libs/gui/BitTube.cpp
@@ -86,6 +86,10 @@
mReceiveFd = std::move(receiveFd);
}
+void BitTube::setSendFd(base::unique_fd&& sendFd) {
+ mSendFd = std::move(sendFd);
+}
+
ssize_t BitTube::write(void const* vaddr, size_t size) {
ssize_t err, len;
do {
@@ -115,6 +119,11 @@
status_t result = reply->writeDupFileDescriptor(mReceiveFd);
mReceiveFd.reset();
+ if (result != NO_ERROR) {
+ return result;
+ }
+ result = reply->writeDupFileDescriptor(mSendFd);
+ mSendFd.reset();
return result;
}
@@ -126,6 +135,13 @@
ALOGE("BitTube::readFromParcel: can't dup file descriptor (%s)", strerror(error));
return -error;
}
+ mSendFd.reset(dup(parcel->readFileDescriptor()));
+ if (mSendFd < 0) {
+ mSendFd.reset();
+ int error = errno;
+ ALOGE("BitTube::readFromParcel: can't dup file descriptor (%s)", strerror(error));
+ return -error;
+ }
return NO_ERROR;
}
diff --git a/libs/gui/DisplayEventDispatcher.cpp b/libs/gui/DisplayEventDispatcher.cpp
index b33bc9e..682fe91 100644
--- a/libs/gui/DisplayEventDispatcher.cpp
+++ b/libs/gui/DisplayEventDispatcher.cpp
@@ -89,12 +89,8 @@
return OK;
}
-void DisplayEventDispatcher::requestLatestConfig() {
- status_t status = mReceiver.requestLatestConfig();
- if (status) {
- ALOGW("Failed enable config events, status=%d", status);
- return;
- }
+void DisplayEventDispatcher::injectEvent(const DisplayEventReceiver::Event& event) {
+ mReceiver.sendEvents(&event, 1);
}
int DisplayEventDispatcher::getFd() const {
@@ -157,6 +153,9 @@
dispatchConfigChanged(ev.header.timestamp, ev.header.displayId,
ev.config.configId, ev.config.vsyncPeriod);
break;
+ case DisplayEventReceiver::DISPLAY_EVENT_NULL:
+ dispatchNullEvent(ev.header.timestamp, ev.header.displayId);
+ break;
default:
ALOGW("dispatcher %p ~ ignoring unknown event type %#x", this, ev.header.type);
break;
@@ -168,4 +167,5 @@
}
return gotVsync;
}
+
} // namespace android
diff --git a/libs/gui/DisplayEventReceiver.cpp b/libs/gui/DisplayEventReceiver.cpp
index 1fed509..f2b0962 100644
--- a/libs/gui/DisplayEventReceiver.cpp
+++ b/libs/gui/DisplayEventReceiver.cpp
@@ -79,14 +79,6 @@
return NO_INIT;
}
-status_t DisplayEventReceiver::requestLatestConfig() {
- if (mEventConnection != nullptr) {
- mEventConnection->requestLatestConfig();
- return NO_ERROR;
- }
- return NO_INIT;
-}
-
ssize_t DisplayEventReceiver::getEvents(DisplayEventReceiver::Event* events,
size_t count) {
return DisplayEventReceiver::getEvents(mDataChannel.get(), events, count);
@@ -98,6 +90,10 @@
return gui::BitTube::recvObjects(dataChannel, events, count);
}
+ssize_t DisplayEventReceiver::sendEvents(Event const* events, size_t count) {
+ return DisplayEventReceiver::sendEvents(mDataChannel.get(), events, count);
+}
+
ssize_t DisplayEventReceiver::sendEvents(gui::BitTube* dataChannel,
Event const* events, size_t count)
{
diff --git a/libs/gui/IDisplayEventConnection.cpp b/libs/gui/IDisplayEventConnection.cpp
index aa74bfd..c0e246f 100644
--- a/libs/gui/IDisplayEventConnection.cpp
+++ b/libs/gui/IDisplayEventConnection.cpp
@@ -26,8 +26,7 @@
STEAL_RECEIVE_CHANNEL = IBinder::FIRST_CALL_TRANSACTION,
SET_VSYNC_RATE,
REQUEST_NEXT_VSYNC,
- REQUEST_LATEST_CONFIG,
- LAST = REQUEST_LATEST_CONFIG,
+ LAST = REQUEST_NEXT_VSYNC,
};
} // Anonymous namespace
@@ -54,11 +53,6 @@
callRemoteAsync<decltype(&IDisplayEventConnection::requestNextVsync)>(
Tag::REQUEST_NEXT_VSYNC);
}
-
- void requestLatestConfig() override {
- callRemoteAsync<decltype(&IDisplayEventConnection::requestLatestConfig)>(
- Tag::REQUEST_LATEST_CONFIG);
- }
};
// Out-of-line virtual method definition to trigger vtable emission in this translation unit (see
@@ -80,8 +74,6 @@
return callLocal(data, reply, &IDisplayEventConnection::setVsyncRate);
case Tag::REQUEST_NEXT_VSYNC:
return callLocalAsync(data, reply, &IDisplayEventConnection::requestNextVsync);
- case Tag::REQUEST_LATEST_CONFIG:
- return callLocalAsync(data, reply, &IDisplayEventConnection::requestLatestConfig);
}
}
diff --git a/libs/gui/Surface.cpp b/libs/gui/Surface.cpp
index cf269b3..e1a17db 100644
--- a/libs/gui/Surface.cpp
+++ b/libs/gui/Surface.cpp
@@ -732,6 +732,8 @@
mSharedBufferHasBeenQueued = false;
}
+ mDequeuedSlots.insert(buf);
+
return OK;
}
@@ -760,6 +762,8 @@
mSharedBufferHasBeenQueued = true;
}
+ mDequeuedSlots.erase(i);
+
return OK;
}
@@ -895,6 +899,8 @@
ALOGE("queueBuffer: error queuing buffer to SurfaceTexture, %d", err);
}
+ mDequeuedSlots.erase(i);
+
if (mEnableFrameTimestamps) {
mFrameEventHistory->applyDelta(output.frameTimestamps);
// Update timestamps with the local acquire fence.
@@ -1660,6 +1666,7 @@
mRemovedBuffers.push_back(mSlots[attachedSlot].buffer);
}
mSlots[attachedSlot].buffer = graphicBuffer;
+ mDequeuedSlots.insert(attachedSlot);
return NO_ERROR;
}
@@ -1926,6 +1933,10 @@
}
void Surface::freeAllBuffers() {
+ if (!mDequeuedSlots.empty()) {
+ ALOGE("%s: %zu buffers were freed while being dequeued!",
+ __FUNCTION__, mDequeuedSlots.size());
+ }
for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
mSlots[i].buffer = nullptr;
}
@@ -1947,6 +1958,10 @@
ALOGW("%s: Discarded slot %d doesn't contain buffer!", __FUNCTION__, i);
continue;
}
+ // Don't flush currently dequeued buffers
+ if (mDequeuedSlots.count(i) > 0) {
+ continue;
+ }
outBuffers->push_back(mSlots[i].buffer);
mSlots[i].buffer = nullptr;
}
diff --git a/libs/gui/include/gui/DisplayEventDispatcher.h b/libs/gui/include/gui/DisplayEventDispatcher.h
index f210c34..eb5b004 100644
--- a/libs/gui/include/gui/DisplayEventDispatcher.h
+++ b/libs/gui/include/gui/DisplayEventDispatcher.h
@@ -31,7 +31,7 @@
status_t initialize();
void dispose();
status_t scheduleVsync();
- void requestLatestConfig();
+ void injectEvent(const DisplayEventReceiver::Event& event);
int getFd() const;
virtual int handleEvent(int receiveFd, int events, void* data);
@@ -48,6 +48,9 @@
bool connected) = 0;
virtual void dispatchConfigChanged(nsecs_t timestamp, PhysicalDisplayId displayId,
int32_t configId, nsecs_t vsyncPeriod) = 0;
+ // AChoreographer-specific hook for processing null-events so that looper
+ // can be properly poked.
+ virtual void dispatchNullEvent(nsecs_t timestamp, PhysicalDisplayId displayId) = 0;
bool processPendingEvents(nsecs_t* outTimestamp, PhysicalDisplayId* outDisplayId,
uint32_t* outCount);
diff --git a/libs/gui/include/gui/DisplayEventReceiver.h b/libs/gui/include/gui/DisplayEventReceiver.h
index 8d49184..0e10d1a 100644
--- a/libs/gui/include/gui/DisplayEventReceiver.h
+++ b/libs/gui/include/gui/DisplayEventReceiver.h
@@ -53,6 +53,7 @@
DISPLAY_EVENT_VSYNC = fourcc('v', 's', 'y', 'n'),
DISPLAY_EVENT_HOTPLUG = fourcc('p', 'l', 'u', 'g'),
DISPLAY_EVENT_CONFIG_CHANGED = fourcc('c', 'o', 'n', 'f'),
+ DISPLAY_EVENT_NULL = fourcc('n', 'u', 'l', 'l'),
};
struct Event {
@@ -130,6 +131,7 @@
* sendEvents write events to the queue and returns how many events were
* written.
*/
+ ssize_t sendEvents(Event const* events, size_t count);
static ssize_t sendEvents(gui::BitTube* dataChannel, Event const* events, size_t count);
/*
@@ -146,12 +148,6 @@
*/
status_t requestNextVsync();
- /*
- * requestLatestConfig() force-requests the current config for the primary
- * display.
- */
- status_t requestLatestConfig();
-
private:
sp<IDisplayEventConnection> mEventConnection;
std::unique_ptr<gui::BitTube> mDataChannel;
diff --git a/libs/gui/include/gui/IDisplayEventConnection.h b/libs/gui/include/gui/IDisplayEventConnection.h
index 674aafd..cff22a3 100644
--- a/libs/gui/include/gui/IDisplayEventConnection.h
+++ b/libs/gui/include/gui/IDisplayEventConnection.h
@@ -51,11 +51,6 @@
* requestNextVsync() schedules the next vsync event. It has no effect if the vsync rate is > 0.
*/
virtual void requestNextVsync() = 0; // Asynchronous
-
- /*
- * requestLatestConfig() requests the config for the primary display.
- */
- virtual void requestLatestConfig() = 0; // Asynchronous
};
class BnDisplayEventConnection : public SafeBnInterface<IDisplayEventConnection> {
diff --git a/libs/gui/include/gui/Surface.h b/libs/gui/include/gui/Surface.h
index 49c83da..55b4101 100644
--- a/libs/gui/include/gui/Surface.h
+++ b/libs/gui/include/gui/Surface.h
@@ -30,6 +30,7 @@
#include <utils/RefBase.h>
#include <shared_mutex>
+#include <unordered_set>
namespace android {
@@ -543,8 +544,15 @@
int mMaxBufferCount;
sp<IProducerListener> mListenerProxy;
+
+ // Get and flush the buffers of given slots, if the buffer in the slot
+ // is currently dequeued then it won't be flushed and won't be returned
+ // in outBuffers.
status_t getAndFlushBuffersFromSlots(const std::vector<int32_t>& slots,
std::vector<sp<GraphicBuffer>>* outBuffers);
+
+ // Buffers that are successfully dequeued/attached and handed to clients
+ std::unordered_set<int> mDequeuedSlots;
};
} // namespace android
diff --git a/libs/gui/include/private/gui/BitTube.h b/libs/gui/include/private/gui/BitTube.h
index 13c0162..8048518 100644
--- a/libs/gui/include/private/gui/BitTube.h
+++ b/libs/gui/include/private/gui/BitTube.h
@@ -58,6 +58,9 @@
// resets this BitTube's receive file descriptor to receiveFd
void setReceiveFd(base::unique_fd&& receiveFd);
+ // resets this BitTube's send file descriptor to sendFd
+ void setSendFd(base::unique_fd&& sendFd);
+
// send objects (sized blobs). All objects are guaranteed to be written or the call fails.
template <typename T>
static ssize_t sendObjects(BitTube* tube, T const* events, size_t count) {
@@ -85,7 +88,7 @@
// the message, excess data is silently discarded.
ssize_t read(void* vaddr, size_t size);
- base::unique_fd mSendFd;
+ mutable base::unique_fd mSendFd;
mutable base::unique_fd mReceiveFd;
static ssize_t sendObjects(BitTube* tube, void const* events, size_t count, size_t objSize);
diff --git a/libs/nativedisplay/AChoreographer.cpp b/libs/nativedisplay/AChoreographer.cpp
index e458b2e..f6a95ce 100644
--- a/libs/nativedisplay/AChoreographer.cpp
+++ b/libs/nativedisplay/AChoreographer.cpp
@@ -136,6 +136,7 @@
void dispatchHotplug(nsecs_t timestamp, PhysicalDisplayId displayId, bool connected) override;
void dispatchConfigChanged(nsecs_t timestamp, PhysicalDisplayId displayId, int32_t configId,
nsecs_t vsyncPeriod) override;
+ void dispatchNullEvent(nsecs_t, PhysicalDisplayId) override;
void scheduleCallbacks();
@@ -170,7 +171,7 @@
Choreographer::Choreographer(const sp<Looper>& looper)
: DisplayEventDispatcher(looper, ISurfaceComposer::VsyncSource::eVsyncSourceApp,
- ISurfaceComposer::ConfigChanged::eConfigChangedDispatch),
+ ISurfaceComposer::ConfigChanged::eConfigChangedSuppress),
mLooper(looper),
mThreadId(std::this_thread::get_id()) {
std::lock_guard<std::mutex> _l(gChoreographers.lock);
@@ -294,8 +295,14 @@
} else {
// If the looper thread is detached from Choreographer, then refresh rate
// changes will be handled in AChoreographer_handlePendingEvents, so we
- // need to redispatch a config from SF
- requestLatestConfig();
+ // need to wake up the looper thread by writing to the write-end of the
+ // socket the looper is listening on.
+ // Fortunately, these events are small so sending packets across the
+ // socket should be atomic across processes.
+ DisplayEventReceiver::Event event;
+ event.header = DisplayEventReceiver::Event::Header{DisplayEventReceiver::DISPLAY_EVENT_NULL,
+ PhysicalDisplayId(0), systemTime()};
+ injectEvent(event);
}
}
@@ -374,28 +381,15 @@
// displays. When multi-display choreographer is properly supported, then
// PhysicalDisplayId should no longer be ignored.
void Choreographer::dispatchConfigChanged(nsecs_t, PhysicalDisplayId displayId, int32_t configId,
- nsecs_t vsyncPeriod) {
+ nsecs_t) {
ALOGV("choreographer %p ~ received config change event "
"(displayId=%" ANDROID_PHYSICAL_DISPLAY_ID_FORMAT ", configId=%d).",
this, displayId, configId);
+}
- const nsecs_t lastPeriod = mLatestVsyncPeriod;
- std::vector<RefreshRateCallback> callbacks{};
- {
- std::lock_guard<std::mutex> _l{mLock};
- for (auto& cb : mRefreshRateCallbacks) {
- callbacks.push_back(cb);
- cb.firstCallbackFired = true;
- }
- }
-
- for (auto& cb : callbacks) {
- if (!cb.firstCallbackFired || (vsyncPeriod > 0 && vsyncPeriod != lastPeriod)) {
- cb.callback(vsyncPeriod, cb.data);
- }
- }
-
- mLatestVsyncPeriod = vsyncPeriod;
+void Choreographer::dispatchNullEvent(nsecs_t, PhysicalDisplayId) {
+ ALOGV("choreographer %p ~ received null event.", this);
+ handleRefreshRateUpdates();
}
void Choreographer::handleMessage(const Message& message) {
diff --git a/libs/renderengine/gl/GLESRenderEngine.cpp b/libs/renderengine/gl/GLESRenderEngine.cpp
index 92e7e71..0285c2f 100644
--- a/libs/renderengine/gl/GLESRenderEngine.cpp
+++ b/libs/renderengine/gl/GLESRenderEngine.cpp
@@ -409,6 +409,23 @@
mImageManager = std::make_unique<ImageManager>(this);
mImageManager->initThread();
mDrawingBuffer = createFramebuffer();
+ sp<GraphicBuffer> buf =
+ new GraphicBuffer(1, 1, PIXEL_FORMAT_RGBA_8888, 1,
+ GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE, "placeholder");
+
+ const status_t err = buf->initCheck();
+ if (err != OK) {
+ ALOGE("Error allocating placeholder buffer: %d", err);
+ return;
+ }
+ mPlaceholderBuffer = buf.get();
+ EGLint attributes[] = {
+ EGL_NONE,
+ };
+ mPlaceholderImage = eglCreateImageKHR(mEGLDisplay, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ mPlaceholderBuffer, attributes);
+ ALOGE_IF(mPlaceholderImage == EGL_NO_IMAGE_KHR, "Failed to create placeholder image: %#x",
+ eglGetError());
}
GLESRenderEngine::~GLESRenderEngine() {
@@ -423,6 +440,7 @@
eglDestroyImageKHR(mEGLDisplay, expired);
DEBUG_EGL_IMAGE_TRACKER_DESTROY();
}
+ eglDestroyImageKHR(mEGLDisplay, mPlaceholderImage);
mImageCache.clear();
eglMakeCurrent(mEGLDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
eglTerminate(mEGLDisplay);
@@ -589,6 +607,9 @@
}
void GLESRenderEngine::deleteTextures(size_t count, uint32_t const* names) {
+ for (int i = 0; i < count; ++i) {
+ mTextureView.erase(names[i]);
+ }
glDeleteTextures(count, names);
}
@@ -646,6 +667,7 @@
}
bindExternalTextureImage(texName, *cachedImage->second);
+ mTextureView.insert_or_assign(texName, buffer->getId());
}
// Wait for the new buffer to be ready.
@@ -887,7 +909,7 @@
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
-bool GLESRenderEngine::cleanupPostRender() {
+bool GLESRenderEngine::cleanupPostRender(CleanupMode mode) {
ATRACE_CALL();
if (mPriorResourcesCleaned ||
@@ -896,6 +918,30 @@
return false;
}
+ // This is a bit of a band-aid fix for FrameCaptureProcessor, as we should
+ // not need to keep memory around if we don't need to do so.
+ if (mode == CleanupMode::CLEAN_ALL) {
+ // TODO: SurfaceFlinger memory utilization may benefit from resetting
+ // texture bindings as well. Assess if it does and there's no performance regression
+ // when rebinding the same image data to the same texture, and if so then its mode
+ // behavior can be tweaked.
+ if (mPlaceholderImage != EGL_NO_IMAGE_KHR) {
+ for (auto [textureName, bufferId] : mTextureView) {
+ if (bufferId && mPlaceholderImage != EGL_NO_IMAGE_KHR) {
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, textureName);
+ glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES,
+ static_cast<GLeglImageOES>(mPlaceholderImage));
+ mTextureView[textureName] = std::nullopt;
+ checkErrors();
+ }
+ }
+ }
+ {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ mImageCache.clear();
+ }
+ }
+
// Bind the texture to dummy data so that backing image data can be freed.
GLFramebuffer* glFramebuffer = static_cast<GLFramebuffer*>(getFramebufferForDrawing());
glFramebuffer->allocateBuffers(1, 1, mPlaceholderDrawBuffer);
@@ -1616,6 +1662,16 @@
return cachedImage != mImageCache.end();
}
+bool GLESRenderEngine::isTextureNameKnownForTesting(uint32_t texName) {
+ const auto& entry = mTextureView.find(texName);
+ return entry != mTextureView.end();
+}
+
+std::optional<uint64_t> GLESRenderEngine::getBufferIdForTextureNameForTesting(uint32_t texName) {
+ const auto& entry = mTextureView.find(texName);
+ return entry != mTextureView.end() ? entry->second : std::nullopt;
+}
+
bool GLESRenderEngine::isFramebufferImageCachedForTesting(uint64_t bufferId) {
std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
return std::any_of(mFramebufferImageCache.cbegin(), mFramebufferImageCache.cend(),
diff --git a/libs/renderengine/gl/GLESRenderEngine.h b/libs/renderengine/gl/GLESRenderEngine.h
index 42b8537..d5254a8 100644
--- a/libs/renderengine/gl/GLESRenderEngine.h
+++ b/libs/renderengine/gl/GLESRenderEngine.h
@@ -75,7 +75,7 @@
const std::vector<const LayerSettings*>& layers,
ANativeWindowBuffer* buffer, const bool useFramebufferCache,
base::unique_fd&& bufferFence, base::unique_fd* drawFence) override;
- bool cleanupPostRender() override;
+ bool cleanupPostRender(CleanupMode mode) override;
EGLDisplay getEGLDisplay() const { return mEGLDisplay; }
// Creates an output image for rendering to
@@ -86,6 +86,12 @@
// Test-only methods
// Returns true iff mImageCache contains an image keyed by bufferId
bool isImageCachedForTesting(uint64_t bufferId) EXCLUDES(mRenderingMutex);
+ // Returns true iff texName was previously generated by RenderEngine and was
+ // not destroyed.
+ bool isTextureNameKnownForTesting(uint32_t texName);
+ // Returns the buffer ID of the content bound to texName, or nullopt if no
+ // such mapping exists.
+ std::optional<uint64_t> getBufferIdForTextureNameForTesting(uint32_t texName);
// Returns true iff mFramebufferImageCache contains an image keyed by bufferId
bool isFramebufferImageCachedForTesting(uint64_t bufferId)
EXCLUDES(mFramebufferImageCacheMutex);
@@ -224,6 +230,8 @@
// Cache of GL images that we'll store per GraphicBuffer ID
std::unordered_map<uint64_t, std::unique_ptr<Image>> mImageCache GUARDED_BY(mRenderingMutex);
+ std::unordered_map<uint32_t, std::optional<uint64_t>> mTextureView;
+
// Mutex guarding rendering operations, so that:
// 1. GL operations aren't interleaved, and
// 2. Internal state related to rendering that is potentially modified by
@@ -237,6 +245,11 @@
// ensure that we align on a word. Allocating 16 bytes will provide a
// guarantee that we don't clobber memory.
uint32_t mPlaceholderDrawBuffer[4];
+ // Placeholder buffer and image, similar to mPlaceholderDrawBuffer, but
+ // instead these are intended for cleaning up texture memory with the
+ // GL_TEXTURE_EXTERNAL_OES target.
+ ANativeWindowBuffer* mPlaceholderBuffer = nullptr;
+ EGLImage mPlaceholderImage = EGL_NO_IMAGE_KHR;
sp<Fence> mLastDrawFence;
// Store a separate boolean checking if prior resources were cleaned up, as
// devices that don't support native sync fences can't rely on a last draw
diff --git a/libs/renderengine/include/renderengine/RenderEngine.h b/libs/renderengine/include/renderengine/RenderEngine.h
index e06e128..74bc44b 100644
--- a/libs/renderengine/include/renderengine/RenderEngine.h
+++ b/libs/renderengine/include/renderengine/RenderEngine.h
@@ -111,14 +111,25 @@
// Returns NO_ERROR when binds successfully, NO_MEMORY when there's no memory for allocation.
virtual status_t bindFrameBuffer(Framebuffer* framebuffer) = 0;
virtual void unbindFrameBuffer(Framebuffer* framebuffer) = 0;
+
+ enum class CleanupMode {
+ CLEAN_OUTPUT_RESOURCES,
+ CLEAN_ALL,
+ };
// Clean-up method that should be called on the main thread after the
// drawFence returned by drawLayers fires. This method will free up
// resources used by the most recently drawn frame. If the frame is still
// being drawn, then this call is silently ignored.
//
+ // If mode is CLEAN_OUTPUT_RESOURCES, then only resources related to the
+ // output framebuffer are cleaned up, including the sibling texture.
+ //
+ // If mode is CLEAN_ALL, then we also cleanup resources related to any input
+ // buffers.
+ //
// Returns true if resources were cleaned up, and false if we didn't need to
// do any work.
- virtual bool cleanupPostRender() = 0;
+ virtual bool cleanupPostRender(CleanupMode mode = CleanupMode::CLEAN_OUTPUT_RESOURCES) = 0;
// queries
virtual size_t getMaxTextureSize() const = 0;
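
As a hedged usage sketch of the new CleanupMode parameter (the function name and the capture-path framing are illustrative, not part of this change):

    // Sketch: after a one-shot capture's draw fence has signaled, drop cached
    // input-buffer images too instead of only output-framebuffer resources.
    #include <renderengine/RenderEngine.h>

    void releaseCaptureResources(android::renderengine::RenderEngine& engine) {
        using CleanupMode = android::renderengine::RenderEngine::CleanupMode;
        // Default callers keep CLEAN_OUTPUT_RESOURCES; CLEAN_ALL additionally clears
        // the image cache and rebinds textures to a placeholder image.
        engine.cleanupPostRender(CleanupMode::CLEAN_ALL);
    }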
diff --git a/libs/renderengine/include/renderengine/mock/RenderEngine.h b/libs/renderengine/include/renderengine/mock/RenderEngine.h
index df0f17a..101cbb7 100644
--- a/libs/renderengine/include/renderengine/mock/RenderEngine.h
+++ b/libs/renderengine/include/renderengine/mock/RenderEngine.h
@@ -56,7 +56,7 @@
MOCK_CONST_METHOD0(isProtected, bool());
MOCK_CONST_METHOD0(supportsProtectedContent, bool());
MOCK_METHOD1(useProtectedContext, bool(bool));
- MOCK_METHOD0(cleanupPostRender, bool());
+ MOCK_METHOD1(cleanupPostRender, bool(CleanupMode mode));
MOCK_METHOD6(drawLayers,
status_t(const DisplaySettings&, const std::vector<const LayerSettings*>&,
ANativeWindowBuffer*, const bool, base::unique_fd&&, base::unique_fd*));
diff --git a/libs/renderengine/tests/RenderEngineTest.cpp b/libs/renderengine/tests/RenderEngineTest.cpp
index 16a8a0d..f577eb3 100644
--- a/libs/renderengine/tests/RenderEngineTest.cpp
+++ b/libs/renderengine/tests/RenderEngineTest.cpp
@@ -81,6 +81,7 @@
}
for (uint32_t texName : mTexNames) {
sRE->deleteTextures(1, &texName);
+ EXPECT_FALSE(sRE->isTextureNameKnownForTesting(texName));
}
}
@@ -1424,10 +1425,44 @@
if (fd >= 0) {
sync_wait(fd, -1);
}
-
// Only cleanup the first time.
- EXPECT_TRUE(sRE->cleanupPostRender());
- EXPECT_FALSE(sRE->cleanupPostRender());
+ EXPECT_TRUE(sRE->cleanupPostRender(
+ renderengine::RenderEngine::CleanupMode::CLEAN_OUTPUT_RESOURCES));
+ EXPECT_FALSE(sRE->cleanupPostRender(
+ renderengine::RenderEngine::CleanupMode::CLEAN_OUTPUT_RESOURCES));
+}
+
+TEST_F(RenderEngineTest, cleanupPostRender_whenCleaningAll_replacesTextureMemory) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0;
+ layers.push_back(&layer);
+
+ base::unique_fd fence;
+ sRE->drawLayers(settings, layers, mBuffer->getNativeBuffer(), true, base::unique_fd(), &fence);
+
+ const int fd = fence.get();
+ if (fd >= 0) {
+ sync_wait(fd, -1);
+ }
+
+ uint64_t bufferId = layer.source.buffer.buffer->getId();
+ uint32_t texName = layer.source.buffer.textureName;
+ EXPECT_TRUE(sRE->isImageCachedForTesting(bufferId));
+ EXPECT_EQ(bufferId, sRE->getBufferIdForTextureNameForTesting(texName));
+
+ EXPECT_TRUE(sRE->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL));
+
+ // Now check that our view of memory is good.
+ EXPECT_FALSE(sRE->isImageCachedForTesting(bufferId));
+ EXPECT_EQ(std::nullopt, sRE->getBufferIdForTextureNameForTesting(texName));
+ EXPECT_TRUE(sRE->isTextureNameKnownForTesting(texName));
}
} // namespace android
diff --git a/services/inputflinger/reader/InputDevice.cpp b/services/inputflinger/reader/InputDevice.cpp
index 4b19e5e..3347ba6 100644
--- a/services/inputflinger/reader/InputDevice.cpp
+++ b/services/inputflinger/reader/InputDevice.cpp
@@ -84,12 +84,13 @@
bumpGeneration();
}
-void InputDevice::dump(std::string& dump) {
+void InputDevice::dump(std::string& dump, const std::string& eventHubDevStr) {
InputDeviceInfo deviceInfo;
getDeviceInfo(&deviceInfo);
dump += StringPrintf(INDENT "Device %d: %s\n", deviceInfo.getId(),
deviceInfo.getDisplayName().c_str());
+ dump += StringPrintf(INDENT "%s", eventHubDevStr.c_str());
dump += StringPrintf(INDENT2 "Generation: %d\n", mGeneration);
dump += StringPrintf(INDENT2 "IsExternal: %s\n", toString(mIsExternal));
dump += StringPrintf(INDENT2 "AssociatedDisplayPort: ");
@@ -101,6 +102,7 @@
dump += StringPrintf(INDENT2 "HasMic: %s\n", toString(mHasMic));
dump += StringPrintf(INDENT2 "Sources: 0x%08x\n", deviceInfo.getSources());
dump += StringPrintf(INDENT2 "KeyboardType: %d\n", deviceInfo.getKeyboardType());
+ dump += StringPrintf(INDENT2 "ControllerNum: %d\n", deviceInfo.getControllerNumber());
const std::vector<InputDeviceInfo::MotionRange>& ranges = deviceInfo.getMotionRanges();
if (!ranges.empty()) {
@@ -200,6 +202,8 @@
// insert the context into the devices set
mDevices.insert({eventHubId, std::make_pair(std::move(contextPtr), std::move(mappers))});
+ // Must change generation to flag this device as changed
+ bumpGeneration();
}
void InputDevice::removeEventHubDevice(int32_t eventHubId) {
diff --git a/services/inputflinger/reader/InputReader.cpp b/services/inputflinger/reader/InputReader.cpp
index 657a134..fc063f9 100644
--- a/services/inputflinger/reader/InputReader.cpp
+++ b/services/inputflinger/reader/InputReader.cpp
@@ -206,6 +206,14 @@
}
mDevices.emplace(eventHubId, device);
+ // Add device to device to EventHub ids map.
+ const auto mapIt = mDeviceToEventHubIdsMap.find(device);
+ if (mapIt == mDeviceToEventHubIdsMap.end()) {
+ std::vector<int32_t> ids = {eventHubId};
+ mDeviceToEventHubIdsMap.emplace(device, ids);
+ } else {
+ mapIt->second.push_back(eventHubId);
+ }
bumpGenerationLocked();
if (device->getClasses() & INPUT_DEVICE_CLASS_EXTERNAL_STYLUS) {
@@ -222,6 +230,17 @@
std::shared_ptr<InputDevice> device = std::move(deviceIt->second);
mDevices.erase(deviceIt);
+ // Erase device from device to EventHub ids map.
+ auto mapIt = mDeviceToEventHubIdsMap.find(device);
+ if (mapIt != mDeviceToEventHubIdsMap.end()) {
+ std::vector<int32_t>& eventHubIds = mapIt->second;
+ eventHubIds.erase(std::remove_if(eventHubIds.begin(), eventHubIds.end(),
+ [eventHubId](int32_t eId) { return eId == eventHubId; }),
+ eventHubIds.end());
+ if (eventHubIds.size() == 0) {
+ mDeviceToEventHubIdsMap.erase(mapIt);
+ }
+ }
bumpGenerationLocked();
if (device->isIgnored()) {
@@ -449,8 +468,7 @@
void InputReader::getInputDevicesLocked(std::vector<InputDeviceInfo>& outInputDevices) {
outInputDevices.clear();
- for (auto& devicePair : mDevices) {
- std::shared_ptr<InputDevice>& device = devicePair.second;
+ for (const auto& [device, eventHubIds] : mDeviceToEventHubIdsMap) {
if (!device->isIgnored()) {
InputDeviceInfo info;
device->getDeviceInfo(&info);
@@ -621,11 +639,17 @@
mEventHub->dump(dump);
dump += "\n";
- dump += "Input Reader State:\n";
+ dump += StringPrintf("Input Reader State (Nums of device: %zu):\n",
+ mDeviceToEventHubIdsMap.size());
- for (const auto& devicePair : mDevices) {
- const std::shared_ptr<InputDevice>& device = devicePair.second;
- device->dump(dump);
+ for (const auto& devicePair : mDeviceToEventHubIdsMap) {
+ const std::shared_ptr<InputDevice>& device = devicePair.first;
+ std::string eventHubDevStr = INDENT "EventHub Devices: [ ";
+ for (const auto& eId : devicePair.second) {
+ eventHubDevStr += StringPrintf("%d ", eId);
+ }
+ eventHubDevStr += "] \n";
+ device->dump(dump, eventHubDevStr);
}
dump += INDENT "Configuration:\n";
diff --git a/services/inputflinger/reader/include/InputDevice.h b/services/inputflinger/reader/include/InputDevice.h
index 71313fc..a08062a 100644
--- a/services/inputflinger/reader/include/InputDevice.h
+++ b/services/inputflinger/reader/include/InputDevice.h
@@ -66,7 +66,7 @@
bool isEnabled();
void setEnabled(bool enabled, nsecs_t when);
- void dump(std::string& dump);
+ void dump(std::string& dump, const std::string& eventHubDevStr);
void addEventHubDevice(int32_t eventHubId, bool populateMappers = true);
void removeEventHubDevice(int32_t eventHubId);
void configure(nsecs_t when, const InputReaderConfiguration* config, uint32_t changes);
diff --git a/services/inputflinger/reader/include/InputReader.h b/services/inputflinger/reader/include/InputReader.h
index 693ec30..d46ec1a 100644
--- a/services/inputflinger/reader/include/InputReader.h
+++ b/services/inputflinger/reader/include/InputReader.h
@@ -141,6 +141,11 @@
// to lookup the input device instance from the EventHub device id.
std::unordered_map<int32_t /*eventHubId*/, std::shared_ptr<InputDevice>> mDevices;
+ // An input device contains one or more eventHubId, this map provides a way to lookup the
+ // EventHubIds contained in the input device from the input device instance.
+ std::unordered_map<std::shared_ptr<InputDevice>, std::vector<int32_t> /*eventHubId*/>
+ mDeviceToEventHubIdsMap;
+
// low-level input event decoding and device management
void processEventsLocked(const RawEvent* rawEvents, size_t count);
diff --git a/services/inputflinger/reader/include/TouchVideoDevice.h b/services/inputflinger/reader/include/TouchVideoDevice.h
index 5a32443..7de9b830 100644
--- a/services/inputflinger/reader/include/TouchVideoDevice.h
+++ b/services/inputflinger/reader/include/TouchVideoDevice.h
@@ -102,7 +102,7 @@
* How many buffers to keep for the internal queue. When the internal buffer
* exceeds this capacity, oldest frames will be dropped.
*/
- static constexpr size_t MAX_QUEUE_SIZE = 10;
+ static constexpr size_t MAX_QUEUE_SIZE = 20;
std::vector<TouchVideoFrame> mFrames;
/**
diff --git a/services/sensorservice/SensorDevice.cpp b/services/sensorservice/SensorDevice.cpp
index 8a282e2..e355594 100644
--- a/services/sensorservice/SensorDevice.cpp
+++ b/services/sensorservice/SensorDevice.cpp
@@ -153,12 +153,18 @@
SensorDeviceUtils::defaultResolutionForType(sensor.type);
}
- double promotedResolution = sensor.resolution;
- double promotedMaxRange = sensor.maxRange;
- if (fmod(promotedMaxRange, promotedResolution) != 0) {
- ALOGW("%s's max range %f is not a multiple of the resolution %f",
- sensor.name, sensor.maxRange, sensor.resolution);
- SensorDeviceUtils::quantizeValue(&sensor.maxRange, promotedResolution);
+ // Some sensors don't have a default resolution and will be left at 0.
+ // Don't crash in this case since CTS will verify that devices don't go to
+ // production with a resolution of 0.
+ if (sensor.resolution != 0) {
+ double promotedResolution = sensor.resolution;
+ double promotedMaxRange = sensor.maxRange;
+ if (fmod(promotedMaxRange, promotedResolution) != 0) {
+ ALOGW("%s's max range %f is not a multiple of the resolution %f",
+ sensor.name, sensor.maxRange, sensor.resolution);
+ SensorDeviceUtils::quantizeValue(
+ &sensor.maxRange, promotedResolution);
+ }
}
}
diff --git a/services/sensorservice/SensorEventConnection.cpp b/services/sensorservice/SensorEventConnection.cpp
index 6c86712..3cccaf9 100644
--- a/services/sensorservice/SensorEventConnection.cpp
+++ b/services/sensorservice/SensorEventConnection.cpp
@@ -29,6 +29,12 @@
#define UNUSED(x) (void)(x)
namespace android {
+namespace {
+
+// Used as the default value for the target SDK until it's obtained via getTargetSdkVersion.
+constexpr int kTargetSdkUnknown = 0;
+
+} // namespace
SensorService::SensorEventConnection::SensorEventConnection(
const sp<SensorService>& service, uid_t uid, String8 packageName, bool isDataInjectionMode,
@@ -36,9 +42,9 @@
: mService(service), mUid(uid), mWakeLockRefCount(0), mHasLooperCallbacks(false),
mDead(false), mDataInjectionMode(isDataInjectionMode), mEventCache(nullptr),
mCacheSize(0), mMaxCacheSize(0), mTimeOfLastEventDrop(0), mEventsDropped(0),
- mPackageName(packageName), mOpPackageName(opPackageName), mDestroyed(false) {
+ mPackageName(packageName), mOpPackageName(opPackageName), mTargetSdk(kTargetSdkUnknown),
+ mDestroyed(false) {
mChannel = new BitTube(mService->mSocketBufferSize);
- mTargetSdk = SensorService::getTargetSdkVersion(opPackageName);
#if DEBUG_CONNECTIONS
mEventsReceived = mEventsSentFromCache = mEventsSent = 0;
mTotalAcksNeeded = mTotalAcksReceived = 0;
@@ -439,6 +445,14 @@
bool success = true;
const auto iter = mHandleToAppOp.find(event.sensor);
if (iter != mHandleToAppOp.end()) {
+ if (mTargetSdk == kTargetSdkUnknown) {
+ // getTargetSdkVersion returns -1 if it fails so this operation should only be run once
+ // per connection and then cached. Perform this here as opposed to in the constructor to
+ // avoid log spam for NDK/VNDK clients that don't use sensors guarded with permissions
+ // and pass in invalid op package names.
+ mTargetSdk = SensorService::getTargetSdkVersion(mOpPackageName);
+ }
+
// Special handling for step count/detect backwards compatibility: if the app's target SDK
// is pre-Q, still permit delivering events to the app even if permission isn't granted
// (since this permission was only introduced in Q)
diff --git a/services/sensorservice/SensorService.cpp b/services/sensorservice/SensorService.cpp
index 60f9cd9..3ca34bb 100644
--- a/services/sensorservice/SensorService.cpp
+++ b/services/sensorservice/SensorService.cpp
@@ -79,6 +79,8 @@
bool SensorService::sHmacGlobalKeyIsValid = false;
std::map<String16, int> SensorService::sPackageTargetVersion;
Mutex SensorService::sPackageTargetVersionLock;
+String16 SensorService::sSensorInterfaceDescriptorPrefix =
+ String16("android.frameworks.sensorservice@");
AppOpsManager SensorService::sAppOpsManager;
#define SENSOR_SERVICE_DIR "/data/system/sensor_service"
@@ -1847,6 +1849,13 @@
}
int SensorService::getTargetSdkVersion(const String16& opPackageName) {
+ // Don't query the SDK version for the ISensorManager descriptor as it doesn't have one. This
+ // descriptor tends to be used for VNDK clients, but can technically be set by anyone so don't
+ // give it elevated privileges.
+ if (opPackageName.startsWith(sSensorInterfaceDescriptorPrefix)) {
+ return -1;
+ }
+
Mutex::Autolock packageLock(sPackageTargetVersionLock);
int targetSdkVersion = -1;
auto entry = sPackageTargetVersion.find(opPackageName);
diff --git a/services/sensorservice/SensorService.h b/services/sensorservice/SensorService.h
index 3bb8421..052cbfe 100644
--- a/services/sensorservice/SensorService.h
+++ b/services/sensorservice/SensorService.h
@@ -424,6 +424,7 @@
static AppOpsManager sAppOpsManager;
static std::map<String16, int> sPackageTargetVersion;
static Mutex sPackageTargetVersionLock;
+ static String16 sSensorInterfaceDescriptorPrefix;
};
} // namespace android
diff --git a/services/stats/StatsHal.cpp b/services/stats/StatsHal.cpp
index 80c3b65..ae0a984 100644
--- a/services/stats/StatsHal.cpp
+++ b/services/stats/StatsHal.cpp
@@ -58,7 +58,7 @@
std::vector<int32_t> buckets = chargeCycles.cycleBucket;
int initialSize = buckets.size();
for (int i = 0; i < 10 - initialSize; i++) {
- buckets.push_back(-1); // Push -1 for buckets that do not exist.
+ buckets.push_back(0); // Push 0 for buckets that do not exist.
}
android::util::stats_write(android::util::CHARGE_CYCLES_REPORTED, buckets[0], buckets[1],
buckets[2], buckets[3], buckets[4], buckets[5], buckets[6], buckets[7], buckets[8],
diff --git a/services/surfaceflinger/BufferQueueLayer.cpp b/services/surfaceflinger/BufferQueueLayer.cpp
index 07be791..6e4235e 100644
--- a/services/surfaceflinger/BufferQueueLayer.cpp
+++ b/services/surfaceflinger/BufferQueueLayer.cpp
@@ -324,9 +324,6 @@
}
uint64_t bufferID = mQueueItems[0].mGraphicBuffer->getId();
- mFlinger->mFrameTracer->traceFence(layerId, bufferID, currentFrameNumber,
- mQueueItems[0].mFenceTime,
- FrameTracer::FrameEvent::ACQUIRE_FENCE);
mFlinger->mTimeStats->setLatchTime(layerId, currentFrameNumber, latchTime);
mFlinger->mFrameTracer->traceTimestamp(layerId, bufferID, currentFrameNumber, latchTime,
FrameTracer::FrameEvent::LATCH);
@@ -393,8 +390,12 @@
void BufferQueueLayer::onFrameAvailable(const BufferItem& item) {
const int32_t layerId = getSequence();
- mFlinger->mFrameTracer->traceTimestamp(layerId, item.mGraphicBuffer->getId(), item.mFrameNumber,
- systemTime(), FrameTracer::FrameEvent::QUEUE);
+ const uint64_t bufferId = item.mGraphicBuffer->getId();
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferId, item.mFrameNumber, systemTime(),
+ FrameTracer::FrameEvent::QUEUE);
+ mFlinger->mFrameTracer->traceFence(layerId, bufferId, item.mFrameNumber,
+ std::make_shared<FenceTime>(item.mFence),
+ FrameTracer::FrameEvent::ACQUIRE_FENCE);
ATRACE_CALL();
// Add this buffer from our internal queue tracker
@@ -460,8 +461,12 @@
}
const int32_t layerId = getSequence();
- mFlinger->mFrameTracer->traceTimestamp(layerId, item.mGraphicBuffer->getId(), item.mFrameNumber,
- systemTime(), FrameTracer::FrameEvent::QUEUE);
+ const uint64_t bufferId = item.mGraphicBuffer->getId();
+ mFlinger->mFrameTracer->traceTimestamp(layerId, bufferId, item.mFrameNumber, systemTime(),
+ FrameTracer::FrameEvent::QUEUE);
+ mFlinger->mFrameTracer->traceFence(layerId, bufferId, item.mFrameNumber,
+ std::make_shared<FenceTime>(item.mFence),
+ FrameTracer::FrameEvent::ACQUIRE_FENCE);
mConsumer->onBufferAvailable(item);
}
diff --git a/services/surfaceflinger/CompositionEngine/src/Output.cpp b/services/surfaceflinger/CompositionEngine/src/Output.cpp
index e8f54f5..90ac781 100644
--- a/services/surfaceflinger/CompositionEngine/src/Output.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/Output.cpp
@@ -835,6 +835,8 @@
needsProtected == renderEngine.isProtected()) {
mRenderSurface->setProtected(needsProtected);
}
+ } else if (!outputState.isSecure && renderEngine.isProtected()) {
+ renderEngine.useProtectedContext(false);
}
base::unique_fd fd;
diff --git a/services/surfaceflinger/CompositionEngine/tests/DisplayTest.cpp b/services/surfaceflinger/CompositionEngine/tests/DisplayTest.cpp
index 09f37fb..29fd4b9 100644
--- a/services/surfaceflinger/CompositionEngine/tests/DisplayTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/DisplayTest.cpp
@@ -159,6 +159,7 @@
EXPECT_CALL(mCompositionEngine, getHwComposer()).WillRepeatedly(ReturnRef(mHwComposer));
EXPECT_CALL(mCompositionEngine, getRenderEngine()).WillRepeatedly(ReturnRef(mRenderEngine));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
}
DisplayCreationArgs getDisplayCreationArgsForPhysicalHWCDisplay() {
diff --git a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
index 59ed72e..e1989ae 100644
--- a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
@@ -2871,6 +2871,7 @@
mOutput.mState.usesClientComposition = false;
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, setExpensiveRenderingExpected(false));
@@ -2883,6 +2884,7 @@
mOutput.mState.flipClientTarget = true;
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(*mRenderSurface, dequeueBuffer(_)).WillOnce(Return(mOutputBuffer));
EXPECT_CALL(mOutput, setExpensiveRenderingExpected(false));
@@ -2892,6 +2894,7 @@
TEST_F(OutputComposeSurfacesTest, doesMinimalWorkIfDequeueBufferFailsForClientComposition) {
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(*mRenderSurface, dequeueBuffer(_)).WillOnce(Return(nullptr));
@@ -2904,6 +2907,7 @@
mOutput.mState.flipClientTarget = true;
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(*mRenderSurface, dequeueBuffer(_)).WillOnce(Return(nullptr));
@@ -2914,6 +2918,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -2936,6 +2941,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{r1}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -2963,6 +2969,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{r1, r2}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -2991,6 +2998,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{r1, r2}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -3019,6 +3027,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{r1, r2}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -3050,6 +3059,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillOnce(Return(std::vector<LayerFE::LayerSettings>{r1, r2}))
.WillOnce(Return(std::vector<LayerFE::LayerSettings>{r1, r3}));
@@ -3072,6 +3082,7 @@
struct OutputComposeSurfacesTest_UsesExpectedDisplaySettings : public OutputComposeSurfacesTest {
OutputComposeSurfacesTest_UsesExpectedDisplaySettings() {
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, generateClientCompositionRequests(_, _, kDefaultOutputDataspace))
.WillRepeatedly(Return(std::vector<LayerFE::LayerSettings>{}));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
@@ -3219,6 +3230,8 @@
mOutput.mState.isSecure = false;
mLayer2.mLayerFEState.hasProtectedContent = true;
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(true));
+ EXPECT_CALL(mRenderEngine, isProtected).WillOnce(Return(true));
+ EXPECT_CALL(mRenderEngine, useProtectedContext(false)).WillOnce(Return(true));
mOutput.composeSurfaces(kDebugRegion, kDefaultRefreshArgs);
}
@@ -3311,6 +3324,7 @@
EXPECT_CALL(mOutput, getSkipColorTransform()).WillRepeatedly(Return(false));
EXPECT_CALL(*mDisplayColorProfile, hasWideColorGamut()).WillRepeatedly(Return(true));
EXPECT_CALL(mRenderEngine, supportsProtectedContent()).WillRepeatedly(Return(false));
+ EXPECT_CALL(mRenderEngine, isProtected()).WillRepeatedly(Return(false));
EXPECT_CALL(mOutput, appendRegionFlashRequests(RegionEq(kDebugRegion), _))
.WillRepeatedly(Return());
EXPECT_CALL(*mRenderSurface, dequeueBuffer(_)).WillRepeatedly(Return(mOutputBuffer));
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 4e3ef24..fc93e14 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -1463,6 +1463,13 @@
void Layer::deferTransactionUntil_legacy(const sp<Layer>& barrierLayer, uint64_t frameNumber) {
ATRACE_CALL();
+ if (mLayerDetached) {
+ // If the layer is detached, then we don't defer this transaction since we will not
+ // commit the pending state while the layer is detached. Adding sync points may cause
+ // the barrier layer to wait for the states to be committed before dequeuing a buffer.
+ return;
+ }
+
mCurrentState.barrierLayer_legacy = barrierLayer;
mCurrentState.frameNumber_legacy = frameNumber;
// We don't set eTransactionNeeded, because just receiving a deferral
@@ -2422,7 +2429,15 @@
xSurfaceInset = (xSurfaceInset >= 0) ? std::min(xSurfaceInset, layerBounds.getWidth() / 2) : 0;
ySurfaceInset = (ySurfaceInset >= 0) ? std::min(ySurfaceInset, layerBounds.getHeight() / 2) : 0;
- layerBounds.inset(xSurfaceInset, ySurfaceInset, xSurfaceInset, ySurfaceInset);
+ // Inset while protecting against integer overflow. TODO(b/161235021): What is going wrong
+ // in the overflow scenario?
+ {
+ int32_t tmp;
+ if (!__builtin_add_overflow(layerBounds.left, xSurfaceInset, &tmp)) layerBounds.left = tmp;
+ if (!__builtin_sub_overflow(layerBounds.right, xSurfaceInset, &tmp)) layerBounds.right = tmp;
+ if (!__builtin_add_overflow(layerBounds.top, ySurfaceInset, &tmp)) layerBounds.top = tmp;
+ if (!__builtin_sub_overflow(layerBounds.bottom, ySurfaceInset, &tmp)) layerBounds.bottom = tmp;
+ }
// Input coordinate should match the layer bounds.
info.frameLeft = layerBounds.left;
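
The replacement for the plain inset() call clamps each edge with the GCC/Clang overflow builtins, skipping any adjustment that would wrap. A self-contained sketch of the same pattern (SimpleRect is a stand-in for the real Rect type; values chosen only to show the guard firing):

    #include <cstdint>
    #include <cstdio>

    struct SimpleRect { int32_t left, top, right, bottom; };

    // Apply an inset to each edge, skipping any edge whose adjustment would
    // overflow int32_t (the same guard used in the hunk above).
    void insetChecked(SimpleRect& r, int32_t dx, int32_t dy) {
        int32_t tmp;
        if (!__builtin_add_overflow(r.left, dx, &tmp)) r.left = tmp;
        if (!__builtin_sub_overflow(r.right, dx, &tmp)) r.right = tmp;
        if (!__builtin_add_overflow(r.top, dy, &tmp)) r.top = tmp;
        if (!__builtin_sub_overflow(r.bottom, dy, &tmp)) r.bottom = tmp;
    }

    int main() {
        SimpleRect r{0, 0, 100, INT32_MIN};  // pathological bottom edge
        insetChecked(r, 10, 10);
        // bottom - 10 would underflow, so bottom is left untouched;
        // the other three edges are inset normally (10, 10, 90).
        printf("%d %d %d %d\n", r.left, r.top, r.right, r.bottom);
    }
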
diff --git a/services/surfaceflinger/Scheduler/EventThread.cpp b/services/surfaceflinger/Scheduler/EventThread.cpp
index cee36a1..845bf50 100644
--- a/services/surfaceflinger/Scheduler/EventThread.cpp
+++ b/services/surfaceflinger/Scheduler/EventThread.cpp
@@ -139,6 +139,7 @@
status_t EventThreadConnection::stealReceiveChannel(gui::BitTube* outChannel) {
outChannel->setReceiveFd(mChannel.moveReceiveFd());
+ outChannel->setSendFd(base::unique_fd(dup(mChannel.getSendFd())));
return NO_ERROR;
}
@@ -152,11 +153,6 @@
mEventThread->requestNextVsync(this);
}
-void EventThreadConnection::requestLatestConfig() {
- ATRACE_NAME("requestLatestConfig");
- mEventThread->requestLatestConfig(this);
-}
-
status_t EventThreadConnection::postEvent(const DisplayEventReceiver::Event& event) {
ssize_t size = DisplayEventReceiver::sendEvents(&mChannel, &event, 1);
return size < 0 ? status_t(size) : status_t(NO_ERROR);
@@ -269,28 +265,6 @@
}
}
-void EventThread::requestLatestConfig(const sp<EventThreadConnection>& connection) {
- std::lock_guard<std::mutex> lock(mMutex);
- if (connection->mForcedConfigChangeDispatch) {
- return;
- }
- connection->mForcedConfigChangeDispatch = true;
- auto pendingConfigChange =
- std::find_if(std::begin(mPendingEvents), std::end(mPendingEvents),
- [&](const DisplayEventReceiver::Event& event) {
- return event.header.type ==
- DisplayEventReceiver::DISPLAY_EVENT_CONFIG_CHANGED;
- });
-
- // If we didn't find a pending config change event, then push out the
- // latest one we've ever seen.
- if (pendingConfigChange == std::end(mPendingEvents)) {
- mPendingEvents.push_back(mLastConfigChangeEvent);
- }
-
- mCondition.notify_all();
-}
-
void EventThread::onScreenReleased() {
std::lock_guard<std::mutex> lock(mMutex);
if (!mVSyncState || mVSyncState->synthetic) {
@@ -366,9 +340,6 @@
mInterceptVSyncsCallback(event->header.timestamp);
}
break;
- case DisplayEventReceiver::DISPLAY_EVENT_CONFIG_CHANGED:
- mLastConfigChangeEvent = *event;
- break;
}
}
@@ -381,10 +352,6 @@
vsyncRequested |= connection->vsyncRequest != VSyncRequest::None;
if (event && shouldConsumeEvent(*event, connection)) {
- if (event->header.type == DisplayEventReceiver::DISPLAY_EVENT_CONFIG_CHANGED &&
- connection->mForcedConfigChangeDispatch) {
- connection->mForcedConfigChangeDispatch = false;
- }
consumers.push_back(connection);
}
@@ -460,9 +427,7 @@
return true;
case DisplayEventReceiver::DISPLAY_EVENT_CONFIG_CHANGED: {
- const bool oneTimeDispatch = connection->mForcedConfigChangeDispatch;
- return oneTimeDispatch ||
- connection->mConfigChanged == ISurfaceComposer::eConfigChangedDispatch;
+ return connection->mConfigChanged == ISurfaceComposer::eConfigChangedDispatch;
}
case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
diff --git a/services/surfaceflinger/Scheduler/EventThread.h b/services/surfaceflinger/Scheduler/EventThread.h
index 64acbd7..49f624c 100644
--- a/services/surfaceflinger/Scheduler/EventThread.h
+++ b/services/surfaceflinger/Scheduler/EventThread.h
@@ -81,19 +81,13 @@
status_t stealReceiveChannel(gui::BitTube* outChannel) override;
status_t setVsyncRate(uint32_t rate) override;
void requestNextVsync() override; // asynchronous
- void requestLatestConfig() override; // asynchronous
// Called in response to requestNextVsync.
const ResyncCallback resyncCallback;
VSyncRequest vsyncRequest = VSyncRequest::None;
- ISurfaceComposer::ConfigChanged mConfigChanged =
+ const ISurfaceComposer::ConfigChanged mConfigChanged =
ISurfaceComposer::ConfigChanged::eConfigChangedSuppress;
- // Store whether we need to force dispatching a config change separately -
- // if mConfigChanged ever changes before the config change is dispatched
- // then we still need to propagate an initial config to the app if we
- // haven't already.
- bool mForcedConfigChangeDispatch = false;
private:
virtual void onFirstRef();
@@ -129,10 +123,6 @@
virtual void setVsyncRate(uint32_t rate, const sp<EventThreadConnection>& connection) = 0;
// Requests the next vsync. If resetIdleTimer is set to true, it resets the idle timer.
virtual void requestNextVsync(const sp<EventThreadConnection>& connection) = 0;
- // Dispatches the most recent configuration
- // Usage of this method assumes that only the primary internal display
- // supports multiple display configurations.
- virtual void requestLatestConfig(const sp<EventThreadConnection>& connection) = 0;
// Retrieves the number of event connections tracked by this EventThread.
virtual size_t getEventThreadConnectionCount() = 0;
@@ -153,7 +143,6 @@
status_t registerDisplayEventConnection(const sp<EventThreadConnection>& connection) override;
void setVsyncRate(uint32_t rate, const sp<EventThreadConnection>& connection) override;
void requestNextVsync(const sp<EventThreadConnection>& connection) override;
- void requestLatestConfig(const sp<EventThreadConnection>& connection) override;
// called before the screen is turned off from main thread
void onScreenReleased() override;
@@ -201,7 +190,6 @@
std::vector<wp<EventThreadConnection>> mDisplayEventConnections GUARDED_BY(mMutex);
std::deque<DisplayEventReceiver::Event> mPendingEvents GUARDED_BY(mMutex);
- DisplayEventReceiver::Event mLastConfigChangeEvent GUARDED_BY(mMutex);
// VSYNC state of connected display.
struct VSyncState {
diff --git a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
index a596bce..2a6fd05 100644
--- a/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncDispatchTimerQueue.cpp
@@ -243,7 +243,8 @@
std::vector<Invocation> invocations;
{
std::lock_guard<decltype(mMutex)> lk(mMutex);
- mLastTimerCallback = mTimeKeeper->now();
+ auto const now = mTimeKeeper->now();
+ mLastTimerCallback = now;
for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
auto& callback = it->second;
auto const wakeupTime = callback->wakeupTime();
@@ -251,7 +252,8 @@
continue;
}
- if (*wakeupTime < mIntendedWakeupTime + mTimerSlack) {
+ auto const lagAllowance = std::max(now - mIntendedWakeupTime, static_cast<nsecs_t>(0));
+ if (*wakeupTime < mIntendedWakeupTime + mTimerSlack + lagAllowance) {
callback->executing();
invocations.emplace_back(
Invocation{callback, *callback->lastExecutedVsyncTarget(), *wakeupTime});
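
The timer-queue change widens the dispatch window by however late the timer actually fired, so callbacks that became due during the lag are grouped into the same wakeup. A small self-contained illustration of the new condition (the 1ns slack and the timestamps are illustrative; see the laggedTimerGroupsCallbacksWithinLag test added later in this patch):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    using nsecs_t = int64_t;

    // New condition from the hunk: a callback is dispatched now if its wakeup
    // time falls within the slack plus however late the timer callback ran.
    bool shouldDispatchNow(nsecs_t wakeupTime, nsecs_t intendedWakeupTime,
                           nsecs_t timerSlack, nsecs_t now) {
        nsecs_t const lagAllowance = std::max(now - intendedWakeupTime, nsecs_t(0));
        return wakeupTime < intendedWakeupTime + timerSlack + lagAllowance;
    }

    int main() {
        // Intended wakeup at 600ns, 1ns slack, timer actually ran at 700ns.
        printf("%d\n", shouldDispatchNow(600, 600, 1, 700)); // 1: on time anyway
        printf("%d\n", shouldDispatchNow(610, 600, 1, 700)); // 1: covered by the lag
        printf("%d\n", shouldDispatchNow(720, 600, 1, 700)); // 0: still in the future
    }
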
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
index ab5773d..61f3fbb 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -255,21 +255,9 @@
}
}
-bool VSyncPredictor::needsMoreSamples(nsecs_t now) const {
- using namespace std::literals::chrono_literals;
+bool VSyncPredictor::needsMoreSamples() const {
std::lock_guard<std::mutex> lk(mMutex);
- bool needsMoreSamples = true;
- if (mTimestamps.size() >= kMinimumSamplesForPrediction) {
- nsecs_t constexpr aLongTime =
- std::chrono::duration_cast<std::chrono::nanoseconds>(500ms).count();
- if (!(mLastTimestampIndex < 0 || mTimestamps.empty())) {
- auto const lastTimestamp = mTimestamps[mLastTimestampIndex];
- needsMoreSamples = !((lastTimestamp + aLongTime) > now);
- }
- }
-
- ATRACE_INT("VSP-moreSamples", needsMoreSamples);
- return needsMoreSamples;
+ return mTimestamps.size() < kMinimumSamplesForPrediction;
}
void VSyncPredictor::resetModel() {
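
needsMoreSamples() no longer takes a timestamp: instead of re-requesting samples after 500ms of silence, the predictor now only reports whether it has accumulated kMinimumSamplesForPrediction timestamps, and the staleness handling moves into VSyncReactor (see the VSyncReactor.cpp hunk below). A reduced sketch of the new behaviour, with an illustrative constant value rather than the real one:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using nsecs_t = int64_t;

    class TinyPredictor {
    public:
        static constexpr size_t kMinimumSamplesForPrediction = 6;  // illustrative

        void addVsyncTimestamp(nsecs_t t) { mTimestamps.push_back(t); }

        // After this change: purely a question of how many samples are held,
        // with no dependence on "now".
        bool needsMoreSamples() const {
            return mTimestamps.size() < kMinimumSamplesForPrediction;
        }

    private:
        std::vector<nsecs_t> mTimestamps;
    };
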
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.h b/services/surfaceflinger/Scheduler/VSyncPredictor.h
index 3ca878d..5f3c418 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.h
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.h
@@ -52,11 +52,10 @@
*/
void setPeriod(nsecs_t period) final;
- /* Query if the model is in need of more samples to make a prediction at timePoint.
- * \param [in] timePoint The timePoint to inquire of.
+ /* Query if the model is in need of more samples to make a prediction.
* \return True, if model would benefit from more samples, False if not.
*/
- bool needsMoreSamples(nsecs_t timePoint) const;
+ bool needsMoreSamples() const final;
std::tuple<nsecs_t /* slope */, nsecs_t /* intercept */> getVSyncPredictionModel() const;
diff --git a/services/surfaceflinger/Scheduler/VSyncReactor.cpp b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
index c743de0..efa8bab 100644
--- a/services/surfaceflinger/Scheduler/VSyncReactor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncReactor.cpp
@@ -233,6 +233,7 @@
}
void VSyncReactor::startPeriodTransition(nsecs_t newPeriod) {
+ ATRACE_CALL();
mPeriodConfirmationInProgress = true;
mPeriodTransitioningTo = newPeriod;
mMoreSamplesNeeded = true;
@@ -240,8 +241,7 @@
}
void VSyncReactor::endPeriodTransition() {
- setIgnorePresentFencesInternal(false);
- mMoreSamplesNeeded = false;
+ ATRACE_CALL();
mPeriodTransitioningTo.reset();
mPeriodConfirmationInProgress = false;
mLastHwVsync.reset();
@@ -254,6 +254,8 @@
if (!mSupportKernelIdleTimer && period == getPeriod()) {
endPeriodTransition();
+ setIgnorePresentFencesInternal(false);
+ mMoreSamplesNeeded = false;
} else {
startPeriodTransition(period);
}
@@ -303,6 +305,7 @@
std::lock_guard<std::mutex> lk(mMutex);
if (periodConfirmed(timestamp, hwcVsyncPeriod)) {
+ ATRACE_NAME("VSR: period confirmed");
if (mPeriodTransitioningTo) {
mTracker->setPeriod(*mPeriodTransitioningTo);
for (auto& entry : mCallbacks) {
@@ -310,17 +313,29 @@
}
*periodFlushed = true;
}
+
+ if (mLastHwVsync) {
+ mTracker->addVsyncTimestamp(*mLastHwVsync);
+ }
+ mTracker->addVsyncTimestamp(timestamp);
+
endPeriodTransition();
+ mMoreSamplesNeeded = mTracker->needsMoreSamples();
} else if (mPeriodConfirmationInProgress) {
+ ATRACE_NAME("VSR: still confirming period");
mLastHwVsync = timestamp;
mMoreSamplesNeeded = true;
*periodFlushed = false;
} else {
- mMoreSamplesNeeded = false;
+ ATRACE_NAME("VSR: adding sample");
*periodFlushed = false;
+ mTracker->addVsyncTimestamp(timestamp);
+ mMoreSamplesNeeded = mTracker->needsMoreSamples();
}
- mTracker->addVsyncTimestamp(timestamp);
+ if (!mMoreSamplesNeeded) {
+ setIgnorePresentFencesInternal(false);
+ }
return mMoreSamplesNeeded;
}
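
Pulling this hunk together: timestamps now reach the tracker only once the period is confirmed (including the buffered last HW vsync) or when no transition is pending, and HW vsync / present-fence ignoring stays on exactly as long as the tracker keeps asking for samples. A condensed sketch of the reworked flow, against a minimal stand-in tracker rather than the real VSyncTracker:

    #include <cstdint>
    #include <optional>

    using nsecs_t = int64_t;

    struct MiniTracker {
        void addVsyncTimestamp(nsecs_t) {}
        bool needsMoreSamples() const { return false; }
    };

    // Simplified decision flow; member state is passed in explicitly here.
    bool addResyncSampleSketch(MiniTracker& tracker, bool periodConfirmed,
                               bool confirmationInProgress,
                               std::optional<nsecs_t>& lastHwVsync,
                               nsecs_t timestamp) {
        bool moreSamplesNeeded = false;
        if (periodConfirmed) {
            if (lastHwVsync) tracker.addVsyncTimestamp(*lastHwVsync);  // flush buffered sample
            tracker.addVsyncTimestamp(timestamp);
            lastHwVsync.reset();
            moreSamplesNeeded = tracker.needsMoreSamples();
        } else if (confirmationInProgress) {
            lastHwVsync = timestamp;  // buffer it; don't feed the tracker yet
            moreSamplesNeeded = true;
        } else {
            tracker.addVsyncTimestamp(timestamp);
            moreSamplesNeeded = tracker.needsMoreSamples();
        }
        // Previously present fences were only re-enabled on confirmation; now
        // they come back as soon as the tracker is satisfied.
        return moreSamplesNeeded;  // caller keeps HW vsync on while this is true
    }
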
diff --git a/services/surfaceflinger/Scheduler/VSyncTracker.h b/services/surfaceflinger/Scheduler/VSyncTracker.h
index 05a6fc3..107c540 100644
--- a/services/surfaceflinger/Scheduler/VSyncTracker.h
+++ b/services/surfaceflinger/Scheduler/VSyncTracker.h
@@ -66,6 +66,8 @@
/* Inform the tracker that the samples it has are not accurate for prediction. */
virtual void resetModel() = 0;
+ virtual bool needsMoreSamples() const = 0;
+
virtual void dump(std::string& result) const = 0;
protected:
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 4947210..56c4d26 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1567,7 +1567,7 @@
mEventQueue->refresh();
}
-nsecs_t SurfaceFlinger::getVsyncPeriod() const {
+nsecs_t SurfaceFlinger::getVsyncPeriodFromHWC() const {
const auto displayId = getInternalDisplayIdLocked();
if (!displayId || !getHwComposer().isConnected(*displayId)) {
return 0;
@@ -1774,7 +1774,7 @@
setPowerModeInternal(display, currentDisplayPowerMode);
// Reset the timing values to account for the period of the swapped in HWC
- const nsecs_t vsyncPeriod = getVsyncPeriod();
+ const nsecs_t vsyncPeriod = mRefreshRateConfigs->getCurrentRefreshRate().getVsyncPeriod();
mAnimFrameTracker.setDisplayRefreshPeriod(vsyncPeriod);
// The present fences returned from vr_hwc are not an accurate
@@ -2122,7 +2122,8 @@
mTimeStats->incrementCompositionStrategyChanges();
}
- mVSyncModulator->onRefreshed(mHadClientComposition);
+ // TODO: b/160583065 Enable skip validation when SF caches all client composition layers
+ mVSyncModulator->onRefreshed(mHadClientComposition || mReusedClientComposition);
mLayersWithQueuedFrames.clear();
if (mVisibleRegionsDirty) {
@@ -4201,8 +4202,7 @@
{});
setPowerModeInternal(display, hal::PowerMode::ON);
-
- const nsecs_t vsyncPeriod = getVsyncPeriod();
+ const nsecs_t vsyncPeriod = mRefreshRateConfigs->getCurrentRefreshRate().getVsyncPeriod();
mAnimFrameTracker.setDisplayRefreshPeriod(vsyncPeriod);
// Use phase of 0 since phase is not known.
@@ -4237,7 +4237,7 @@
if (mInterceptor->isEnabled()) {
mInterceptor->savePowerModeUpdate(display->getSequenceId(), static_cast<int32_t>(mode));
}
-
+ const auto vsyncPeriod = mRefreshRateConfigs->getCurrentRefreshRate().getVsyncPeriod();
if (currentMode == hal::PowerMode::OFF) {
if (SurfaceFlinger::setSchedFifo(true) != NO_ERROR) {
ALOGW("Couldn't set SCHED_FIFO on display on: %s\n", strerror(errno));
@@ -4246,7 +4246,7 @@
if (display->isPrimary() && mode != hal::PowerMode::DOZE_SUSPEND) {
getHwComposer().setVsyncEnabled(*displayId, mHWCVsyncPendingState);
mScheduler->onScreenAcquired(mAppConnectionHandle);
- mScheduler->resyncToHardwareVsync(true, getVsyncPeriod());
+ mScheduler->resyncToHardwareVsync(true, vsyncPeriod);
}
mVisibleRegionsDirty = true;
@@ -4273,7 +4273,7 @@
getHwComposer().setPowerMode(*displayId, mode);
if (display->isPrimary() && currentMode == hal::PowerMode::DOZE_SUSPEND) {
mScheduler->onScreenAcquired(mAppConnectionHandle);
- mScheduler->resyncToHardwareVsync(true, getVsyncPeriod());
+ mScheduler->resyncToHardwareVsync(true, vsyncPeriod);
}
} else if (mode == hal::PowerMode::DOZE_SUSPEND) {
// Leave display going to doze
@@ -4386,7 +4386,7 @@
}
void SurfaceFlinger::dumpStatsLocked(const DumpArgs& args, std::string& result) const {
- StringAppendF(&result, "%" PRId64 "\n", getVsyncPeriod());
+ StringAppendF(&result, "%" PRId64 "\n", getVsyncPeriodFromHWC());
if (args.size() > 1) {
const auto name = String8(args[1]);
@@ -4451,7 +4451,7 @@
mPhaseConfiguration->dump(result);
StringAppendF(&result,
" present offset: %9" PRId64 " ns\t VSYNC period: %9" PRId64 " ns\n\n",
- dispSyncPresentTimeOffset, getVsyncPeriod());
+ dispSyncPresentTimeOffset, getVsyncPeriodFromHWC());
scheduler::RefreshRateConfigs::Policy policy = mRefreshRateConfigs->getDisplayManagerPolicy();
StringAppendF(&result,
@@ -6237,6 +6237,11 @@
Mutex::Autolock lock(mStateLock);
if (authenticateSurfaceTextureLocked(surface)) {
sp<Layer> layer = (static_cast<MonitoredProducer*>(surface.get()))->getLayer();
+ if (layer == nullptr) {
+ ALOGE("Attempt to set frame rate on a layer that no longer exists");
+ return BAD_VALUE;
+ }
+
if (layer->setFrameRate(
Layer::FrameRate(frameRate,
Layer::FrameRate::convertCompatibility(compatibility)))) {
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index ccaeb2d..c727574 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -852,7 +852,7 @@
/* ------------------------------------------------------------------------
* VSync
*/
- nsecs_t getVsyncPeriod() const REQUIRES(mStateLock);
+ nsecs_t getVsyncPeriodFromHWC() const REQUIRES(mStateLock);
// Sets the refresh rate by switching active configs, if they are available for
// the desired refresh rate.
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index ce5f35c..06bdcdc 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -85,7 +85,7 @@
using HotplugEvent = TestableSurfaceFlinger::HotplugEvent;
using HWC2Display = TestableSurfaceFlinger::HWC2Display;
-constexpr int32_t DEFAULT_REFRESH_RATE = 16'666'666;
+constexpr int32_t DEFAULT_REFRESH_RATE = 16'666'667;
constexpr int32_t DEFAULT_DPI = 320;
constexpr int DEFAULT_VIRTUAL_DISPLAY_SURFACE_FORMAT = HAL_PIXEL_FORMAT_RGB_565;
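
For reference, the constant is the 60 Hz vsync period in nanoseconds: 1'000'000'000 / 60 = 16'666'666.67, so rounding to the nearest nanosecond gives 16'666'667; the old value was the truncated quotient.
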
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
index be49ef3..c2a7752 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchRealtimeTest.cpp
@@ -51,6 +51,7 @@
void setPeriod(nsecs_t) final {}
void resetModel() final {}
+ bool needsMoreSamples() const final { return false; }
void dump(std::string&) const final {}
private:
@@ -86,6 +87,7 @@
void setPeriod(nsecs_t) final {}
void resetModel() final {}
+ bool needsMoreSamples() const final { return false; }
void dump(std::string&) const final {}
private:
diff --git a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
index d940dc5..373f6a5 100644
--- a/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncDispatchTimerQueueTest.cpp
@@ -47,6 +47,7 @@
MOCK_CONST_METHOD0(currentPeriod, nsecs_t());
MOCK_METHOD1(setPeriod, void(nsecs_t));
MOCK_METHOD0(resetModel, void());
+ MOCK_CONST_METHOD0(needsMoreSamples, bool());
MOCK_CONST_METHOD1(dump, void(std::string&));
nsecs_t nextVSyncTime(nsecs_t timePoint) const {
@@ -115,11 +116,15 @@
operator VSyncDispatch::CallbackToken() const { return mToken; }
- void counter(nsecs_t time, nsecs_t) { mCalls.push_back(time); }
+ void counter(nsecs_t time, nsecs_t wakeup_time) {
+ mCalls.push_back(time);
+ mWakeupTime.push_back(wakeup_time);
+ }
VSyncDispatch& mDispatch;
VSyncDispatch::CallbackToken mToken;
std::vector<nsecs_t> mCalls;
+ std::vector<nsecs_t> mWakeupTime;
};
class PausingCallback {
@@ -747,6 +752,31 @@
EXPECT_THAT(cb2.mCalls.size(), Eq(1));
}
+TEST_F(VSyncDispatchTimerQueueTest, laggedTimerGroupsCallbacksWithinLag) {
+ CountingCallback cb1(mDispatch);
+ CountingCallback cb2(mDispatch);
+
+ Sequence seq;
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
+ .InSequence(seq)
+ .WillOnce(Return(1000));
+ EXPECT_CALL(mMockClock, alarmIn(_, 600)).InSequence(seq);
+ EXPECT_CALL(mStubTracker, nextAnticipatedVSyncTimeFrom(1000))
+ .InSequence(seq)
+ .WillOnce(Return(1000));
+
+ EXPECT_EQ(mDispatch.schedule(cb1, 400, 1000), ScheduleResult::Scheduled);
+ EXPECT_EQ(mDispatch.schedule(cb2, 390, 1000), ScheduleResult::Scheduled);
+
+ mMockClock.setLag(100);
+ mMockClock.advanceBy(700);
+
+ ASSERT_THAT(cb1.mWakeupTime.size(), Eq(1));
+ EXPECT_THAT(cb1.mWakeupTime[0], Eq(600));
+ ASSERT_THAT(cb2.mWakeupTime.size(), Eq(1));
+ EXPECT_THAT(cb2.mWakeupTime[0], Eq(610));
+}
+
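
Working through the expectations above: cb1 is scheduled with a 400ns workload against a predicted vsync at 1000ns, giving an intended wakeup of 1000 - 400 = 600; cb2's 390ns workload gives 1000 - 390 = 610. The mock clock then advances to 700 with 100ns of simulated lag, so the timer fires 100ns late; the new lag allowance (700 - 600 = 100) lets both the 600 and 610 wakeups dispatch from the same timer callback, and each callback records the wakeup time it was originally scheduled for.
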
class VSyncDispatchTimerQueueEntryTest : public testing::Test {
protected:
nsecs_t const mPeriod = 1000;
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
index fc39235..d4cd11d 100644
--- a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -73,27 +73,27 @@
TEST_F(VSyncPredictorTest, reportsSamplesNeededWhenHasNoDataPoints) {
for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
- EXPECT_TRUE(tracker.needsMoreSamples(mNow += mPeriod));
- tracker.addVsyncTimestamp(mNow);
+ EXPECT_TRUE(tracker.needsMoreSamples());
+ tracker.addVsyncTimestamp(mNow += mPeriod);
}
- EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+ EXPECT_FALSE(tracker.needsMoreSamples());
}
TEST_F(VSyncPredictorTest, reportsSamplesNeededAfterExplicitRateChange) {
for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
tracker.addVsyncTimestamp(mNow += mPeriod);
}
- EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+ EXPECT_FALSE(tracker.needsMoreSamples());
auto const changedPeriod = mPeriod * 2;
tracker.setPeriod(changedPeriod);
- EXPECT_TRUE(tracker.needsMoreSamples(mNow));
+ EXPECT_TRUE(tracker.needsMoreSamples());
for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
- EXPECT_TRUE(tracker.needsMoreSamples(mNow += changedPeriod));
- tracker.addVsyncTimestamp(mNow);
+ EXPECT_TRUE(tracker.needsMoreSamples());
+ tracker.addVsyncTimestamp(mNow += changedPeriod);
}
- EXPECT_FALSE(tracker.needsMoreSamples(mNow));
+ EXPECT_FALSE(tracker.needsMoreSamples());
}
TEST_F(VSyncPredictorTest, transitionsToModelledPointsAfterSynthetic) {
@@ -320,20 +320,6 @@
EXPECT_THAT(intercept, Eq(0));
}
-TEST_F(VSyncPredictorTest, willBecomeInaccurateAfterA_longTimeWithNoSamples) {
- auto const simulatedVsyncs = generateVsyncTimestamps(kMinimumSamplesForPrediction, mPeriod, 0);
-
- for (auto const& timestamp : simulatedVsyncs) {
- tracker.addVsyncTimestamp(timestamp);
- }
- auto const mNow = *simulatedVsyncs.rbegin();
- EXPECT_FALSE(tracker.needsMoreSamples(mNow));
-
- // TODO: would be better to decay this as a result of the variance of the samples
- static auto constexpr aLongTimeOut = 1000000000;
- EXPECT_TRUE(tracker.needsMoreSamples(mNow + aLongTimeOut));
-}
-
TEST_F(VSyncPredictorTest, idealModelPredictionsBeforeRegressionModelIsBuilt) {
auto const simulatedVsyncs =
generateVsyncTimestamps(kMinimumSamplesForPrediction + 1, mPeriod, 0);
@@ -443,7 +429,7 @@
// When VsyncPredictor returns the period it means that it doesn't know how to predict and
// it needs to get more samples
if (slope == mPeriod && intercept == 0) {
- EXPECT_TRUE(tracker.needsMoreSamples(now));
+ EXPECT_TRUE(tracker.needsMoreSamples());
}
}
}
diff --git a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
index a972562..6856612 100644
--- a/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncReactorTest.cpp
@@ -41,6 +41,7 @@
MOCK_CONST_METHOD0(currentPeriod, nsecs_t());
MOCK_METHOD1(setPeriod, void(nsecs_t));
MOCK_METHOD0(resetModel, void());
+ MOCK_CONST_METHOD0(needsMoreSamples, bool());
MOCK_CONST_METHOD1(dump, void(std::string&));
};
@@ -57,6 +58,7 @@
nsecs_t currentPeriod() const final { return mTracker->currentPeriod(); }
void setPeriod(nsecs_t period) final { mTracker->setPeriod(period); }
void resetModel() final { mTracker->resetModel(); }
+ bool needsMoreSamples() const final { return mTracker->needsMoreSamples(); }
void dump(std::string& result) const final { mTracker->dump(result); }
private:
@@ -455,6 +457,83 @@
EXPECT_FALSE(mReactor.addPresentFence(generateSignalledFenceWithTime(0)));
}
+TEST_F(VSyncReactorTest, hwVsyncIsRequestedForTracker) {
+ auto time = 0;
+ bool periodFlushed = false;
+ nsecs_t const newPeriod = 4000;
+ mReactor.setPeriod(newPeriod);
+
+ static auto constexpr numSamplesWithNewPeriod = 4;
+ Sequence seq;
+ EXPECT_CALL(*mMockTracker, needsMoreSamples())
+ .Times(numSamplesWithNewPeriod - 2)
+ .InSequence(seq)
+ .WillRepeatedly(Return(true));
+ EXPECT_CALL(*mMockTracker, needsMoreSamples())
+ .Times(1)
+ .InSequence(seq)
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(numSamplesWithNewPeriod);
+
+ EXPECT_TRUE(mReactor.addResyncSample(time += period, std::nullopt, &periodFlushed));
+
+ EXPECT_TRUE(mReactor.addResyncSample(time += period, std::nullopt, &periodFlushed));
+ // confirmed period, but the predictor still wants numSamplesWithNewPeriod samples. This one and the prior are valid.
+ EXPECT_TRUE(mReactor.addResyncSample(time += newPeriod, std::nullopt, &periodFlushed));
+ EXPECT_TRUE(mReactor.addResyncSample(time += newPeriod, std::nullopt, &periodFlushed));
+ EXPECT_FALSE(mReactor.addResyncSample(time += newPeriod, std::nullopt, &periodFlushed));
+}
+
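
To trace the expectations in this test against the reworked addResyncSample: the first two samples arrive at the old period, so the 4000ns transition stays unconfirmed and each is only buffered (no tracker calls, return true). The third sample is a full newPeriod apart, confirming the transition; the buffered sample and the current one are both pushed to the tracker (2 of the 4 addVsyncTimestamp calls) and needsMoreSamples() returns true. The fourth and fifth samples are added directly (the remaining 2 calls), with needsMoreSamples() returning true then false, at which point addResyncSample returns false and HW vsync can be turned off.
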
+TEST_F(VSyncReactorTest, hwVsyncTurnsOffOnConfirmationWhenTrackerDoesntRequest) {
+ auto time = 0;
+ bool periodFlushed = false;
+ nsecs_t const newPeriod = 4000;
+ mReactor.setPeriod(newPeriod);
+
+ Sequence seq;
+ EXPECT_CALL(*mMockTracker, needsMoreSamples())
+ .Times(1)
+ .InSequence(seq)
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(2);
+
+ EXPECT_TRUE(mReactor.addResyncSample(time += period, std::nullopt, &periodFlushed));
+ EXPECT_TRUE(mReactor.addResyncSample(time += period, std::nullopt, &periodFlushed));
+ EXPECT_FALSE(mReactor.addResyncSample(time += newPeriod, std::nullopt, &periodFlushed));
+}
+
+TEST_F(VSyncReactorTest, hwVsyncIsRequestedForTrackerMultiplePeriodChanges) {
+ auto time = 0;
+ bool periodFlushed = false;
+ nsecs_t const newPeriod1 = 4000;
+ nsecs_t const newPeriod2 = 7000;
+
+ mReactor.setPeriod(newPeriod1);
+
+ Sequence seq;
+ EXPECT_CALL(*mMockTracker, needsMoreSamples())
+ .Times(4)
+ .InSequence(seq)
+ .WillRepeatedly(Return(true));
+ EXPECT_CALL(*mMockTracker, needsMoreSamples())
+ .Times(1)
+ .InSequence(seq)
+ .WillRepeatedly(Return(false));
+ EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(7);
+
+ EXPECT_TRUE(mReactor.addResyncSample(time += period, std::nullopt, &periodFlushed));
+ EXPECT_TRUE(mReactor.addResyncSample(time += period, std::nullopt, &periodFlushed));
+ // confirmed period, but the predictor still wants more samples. This one and the prior are valid.
+ EXPECT_TRUE(mReactor.addResyncSample(time += newPeriod1, std::nullopt, &periodFlushed));
+ EXPECT_TRUE(mReactor.addResyncSample(time += newPeriod1, std::nullopt, &periodFlushed));
+
+ mReactor.setPeriod(newPeriod2);
+ EXPECT_TRUE(mReactor.addResyncSample(time += newPeriod1, std::nullopt, &periodFlushed));
+ EXPECT_TRUE(mReactor.addResyncSample(time += newPeriod2, std::nullopt, &periodFlushed));
+ EXPECT_TRUE(mReactor.addResyncSample(time += newPeriod2, std::nullopt, &periodFlushed));
+ EXPECT_FALSE(mReactor.addResyncSample(time += newPeriod2, std::nullopt, &periodFlushed));
+}
+
static nsecs_t computeWorkload(nsecs_t period, nsecs_t phase) {
return period - phase;
}
@@ -648,7 +727,7 @@
TEST_F(VSyncReactorTest, periodChangeWithGivenVsyncPeriod) {
bool periodFlushed = true;
- EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(3);
+ EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(2);
mReactor.setIgnorePresentFences(true);
nsecs_t const newPeriod = 5000;
@@ -672,7 +751,7 @@
kPendingLimit, true /* supportKernelIdleTimer */);
bool periodFlushed = true;
- EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(5);
+ EXPECT_CALL(*mMockTracker, addVsyncTimestamp(_)).Times(4);
idleReactor.setIgnorePresentFences(true);
// First, set the same period, which should only be confirmed when we receive two
diff --git a/vulkan/libvulkan/Android.bp b/vulkan/libvulkan/Android.bp
index f69de1f..aa8040b 100644
--- a/vulkan/libvulkan/Android.bp
+++ b/vulkan/libvulkan/Android.bp
@@ -89,7 +89,6 @@
"libhardware",
"libsync",
"libbase",
- "libdl_android",
"libhidlbase",
"liblog",
"libui",
@@ -100,6 +99,7 @@
"libnativebridge_lazy",
"libnativeloader_lazy",
"libnativewindow",
+ "libvndksupport",
"android.hardware.graphics.common@1.0",
"libSurfaceFlingerProp",
],
diff --git a/vulkan/libvulkan/driver.cpp b/vulkan/libvulkan/driver.cpp
index 7bcb2c1..f840561 100644
--- a/vulkan/libvulkan/driver.cpp
+++ b/vulkan/libvulkan/driver.cpp
@@ -28,20 +28,17 @@
#include <android/dlext.h>
#include <android/hardware/configstore/1.0/ISurfaceFlingerConfigs.h>
#include <configstore/Utils.h>
-#include <cutils/properties.h>
#include <graphicsenv/GraphicsEnv.h>
#include <log/log.h>
-#include <nativeloader/dlext_namespaces.h>
#include <sys/prctl.h>
#include <utils/Timers.h>
#include <utils/Trace.h>
+#include <vndksupport/linker.h>
#include <algorithm>
#include <array>
#include <climits>
#include <new>
-#include <string_view>
-#include <sstream>
#include <vector>
#include "stubhal.h"
@@ -151,19 +148,11 @@
Hal Hal::hal_;
-void* LoadLibrary(const android_dlextinfo& dlextinfo,
- const std::string_view subname) {
- ATRACE_CALL();
-
- std::stringstream ss;
- ss << "vulkan." << subname << ".so";
- return android_dlopen_ext(ss.str().c_str(), RTLD_LOCAL | RTLD_NOW, &dlextinfo);
-}
-
const std::array<const char*, 2> HAL_SUBNAME_KEY_PROPERTIES = {{
"ro.hardware." HWVULKAN_HARDWARE_MODULE_ID,
- "ro.board.platform",
+ "ro.board.platform"
}};
+constexpr int LIB_DL_FLAGS = RTLD_LOCAL | RTLD_NOW;
// LoadDriver returns:
// * 0 when succeed, or
@@ -174,23 +163,30 @@
const hwvulkan_module_t** module) {
ATRACE_CALL();
- const android_dlextinfo dlextinfo = {
- .flags = ANDROID_DLEXT_USE_NAMESPACE,
- .library_namespace = library_namespace,
- };
void* so = nullptr;
- char prop[PROPERTY_VALUE_MAX];
for (auto key : HAL_SUBNAME_KEY_PROPERTIES) {
- int prop_len = property_get(key, prop, nullptr);
- if (prop_len > 0 && prop_len <= UINT_MAX) {
- std::string_view lib_name(prop, static_cast<unsigned int>(prop_len));
- so = LoadLibrary(dlextinfo, lib_name);
- if (so)
- break;
+ std::string lib_name = android::base::GetProperty(key, "");
+ if (lib_name.empty())
+ continue;
+
+ lib_name = "vulkan." + lib_name + ".so";
+ if (library_namespace) {
+ // load updated driver
+ const android_dlextinfo dlextinfo = {
+ .flags = ANDROID_DLEXT_USE_NAMESPACE,
+ .library_namespace = library_namespace,
+ };
+ so = android_dlopen_ext(lib_name.c_str(), LIB_DL_FLAGS, &dlextinfo);
+ } else {
+ // load built-in driver
+ so = android_load_sphal_library(lib_name.c_str(), LIB_DL_FLAGS);
}
+ if (so)
+ break;
}
- if (!so)
+ if (!so) {
return -ENOENT;
+ }
auto hmi = static_cast<hw_module_t*>(dlsym(so, HAL_MODULE_INFO_SYM_AS_STR));
if (!hmi) {
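
The rewritten loop resolves the driver name from the system properties with libbase and then either dlopen()s it inside the supplied linker namespace (updated driver) or goes through libvndksupport for the built-in SPHAL driver. A trimmed sketch of that fallback; android::base::GetProperty, android_dlopen_ext and android_load_sphal_library are the real APIs used by the change, while the helper and its surrounding scaffolding are simplified:

    #include <android-base/properties.h>
    #include <android/dlext.h>
    #include <dlfcn.h>
    #include <vndksupport/linker.h>
    #include <array>
    #include <string>

    // Simplified sketch of the driver-library lookup in LoadDriver().
    void* openVulkanDriver(android_namespace_t* library_namespace) {
        constexpr int kDlFlags = RTLD_LOCAL | RTLD_NOW;
        const std::array<const char*, 2> kKeys = {
            "ro.hardware.vulkan",
            "ro.board.platform",
        };
        for (auto key : kKeys) {
            std::string name = android::base::GetProperty(key, "");
            if (name.empty()) continue;
            name = "vulkan." + name + ".so";
            void* so = nullptr;
            if (library_namespace) {
                // Updated driver: load it inside the provided namespace.
                const android_dlextinfo dlextinfo = {
                    .flags = ANDROID_DLEXT_USE_NAMESPACE,
                    .library_namespace = library_namespace,
                };
                so = android_dlopen_ext(name.c_str(), kDlFlags, &dlextinfo);
            } else {
                // Built-in driver: let libvndksupport pick the sphal namespace.
                so = android_load_sphal_library(name.c_str(), kDlFlags);
            }
            if (so) return so;
        }
        return nullptr;  // caller maps this to -ENOENT
    }
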
@@ -211,12 +207,9 @@
int LoadBuiltinDriver(const hwvulkan_module_t** module) {
ATRACE_CALL();
- auto ns = android_get_exported_namespace("sphal");
- if (!ns)
- return -ENOENT;
android::GraphicsEnv::getInstance().setDriverToLoad(
android::GpuStatsInfo::Driver::VULKAN);
- return LoadDriver(ns, module);
+ return LoadDriver(nullptr, module);
}
int LoadUpdatedDriver(const hwvulkan_module_t** module) {
@@ -238,7 +231,6 @@
bool Hal::Open() {
ATRACE_CALL();
-
const nsecs_t openTime = systemTime();
ALOG_ASSERT(!hal_.dev_, "OpenHAL called more than once");
@@ -256,16 +248,16 @@
if (result != 0) {
android::GraphicsEnv::getInstance().setDriverLoaded(
android::GpuStatsInfo::Api::API_VK, false, systemTime() - openTime);
- ALOGV("unable to load Vulkan HAL, using stub HAL (result=%d)", result);
return true;
}
-
hwvulkan_device_t* device;
ATRACE_BEGIN("hwvulkan module open");
result =
module->common.methods->open(&module->common, HWVULKAN_DEVICE_0,
reinterpret_cast<hw_device_t**>(&device));
+
+
ATRACE_END();
if (result != 0) {
android::GraphicsEnv::getInstance().setDriverLoaded(