Merge "Send isBuffer to perfetto trace" into sc-dev
diff --git a/cmds/installd/dexopt.cpp b/cmds/installd/dexopt.cpp
index 204953c..cc0434d 100644
--- a/cmds/installd/dexopt.cpp
+++ b/cmds/installd/dexopt.cpp
@@ -292,8 +292,8 @@
}
}
-static unique_fd create_profile(uid_t uid, const std::string& profile, int32_t flags) {
- unique_fd fd(TEMP_FAILURE_RETRY(open(profile.c_str(), flags, 0600)));
+static unique_fd create_profile(uid_t uid, const std::string& profile, int32_t flags, mode_t mode) {
+ unique_fd fd(TEMP_FAILURE_RETRY(open(profile.c_str(), flags, mode)));
if (fd.get() < 0) {
if (errno != EEXIST) {
PLOG(ERROR) << "Failed to create profile " << profile;
@@ -310,7 +310,7 @@
return fd;
}
-static unique_fd open_profile(uid_t uid, const std::string& profile, int32_t flags) {
+static unique_fd open_profile(uid_t uid, const std::string& profile, int32_t flags, mode_t mode) {
// Do not follow symlinks when opening a profile:
// - primary profiles should not contain symlinks in their paths
// - secondary dex paths should have been already resolved and validated
@@ -320,7 +320,7 @@
// Reference profiles and snapshots are created on the fly; so they might not exist beforehand.
unique_fd fd;
if ((flags & O_CREAT) != 0) {
- fd = create_profile(uid, profile, flags);
+ fd = create_profile(uid, profile, flags, mode);
} else {
fd.reset(TEMP_FAILURE_RETRY(open(profile.c_str(), flags)));
}
@@ -336,6 +336,16 @@
PLOG(ERROR) << "Failed to open profile " << profile;
}
return invalid_unique_fd();
+ } else {
+ // If we just created the file, we need to set its mode because on Android
+ // open() applies a mask that only allows owner access.
+ if ((flags & O_CREAT) != 0) {
+ if (fchmod(fd.get(), mode) != 0) {
+ PLOG(ERROR) << "Could not set mode " << std::hex << mode << std::dec
+ << " on profile " << profile;
+ // Not a terminal failure.
+ }
+ }
}
return fd;
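For context, a minimal standalone sketch of the create-then-fchmod pattern introduced above (the helper name and path handling are hypothetical, not part of this change): open() filters the requested mode through the process umask, so the group-read bit used for the 0640 reference profiles has to be reapplied explicitly.

#include <fcntl.h>
#include <sys/stat.h>
#include <cstdio>

// Hypothetical helper: create a file readable by its group (0640) even though
// the process umask may strip the group bit at open() time.
static int createGroupReadable(const char* path) {
    const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP;  // 0640
    int fd = open(path, O_CREAT | O_RDWR, mode);
    if (fd < 0) {
        perror("open");
        return -1;
    }
    // open() created the file with (mode & ~umask); reapply the intended bits.
    if (fchmod(fd, mode) != 0) {
        perror("fchmod");  // treated as non-fatal, mirroring the patch above
    }
    return fd;
}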
@@ -345,20 +355,29 @@
const std::string& location, bool is_secondary_dex) {
std::string profile = create_current_profile_path(user, package_name, location,
is_secondary_dex);
- return open_profile(uid, profile, O_RDONLY);
+ return open_profile(uid, profile, O_RDONLY, /*mode=*/ 0);
}
static unique_fd open_reference_profile(uid_t uid, const std::string& package_name,
const std::string& location, bool read_write, bool is_secondary_dex) {
std::string profile = create_reference_profile_path(package_name, location, is_secondary_dex);
- return open_profile(uid, profile, read_write ? (O_CREAT | O_RDWR) : O_RDONLY);
+ return open_profile(
+ uid,
+ profile,
+ read_write ? (O_CREAT | O_RDWR) : O_RDONLY,
+ S_IRUSR | S_IWUSR | S_IRGRP); // so that ART can also read it when apps run.
}
static UniqueFile open_reference_profile_as_unique_file(uid_t uid, const std::string& package_name,
const std::string& location, bool read_write, bool is_secondary_dex) {
std::string profile_path = create_reference_profile_path(package_name, location,
is_secondary_dex);
- unique_fd ufd = open_profile(uid, profile_path, read_write ? (O_CREAT | O_RDWR) : O_RDONLY);
+ unique_fd ufd = open_profile(
+ uid,
+ profile_path,
+ read_write ? (O_CREAT | O_RDWR) : O_RDONLY,
+ S_IRUSR | S_IWUSR | S_IRGRP); // so that ART can also read it when apps run.
+
return UniqueFile(ufd.release(), profile_path, [](const std::string& path) {
clear_profile(path);
});
@@ -367,7 +386,7 @@
static unique_fd open_spnashot_profile(uid_t uid, const std::string& package_name,
const std::string& location) {
std::string profile = create_snapshot_profile_path(package_name, location);
- return open_profile(uid, profile, O_CREAT | O_RDWR | O_TRUNC);
+ return open_profile(uid, profile, O_CREAT | O_RDWR | O_TRUNC, S_IRUSR | S_IWUSR);
}
static void open_profile_files(uid_t uid, const std::string& package_name,
@@ -2484,7 +2503,7 @@
for (size_t i = 0; i < profiles.size(); ) {
std::vector<unique_fd> profiles_fd;
for (size_t k = 0; k < kAggregationBatchSize && i < profiles.size(); k++, i++) {
- unique_fd fd = open_profile(AID_SYSTEM, profiles[i], O_RDONLY);
+ unique_fd fd = open_profile(AID_SYSTEM, profiles[i], O_RDONLY, /*mode=*/ 0);
if (fd.get() >= 0) {
profiles_fd.push_back(std::move(fd));
}
diff --git a/cmds/installd/tests/installd_dexopt_test.cpp b/cmds/installd/tests/installd_dexopt_test.cpp
index e272025..216347e 100644
--- a/cmds/installd/tests/installd_dexopt_test.cpp
+++ b/cmds/installd/tests/installd_dexopt_test.cpp
@@ -919,7 +919,7 @@
return;
}
- // Check that the snapshot was created witht he expected acess flags.
+ // Check that the snapshot was created with the expected access flags.
CheckFileAccess(snap_profile_, kSystemUid, kSystemGid, 0600 | S_IFREG);
// The snapshot should be equivalent to the merge of profiles.
@@ -962,8 +962,8 @@
return;
}
- // Check that the snapshot was created witht he expected acess flags.
- CheckFileAccess(ref_profile_, kTestAppUid, kTestAppUid, 0600 | S_IFREG);
+ // Check that the snapshot was created with the expected access flags.
+ CheckFileAccess(ref_profile_, kTestAppUid, kTestAppUid, 0640 | S_IFREG);
// The snapshot should be equivalent to the merge of profiles.
std::string ref_profile_content = ref_profile_ + ".expected";
diff --git a/libs/binder/rust/src/native.rs b/libs/binder/rust/src/native.rs
index 3b3fd08..3920129 100644
--- a/libs/binder/rust/src/native.rs
+++ b/libs/binder/rust/src/native.rs
@@ -24,7 +24,6 @@
use std::ffi::{c_void, CString};
use std::mem::ManuallyDrop;
use std::ops::Deref;
-use std::ptr;
/// Rust wrapper around Binder remotable objects.
///
@@ -273,7 +272,7 @@
/// Must be called with a valid pointer to a `T` object. After this call,
/// the pointer will be invalid and should not be dereferenced.
unsafe extern "C" fn on_destroy(object: *mut c_void) {
- ptr::drop_in_place(object as *mut T)
+ Box::from_raw(object as *mut T);
}
/// Called whenever a new, local `AIBinder` object is needed of a specific
diff --git a/libs/renderengine/gl/GLESRenderEngine.cpp b/libs/renderengine/gl/GLESRenderEngine.cpp
index 3c58238..18eb7b5 100644
--- a/libs/renderengine/gl/GLESRenderEngine.cpp
+++ b/libs/renderengine/gl/GLESRenderEngine.cpp
@@ -515,9 +515,10 @@
return mDrawingBuffer.get();
}
-void GLESRenderEngine::primeCache() {
+std::future<void> GLESRenderEngine::primeCache() {
ProgramCache::getInstance().primeCache(mInProtectedContext ? mProtectedEGLContext : mEGLContext,
mUseColorManagement, mPrecacheToneMapperShaderOnly);
+ return {};
}
base::unique_fd GLESRenderEngine::flush() {
diff --git a/libs/renderengine/gl/GLESRenderEngine.h b/libs/renderengine/gl/GLESRenderEngine.h
index e7ed9c0..1ff3674 100644
--- a/libs/renderengine/gl/GLESRenderEngine.h
+++ b/libs/renderengine/gl/GLESRenderEngine.h
@@ -57,7 +57,7 @@
EGLSurface protectedStub);
~GLESRenderEngine() override EXCLUDES(mRenderingMutex);
- void primeCache() override;
+ std::future<void> primeCache() override;
void genTextures(size_t count, uint32_t* names) override;
void deleteTextures(size_t count, uint32_t const* names) override;
bool isProtected() const override { return mInProtectedContext; }
diff --git a/libs/renderengine/include/renderengine/RenderEngine.h b/libs/renderengine/include/renderengine/RenderEngine.h
index d1bbcc5..1da9adc 100644
--- a/libs/renderengine/include/renderengine/RenderEngine.h
+++ b/libs/renderengine/include/renderengine/RenderEngine.h
@@ -29,6 +29,7 @@
#include <ui/GraphicTypes.h>
#include <ui/Transform.h>
+#include <future>
#include <memory>
/**
@@ -48,6 +49,11 @@
*/
#define PROPERTY_DEBUG_RENDERENGINE_CAPTURE_FILENAME "debug.renderengine.capture_filename"
+/**
+ * Allows recording of Skia drawing commands with systrace.
+ */
+#define PROPERTY_SKIA_ATRACE_ENABLED "debug.renderengine.skia_atrace_enabled"
+
struct ANativeWindowBuffer;
namespace android {
@@ -94,17 +100,13 @@
static std::unique_ptr<RenderEngine> create(const RenderEngineCreationArgs& args);
- RenderEngine() : RenderEngine(RenderEngineType::GLES) {}
-
- RenderEngine(RenderEngineType type) : mRenderEngineType(type) {}
-
virtual ~RenderEngine() = 0;
// ----- BEGIN DEPRECATED INTERFACE -----
// This interface, while still in use until a suitable replacement is built,
// should be considered deprecated, minus some methods which still may be
// used to support legacy behavior.
- virtual void primeCache() = 0;
+ virtual std::future<void> primeCache() = 0;
// dump the extension strings. always call the base class.
virtual void dump(std::string& result) = 0;
@@ -204,6 +206,10 @@
static void validateOutputBufferUsage(const sp<GraphicBuffer>&);
protected:
+ RenderEngine() : RenderEngine(RenderEngineType::GLES) {}
+
+ RenderEngine(RenderEngineType type) : mRenderEngineType(type) {}
+
// Maps GPU resources for this buffer.
// Note that work may be deferred to an additional thread, i.e. this call
// is made asynchronously, but the caller can expect that map/unmap calls
diff --git a/libs/renderengine/include/renderengine/mock/RenderEngine.h b/libs/renderengine/include/renderengine/mock/RenderEngine.h
index 27dbd1e..baa1305 100644
--- a/libs/renderengine/include/renderengine/mock/RenderEngine.h
+++ b/libs/renderengine/include/renderengine/mock/RenderEngine.h
@@ -35,7 +35,7 @@
RenderEngine();
~RenderEngine() override;
- MOCK_METHOD0(primeCache, void());
+ MOCK_METHOD0(primeCache, std::future<void>());
MOCK_METHOD1(dump, void(std::string&));
MOCK_METHOD2(genTextures, void(size_t, uint32_t*));
MOCK_METHOD2(deleteTextures, void(size_t, uint32_t const*));
diff --git a/libs/renderengine/skia/Cache.cpp b/libs/renderengine/skia/Cache.cpp
index 8a41395..b3975b0 100644
--- a/libs/renderengine/skia/Cache.cpp
+++ b/libs/renderengine/skia/Cache.cpp
@@ -292,7 +292,11 @@
drawSolidLayers(renderengine, display, dstTexture);
drawShadowLayers(renderengine, display, srcTexture);
- drawBlurLayers(renderengine, display, dstTexture);
+
+ if (renderengine->supportsBackgroundBlur()) {
+ drawBlurLayers(renderengine, display, dstTexture);
+ }
+
// The majority of shaders are related to sampling images.
drawImageLayers(renderengine, display, dstTexture, srcTexture);
diff --git a/libs/renderengine/skia/SkiaGLRenderEngine.cpp b/libs/renderengine/skia/SkiaGLRenderEngine.cpp
index 2d80c46..f0986a3 100644
--- a/libs/renderengine/skia/SkiaGLRenderEngine.cpp
+++ b/libs/renderengine/skia/SkiaGLRenderEngine.cpp
@@ -36,6 +36,7 @@
#include <SkSurface.h>
#include <android-base/stringprintf.h>
#include <gl/GrGLInterface.h>
+#include <gui/TraceUtils.h>
#include <sync/sync.h>
#include <ui/BlurRegion.h>
#include <ui/DebugUtils.h>
@@ -234,8 +235,9 @@
return engine;
}
-void SkiaGLRenderEngine::primeCache() {
+std::future<void> SkiaGLRenderEngine::primeCache() {
Cache::primeShaderCache(this);
+ return {};
}
EGLConfig SkiaGLRenderEngine::chooseEglConfig(EGLDisplay display, int format, bool logConfig) {
@@ -315,6 +317,7 @@
GrContextOptions options;
options.fDisableDriverCorrectnessWorkarounds = true;
options.fDisableDistanceFieldPaths = true;
+ options.fReducedShaderVariations = true;
options.fPersistentCache = &mSkSLCacheMonitor;
mGrContext = GrDirectContext::MakeGL(glInterface, options);
if (useProtectedContext(true)) {
@@ -506,17 +509,18 @@
return;
}
// We currently don't attempt to map a buffer if the buffer contains protected content
- // or we are using a protected context because GPU resources for protected buffers is
- // much more limited.
+ // because GPU resources for protected buffers are much more limited.
const bool isProtectedBuffer = buffer->getUsage() & GRALLOC_USAGE_PROTECTED;
- if (isProtectedBuffer || mInProtectedContext) {
+ if (isProtectedBuffer) {
return;
}
ATRACE_CALL();
- // If we were to support caching protected buffers then we will need to switch the currently
- // bound context if we are not already using the protected context (and subsequently switch
- // back after the buffer is cached).
+ // If we were to support caching protected buffers then we will need to switch the
+ // currently bound context if we are not already using the protected context (and subsequently
+ // switch back after the buffer is cached). However, for non-protected content we can bind
+ // the texture in either GL context because they are initialized with the same share_context
+ // which allows the texture state to be shared between them.
auto grContext = getActiveGrContext();
auto& cache = mTextureCache;
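As background for the single-cache change above, a minimal sketch of how two EGL contexts created against the same share_context end up sharing texture objects (the helper and attribute lists are illustrative assumptions, not code from this patch):

#include <EGL/egl.h>
#include <EGL/eglext.h>

// Sketch: the protected context passes the unprotected one as share_context,
// so GL texture names created in either context are visible in both. The
// EGL_PROTECTED_CONTENT_EXT attribute is only illustrative of how the
// protected context differs.
static void createSharedContexts(EGLDisplay display, EGLConfig config,
                                 EGLContext* unprotectedOut, EGLContext* protectedOut) {
    const EGLint attribs[] = {EGL_CONTEXT_CLIENT_VERSION, 3, EGL_NONE};
    const EGLint protectedAttribs[] = {EGL_CONTEXT_CLIENT_VERSION, 3,
                                       EGL_PROTECTED_CONTENT_EXT, EGL_TRUE, EGL_NONE};

    *unprotectedOut = eglCreateContext(display, config, EGL_NO_CONTEXT, attribs);
    // Sharing with the unprotected context makes texture state common to both.
    *protectedOut = eglCreateContext(display, config, *unprotectedOut, protectedAttribs);
}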
@@ -550,7 +554,6 @@
if (iter->second == 0) {
mTextureCache.erase(buffer->getId());
- mProtectedTextureCache.erase(buffer->getId());
mGraphicBufferExternalRefs.erase(buffer->getId());
}
}
@@ -702,7 +705,7 @@
validateOutputBufferUsage(buffer->getBuffer());
auto grContext = getActiveGrContext();
- auto& cache = mInProtectedContext ? mProtectedTextureCache : mTextureCache;
+ auto& cache = mTextureCache;
std::shared_ptr<AutoBackendTexture::LocalRef> surfaceTextureRef;
if (const auto& it = cache.find(buffer->getBuffer()->getId()); it != cache.end()) {
@@ -792,7 +795,7 @@
}
for (const auto& layer : layers) {
- ATRACE_NAME("DrawLayer");
+ ATRACE_FORMAT("DrawLayer: %s", layer->name.c_str());
if (kPrintLayerSettings) {
std::stringstream ls;
@@ -1445,12 +1448,6 @@
StringAppendF(&result, "Skia's Protected Wrapped Objects:\n");
gpuProtectedReporter.logOutput(result, true);
- StringAppendF(&result, "RenderEngine protected AHB/BackendTexture cache size: %zu\n",
- mProtectedTextureCache.size());
- StringAppendF(&result, "Dumping buffer ids...\n");
- for (const auto& [id, unused] : mProtectedTextureCache) {
- StringAppendF(&result, "- 0x%" PRIx64 "\n", id);
- }
StringAppendF(&result, "\n");
StringAppendF(&result, "RenderEngine runtime effects: %zu\n", mRuntimeEffects.size());
for (const auto& [linearEffect, unused] : mRuntimeEffects) {
diff --git a/libs/renderengine/skia/SkiaGLRenderEngine.h b/libs/renderengine/skia/SkiaGLRenderEngine.h
index 97d3b72..b972c73 100644
--- a/libs/renderengine/skia/SkiaGLRenderEngine.h
+++ b/libs/renderengine/skia/SkiaGLRenderEngine.h
@@ -53,7 +53,7 @@
EGLSurface protectedPlaceholder);
~SkiaGLRenderEngine() override EXCLUDES(mRenderingMutex);
- void primeCache() override;
+ std::future<void> primeCache() override;
status_t drawLayers(const DisplaySettings& display,
const std::vector<const LayerSettings*>& layers,
const std::shared_ptr<ExternalTexture>& buffer,
@@ -126,11 +126,9 @@
// Number of external holders of ExternalTexture references, per GraphicBuffer ID.
std::unordered_map<GraphicBufferId, int32_t> mGraphicBufferExternalRefs
GUARDED_BY(mRenderingMutex);
- // Cache of GL textures that we'll store per GraphicBuffer ID, sliced by GPU context.
+ // Cache of GL textures that we'll store per GraphicBuffer ID, shared between GPU contexts.
std::unordered_map<GraphicBufferId, std::shared_ptr<AutoBackendTexture::LocalRef>> mTextureCache
GUARDED_BY(mRenderingMutex);
- std::unordered_map<GraphicBufferId, std::shared_ptr<AutoBackendTexture::LocalRef>>
- mProtectedTextureCache GUARDED_BY(mRenderingMutex);
std::unordered_map<LinearEffect, sk_sp<SkRuntimeEffect>, LinearEffectHasher> mRuntimeEffects;
StretchShaderFactory mStretchShaderFactory;
diff --git a/libs/renderengine/skia/SkiaRenderEngine.cpp b/libs/renderengine/skia/SkiaRenderEngine.cpp
index 81f0b6f..29175a2 100644
--- a/libs/renderengine/skia/SkiaRenderEngine.cpp
+++ b/libs/renderengine/skia/SkiaRenderEngine.cpp
@@ -14,13 +14,22 @@
* limitations under the License.
*/
-//#define LOG_NDEBUG 0
#undef LOG_TAG
#define LOG_TAG "RenderEngine"
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+#include "SkiaRenderEngine.h"
+
+#include <android-base/properties.h>
+#include <src/core/SkTraceEventCommon.h>
+
namespace android {
namespace renderengine {
-namespace skia {} // namespace skia
+namespace skia {
+SkiaRenderEngine::SkiaRenderEngine(RenderEngineType type) : RenderEngine(type) {
+ SkAndroidFrameworkTraceUtil::setEnableTracing(
+ base::GetBoolProperty(PROPERTY_SKIA_ATRACE_ENABLED, false));
+}
+} // namespace skia
} // namespace renderengine
-} // namespace android
\ No newline at end of file
+} // namespace android
diff --git a/libs/renderengine/skia/SkiaRenderEngine.h b/libs/renderengine/skia/SkiaRenderEngine.h
index 308c5ff..f098a86 100644
--- a/libs/renderengine/skia/SkiaRenderEngine.h
+++ b/libs/renderengine/skia/SkiaRenderEngine.h
@@ -36,10 +36,10 @@
class SkiaRenderEngine : public RenderEngine {
public:
static std::unique_ptr<SkiaRenderEngine> create(const RenderEngineCreationArgs& args);
- SkiaRenderEngine(RenderEngineType type) : RenderEngine(type) {}
+ SkiaRenderEngine(RenderEngineType type);
~SkiaRenderEngine() override {}
- virtual void primeCache() override{};
+ virtual std::future<void> primeCache() override { return {}; };
virtual void genTextures(size_t /*count*/, uint32_t* /*names*/) override{};
virtual void deleteTextures(size_t /*count*/, uint32_t const* /*names*/) override{};
virtual bool isProtected() const override { return false; } // mInProtectedContext; }
@@ -60,8 +60,8 @@
protected:
virtual void mapExternalTextureBuffer(const sp<GraphicBuffer>& /*buffer*/,
- bool /*isRenderable*/) override;
- virtual void unmapExternalTextureBuffer(const sp<GraphicBuffer>& /*buffer*/) override;
+ bool /*isRenderable*/) override = 0;
+ virtual void unmapExternalTextureBuffer(const sp<GraphicBuffer>& /*buffer*/) override = 0;
};
} // namespace skia
diff --git a/libs/renderengine/threaded/RenderEngineThreaded.cpp b/libs/renderengine/threaded/RenderEngineThreaded.cpp
index 9009ce4..74c5f47 100644
--- a/libs/renderengine/threaded/RenderEngineThreaded.cpp
+++ b/libs/renderengine/threaded/RenderEngineThreaded.cpp
@@ -48,44 +48,70 @@
}
RenderEngineThreaded::~RenderEngineThreaded() {
- {
- std::lock_guard lock(mThreadMutex);
- mRunning = false;
- mCondition.notify_one();
- }
+ mRunning = false;
+ mCondition.notify_one();
if (mThread.joinable()) {
mThread.join();
}
}
+status_t RenderEngineThreaded::setSchedFifo(bool enabled) {
+ static constexpr int kFifoPriority = 2;
+ static constexpr int kOtherPriority = 0;
+
+ struct sched_param param = {0};
+ int sched_policy;
+ if (enabled) {
+ sched_policy = SCHED_FIFO;
+ param.sched_priority = kFifoPriority;
+ } else {
+ sched_policy = SCHED_OTHER;
+ param.sched_priority = kOtherPriority;
+ }
+
+ if (sched_setscheduler(0, sched_policy, ¶m) != 0) {
+ return -errno;
+ }
+ return NO_ERROR;
+}
+
// NO_THREAD_SAFETY_ANALYSIS is because std::unique_lock presently lacks thread safety annotations.
void RenderEngineThreaded::threadMain(CreateInstanceFactory factory) NO_THREAD_SAFETY_ANALYSIS {
ATRACE_CALL();
- struct sched_param param = {0};
- param.sched_priority = 2;
- if (sched_setscheduler(0, SCHED_FIFO, ¶m) != 0) {
- ALOGE("Couldn't set SCHED_FIFO");
+ if (setSchedFifo(true) != NO_ERROR) {
+ ALOGW("Couldn't set SCHED_FIFO");
}
mRenderEngine = factory();
- std::unique_lock<std::mutex> lock(mThreadMutex);
pthread_setname_np(pthread_self(), mThreadName);
{
- std::unique_lock<std::mutex> lock(mInitializedMutex);
+ std::scoped_lock lock(mInitializedMutex);
mIsInitialized = true;
}
mInitializedCondition.notify_all();
while (mRunning) {
- if (!mFunctionCalls.empty()) {
- auto task = mFunctionCalls.front();
- mFunctionCalls.pop();
- task(*mRenderEngine);
+ const auto getNextTask = [this]() -> std::optional<Work> {
+ std::scoped_lock lock(mThreadMutex);
+ if (!mFunctionCalls.empty()) {
+ Work task = mFunctionCalls.front();
+ mFunctionCalls.pop();
+ return std::make_optional<Work>(task);
+ }
+ return std::nullopt;
+ };
+
+ const auto task = getNextTask();
+
+ if (task) {
+ (*task)(*mRenderEngine);
}
+
+ std::unique_lock<std::mutex> lock(mThreadMutex);
mCondition.wait(lock, [this]() REQUIRES(mThreadMutex) {
return !mRunning || !mFunctionCalls.empty();
});
@@ -100,18 +126,31 @@
mInitializedCondition.wait(lock, [=] { return mIsInitialized; });
}
-void RenderEngineThreaded::primeCache() {
+std::future<void> RenderEngineThreaded::primeCache() {
+ const auto resultPromise = std::make_shared<std::promise<void>>();
+ std::future<void> resultFuture = resultPromise->get_future();
ATRACE_CALL();
// This function is designed so it can run asynchronously, so we do not need to wait
// for the futures.
{
std::lock_guard lock(mThreadMutex);
- mFunctionCalls.push([](renderengine::RenderEngine& instance) {
+ mFunctionCalls.push([resultPromise](renderengine::RenderEngine& instance) {
ATRACE_NAME("REThreaded::primeCache");
+ if (setSchedFifo(false) != NO_ERROR) {
+ ALOGW("Couldn't set SCHED_OTHER for primeCache");
+ }
+
instance.primeCache();
+ resultPromise->set_value();
+
+ if (setSchedFifo(true) != NO_ERROR) {
+ ALOGW("Couldn't set SCHED_FIFO for primeCache");
+ }
});
}
mCondition.notify_one();
+
+ return resultFuture;
}
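A small usage sketch for the new std::future<void> return value (the caller function is hypothetical): RenderEngineThreaded fulfils the promise from its worker thread, while the in-process backends return a default-constructed future, so a caller should check valid() before waiting.

#include <renderengine/RenderEngine.h>

#include <future>

// Hypothetical caller: start shader precompilation and optionally block on it.
void primeShaders(android::renderengine::RenderEngine& renderEngine) {
    std::future<void> done = renderEngine.primeCache();
    if (done.valid()) {
        done.wait();  // RenderEngineThreaded completes this from its own thread
    }
    // A default-constructed (invalid) future means the backend primed inline.
}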
void RenderEngineThreaded::dump(std::string& result) {
diff --git a/libs/renderengine/threaded/RenderEngineThreaded.h b/libs/renderengine/threaded/RenderEngineThreaded.h
index eb6098e..c81f137 100644
--- a/libs/renderengine/threaded/RenderEngineThreaded.h
+++ b/libs/renderengine/threaded/RenderEngineThreaded.h
@@ -42,7 +42,7 @@
RenderEngineThreaded(CreateInstanceFactory factory, RenderEngineType type);
~RenderEngineThreaded() override;
- void primeCache() override;
+ std::future<void> primeCache() override;
void dump(std::string& result) override;
@@ -74,6 +74,7 @@
private:
void threadMain(CreateInstanceFactory factory);
void waitUntilInitialized() const;
+ static status_t setSchedFifo(bool enabled);
/* ------------------------------------------------------------------------
* Threading
@@ -82,9 +83,10 @@
// Protects the creation and destruction of mThread.
mutable std::mutex mThreadMutex;
std::thread mThread GUARDED_BY(mThreadMutex);
- bool mRunning GUARDED_BY(mThreadMutex) = true;
- mutable std::queue<std::function<void(renderengine::RenderEngine& instance)>> mFunctionCalls
- GUARDED_BY(mThreadMutex);
+ std::atomic<bool> mRunning = true;
+
+ using Work = std::function<void(renderengine::RenderEngine&)>;
+ mutable std::queue<Work> mFunctionCalls GUARDED_BY(mThreadMutex);
mutable std::condition_variable mCondition;
// Used to allow select thread safe methods to be accessed without requiring the
diff --git a/services/sensorservice/SensorService.cpp b/services/sensorservice/SensorService.cpp
index 9df020d..a1c800e 100644
--- a/services/sensorservice/SensorService.cpp
+++ b/services/sensorservice/SensorService.cpp
@@ -2172,6 +2172,13 @@
return true;
}
+/**
+ * Checks whether a sensor should be rate-capped under the HIGH_SAMPLING_RATE_SENSORS
+ * permission.
+ *
+ * This needs to be kept in sync with the list defined on the Java side
+ * in frameworks/base/core/java/android/hardware/SystemSensorManager.java
+ */
bool SensorService::isSensorInCappedSet(int sensorType) {
return (sensorType == SENSOR_TYPE_ACCELEROMETER
|| sensorType == SENSOR_TYPE_ACCELEROMETER_UNCALIBRATED
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/CompositionRefreshArgs.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/CompositionRefreshArgs.h
index 289cb11..29937fb 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/CompositionRefreshArgs.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/CompositionRefreshArgs.h
@@ -82,6 +82,9 @@
// The earliest time to send the present command to the HAL
std::chrono::steady_clock::time_point earliestPresentTime;
+
+ // The predicted next invalidation time
+ std::optional<std::chrono::steady_clock::time_point> nextInvalidateTime;
};
} // namespace android::compositionengine
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/Output.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/Output.h
index 257974f..1416b1e 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/Output.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/Output.h
@@ -286,7 +286,7 @@
virtual std::optional<base::unique_fd> composeSurfaces(
const Region&, const compositionengine::CompositionRefreshArgs& refreshArgs) = 0;
virtual void postFramebuffer() = 0;
- virtual void renderCachedSets() = 0;
+ virtual void renderCachedSets(const CompositionRefreshArgs&) = 0;
virtual void chooseCompositionStrategy() = 0;
virtual bool getSkipColorTransform() const = 0;
virtual FrameFences presentAndGetFrameFences() = 0;
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/Output.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/Output.h
index f10ff25..f832084 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/Output.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/Output.h
@@ -93,7 +93,7 @@
std::optional<base::unique_fd> composeSurfaces(
const Region&, const compositionengine::CompositionRefreshArgs& refreshArgs) override;
void postFramebuffer() override;
- void renderCachedSets() override;
+ void renderCachedSets(const CompositionRefreshArgs&) override;
void cacheClientCompositionRequests(uint32_t) override;
// Testing
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayer.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayer.h
index 2ffd472..244f8ab 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayer.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/OutputLayer.h
@@ -73,10 +73,11 @@
void writeOutputIndependentGeometryStateToHWC(HWC2::Layer*, const LayerFECompositionState&,
bool skipLayer);
void writeOutputDependentPerFrameStateToHWC(HWC2::Layer*);
- void writeOutputIndependentPerFrameStateToHWC(HWC2::Layer*, const LayerFECompositionState&);
+ void writeOutputIndependentPerFrameStateToHWC(HWC2::Layer*, const LayerFECompositionState&,
+ bool skipLayer);
void writeSolidColorStateToHWC(HWC2::Layer*, const LayerFECompositionState&);
void writeSidebandStateToHWC(HWC2::Layer*, const LayerFECompositionState&);
- void writeBufferStateToHWC(HWC2::Layer*, const LayerFECompositionState&);
+ void writeBufferStateToHWC(HWC2::Layer*, const LayerFECompositionState&, bool skipLayer);
void writeCompositionTypeToHWC(HWC2::Layer*, Hwc2::IComposerClient::Composition,
bool isPeekingThrough, bool skipLayer);
void detectDisallowedCompositionTypeChange(Hwc2::IComposerClient::Composition from,
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/CachedSet.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/CachedSet.h
index a4356c5..7cb0f6b 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/CachedSet.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/CachedSet.h
@@ -98,6 +98,7 @@
mDrawFence = nullptr;
mBlurLayer = nullptr;
mHolePunchLayer = nullptr;
+ mSkipCount = 0;
mLayers.insert(mLayers.end(), other.mLayers.cbegin(), other.mLayers.cend());
Region boundingRegion;
@@ -107,6 +108,8 @@
mVisibleRegion.orSelf(other.mVisibleRegion);
}
void incrementAge() { ++mAge; }
+ void incrementSkipCount() { mSkipCount++; }
+ size_t getSkipCount() { return mSkipCount; }
// Renders the cached set with the supplied output composition state.
void render(renderengine::RenderEngine& re, TexturePool& texturePool,
@@ -155,6 +158,7 @@
Rect mBounds = Rect::EMPTY_RECT;
Region mVisibleRegion;
size_t mAge = 0;
+ size_t mSkipCount = 0;
// TODO(b/190411067): This is a shared pointer only because CachedSets are copied into different
// containers in the Flattener. Logically this should have unique ownership otherwise.
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Flattener.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Flattener.h
index 94a169e..7534548 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Flattener.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Flattener.h
@@ -20,6 +20,7 @@
#include <compositionengine/impl/planner/CachedSet.h>
#include <compositionengine/impl/planner/LayerState.h>
+#include <chrono>
#include <numeric>
#include <vector>
@@ -37,7 +38,35 @@
class Flattener {
public:
- Flattener(renderengine::RenderEngine& renderEngine, bool enableHolePunch = false);
+ struct CachedSetRenderSchedulingTunables {
+ // This default assumes that rendering a cached set takes about 3ms. That time is then cut
+ // in half - the next frame using the cached set would have the same workload, meaning that
+ // composition cost is the same. This is best illustrated with the following example:
+ //
+ // Suppose we're at a 120hz cadence so SurfaceFlinger is budgeted 8.3ms per-frame. If
+ // renderCachedSets costs 3ms, then two consecutive frames have timings:
+ //
+ // First frame: Start at 0ms, end at 6.8ms.
+ // renderCachedSets: Start at 6.8ms, end at 9.8ms.
+ // Second frame: Start at 9.8ms, end at 16.6ms.
+ //
+ // Now the second frame won't render a cached set afterwards, but the first frame didn't
+ // really steal time from the second frame.
+ static const constexpr std::chrono::nanoseconds kDefaultCachedSetRenderDuration = 1500us;
+
+ static const constexpr size_t kDefaultMaxDeferRenderAttempts = 240;
+
+ // Duration allocated for rendering a cached set. If we don't have enough time for rendering
+ // a cached set, then rendering is deferred to another frame.
+ const std::chrono::nanoseconds cachedSetRenderDuration;
+ // Maximum number of times that we defer rendering a cached set. If we defer rendering a
+ // cached set too many times, render it anyway so that future frames benefit from the
+ // flattened cached set.
+ const size_t maxDeferRenderAttempts;
+ };
+ Flattener(renderengine::RenderEngine& renderEngine, bool enableHolePunch = false,
+ std::optional<CachedSetRenderSchedulingTunables> cachedSetRenderSchedulingTunables =
+ std::nullopt);
void setDisplaySize(ui::Size size) {
mDisplaySize = size;
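An illustrative, standalone sketch of the scheduling arithmetic described in the tunables comment above, using the default 1.5 ms budget (the helper is hypothetical; the real check lives in Flattener::renderCachedSets later in this patch):

#include <chrono>
#include <cstddef>

using namespace std::chrono_literals;

// Hypothetical helper mirroring the check in Flattener::renderCachedSets:
// defer rendering the cached set when it would overrun the deadline, unless it
// has already been skipped too many times.
static bool shouldDeferCachedSetRender(std::chrono::steady_clock::time_point now,
                                       std::chrono::steady_clock::time_point renderDeadline,
                                       size_t skipCount) {
    constexpr auto kCachedSetRenderDuration = 1500us;  // default tunable
    constexpr size_t kMaxDeferRenderAttempts = 240;    // default tunable
    const auto estimatedRenderFinish = now + kCachedSetRenderDuration;
    return estimatedRenderFinish > renderDeadline && skipCount <= kMaxDeferRenderAttempts;
}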
@@ -48,16 +77,14 @@
std::chrono::steady_clock::time_point now);
// Renders the newest cached sets with the supplied output composition state
- void renderCachedSets(const OutputCompositionState& outputState);
+ void renderCachedSets(const OutputCompositionState& outputState,
+ std::optional<std::chrono::steady_clock::time_point> renderDeadline);
void dump(std::string& result) const;
void dumpLayers(std::string& result) const;
const std::optional<CachedSet>& getNewCachedSetForTesting() const { return mNewCachedSet; }
-protected:
- std::optional<CachedSet> mNewCachedSet;
-
private:
size_t calculateDisplayCost(const std::vector<const LayerState*>& layers) const;
@@ -149,9 +176,15 @@
renderengine::RenderEngine& mRenderEngine;
const bool mEnableHolePunch;
+ const std::optional<CachedSetRenderSchedulingTunables> mCachedSetRenderSchedulingTunables;
TexturePool mTexturePool;
+protected:
+ // mNewCachedSet must be destroyed before mTexturePool is.
+ std::optional<CachedSet> mNewCachedSet;
+
+private:
ui::Size mDisplaySize;
NonBufferHash mCurrentGeometry;
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Planner.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Planner.h
index fd1ddfc..be34153 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Planner.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/impl/planner/Planner.h
@@ -58,8 +58,11 @@
void reportFinalPlan(
compositionengine::Output::OutputLayersEnumerator<compositionengine::Output>&& layers);
- // The planner will call to the Flattener to render any pending cached set
- void renderCachedSets(const OutputCompositionState& outputState);
+ // The planner will call to the Flattener to render any pending cached set.
+ // Rendering a pending cached set is optional: if the renderDeadline is not far enough in the
+ // future then the planner may opt to skip rendering the cached set.
+ void renderCachedSets(const OutputCompositionState& outputState,
+ std::optional<std::chrono::steady_clock::time_point> renderDeadline);
void dump(const Vector<String16>& args, std::string&);
diff --git a/services/surfaceflinger/CompositionEngine/include/compositionengine/mock/Output.h b/services/surfaceflinger/CompositionEngine/include/compositionengine/mock/Output.h
index 4b4d375..8e777e3 100644
--- a/services/surfaceflinger/CompositionEngine/include/compositionengine/mock/Output.h
+++ b/services/surfaceflinger/CompositionEngine/include/compositionengine/mock/Output.h
@@ -109,7 +109,7 @@
MOCK_CONST_METHOD0(getSkipColorTransform, bool());
MOCK_METHOD0(postFramebuffer, void());
- MOCK_METHOD0(renderCachedSets, void());
+ MOCK_METHOD1(renderCachedSets, void(const CompositionRefreshArgs&));
MOCK_METHOD0(presentAndGetFrameFences, compositionengine::Output::FrameFences());
MOCK_METHOD3(generateClientCompositionRequests,
diff --git a/services/surfaceflinger/CompositionEngine/src/Output.cpp b/services/surfaceflinger/CompositionEngine/src/Output.cpp
index cd2f742..67bb149 100644
--- a/services/surfaceflinger/CompositionEngine/src/Output.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/Output.cpp
@@ -434,7 +434,7 @@
devOptRepaintFlash(refreshArgs);
finishFrame(refreshArgs);
postFramebuffer();
- renderCachedSets();
+ renderCachedSets(refreshArgs);
}
void Output::rebuildLayerStacks(const compositionengine::CompositionRefreshArgs& refreshArgs,
@@ -1312,9 +1312,9 @@
mReleasedLayers.clear();
}
-void Output::renderCachedSets() {
+void Output::renderCachedSets(const CompositionRefreshArgs& refreshArgs) {
if (mPlanner) {
- mPlanner->renderCachedSets(getState());
+ mPlanner->renderCachedSets(getState(), refreshArgs.nextInvalidateTime);
}
}
diff --git a/services/surfaceflinger/CompositionEngine/src/OutputLayer.cpp b/services/surfaceflinger/CompositionEngine/src/OutputLayer.cpp
index cd14327..e4e46a7 100644
--- a/services/surfaceflinger/CompositionEngine/src/OutputLayer.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/OutputLayer.cpp
@@ -349,7 +349,7 @@
}
writeOutputDependentPerFrameStateToHWC(hwcLayer.get());
- writeOutputIndependentPerFrameStateToHWC(hwcLayer.get(), *outputIndependentState);
+ writeOutputIndependentPerFrameStateToHWC(hwcLayer.get(), *outputIndependentState, skipLayer);
writeCompositionTypeToHWC(hwcLayer.get(), requestedCompositionType, isPeekingThrough,
skipLayer);
@@ -471,7 +471,8 @@
}
void OutputLayer::writeOutputIndependentPerFrameStateToHWC(
- HWC2::Layer* hwcLayer, const LayerFECompositionState& outputIndependentState) {
+ HWC2::Layer* hwcLayer, const LayerFECompositionState& outputIndependentState,
+ bool skipLayer) {
switch (auto error = hwcLayer->setColorTransform(outputIndependentState.colorTransform)) {
case hal::Error::NONE:
break;
@@ -504,7 +505,7 @@
break;
case hal::Composition::CURSOR:
case hal::Composition::DEVICE:
- writeBufferStateToHWC(hwcLayer, outputIndependentState);
+ writeBufferStateToHWC(hwcLayer, outputIndependentState, skipLayer);
break;
case hal::Composition::INVALID:
case hal::Composition::CLIENT:
@@ -541,7 +542,8 @@
}
void OutputLayer::writeBufferStateToHWC(HWC2::Layer* hwcLayer,
- const LayerFECompositionState& outputIndependentState) {
+ const LayerFECompositionState& outputIndependentState,
+ bool skipLayer) {
auto supportedPerFrameMetadata =
getOutput().getDisplayColorProfile()->getSupportedPerFrameMetadata();
if (auto error = hwcLayer->setPerFrameMetadata(supportedPerFrameMetadata,
@@ -554,7 +556,7 @@
sp<GraphicBuffer> buffer = outputIndependentState.buffer;
sp<Fence> acquireFence = outputIndependentState.acquireFence;
int slot = outputIndependentState.bufferSlot;
- if (getState().overrideInfo.buffer != nullptr) {
+ if (getState().overrideInfo.buffer != nullptr && !skipLayer) {
buffer = getState().overrideInfo.buffer->getBuffer();
acquireFence = getState().overrideInfo.acquireFence;
slot = HwcBufferCache::FLATTENER_CACHING_SLOT;
diff --git a/services/surfaceflinger/CompositionEngine/src/planner/CachedSet.cpp b/services/surfaceflinger/CompositionEngine/src/planner/CachedSet.cpp
index 69e8c7d..acc7ed2 100644
--- a/services/surfaceflinger/CompositionEngine/src/planner/CachedSet.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/planner/CachedSet.cpp
@@ -277,6 +277,7 @@
mOutputSpace.orientation = outputState.framebufferSpace.orientation;
mOutputDataspace = outputDataspace;
mOrientation = orientation;
+ mSkipCount = 0;
} else {
mTexture.reset();
}
@@ -294,6 +295,12 @@
return false;
}
+ // Do not use a hole punch with an HDR layer; this should be done in client
+ // composition to properly mix HDR with SDR.
+ if (hasHdrLayers()) {
+ return false;
+ }
+
const auto& layerFE = mLayers[0].getState()->getOutputLayer()->getLayerFE();
if (layerFE.getCompositionState()->forceClientComposition) {
return false;
diff --git a/services/surfaceflinger/CompositionEngine/src/planner/Flattener.cpp b/services/surfaceflinger/CompositionEngine/src/planner/Flattener.cpp
index 192c411..2bcaf60 100644
--- a/services/surfaceflinger/CompositionEngine/src/planner/Flattener.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/planner/Flattener.cpp
@@ -23,7 +23,7 @@
#include <compositionengine/impl/planner/Flattener.h>
#include <compositionengine/impl/planner/LayerState.h>
-#include <utils/Trace.h>
+#include <gui/TraceUtils.h>
using time_point = std::chrono::steady_clock::time_point;
using namespace std::chrono_literals;
@@ -60,9 +60,12 @@
} // namespace
-Flattener::Flattener(renderengine::RenderEngine& renderEngine, bool enableHolePunch)
+Flattener::Flattener(
+ renderengine::RenderEngine& renderEngine, bool enableHolePunch,
+ std::optional<CachedSetRenderSchedulingTunables> cachedSetRenderSchedulingTunables)
: mRenderEngine(renderEngine),
mEnableHolePunch(enableHolePunch),
+ mCachedSetRenderSchedulingTunables(cachedSetRenderSchedulingTunables),
mTexturePool(mRenderEngine) {
const int timeoutInMs =
base::GetIntProperty(std::string("debug.sf.layer_caching_active_layer_timeout_ms"), 0);
@@ -105,12 +108,45 @@
return hash;
}
-void Flattener::renderCachedSets(const OutputCompositionState& outputState) {
+void Flattener::renderCachedSets(
+ const OutputCompositionState& outputState,
+ std::optional<std::chrono::steady_clock::time_point> renderDeadline) {
ATRACE_CALL();
- if (!mNewCachedSet || mNewCachedSet->hasRenderedBuffer()) {
+
+ if (!mNewCachedSet) {
return;
}
+ // Ensure that a cached set has a valid buffer first
+ if (mNewCachedSet->hasRenderedBuffer()) {
+ ATRACE_NAME("mNewCachedSet->hasRenderedBuffer()");
+ return;
+ }
+
+ const auto now = std::chrono::steady_clock::now();
+
+ // If we have a render deadline, and the flattener is configured to skip rendering if we don't
+ // have enough time, then we skip rendering the cached set if we think that we'll steal too much
+ // time from the next frame.
+ if (renderDeadline && mCachedSetRenderSchedulingTunables) {
+ if (const auto estimatedRenderFinish =
+ now + mCachedSetRenderSchedulingTunables->cachedSetRenderDuration;
+ estimatedRenderFinish > *renderDeadline) {
+ mNewCachedSet->incrementSkipCount();
+
+ if (mNewCachedSet->getSkipCount() <=
+ mCachedSetRenderSchedulingTunables->maxDeferRenderAttempts) {
+ ATRACE_FORMAT("DeadlinePassed: exceeded deadline by: %d us",
+ std::chrono::duration_cast<std::chrono::microseconds>(
+ estimatedRenderFinish - *renderDeadline)
+ .count());
+ return;
+ } else {
+ ATRACE_NAME("DeadlinePassed: exceeded max skips");
+ }
+ }
+ }
+
mNewCachedSet->render(mRenderEngine, mTexturePool, outputState);
}
diff --git a/services/surfaceflinger/CompositionEngine/src/planner/Planner.cpp b/services/surfaceflinger/CompositionEngine/src/planner/Planner.cpp
index 711a634..be2510f 100644
--- a/services/surfaceflinger/CompositionEngine/src/planner/Planner.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/planner/Planner.cpp
@@ -26,16 +26,43 @@
#include <compositionengine/impl/planner/Planner.h>
#include <utils/Trace.h>
+#include <chrono>
namespace android::compositionengine::impl::planner {
+namespace {
+
+std::optional<Flattener::CachedSetRenderSchedulingTunables> buildFlattenerTuneables() {
+ if (!base::GetBoolProperty(std::string("debug.sf.enable_cached_set_render_scheduling"), true)) {
+ return std::nullopt;
+ }
+
+ auto renderDuration = std::chrono::nanoseconds(
+ base::GetUintProperty<uint64_t>(std::string("debug.sf.cached_set_render_duration_ns"),
+ Flattener::CachedSetRenderSchedulingTunables::
+ kDefaultCachedSetRenderDuration.count()));
+
+ auto maxDeferRenderAttempts = base::GetUintProperty<
+ size_t>(std::string("debug.sf.cached_set_max_defer_render_attmpts"),
+ Flattener::CachedSetRenderSchedulingTunables::kDefaultMaxDeferRenderAttempts);
+
+ return std::make_optional<Flattener::CachedSetRenderSchedulingTunables>(
+ Flattener::CachedSetRenderSchedulingTunables{
+ .cachedSetRenderDuration = renderDuration,
+ .maxDeferRenderAttempts = maxDeferRenderAttempts,
+ });
+}
+
+} // namespace
+
Planner::Planner(renderengine::RenderEngine& renderEngine)
// Implicitly, layer caching must also be enabled for the hole punch or
// predictor to have any effect.
// E.g., setprop debug.sf.enable_layer_caching 1, or
// adb shell service call SurfaceFlinger 1040 i32 1 [i64 <display ID>]
: mFlattener(renderEngine,
- base::GetBoolProperty(std::string("debug.sf.enable_hole_punch_pip"), true)) {
+ base::GetBoolProperty(std::string("debug.sf.enable_hole_punch_pip"), true),
+ buildFlattenerTuneables()) {
mPredictorEnabled =
base::GetBoolProperty(std::string("debug.sf.enable_planner_prediction"), false);
}
@@ -161,9 +188,11 @@
finalPlan);
}
-void Planner::renderCachedSets(const OutputCompositionState& outputState) {
+void Planner::renderCachedSets(
+ const OutputCompositionState& outputState,
+ std::optional<std::chrono::steady_clock::time_point> renderDeadline) {
ATRACE_CALL();
- mFlattener.renderCachedSets(outputState);
+ mFlattener.renderCachedSets(outputState, renderDeadline);
}
void Planner::dump(const Vector<String16>& args, std::string& result) {
diff --git a/services/surfaceflinger/CompositionEngine/tests/OutputLayerTest.cpp b/services/surfaceflinger/CompositionEngine/tests/OutputLayerTest.cpp
index 5bd1216..e9ecf3e 100644
--- a/services/surfaceflinger/CompositionEngine/tests/OutputLayerTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/OutputLayerTest.cpp
@@ -699,6 +699,7 @@
Hwc2::IComposerClient::BlendMode::PREMULTIPLIED;
static constexpr float kAlpha = 51.f;
static constexpr float kOverrideAlpha = 1.f;
+ static constexpr float kSkipAlpha = 0.f;
static constexpr ui::Dataspace kDataspace = static_cast<ui::Dataspace>(71);
static constexpr ui::Dataspace kOverrideDataspace = static_cast<ui::Dataspace>(72);
static constexpr int kSupportedPerFrameMetadata = 101;
@@ -1055,6 +1056,22 @@
/*zIsOverridden*/ false, /*isPeekingThrough*/ false);
}
+TEST_F(OutputLayerWriteStateToHWCTest, overriddenSkipLayerDoesNotSendBuffer) {
+ mLayerFEState.compositionType = Hwc2::IComposerClient::Composition::DEVICE;
+ includeOverrideInfo();
+
+ expectGeometryCommonCalls(kOverrideDisplayFrame, kOverrideSourceCrop, kOverrideBufferTransform,
+ kOverrideBlendMode, kSkipAlpha);
+ expectPerFrameCommonCalls(SimulateUnsupported::None, kOverrideDataspace, kOverrideVisibleRegion,
+ kOverrideSurfaceDamage);
+ expectSetHdrMetadataAndBufferCalls();
+ expectSetCompositionTypeCall(Hwc2::IComposerClient::Composition::DEVICE);
+ EXPECT_CALL(*mLayerFE, hasRoundedCorners()).WillRepeatedly(Return(false));
+
+ mOutputLayer.writeStateToHWC(/*includeGeometry*/ true, /*skipLayer*/ true, 0,
+ /*zIsOverridden*/ false, /*isPeekingThrough*/ false);
+}
+
TEST_F(OutputLayerWriteStateToHWCTest, includesOverrideInfoIfPresent) {
mLayerFEState.compositionType = Hwc2::IComposerClient::Composition::DEVICE;
includeOverrideInfo();
diff --git a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
index c381081..742b155 100644
--- a/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/OutputTest.cpp
@@ -1767,7 +1767,7 @@
MOCK_METHOD1(devOptRepaintFlash, void(const compositionengine::CompositionRefreshArgs&));
MOCK_METHOD1(finishFrame, void(const compositionengine::CompositionRefreshArgs&));
MOCK_METHOD0(postFramebuffer, void());
- MOCK_METHOD0(renderCachedSets, void());
+ MOCK_METHOD1(renderCachedSets, void(const compositionengine::CompositionRefreshArgs&));
};
StrictMock<OutputPartialMock> mOutput;
@@ -1787,7 +1787,7 @@
EXPECT_CALL(mOutput, devOptRepaintFlash(Ref(args)));
EXPECT_CALL(mOutput, finishFrame(Ref(args)));
EXPECT_CALL(mOutput, postFramebuffer());
- EXPECT_CALL(mOutput, renderCachedSets());
+ EXPECT_CALL(mOutput, renderCachedSets(Ref(args)));
mOutput.present(args);
}
diff --git a/services/surfaceflinger/CompositionEngine/tests/planner/CachedSetTest.cpp b/services/surfaceflinger/CompositionEngine/tests/planner/CachedSetTest.cpp
index b15e4f3..0acc317 100644
--- a/services/surfaceflinger/CompositionEngine/tests/planner/CachedSetTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/planner/CachedSetTest.cpp
@@ -223,6 +223,16 @@
EXPECT_EQ(2u, cachedSet.getAge());
}
+TEST_F(CachedSetTest, incrementSkipCount) {
+ CachedSet::Layer& layer = *mTestLayers[0]->cachedSetLayer.get();
+ CachedSet cachedSet(layer);
+ EXPECT_EQ(0u, cachedSet.getSkipCount());
+ cachedSet.incrementSkipCount();
+ EXPECT_EQ(1u, cachedSet.getSkipCount());
+ cachedSet.incrementSkipCount();
+ EXPECT_EQ(2u, cachedSet.getSkipCount());
+}
+
TEST_F(CachedSetTest, hasBufferUpdate_NoUpdate) {
CachedSet::Layer& layer1 = *mTestLayers[0]->cachedSetLayer.get();
CachedSet::Layer& layer2 = *mTestLayers[1]->cachedSetLayer.get();
@@ -257,6 +267,8 @@
CachedSet cachedSet1(layer1);
CachedSet cachedSet2(layer2);
cachedSet1.addLayer(layer3.getState(), kStartTime + 10ms);
+ cachedSet1.incrementSkipCount();
+ EXPECT_EQ(1u, cachedSet1.getSkipCount());
cachedSet1.append(cachedSet2);
EXPECT_EQ(kStartTime, cachedSet1.getLastUpdate());
@@ -268,6 +280,8 @@
EXPECT_TRUE(cachedSet1.getVisibleRegion().hasSameRects(expectedRegion));
EXPECT_EQ(3u, cachedSet1.getLayerCount());
EXPECT_EQ(0u, cachedSet1.getAge());
+ EXPECT_EQ(0u, cachedSet1.getSkipCount());
+
expectNoBuffer(cachedSet1);
// TODO(b/181192080): check that getNonBufferHash returns the correct hash value
// EXPECT_EQ(android::hashCombine(layer1.getHash(), layer2.getHash()),
@@ -429,6 +443,20 @@
EXPECT_FALSE(cachedSet.requiresHolePunch());
}
+TEST_F(CachedSetTest, holePunch_requiresNonHdr) {
+ mTestLayers[0]->outputLayerCompositionState.dataspace = ui::Dataspace::BT2020_PQ;
+ mTestLayers[0]->layerState->update(&mTestLayers[0]->outputLayer);
+
+ CachedSet::Layer& layer = *mTestLayers[0]->cachedSetLayer.get();
+ mTestLayers[0]->layerFECompositionState.buffer = sp<GraphicBuffer>::make();
+ sp<mock::LayerFE> layerFE = mTestLayers[0]->layerFE;
+
+ CachedSet cachedSet(layer);
+ EXPECT_CALL(*layerFE, hasRoundedCorners()).WillRepeatedly(Return(true));
+
+ EXPECT_FALSE(cachedSet.requiresHolePunch());
+}
+
TEST_F(CachedSetTest, requiresHolePunch) {
CachedSet::Layer& layer = *mTestLayers[0]->cachedSetLayer.get();
mTestLayers[0]->layerFECompositionState.buffer = sp<GraphicBuffer>::make();
diff --git a/services/surfaceflinger/CompositionEngine/tests/planner/FlattenerTest.cpp b/services/surfaceflinger/CompositionEngine/tests/planner/FlattenerTest.cpp
index e176c98..334b855 100644
--- a/services/surfaceflinger/CompositionEngine/tests/planner/FlattenerTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/planner/FlattenerTest.cpp
@@ -24,6 +24,7 @@
#include <renderengine/ExternalTexture.h>
#include <renderengine/LayerSettings.h>
#include <renderengine/mock/RenderEngine.h>
+#include <chrono>
namespace android::compositionengine {
using namespace std::chrono_literals;
@@ -46,22 +47,28 @@
class TestableFlattener : public Flattener {
public:
- TestableFlattener(renderengine::RenderEngine& renderEngine, bool enableHolePunch)
- : Flattener(renderEngine, enableHolePunch) {}
+ TestableFlattener(renderengine::RenderEngine& renderEngine, bool enableHolePunch,
+ std::optional<Flattener::CachedSetRenderSchedulingTunables>
+ cachedSetRenderSchedulingTunables = std::nullopt)
+ : Flattener(renderEngine, enableHolePunch, cachedSetRenderSchedulingTunables) {}
const std::optional<CachedSet>& getNewCachedSetForTesting() const { return mNewCachedSet; }
};
class FlattenerTest : public testing::Test {
public:
- FlattenerTest() : mFlattener(std::make_unique<TestableFlattener>(mRenderEngine, true)) {}
+ FlattenerTest() : FlattenerTest(std::nullopt) {}
void SetUp() override;
protected:
+ FlattenerTest(std::optional<Flattener::CachedSetRenderSchedulingTunables>
+ cachedSetRenderSchedulingTunables)
+ : mFlattener(std::make_unique<TestableFlattener>(mRenderEngine, true,
+ cachedSetRenderSchedulingTunables)) {}
void initializeOverrideBuffer(const std::vector<const LayerState*>& layers);
void initializeFlattener(const std::vector<const LayerState*>& layers);
void expectAllLayersFlattened(const std::vector<const LayerState*>& layers);
- // mRenderEngine is held as a reference in mFlattener, so mFlattener must be destroyed first.
+ // mRenderEngine is held as a reference in mFlattener, so explicitly destroy mFlattener first.
renderengine::mock::RenderEngine mRenderEngine;
std::unique_ptr<TestableFlattener> mFlattener;
@@ -148,13 +155,13 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
// same geometry, update the internal layer stack
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
}
void FlattenerTest::expectAllLayersFlattened(const std::vector<const LayerState*>& layers) {
@@ -164,7 +171,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
for (const auto layer : layers) {
EXPECT_EQ(nullptr, layer->getOutputLayer()->getState().overrideInfo.buffer);
@@ -174,7 +181,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
const auto buffer = layers[0]->getOutputLayer()->getState().overrideInfo.buffer;
EXPECT_NE(nullptr, buffer);
@@ -209,7 +216,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
}
TEST_F(FlattenerTest, flattenLayers_basicFlatten) {
@@ -255,7 +262,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer1, overrideBuffer2);
@@ -360,7 +367,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_EQ(nullptr, overrideBuffer1);
EXPECT_EQ(nullptr, overrideBuffer2);
@@ -397,7 +404,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_EQ(nullptr, overrideBuffer1);
EXPECT_EQ(nullptr, overrideBuffer2);
@@ -406,7 +413,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_EQ(nullptr, overrideBuffer1);
EXPECT_NE(nullptr, overrideBuffer2);
@@ -419,7 +426,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_EQ(nullptr, overrideBuffer1);
EXPECT_NE(nullptr, overrideBuffer2);
@@ -428,7 +435,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer1, overrideBuffer2);
@@ -470,7 +477,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_EQ(nullptr, overrideBuffer1);
EXPECT_EQ(nullptr, overrideBuffer2);
@@ -484,7 +491,7 @@
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
mOutputState.framebufferSpace.orientation = ui::ROTATION_90;
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer1, overrideBuffer2);
@@ -497,7 +504,7 @@
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
mOutputState.framebufferSpace.orientation = ui::ROTATION_180;
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer1, overrideBuffer2);
@@ -512,7 +519,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer1, overrideBuffer2);
@@ -524,7 +531,7 @@
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
mOutputState.framebufferSpace.orientation = ui::ROTATION_270;
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer1, overrideBuffer2);
@@ -563,7 +570,7 @@
// This will render a CachedSet.
EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Return(NO_ERROR));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
// We've rendered a CachedSet, but we haven't merged it in.
EXPECT_EQ(nullptr, overrideBuffer1);
@@ -576,7 +583,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer1, overrideBuffer2);
@@ -625,7 +632,7 @@
// This will render a CachedSet.
EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Return(NO_ERROR));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
// We've rendered a CachedSet, but we haven't merged it in.
EXPECT_EQ(nullptr, overrideBuffer1);
@@ -638,7 +645,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer1, overrideBuffer2);
@@ -682,7 +689,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
for (const auto layer : layers) {
EXPECT_EQ(nullptr, layer->getOutputLayer()->getState().overrideInfo.buffer);
@@ -692,7 +699,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer1, overrideBuffer2);
EXPECT_EQ(nullptr, overrideBuffer3);
@@ -726,7 +733,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
for (const auto layer : layers) {
EXPECT_EQ(nullptr, layer->getOutputLayer()->getState().overrideInfo.buffer);
@@ -737,7 +744,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
for (const auto layer : layers) {
EXPECT_EQ(nullptr, layer->getOutputLayer()->getState().overrideInfo.buffer);
}
@@ -778,7 +785,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
for (const auto layer : layers) {
EXPECT_EQ(nullptr, layer->getOutputLayer()->getState().overrideInfo.buffer);
@@ -788,7 +795,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_EQ(nullptr, overrideBuffer1);
EXPECT_EQ(nullptr, blurOverrideBuffer);
EXPECT_NE(nullptr, overrideBuffer3);
@@ -825,7 +832,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
const auto& cachedSet = mFlattener->getNewCachedSetForTesting();
ASSERT_NE(std::nullopt, cachedSet);
@@ -839,7 +846,7 @@
initializeOverrideBuffer(layers);
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer2, overrideBuffer1);
EXPECT_EQ(nullptr, blurOverrideBuffer);
@@ -866,7 +873,7 @@
initializeOverrideBuffer(layers);
EXPECT_EQ(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_EQ(nullptr, overrideBuffer1);
EXPECT_EQ(nullptr, overrideBuffer2);
@@ -874,16 +881,61 @@
// Simulate attempting to render prior to merging the new cached set with the layer stack.
// Here we should not try to re-render.
EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).Times(0);
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
// We provide the override buffer now that it's rendered
EXPECT_NE(getNonBufferHash(layers),
mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
- mFlattener->renderCachedSets(mOutputState);
+ mFlattener->renderCachedSets(mOutputState, std::nullopt);
EXPECT_NE(nullptr, overrideBuffer1);
EXPECT_EQ(overrideBuffer2, overrideBuffer1);
}
+const constexpr std::chrono::nanoseconds kCachedSetRenderDuration = 0ms;
+const constexpr size_t kMaxDeferRenderAttempts = 2;
+
+class FlattenerRenderSchedulingTest : public FlattenerTest {
+public:
+ FlattenerRenderSchedulingTest()
+ : FlattenerTest(
+ Flattener::CachedSetRenderSchedulingTunables{.cachedSetRenderDuration =
+ kCachedSetRenderDuration,
+ .maxDeferRenderAttempts =
+ kMaxDeferRenderAttempts}) {
+ }
+};
+
+TEST_F(FlattenerRenderSchedulingTest, flattenLayers_renderCachedSets_defersUpToMaxAttempts) {
+ auto& layerState1 = mTestLayers[0]->layerState;
+ auto& layerState2 = mTestLayers[1]->layerState;
+
+ const std::vector<const LayerState*> layers = {
+ layerState1.get(),
+ layerState2.get(),
+ };
+
+ initializeFlattener(layers);
+
+ // Mark the layers inactive
+ mTime += 200ms;
+
+ initializeOverrideBuffer(layers);
+ EXPECT_EQ(getNonBufferHash(layers),
+ mFlattener->flattenLayers(layers, getNonBufferHash(layers), mTime));
+
+ for (size_t i = 0; i < kMaxDeferRenderAttempts; i++) {
+ EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).Times(0);
+ mFlattener->renderCachedSets(mOutputState,
+ std::chrono::steady_clock::now() -
+ (kCachedSetRenderDuration + 10ms));
+ }
+
+ EXPECT_CALL(mRenderEngine, drawLayers(_, _, _, _, _, _)).WillOnce(Return(NO_ERROR));
+ mFlattener->renderCachedSets(mOutputState,
+ std::chrono::steady_clock::now() -
+ (kCachedSetRenderDuration + 10ms));
+}
+
} // namespace
} // namespace android::compositionengine
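
Note on the new FlattenerRenderSchedulingTest above: it relies on renderCachedSets() skipping GPU work when the second argument (treated here as a render-deadline time point) leaves less headroom than cachedSetRenderDuration, and on the Flattener giving up deferring after maxDeferRenderAttempts frames. A minimal standalone sketch of that deferral policy follows; the function and parameter names are illustrative, not the actual Flattener internals.

#include <chrono>
#include <cstddef>
#include <optional>

using TimePoint = std::chrono::steady_clock::time_point;

// Illustrative sketch: decide whether to skip rendering the cached set this frame.
// Mirrors the behaviour the test above exercises; not the real Flattener code.
bool shouldDeferRender(std::optional<TimePoint> renderDeadline,
                       std::chrono::nanoseconds cachedSetRenderDuration,
                       size_t maxDeferRenderAttempts,
                       size_t& deferredAttempts /* persisted across frames */) {
    if (!renderDeadline.has_value() || deferredAttempts >= maxDeferRenderAttempts) {
        deferredAttempts = 0;
        return false;  // No deadline info, or we have already deferred enough: render now.
    }
    if (*renderDeadline - std::chrono::steady_clock::now() < cachedSetRenderDuration) {
        ++deferredAttempts;  // Not enough headroom before the deadline; try again next frame.
        return true;
    }
    deferredAttempts = 0;
    return false;
}

With kCachedSetRenderDuration = 0ms and kMaxDeferRenderAttempts = 2 as in the test, a deadline in the past defers twice and the third call renders.
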
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index e50087f..2bf5602 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -2382,6 +2382,16 @@
info.touchableRegion = inputTransform.transform(info.touchableRegion);
}
+void Layer::fillTouchOcclusionMode(InputWindowInfo& info) {
+ sp<Layer> p = this;
+ while (p != nullptr && !p->hasInputInfo()) {
+ p = p->mDrawingParent.promote();
+ }
+ if (p != nullptr) {
+ info.touchOcclusionMode = p->mDrawingState.inputInfo.touchOcclusionMode;
+ }
+}
+
InputWindowInfo Layer::fillInputInfo(const sp<DisplayDevice>& display) {
if (!hasInputInfo()) {
mDrawingState.inputInfo.name = getName();
@@ -2419,6 +2429,7 @@
// anything.
info.visible = hasInputInfo() ? canReceiveInput() : isVisible();
info.alpha = getAlpha();
+ fillTouchOcclusionMode(info);
auto cropLayer = mDrawingState.touchableRegionCrop.promote();
if (info.replaceTouchableRegionWithCrop) {
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index dde0031..5873103 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -1057,6 +1057,10 @@
// null.
sp<Layer> getRootLayer();
+ // Fills in the touch occlusion mode of the first parent (including this layer) that
+ // hasInputInfo(); this is a no-op if no such parent is found.
+ void fillTouchOcclusionMode(InputWindowInfo& info);
+
// Fills in the frame and transform info for the InputWindowInfo
void fillInputFrameInfo(InputWindowInfo& info, const ui::Transform& toPhysicalDisplay);
diff --git a/services/surfaceflinger/Scheduler/RefreshRateConfigs.cpp b/services/surfaceflinger/Scheduler/RefreshRateConfigs.cpp
index b062acd..9746076 100644
--- a/services/surfaceflinger/Scheduler/RefreshRateConfigs.cpp
+++ b/services/surfaceflinger/Scheduler/RefreshRateConfigs.cpp
@@ -190,6 +190,45 @@
RefreshRate RefreshRateConfigs::getBestRefreshRate(const std::vector<LayerRequirement>& layers,
const GlobalSignals& globalSignals,
GlobalSignals* outSignalsConsidered) const {
+ std::lock_guard lock(mLock);
+
+ if (auto cached = getCachedBestRefreshRate(layers, globalSignals, outSignalsConsidered)) {
+ return *cached;
+ }
+
+ GlobalSignals signalsConsidered;
+ RefreshRate result = getBestRefreshRateLocked(layers, globalSignals, &signalsConsidered);
+ lastBestRefreshRateInvocation.emplace(
+ GetBestRefreshRateInvocation{.layerRequirements = layers,
+ .globalSignals = globalSignals,
+ .outSignalsConsidered = signalsConsidered,
+ .resultingBestRefreshRate = result});
+ if (outSignalsConsidered) {
+ *outSignalsConsidered = signalsConsidered;
+ }
+ return result;
+}
+
+std::optional<RefreshRate> RefreshRateConfigs::getCachedBestRefreshRate(
+ const std::vector<LayerRequirement>& layers, const GlobalSignals& globalSignals,
+ GlobalSignals* outSignalsConsidered) const {
+ const bool sameAsLastCall = lastBestRefreshRateInvocation &&
+ lastBestRefreshRateInvocation->layerRequirements == layers &&
+ lastBestRefreshRateInvocation->globalSignals == globalSignals;
+
+ if (sameAsLastCall) {
+ if (outSignalsConsidered) {
+ *outSignalsConsidered = lastBestRefreshRateInvocation->outSignalsConsidered;
+ }
+ return lastBestRefreshRateInvocation->resultingBestRefreshRate;
+ }
+
+ return {};
+}
+
+RefreshRate RefreshRateConfigs::getBestRefreshRateLocked(
+ const std::vector<LayerRequirement>& layers, const GlobalSignals& globalSignals,
+ GlobalSignals* outSignalsConsidered) const {
ATRACE_CALL();
ALOGV("getBestRefreshRate %zu layers", layers.size());
@@ -206,8 +245,6 @@
}
};
- std::lock_guard lock(mLock);
-
int noVoteLayers = 0;
int minVoteLayers = 0;
int maxVoteLayers = 0;
@@ -592,6 +629,11 @@
void RefreshRateConfigs::setCurrentModeId(DisplayModeId modeId) {
std::lock_guard lock(mLock);
+
+ // Invalidate the cached invocation of getBestRefreshRate. This forces
+ // the refresh rate to be recomputed on the next call to getBestRefreshRate.
+ lastBestRefreshRateInvocation.reset();
+
mCurrentRefreshRate = mRefreshRates.at(modeId).get();
}
@@ -605,11 +647,16 @@
void RefreshRateConfigs::updateDisplayModes(const DisplayModes& modes,
DisplayModeId currentModeId) {
std::lock_guard lock(mLock);
+
// The current mode should be supported
LOG_ALWAYS_FATAL_IF(std::none_of(modes.begin(), modes.end(), [&](DisplayModePtr mode) {
return mode->getId() == currentModeId;
}));
+ // Invalidate the cached invocation of getBestRefreshRate. This forces
+ // the refresh rate to be recomputed on the next call to getBestRefreshRate.
+ lastBestRefreshRateInvocation.reset();
+
mRefreshRates.clear();
for (const auto& mode : modes) {
const auto modeId = mode->getId();
@@ -666,6 +713,7 @@
ALOGE("Invalid refresh rate policy: %s", policy.toString().c_str());
return BAD_VALUE;
}
+ lastBestRefreshRateInvocation.reset();
Policy previousPolicy = *getCurrentPolicyLocked();
mDisplayManagerPolicy = policy;
if (*getCurrentPolicyLocked() == previousPolicy) {
@@ -680,6 +728,7 @@
if (policy && !isPolicyValidLocked(*policy)) {
return BAD_VALUE;
}
+ lastBestRefreshRateInvocation.reset();
Policy previousPolicy = *getCurrentPolicyLocked();
mOverridePolicy = policy;
if (*getCurrentPolicyLocked() == previousPolicy) {
diff --git a/services/surfaceflinger/Scheduler/RefreshRateConfigs.h b/services/surfaceflinger/Scheduler/RefreshRateConfigs.h
index ee89149..342fde0 100644
--- a/services/surfaceflinger/Scheduler/RefreshRateConfigs.h
+++ b/services/surfaceflinger/Scheduler/RefreshRateConfigs.h
@@ -250,6 +250,10 @@
bool touch = false;
// True if the system hasn't seen any buffers posted to layers recently.
bool idle = false;
+
+ bool operator==(const GlobalSignals& other) const {
+ return touch == other.touch && idle == other.idle;
+ }
};
// Returns the refresh rate that fits best to the given layers.
@@ -350,6 +354,15 @@
const std::function<bool(const RefreshRate&)>& shouldAddRefreshRate,
std::vector<const RefreshRate*>* outRefreshRates) REQUIRES(mLock);
+ std::optional<RefreshRate> getCachedBestRefreshRate(const std::vector<LayerRequirement>& layers,
+ const GlobalSignals& globalSignals,
+ GlobalSignals* outSignalsConsidered) const
+ REQUIRES(mLock);
+
+ RefreshRate getBestRefreshRateLocked(const std::vector<LayerRequirement>& layers,
+ const GlobalSignals& globalSignals,
+ GlobalSignals* outSignalsConsidered) const REQUIRES(mLock);
+
// Returns the refresh rate with the highest score in the collection specified from begin
// to end. If there are more than one with the same highest refresh rate, the first one is
// returned.
@@ -414,6 +427,15 @@
const bool mEnableFrameRateOverride;
bool mSupportsFrameRateOverride;
+
+ struct GetBestRefreshRateInvocation {
+ std::vector<LayerRequirement> layerRequirements;
+ GlobalSignals globalSignals;
+ GlobalSignals outSignalsConsidered;
+ RefreshRate resultingBestRefreshRate;
+ };
+ mutable std::optional<GetBestRefreshRateInvocation> lastBestRefreshRateInvocation
+ GUARDED_BY(mLock);
};
} // namespace android::scheduler
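
The GetBestRefreshRateInvocation member added above is a single-entry memoization keyed on exact equality of the layer requirements and global signals, which is why GlobalSignals gains an operator==. A small self-contained sketch of the same pattern with generic names (this is not the RefreshRateConfigs code itself, and it omits the mLock guarding):

#include <optional>
#include <utility>

template <typename Key, typename Value>
class LastCallCache {
public:
    // Returns the cached value when the key matches the previous call; otherwise
    // recomputes via `compute`, remembers the (key, value) pair, and returns it.
    template <typename Compute>
    Value getOrCompute(const Key& key, Compute&& compute) {
        if (mLast && mLast->first == key) {
            return mLast->second;          // Same inputs as the last call: reuse the result.
        }
        Value value = std::forward<Compute>(compute)(key);
        mLast.emplace(key, value);         // Exactly one previous invocation is remembered.
        return value;
    }

    void invalidate() { mLast.reset(); }   // Mirrors lastBestRefreshRateInvocation.reset().

private:
    std::optional<std::pair<Key, Value>> mLast;
};

Calling invalidate() here corresponds to the lastBestRefreshRateInvocation.reset() calls added in setCurrentModeId(), updateDisplayModes(), and the policy setters.
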
diff --git a/services/surfaceflinger/Scheduler/RefreshRateStats.h b/services/surfaceflinger/Scheduler/RefreshRateStats.h
index 80f4665..208a767 100644
--- a/services/surfaceflinger/Scheduler/RefreshRateStats.h
+++ b/services/surfaceflinger/Scheduler/RefreshRateStats.h
@@ -61,6 +61,7 @@
if (mCurrentRefreshRate.equalsWithMargin(currRefreshRate)) {
return;
}
+ mTimeStats.incrementRefreshRateSwitches();
flushTime();
mCurrentRefreshRate = currRefreshRate;
}
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index fac2c65..4b8cbfb 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -194,8 +194,6 @@
std::unique_ptr<LayerHistory> Scheduler::createLayerHistory(
const scheduler::RefreshRateConfigs& configs) {
- if (!configs.canSwitch()) return nullptr;
-
return std::make_unique<scheduler::LayerHistory>(configs);
}
@@ -579,8 +577,6 @@
}
void Scheduler::registerLayer(Layer* layer) {
- if (!mLayerHistory) return;
-
scheduler::LayerHistory::LayerVoteType voteType;
if (!mOptions.useContentDetection ||
@@ -600,26 +596,22 @@
}
void Scheduler::deregisterLayer(Layer* layer) {
- if (mLayerHistory) {
- mLayerHistory->deregisterLayer(layer);
- }
+ mLayerHistory->deregisterLayer(layer);
}
void Scheduler::recordLayerHistory(Layer* layer, nsecs_t presentTime,
LayerHistory::LayerUpdateType updateType) {
- if (mLayerHistory) {
+ if (mRefreshRateConfigs.canSwitch()) {
mLayerHistory->record(layer, presentTime, systemTime(), updateType);
}
}
void Scheduler::setModeChangePending(bool pending) {
- if (mLayerHistory) {
- mLayerHistory->setModeChangePending(pending);
- }
+ mLayerHistory->setModeChangePending(pending);
}
void Scheduler::chooseRefreshRateForContent() {
- if (!mLayerHistory) return;
+ if (!mRefreshRateConfigs.canSwitch()) return;
ATRACE_CALL();
@@ -630,9 +622,6 @@
bool frameRateOverridesChanged;
{
std::lock_guard<std::mutex> lock(mFeatureStateLock);
- if (mFeatures.contentRequirements == summary) {
- return;
- }
mFeatures.contentRequirements = summary;
newModeId = calculateRefreshRateModeId(&consideredSignals);
@@ -691,9 +680,7 @@
// Display Power event will boost the refresh rate to performance.
// Clear Layer History to get fresh FPS detection
- if (mLayerHistory) {
- mLayerHistory->clear();
- }
+ mLayerHistory->clear();
}
void Scheduler::kernelIdleTimerCallback(TimerState state) {
@@ -732,9 +719,7 @@
// NOTE: Instead of checking all the layers, we should be checking the layer
// that is currently on top. b/142507166 will give us this capability.
if (handleTimerStateChanged(&mFeatures.touch, touch)) {
- if (mLayerHistory) {
- mLayerHistory->clear();
- }
+ mLayerHistory->clear();
}
ATRACE_INT("TouchState", static_cast<int>(touch));
}
@@ -908,9 +893,7 @@
}
void Scheduler::onPrimaryDisplayAreaChanged(uint32_t displayArea) {
- if (mLayerHistory) {
- mLayerHistory->setDisplayArea(displayArea);
- }
+ mLayerHistory->setDisplayArea(displayArea);
}
void Scheduler::setPreferredRefreshRateForUid(FrameRateOverride frameRateOverride) {
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index 49d3d93..30a3253 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -268,7 +268,7 @@
VsyncSchedule mVsyncSchedule;
// Used to choose refresh rate if content detection is enabled.
- const std::unique_ptr<LayerHistory> mLayerHistory;
+ std::unique_ptr<LayerHistory> mLayerHistory;
// Timer that records time between requests for next vsync.
std::optional<scheduler::OneShotTimer> mIdleTimer;
diff --git a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
index 028f7a6..329e4a0 100644
--- a/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
+++ b/services/surfaceflinger/Scheduler/VSyncPredictor.cpp
@@ -86,6 +86,9 @@
// in the learning phase we should just clear all timestamps and start
// over.
if (mTimestamps.size() < kMinimumSamplesForPrediction) {
+ // Add the timestamp to mTimestamps before clearing it so we can
+ // update mKnownTimestamp based on the new timestamp.
+ mTimestamps.push_back(timestamp);
clearTimestamps();
} else if (!mTimestamps.empty()) {
mKnownTimestamp =
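
The comment added above states the ordering constraint: the outlier sample must be in mTimestamps before clearTimestamps() runs so that the predictor's last known vsync can still advance. A generic, self-contained sketch of that "fold the newest sample into the known timestamp on clear" idea (names are illustrative; this is not the VSyncPredictor implementation):

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

// Illustrative tracker: on clear, remember the newest sample seen so far so later
// predictions can still be anchored to it.
class SampleTracker {
public:
    void addSample(int64_t timestampNs) { mSamples.push_back(timestampNs); }

    void clearSamples() {
        if (!mSamples.empty()) {
            const int64_t newest = *std::max_element(mSamples.begin(), mSamples.end());
            mKnownTimestamp = mKnownTimestamp ? std::max(*mKnownTimestamp, newest) : newest;
            mSamples.clear();
        }
    }

    std::optional<int64_t> knownTimestamp() const { return mKnownTimestamp; }

private:
    std::vector<int64_t> mSamples;
    std::optional<int64_t> mKnownTimestamp;
};
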
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index a4b6fef..f02f1e2 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -680,6 +680,10 @@
if (mStartPropertySetThread->join() != NO_ERROR) {
ALOGE("Join StartPropertySetThread failed!");
}
+
+ if (mRenderEnginePrimeCacheFuture.valid()) {
+ mRenderEnginePrimeCacheFuture.get();
+ }
const nsecs_t now = systemTime();
const nsecs_t duration = now - mBootTime;
ALOGI("Boot is finished (%ld ms)", long(ns2ms(duration)) );
@@ -807,7 +811,15 @@
char primeShaderCache[PROPERTY_VALUE_MAX];
property_get("service.sf.prime_shader_cache", primeShaderCache, "1");
if (atoi(primeShaderCache)) {
- getRenderEngine().primeCache();
+ if (setSchedFifo(false) != NO_ERROR) {
+ ALOGW("Can't set SCHED_OTHER for primeCache");
+ }
+
+ mRenderEnginePrimeCacheFuture = getRenderEngine().primeCache();
+
+ if (setSchedFifo(true) != NO_ERROR) {
+ ALOGW("Can't set SCHED_OTHER for primeCache");
+ }
}
getRenderEngine().onPrimaryDisplaySizeChanged(display->getSize());
@@ -1139,7 +1151,18 @@
// have been already updated with the upcoming active mode.
return;
}
- const Fps oldRefreshRate = display->getActiveMode()->getFps();
+
+ if (display->getActiveMode()->getSize() != upcomingMode->getSize()) {
+ auto& state = mCurrentState.displays.editValueFor(display->getDisplayToken());
+ // We need to generate new sequenceId in order to recreate the display (and this
+ // way the framebuffer).
+ state.sequenceId = DisplayDeviceState{}.sequenceId;
+ state.physical->activeMode = upcomingMode;
+ processDisplayChangesLocked();
+
+ // processDisplayChangesLocked will update all necessary components so we're done here.
+ return;
+ }
std::lock_guard<std::mutex> lock(mActiveModeLock);
mRefreshRateConfigs->setCurrentModeId(mUpcomingActiveMode.modeId);
@@ -1149,9 +1172,6 @@
mRefreshRateStats->setRefreshRate(refreshRate);
- if (!refreshRate.equalsWithMargin(oldRefreshRate)) {
- mTimeStats->incrementRefreshRateSwitches();
- }
updatePhaseConfiguration(refreshRate);
ATRACE_INT("ActiveConfigFPS", refreshRate.getValue());
@@ -2040,6 +2060,7 @@
}
refreshArgs.earliestPresentTime = mScheduler->getPreviousVsyncFrom(mExpectedPresentTime);
+ refreshArgs.nextInvalidateTime = mEventQueue->nextExpectedInvalidate();
mGeometryInvalid = false;
@@ -2808,7 +2829,10 @@
mRefreshRateConfigs->updateDisplayModes(currentState.physical->supportedModes,
currentState.physical->activeMode->getId());
mVsyncConfiguration->reset();
- updatePhaseConfiguration(mRefreshRateConfigs->getCurrentRefreshRate().getFps());
+ const Fps refreshRate = currentState.physical->activeMode->getFps();
+ updatePhaseConfiguration(refreshRate);
+ mRefreshRateStats->setRefreshRate(refreshRate);
+
if (mRefreshRateOverlay) {
mRefreshRateOverlay->reset();
}
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 519a5ab..a3fa8d6 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -1183,6 +1183,8 @@
sp<StartPropertySetThread> mStartPropertySetThread;
surfaceflinger::Factory& mFactory;
+ std::future<void> mRenderEnginePrimeCacheFuture;
+
// access must be protected by mStateLock
mutable Mutex mStateLock;
State mCurrentState{LayerVector::StateSet::Current};
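
Taken together, the SurfaceFlinger.cpp and SurfaceFlinger.h hunks above move shader-cache priming off the critical path: primeCache() now hands back a std::future that is stored in mRenderEnginePrimeCacheFuture and joined when boot finishes, and real-time priority is dropped only around spawning the work. A runnable, simplified sketch of the same pattern; setSchedFifo() and primeCache() below are stand-ins, not the real SurfaceFlinger/RenderEngine members.

#include <chrono>
#include <future>
#include <thread>

// Stand-ins for the real members; illustrative only.
static bool setSchedFifo(bool /*enabled*/) { return true; }
static std::future<void> primeCache() {
    return std::async(std::launch::async, [] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));  // pretend shader warm-up
    });
}

int main() {
    // Drop real-time priority while kicking off the warm-up, then restore it; the
    // expensive work itself runs on another thread behind the future.
    setSchedFifo(false);
    std::future<void> primeCacheFuture = primeCache();
    setSchedFifo(true);

    // ... much later (boot finished in SurfaceFlinger): make sure the warm-up completed.
    if (primeCacheFuture.valid()) {
        primeCacheFuture.get();
    }
    return 0;
}
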
diff --git a/services/surfaceflinger/tests/unittests/RefreshRateConfigsTest.cpp b/services/surfaceflinger/tests/unittests/RefreshRateConfigsTest.cpp
index 7ace70a..d04a7d7 100644
--- a/services/surfaceflinger/tests/unittests/RefreshRateConfigsTest.cpp
+++ b/services/surfaceflinger/tests/unittests/RefreshRateConfigsTest.cpp
@@ -45,9 +45,16 @@
class RefreshRateConfigsTest : public testing::Test {
protected:
+ using GetBestRefreshRateInvocation = RefreshRateConfigs::GetBestRefreshRateInvocation;
+
RefreshRateConfigsTest();
~RefreshRateConfigsTest();
+ RefreshRate createRefreshRate(DisplayModePtr displayMode) {
+ return {displayMode->getId(), displayMode, displayMode->getFps(),
+ RefreshRate::ConstructorTag(0)};
+ }
+
Fps findClosestKnownFrameRate(const RefreshRateConfigs& refreshRateConfigs, Fps frameRate) {
return refreshRateConfigs.findClosestKnownFrameRate(frameRate);
}
@@ -71,6 +78,19 @@
return *refreshRateConfigs.mMaxSupportedRefreshRate;
}
+ void setLastBestRefreshRateInvocation(RefreshRateConfigs& refreshRateConfigs,
+ const GetBestRefreshRateInvocation& invocation) {
+ std::lock_guard lock(refreshRateConfigs.mLock);
+ refreshRateConfigs.lastBestRefreshRateInvocation.emplace(
+ GetBestRefreshRateInvocation(invocation));
+ }
+
+ std::optional<GetBestRefreshRateInvocation> getLastBestRefreshRateInvocation(
+ const RefreshRateConfigs& refreshRateConfigs) {
+ std::lock_guard lock(refreshRateConfigs.mLock);
+ return refreshRateConfigs.lastBestRefreshRateInvocation;
+ }
+
// Test config IDs
static inline const DisplayModeId HWC_CONFIG_ID_60 = DisplayModeId(0);
static inline const DisplayModeId HWC_CONFIG_ID_90 = DisplayModeId(1);
@@ -1752,6 +1772,78 @@
refreshRateConfigs->getBestRefreshRate(layers, {.touch = false, .idle = false}));
}
+TEST_F(RefreshRateConfigsTest, getBestRefreshRate_ReadsCached) {
+ using GlobalSignals = RefreshRateConfigs::GlobalSignals;
+
+ auto refreshRateConfigs =
+ std::make_unique<RefreshRateConfigs>(m30_60_72_90_120Device,
+ /*currentConfigId=*/HWC_CONFIG_ID_60);
+
+ setLastBestRefreshRateInvocation(*refreshRateConfigs,
+ GetBestRefreshRateInvocation{.layerRequirements = std::vector<
+ LayerRequirement>(),
+ .globalSignals = {.touch = true,
+ .idle = true},
+ .outSignalsConsidered =
+ {.touch = true,
+ .idle = false},
+ .resultingBestRefreshRate =
+ createRefreshRate(
+ mConfig90)});
+
+ EXPECT_EQ(createRefreshRate(mConfig90),
+ refreshRateConfigs->getBestRefreshRate(std::vector<LayerRequirement>(),
+ {.touch = true, .idle = true}));
+
+ const GlobalSignals cachedSignalsConsidered{.touch = true, .idle = false};
+ setLastBestRefreshRateInvocation(*refreshRateConfigs,
+ GetBestRefreshRateInvocation{.layerRequirements = std::vector<
+ LayerRequirement>(),
+ .globalSignals = {.touch = true,
+ .idle = true},
+ .outSignalsConsidered =
+ cachedSignalsConsidered,
+ .resultingBestRefreshRate =
+ createRefreshRate(
+ mConfig30)});
+
+ GlobalSignals signalsConsidered;
+ EXPECT_EQ(createRefreshRate(mConfig30),
+ refreshRateConfigs->getBestRefreshRate(std::vector<LayerRequirement>(),
+ {.touch = true, .idle = true},
+ &signalsConsidered));
+
+ EXPECT_EQ(cachedSignalsConsidered, signalsConsidered);
+}
+
+TEST_F(RefreshRateConfigsTest, getBestRefreshRate_WritesCache) {
+ using GlobalSignals = RefreshRateConfigs::GlobalSignals;
+
+ auto refreshRateConfigs =
+ std::make_unique<RefreshRateConfigs>(m30_60_72_90_120Device,
+ /*currentConfigId=*/HWC_CONFIG_ID_60);
+ ASSERT_FALSE(getLastBestRefreshRateInvocation(*refreshRateConfigs).has_value());
+
+ GlobalSignals globalSignals{.touch = true, .idle = true};
+ auto layers = std::vector<LayerRequirement>{LayerRequirement{.weight = 1.0f},
+ LayerRequirement{.weight = 0.5f}};
+ const auto lastResult =
+ refreshRateConfigs->getBestRefreshRate(layers, globalSignals,
+ /* outSignalsConsidered */ nullptr);
+
+ const auto lastInvocation = getLastBestRefreshRateInvocation(*refreshRateConfigs);
+
+ ASSERT_TRUE(lastInvocation.has_value());
+ ASSERT_EQ(layers, lastInvocation->layerRequirements);
+ ASSERT_EQ(globalSignals, lastInvocation->globalSignals);
+ ASSERT_EQ(lastResult, lastInvocation->resultingBestRefreshRate);
+
+ // outSignalsConsidered needs to be populated even though we passed nullptr
+ // to getBestRefreshRate() earlier
+ GlobalSignals defaultSignals;
+ ASSERT_FALSE(defaultSignals == lastInvocation->outSignalsConsidered);
+}
+
TEST_F(RefreshRateConfigsTest, testComparisonOperator) {
EXPECT_TRUE(mExpected60Config < mExpected90Config);
EXPECT_FALSE(mExpected60Config < mExpected60Config);
diff --git a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
index 38e503f..423d0cc 100644
--- a/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SchedulerTest.cpp
@@ -51,9 +51,18 @@
SchedulerTest();
- const scheduler::RefreshRateConfigs
- mConfigs{{DisplayMode::Builder(0).setVsyncPeriod(16'666'667).setGroup(0).build()},
- DisplayModeId(0)};
+ const DisplayModePtr mode60 = DisplayMode::Builder(0)
+ .setId(DisplayModeId(0))
+ .setVsyncPeriod(Fps(60.f).getPeriodNsecs())
+ .setGroup(0)
+ .build();
+ const DisplayModePtr mode120 = DisplayMode::Builder(1)
+ .setId(DisplayModeId(1))
+ .setVsyncPeriod(Fps(120.f).getPeriodNsecs())
+ .setGroup(0)
+ .build();
+
+ scheduler::RefreshRateConfigs mConfigs{{mode60}, mode60->getId()};
mock::SchedulerCallback mSchedulerCallback;
@@ -149,15 +158,14 @@
EXPECT_EQ(kEventConnections, mScheduler->getEventThreadConnectionCount(mConnectionHandle));
}
-TEST_F(SchedulerTest, noLayerHistory) {
- // Layer history should not be created if there is a single config.
- ASSERT_FALSE(mScheduler->hasLayerHistory());
-
+TEST_F(SchedulerTest, chooseRefreshRateForContentIsNoopWhenModeSwitchingIsNotSupported) {
+ // The layer is registered at creation time and deregistered at destruction time.
sp<mock::MockLayer> layer = sp<mock::MockLayer>::make(mFlinger.flinger());
- // Content detection should be no-op.
- mScheduler->registerLayer(layer.get());
+ // recordLayerHistory should be a no-op when mode switching is not supported
+ ASSERT_EQ(static_cast<size_t>(0), mScheduler->getNumActiveLayers());
mScheduler->recordLayerHistory(layer.get(), 0, LayerHistory::LayerUpdateType::Buffer);
+ ASSERT_EQ(static_cast<size_t>(0), mScheduler->getNumActiveLayers());
constexpr bool kPowerStateNormal = true;
mScheduler->setDisplayPowerState(kPowerStateNormal);
@@ -169,6 +177,18 @@
mScheduler->chooseRefreshRateForContent();
}
+TEST_F(SchedulerTest, updateDisplayModes) {
+ ASSERT_EQ(static_cast<size_t>(0), mScheduler->layerHistorySize());
+ sp<mock::MockLayer> layer = sp<mock::MockLayer>::make(mFlinger.flinger());
+ ASSERT_EQ(static_cast<size_t>(1), mScheduler->layerHistorySize());
+
+ mConfigs.updateDisplayModes({mode60, mode120}, /* activeMode */ mode60->getId());
+
+ ASSERT_EQ(static_cast<size_t>(0), mScheduler->getNumActiveLayers());
+ mScheduler->recordLayerHistory(layer.get(), 0, LayerHistory::LayerUpdateType::Buffer);
+ ASSERT_EQ(static_cast<size_t>(1), mScheduler->getNumActiveLayers());
+}
+
TEST_F(SchedulerTest, testDispatchCachedReportedMode) {
// If the optional fields are cleared, the function should return before
// onModeChange is called.
@@ -200,4 +220,25 @@
EXPECT_EQ(0, mFlinger.calculateExtraBufferCount(Fps(60), 10ms));
}
+MATCHER(Is120Hz, "") {
+ return arg.getFps().equalsWithMargin(Fps(120.f));
+}
+
+TEST_F(SchedulerTest, chooseRefreshRateForContentSelectsMaxRefreshRate) {
+ mConfigs.updateDisplayModes({mode60, mode120}, /* activeMode */ mode60->getId());
+
+ sp<mock::MockLayer> layer = sp<mock::MockLayer>::make(mFlinger.flinger());
+
+ mScheduler->recordLayerHistory(layer.get(), 0, LayerHistory::LayerUpdateType::Buffer);
+
+ constexpr bool kPowerStateNormal = true;
+ mScheduler->setDisplayPowerState(kPowerStateNormal);
+
+ constexpr uint32_t kDisplayArea = 999'999;
+ mScheduler->onPrimaryDisplayAreaChanged(kDisplayArea);
+
+ EXPECT_CALL(mSchedulerCallback, changeRefreshRate(Is120Hz(), _)).Times(1);
+ mScheduler->chooseRefreshRateForContent();
+}
+
} // namespace android
diff --git a/services/surfaceflinger/tests/unittests/TestableScheduler.h b/services/surfaceflinger/tests/unittests/TestableScheduler.h
index 3f9dd01..41fd6e3 100644
--- a/services/surfaceflinger/tests/unittests/TestableScheduler.h
+++ b/services/surfaceflinger/tests/unittests/TestableScheduler.h
@@ -64,6 +64,11 @@
return mutableLayerHistory()->mLayerInfos.size();
}
+ size_t getNumActiveLayers() NO_THREAD_SAFETY_ANALYSIS {
+ if (!mLayerHistory) return 0;
+ return mutableLayerHistory()->mActiveLayersEnd;
+ }
+
void replaceTouchTimer(int64_t millis) {
if (mTouchTimer) {
mTouchTimer.reset();
diff --git a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
index 2a658dd..ae936e4 100644
--- a/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
+++ b/services/surfaceflinger/tests/unittests/VSyncPredictorTest.cpp
@@ -478,7 +478,7 @@
}
}
-TEST_F(VSyncPredictorTest, InconsistentVsyncValueIsFlushedEventually) {
+TEST_F(VSyncPredictorTest, inconsistentVsyncValueIsFlushedEventually) {
EXPECT_TRUE(tracker.addVsyncTimestamp(600));
EXPECT_TRUE(tracker.needsMoreSamples());
@@ -492,6 +492,24 @@
EXPECT_FALSE(tracker.needsMoreSamples());
}
+TEST_F(VSyncPredictorTest, knownVsyncIsUpdated) {
+ EXPECT_TRUE(tracker.addVsyncTimestamp(600));
+ EXPECT_TRUE(tracker.needsMoreSamples());
+ EXPECT_EQ(600, tracker.nextAnticipatedVSyncTimeFrom(mNow));
+
+ EXPECT_FALSE(tracker.addVsyncTimestamp(mNow += mPeriod));
+ EXPECT_EQ(mNow + 1000, tracker.nextAnticipatedVSyncTimeFrom(mNow));
+
+ for (auto i = 0u; i < kMinimumSamplesForPrediction; i++) {
+ EXPECT_TRUE(tracker.needsMoreSamples());
+ EXPECT_TRUE(tracker.addVsyncTimestamp(mNow += mPeriod));
+ EXPECT_EQ(mNow + 1000, tracker.nextAnticipatedVSyncTimeFrom(mNow));
+ }
+
+ EXPECT_FALSE(tracker.needsMoreSamples());
+ EXPECT_EQ(mNow + 1000, tracker.nextAnticipatedVSyncTimeFrom(mNow));
+}
+
} // namespace android::scheduler
// TODO(b/129481165): remove the #pragma below and fix conversion issues
diff --git a/services/vibratorservice/VibratorHalWrapper.cpp b/services/vibratorservice/VibratorHalWrapper.cpp
index 1010aa5..f15a963 100644
--- a/services/vibratorservice/VibratorHalWrapper.cpp
+++ b/services/vibratorservice/VibratorHalWrapper.cpp
@@ -95,7 +95,10 @@
}
HalResult<void> HalResult<void>::fromStatus(binder::Status status) {
- if (status.exceptionCode() == binder::Status::EX_UNSUPPORTED_OPERATION) {
+ if (status.exceptionCode() == binder::Status::EX_UNSUPPORTED_OPERATION ||
+ status.transactionError() == android::UNKNOWN_TRANSACTION) {
+ // UNKNOWN_TRANSACTION means the HAL implementation is an older version, so this is
+ // the same as the operation being unsupported by this HAL. Should not retry.
return HalResult<void>::unsupported();
}
if (status.isOk()) {
diff --git a/services/vibratorservice/include/vibratorservice/VibratorHalWrapper.h b/services/vibratorservice/include/vibratorservice/VibratorHalWrapper.h
index 8720d9d..87bc34e 100644
--- a/services/vibratorservice/include/vibratorservice/VibratorHalWrapper.h
+++ b/services/vibratorservice/include/vibratorservice/VibratorHalWrapper.h
@@ -42,7 +42,10 @@
static HalResult<T> unsupported() { return HalResult("", /* unsupported= */ true); }
static HalResult<T> fromStatus(binder::Status status, T data) {
- if (status.exceptionCode() == binder::Status::EX_UNSUPPORTED_OPERATION) {
+ if (status.exceptionCode() == binder::Status::EX_UNSUPPORTED_OPERATION ||
+ status.transactionError() == android::UNKNOWN_TRANSACTION) {
+ // UNKNOWN_TRANSACTION means the HAL implementation is an older version, so this is
+ // the same as the operation being unsupported by this HAL. Should not retry.
return HalResult<T>::unsupported();
}
if (status.isOk()) {
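
Both fromStatus() overloads above now fold UNKNOWN_TRANSACTION into the "unsupported" bucket so callers stop retrying against older HALs that simply lack the method. A standalone sketch of that classification using only public binder APIs; the HalCallOutcome enum is illustrative and not part of vibratorservice.

#include <binder/Status.h>
#include <utils/Errors.h>

enum class HalCallOutcome { Ok, Unsupported, Failed };

// Mirrors the shared logic of the two fromStatus() overloads: an older HAL that does not
// implement the method surfaces as UNKNOWN_TRANSACTION and is treated like
// EX_UNSUPPORTED_OPERATION, i.e. "skip this feature, do not retry".
HalCallOutcome classify(const android::binder::Status& status) {
    if (status.exceptionCode() == android::binder::Status::EX_UNSUPPORTED_OPERATION ||
        status.transactionError() == android::UNKNOWN_TRANSACTION) {
        return HalCallOutcome::Unsupported;
    }
    return status.isOk() ? HalCallOutcome::Ok : HalCallOutcome::Failed;  // Failed may be retried.
}
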
diff --git a/services/vibratorservice/test/VibratorHalWrapperAidlTest.cpp b/services/vibratorservice/test/VibratorHalWrapperAidlTest.cpp
index af0cdb8..7813303 100644
--- a/services/vibratorservice/test/VibratorHalWrapperAidlTest.cpp
+++ b/services/vibratorservice/test/VibratorHalWrapperAidlTest.cpp
@@ -189,8 +189,7 @@
.WillRepeatedly(vibrator::TriggerSchedulerCallback());
EXPECT_CALL(*mMockHal.get(), on(Eq(11), _))
.Times(Exactly(1))
- .WillRepeatedly(Return(
- Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)));
+ .WillRepeatedly(Return(Status::fromStatusT(UNKNOWN_TRANSACTION)));
EXPECT_CALL(*mMockHal.get(), on(Eq(12), _))
.Times(Exactly(1))
.WillRepeatedly(Return(Status::fromExceptionCode(Status::Exception::EX_SECURITY)));
@@ -228,8 +227,7 @@
EXPECT_CALL(*mMockHal.get(), setAmplitude(Eq(0.1f))).Times(Exactly(1));
EXPECT_CALL(*mMockHal.get(), setAmplitude(Eq(0.2f)))
.Times(Exactly(1))
- .WillRepeatedly(Return(
- Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)));
+ .WillRepeatedly(Return(Status::fromStatusT(UNKNOWN_TRANSACTION)));
EXPECT_CALL(*mMockHal.get(), setAmplitude(Eq(0.5f)))
.Times(Exactly(1))
.WillRepeatedly(Return(Status::fromExceptionCode(Status::Exception::EX_SECURITY)));
@@ -265,8 +263,7 @@
EXPECT_CALL(*mMockHal.get(),
alwaysOnEnable(Eq(2), Eq(Effect::TICK), Eq(EffectStrength::MEDIUM)))
.Times(Exactly(1))
- .WillRepeatedly(Return(
- Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)));
+ .WillRepeatedly(Return(Status::fromStatusT(UNKNOWN_TRANSACTION)));
EXPECT_CALL(*mMockHal.get(),
alwaysOnEnable(Eq(3), Eq(Effect::POP), Eq(EffectStrength::STRONG)))
.Times(Exactly(1))
@@ -397,8 +394,7 @@
Return(Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)));
EXPECT_CALL(*mMockHal.get(), getSupportedPrimitives(_))
.Times(Exactly(1))
- .WillRepeatedly(
- Return(Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)));
+ .WillRepeatedly(Return(Status::fromStatusT(UNKNOWN_TRANSACTION)));
EXPECT_CALL(*mMockHal.get(), getFrequencyMinimum(_))
.Times(Exactly(1))
.WillRepeatedly(DoAll(SetArgPointee<0>(F_MIN), Return(Status())));
@@ -411,8 +407,7 @@
Return(Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)));
EXPECT_CALL(*mMockHal.get(), getBandwidthAmplitudeMap(_))
.Times(Exactly(1))
- .WillRepeatedly(
- Return(Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)));
+ .WillRepeatedly(Return(Status::fromStatusT(UNKNOWN_TRANSACTION)));
EXPECT_CALL(*mMockHal.get(), getSupportedBraking(_))
.Times(Exactly(1))
.WillRepeatedly(
@@ -451,8 +446,7 @@
DoAll(SetArgPointee<3>(1000), TriggerCallbackInArg2(), Return(Status())));
EXPECT_CALL(*mMockHal.get(), perform(Eq(Effect::POP), Eq(EffectStrength::MEDIUM), _, _))
.Times(Exactly(1))
- .WillRepeatedly(Return(
- Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)));
+ .WillRepeatedly(Return(Status::fromStatusT(UNKNOWN_TRANSACTION)));
EXPECT_CALL(*mMockHal.get(), perform(Eq(Effect::THUD), Eq(EffectStrength::STRONG), _, _))
.Times(Exactly(1))
.WillRepeatedly(Return(Status::fromExceptionCode(Status::Exception::EX_SECURITY)));
@@ -549,8 +543,7 @@
.WillRepeatedly(DoAll(TriggerCallbackInArg1(), Return(Status())));
EXPECT_CALL(*mMockHal.get(), compose(Eq(singleEffect), _))
.Times(Exactly(1))
- .WillRepeatedly(Return(
- Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)));
+ .WillRepeatedly(Return(Status::fromStatusT(UNKNOWN_TRANSACTION)));
EXPECT_CALL(*mMockHal.get(), compose(Eq(multipleEffects), _))
.Times(Exactly(1))
.WillRepeatedly(Return(Status::fromExceptionCode(Status::Exception::EX_SECURITY)));
diff --git a/services/vibratorservice/test/VibratorManagerHalWrapperAidlTest.cpp b/services/vibratorservice/test/VibratorManagerHalWrapperAidlTest.cpp
index 548d028..1593cb1 100644
--- a/services/vibratorservice/test/VibratorManagerHalWrapperAidlTest.cpp
+++ b/services/vibratorservice/test/VibratorManagerHalWrapperAidlTest.cpp
@@ -308,8 +308,7 @@
Return(Status())));
EXPECT_CALL(*mMockHal.get(), triggerSynced(_))
.Times(Exactly(3))
- .WillOnce(Return(
- Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)))
+ .WillOnce(Return(Status::fromStatusT(UNKNOWN_TRANSACTION)))
.WillOnce(Return(Status::fromExceptionCode(Status::Exception::EX_SECURITY)))
.WillRepeatedly(DoAll(TriggerCallback(), Return(Status())));
}
@@ -345,8 +344,7 @@
TEST_F(VibratorManagerHalWrapperAidlTest, TestCancelSynced) {
EXPECT_CALL(*mMockHal.get(), cancelSynced())
.Times(Exactly(3))
- .WillOnce(
- Return(Status::fromExceptionCode(Status::Exception::EX_UNSUPPORTED_OPERATION)))
+ .WillOnce(Return(Status::fromStatusT(UNKNOWN_TRANSACTION)))
.WillOnce(Return(Status::fromExceptionCode(Status::Exception::EX_SECURITY)))
.WillRepeatedly(Return(Status()));