Merge "Add extra logging for A/B dexopt"
diff --git a/cmds/dumpstate/dumpstate.cpp b/cmds/dumpstate/dumpstate.cpp
index 201b5a3..364ead0 100644
--- a/cmds/dumpstate/dumpstate.cpp
+++ b/cmds/dumpstate/dumpstate.cpp
@@ -81,6 +81,7 @@
#define PROFILE_DATA_DIR_CUR "/data/misc/profiles/cur"
#define PROFILE_DATA_DIR_REF "/data/misc/profiles/ref"
#define WLUTIL "/vendor/xbin/wlutil"
+#define WMTRACE_DATA_DIR "/data/misc/wmtrace"
// TODO(narayan): Since this information has to be kept in sync
// with tombstoned, we should just put it in a common header.
@@ -1210,6 +1211,11 @@
DumpFile("BINDER STATS", "/sys/kernel/debug/binder/stats");
DumpFile("BINDER STATE", "/sys/kernel/debug/binder/state");
+ /* Add window and surface trace files. */
+ if (!PropertiesHelper::IsUserBuild()) {
+ ds.AddDir(WMTRACE_DATA_DIR, false);
+ }
+
ds.DumpstateBoard();
/* Migrate the ril_dumpstate to a device specific dumpstate? */
diff --git a/libs/binder/Status.cpp b/libs/binder/Status.cpp
index 006f7f9..a9d5055 100644
--- a/libs/binder/Status.cpp
+++ b/libs/binder/Status.cpp
@@ -102,6 +102,15 @@
}
mMessage = String8(message);
+ // Skip over the remote stack trace data
+ int32_t remote_stack_trace_header_size;
+ status = parcel.readInt32(&remote_stack_trace_header_size);
+ if (status != OK) {
+ setFromStatusT(status);
+ return status;
+ }
+ parcel.setDataPosition(parcel.dataPosition() + remote_stack_trace_header_size);
+
if (mException == EX_SERVICE_SPECIFIC) {
status = parcel.readInt32(&mErrorCode);
} else if (mException == EX_PARCELABLE) {
@@ -137,6 +146,7 @@
return status;
}
status = parcel->writeString16(String16(mMessage));
+ status = parcel->writeInt32(0); // Empty remote stack trace header
if (mException == EX_SERVICE_SPECIFIC) {
status = parcel->writeInt32(mErrorCode);
} else if (mException == EX_PARCELABLE) {
diff --git a/libs/graphicsenv/GraphicsEnv.cpp b/libs/graphicsenv/GraphicsEnv.cpp
index 39b5829..f46e9f6 100644
--- a/libs/graphicsenv/GraphicsEnv.cpp
+++ b/libs/graphicsenv/GraphicsEnv.cpp
@@ -22,6 +22,7 @@
#include <log/log.h>
#include <nativeloader/dlext_namespaces.h>
+#include <nativeloader/native_loader.h>
// TODO(b/37049319) Get this from a header once one exists
extern "C" {
@@ -45,6 +46,32 @@
mDriverPath = path;
}
+void GraphicsEnv::setLayerPaths(android_namespace_t* appNamespace, const std::string layerPaths) {
+ if (mLayerPaths.empty()) {
+ mLayerPaths = layerPaths;
+ mAppNamespace = appNamespace;
+ } else {
+ ALOGV("Vulkan layer search path already set, not clobbering with '%s' for namespace %p",
+ layerPaths.c_str(), appNamespace);
+ }
+}
+
+android_namespace_t* GraphicsEnv::getAppNamespace() {
+ return mAppNamespace;
+}
+
+const std::string GraphicsEnv::getLayerPaths(){
+ return mLayerPaths;
+}
+
+const std::string GraphicsEnv::getDebugLayers() {
+ return mDebugLayers;
+}
+
+void GraphicsEnv::setDebugLayers(const std::string layers) {
+ mDebugLayers = layers;
+}
+
android_namespace_t* GraphicsEnv::getDriverNamespace() {
static std::once_flag once;
std::call_once(once, [this]() {
diff --git a/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h b/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h
index 7817076..213580c 100644
--- a/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h
+++ b/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h
@@ -35,10 +35,20 @@
void setDriverPath(const std::string path);
android_namespace_t* getDriverNamespace();
+ void setLayerPaths(android_namespace_t* appNamespace, const std::string layerPaths);
+ android_namespace_t* getAppNamespace();
+ const std::string getLayerPaths();
+
+ void setDebugLayers(const std::string layers);
+ const std::string getDebugLayers();
+
private:
GraphicsEnv() = default;
std::string mDriverPath;
+ std::string mDebugLayers;
+ std::string mLayerPaths;
android_namespace_t* mDriverNamespace = nullptr;
+ android_namespace_t* mAppNamespace = nullptr;
};
} // namespace android
diff --git a/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp b/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp
index 221bc4f..ace01a6 100644
--- a/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp
+++ b/libs/vr/libbufferhubqueue/buffer_hub_queue_producer.cpp
@@ -9,16 +9,6 @@
namespace dvr {
/* static */
-sp<BufferHubQueueProducer> BufferHubQueueProducer::Create() {
- sp<BufferHubQueueProducer> producer = new BufferHubQueueProducer;
- auto config = ProducerQueueConfigBuilder()
- .SetMetadata<DvrNativeBufferMetadata>()
- .Build();
- producer->queue_ = ProducerQueue::Create(config, UsagePolicy{});
- return producer;
-}
-
-/* static */
sp<BufferHubQueueProducer> BufferHubQueueProducer::Create(
const std::shared_ptr<ProducerQueue>& queue) {
if (queue->metadata_size() != sizeof(DvrNativeBufferMetadata)) {
@@ -33,6 +23,19 @@
return producer;
}
+/* static */
+sp<BufferHubQueueProducer> BufferHubQueueProducer::Create(
+ ProducerQueueParcelable parcelable) {
+ if (!parcelable.IsValid()) {
+ ALOGE("BufferHubQueueProducer::Create: Invalid producer parcelable.");
+ return nullptr;
+ }
+
+ sp<BufferHubQueueProducer> producer = new BufferHubQueueProducer;
+ producer->queue_ = ProducerQueue::Import(parcelable.TakeChannelHandle());
+ return producer;
+}
+
status_t BufferHubQueueProducer::requestBuffer(int slot,
sp<GraphicBuffer>* buf) {
ALOGD_IF(TRACE, "requestBuffer: slot=%d", slot);
@@ -475,6 +478,13 @@
return BAD_VALUE;
}
+ if (!queue_->is_connected()) {
+ ALOGE(
+ "BufferHubQueueProducer::connect: This BufferHubQueueProducer is not "
+ "connected to bufferhud. Has it been taken out as a parcelable?");
+ return BAD_VALUE;
+ }
+
switch (api) {
case NATIVE_WINDOW_API_EGL:
case NATIVE_WINDOW_API_CPU:
@@ -617,6 +627,39 @@
return NO_ERROR;
}
+status_t BufferHubQueueProducer::TakeAsParcelable(
+ ProducerQueueParcelable* out_parcelable) {
+ if (!out_parcelable || out_parcelable->IsValid())
+ return BAD_VALUE;
+
+ if (connected_api_ != kNoConnectedApi) {
+ ALOGE(
+ "BufferHubQueueProducer::TakeAsParcelable: BufferHubQueueProducer has "
+ "connected client. Must disconnect first.");
+ return BAD_VALUE;
+ }
+
+ if (!queue_->is_connected()) {
+ ALOGE(
+ "BufferHubQueueProducer::TakeAsParcelable: This BufferHubQueueProducer "
+ "is not connected to bufferhubd. Has it been taken out as a "
+ "parcelable?");
+ return BAD_VALUE;
+ }
+
+ auto status = queue_->TakeAsParcelable();
+ if (!status) {
+ ALOGE(
+ "BufferHubQueueProducer::TakeAsParcelable: Failed to take out "
+ "ProducerQueueParcelable from the producer queue, error: %s.",
+ status.GetErrorMessage().c_str());
+ return BAD_VALUE;
+ }
+
+ *out_parcelable = status.take();
+ return NO_ERROR;
+}
+
status_t BufferHubQueueProducer::AllocateBuffer(uint32_t width, uint32_t height,
uint32_t layer_count,
PixelFormat format,
diff --git a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_client.h b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_client.h
index c8e31c7..5b320a4 100644
--- a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_client.h
+++ b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_client.h
@@ -74,7 +74,8 @@
return available_buffers_.size() >= kMaxQueueCapacity;
}
- explicit operator bool() const { return epoll_fd_.IsValid(); }
+ // Returns whether the buffer queue is connected to bufferhubd.
+ bool is_connected() const { return !!GetChannel(); }
int GetBufferId(size_t slot) const {
return (slot < buffers_.size() && buffers_[slot]) ? buffers_[slot]->id()
diff --git a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_producer.h b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_producer.h
index 7ed55fb..9c85048 100644
--- a/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_producer.h
+++ b/libs/vr/libbufferhubqueue/include/private/dvr/buffer_hub_queue_producer.h
@@ -3,6 +3,7 @@
#include <gui/IGraphicBufferProducer.h>
#include <private/dvr/buffer_hub_queue_client.h>
+#include <private/dvr/buffer_hub_queue_parcelable.h>
namespace android {
namespace dvr {
@@ -17,14 +18,16 @@
// that.
static constexpr int kDefaultUndequeuedBuffers = 1;
- // Create a BufferHubQueueProducer instance by creating a new producer queue.
- static sp<BufferHubQueueProducer> Create();
-
- // Create a BufferHubQueueProducer instance by importing an existing prodcuer
+ // Creates a BufferHubQueueProducer instance by importing an existing producer
// queue.
static sp<BufferHubQueueProducer> Create(
const std::shared_ptr<ProducerQueue>& producer);
+ // Creates a BufferHubQueueProducer instance by importing an existing producer
+ // parcelable. Note that this call takes ownership of the parcelable
+ // object and is guaranteed to succeed if the parcelable object is valid.
+ static sp<BufferHubQueueProducer> Create(ProducerQueueParcelable parcelable);
+
// See |IGraphicBufferProducer::requestBuffer|
status_t requestBuffer(int slot, sp<GraphicBuffer>* buf) override;
@@ -115,6 +118,14 @@
// See |IGraphicBufferProducer::getConsumerUsage|
status_t getConsumerUsage(uint64_t* out_usage) const override;
+ // Takes out the current producer as a binder parcelable object. Note that the
+ // producer must be disconnected to be exportable. After successful export,
+ // the producer queue can no longer be connected again. Returns NO_ERROR when
+ // takeout is successful and out_parcelable will hold the new parcelable
+ // object. Also note that out_parcelable cannot be NULL and must point to an
+ // invalid parcelable.
+ status_t TakeAsParcelable(ProducerQueueParcelable* out_parcelable);
+
private:
using LocalHandle = pdx::LocalHandle;
diff --git a/libs/vr/libbufferhubqueue/tests/buffer_hub_queue-test.cpp b/libs/vr/libbufferhubqueue/tests/buffer_hub_queue-test.cpp
index f67ef37..3efa723 100644
--- a/libs/vr/libbufferhubqueue/tests/buffer_hub_queue-test.cpp
+++ b/libs/vr/libbufferhubqueue/tests/buffer_hub_queue-test.cpp
@@ -25,6 +25,8 @@
constexpr uint32_t kBufferLayerCount = 1;
constexpr uint32_t kBufferFormat = HAL_PIXEL_FORMAT_BLOB;
constexpr uint64_t kBufferUsage = GRALLOC_USAGE_SW_READ_RARELY;
+constexpr int kTimeoutMs = 100;
+constexpr int kNoTimeout = 0;
class BufferHubQueueTest : public ::testing::Test {
public:
@@ -84,41 +86,49 @@
};
TEST_F(BufferHubQueueTest, TestDequeue) {
- const size_t nb_dequeue_times = 16;
+ const int64_t nb_dequeue_times = 16;
- ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<size_t>().Build(),
- UsagePolicy{}));
+ ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));
// Allocate only one buffer.
AllocateBuffer();
// But dequeue multiple times.
- for (size_t i = 0; i < nb_dequeue_times; i++) {
+ for (int64_t i = 0; i < nb_dequeue_times; i++) {
size_t slot;
LocalHandle fence;
- auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
- ASSERT_TRUE(p1_status.ok());
+ DvrNativeBufferMetadata mi, mo;
+
+ // Producer gains a buffer.
+ auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
+ EXPECT_TRUE(p1_status.ok());
auto p1 = p1_status.take();
- ASSERT_NE(nullptr, p1);
- size_t mi = i;
- ASSERT_EQ(p1->Post(LocalHandle(), &mi, sizeof(mi)), 0);
- size_t mo;
- auto c1_status = consumer_queue_->Dequeue(100, &slot, &mo, &fence);
- ASSERT_TRUE(c1_status.ok());
+ ASSERT_NE(p1, nullptr);
+
+ // Producer posts the buffer.
+ mi.index = i;
+ EXPECT_EQ(p1->PostAsync(&mi, LocalHandle()), 0);
+
+ // Consumer acquires a buffer.
+ auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
+ EXPECT_TRUE(c1_status.ok());
auto c1 = c1_status.take();
- ASSERT_NE(nullptr, c1);
- ASSERT_EQ(mi, mo);
- c1->Release(LocalHandle());
+ ASSERT_NE(c1, nullptr);
+ EXPECT_EQ(mi.index, i);
+ EXPECT_EQ(mo.index, i);
+
+ // Consumer releases the buffer.
+ EXPECT_EQ(c1->ReleaseAsync(&mi, LocalHandle()), 0);
}
}
TEST_F(BufferHubQueueTest, TestProducerConsumer) {
const size_t kBufferCount = 16;
size_t slot;
- uint64_t seq;
+ DvrNativeBufferMetadata mi, mo;
+ LocalHandle fence;
- ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<uint64_t>().Build(),
- UsagePolicy{}));
+ ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));
for (size_t i = 0; i < kBufferCount; i++) {
AllocateBuffer();
@@ -133,8 +143,7 @@
ASSERT_EQ(consumer_queue_->capacity(), i);
// Dequeue returns timeout since no buffer is ready to consumer, but
// this implicitly triggers buffer import and bump up |capacity|.
- LocalHandle fence;
- auto status = consumer_queue_->Dequeue(100, &slot, &seq, &fence);
+ auto status = consumer_queue_->Dequeue(kNoTimeout, &slot, &mo, &fence);
ASSERT_FALSE(status.ok());
ASSERT_EQ(ETIMEDOUT, status.error());
ASSERT_EQ(consumer_queue_->capacity(), i + 1);
@@ -144,37 +153,37 @@
LocalHandle post_fence(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
for (size_t i = 0; i < kBufferCount; i++) {
- LocalHandle fence;
-
// First time there is no buffer available to dequeue.
- auto consumer_status = consumer_queue_->Dequeue(100, &slot, &seq, &fence);
+ auto consumer_status =
+ consumer_queue_->Dequeue(kNoTimeout, &slot, &mo, &fence);
ASSERT_FALSE(consumer_status.ok());
- ASSERT_EQ(ETIMEDOUT, consumer_status.error());
+ ASSERT_EQ(consumer_status.error(), ETIMEDOUT);
// Make sure Producer buffer is POSTED so that it's ready to Accquire
// in the consumer's Dequeue() function.
- auto producer_status = producer_queue_->Dequeue(100, &slot, &fence);
+ auto producer_status =
+ producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(producer_status.ok());
auto producer = producer_status.take();
ASSERT_NE(nullptr, producer);
- uint64_t seq_in = static_cast<uint64_t>(i);
- ASSERT_EQ(producer->Post(post_fence, &seq_in, sizeof(seq_in)), 0);
+ mi.index = static_cast<int64_t>(i);
+ ASSERT_EQ(producer->PostAsync(&mi, post_fence), 0);
// Second time the just the POSTED buffer should be dequeued.
- uint64_t seq_out = 0;
- consumer_status = consumer_queue_->Dequeue(100, &slot, &seq_out, &fence);
+ consumer_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(consumer_status.ok());
EXPECT_TRUE(fence.IsValid());
auto consumer = consumer_status.take();
ASSERT_NE(nullptr, consumer);
- ASSERT_EQ(seq_in, seq_out);
+ ASSERT_EQ(mi.index, mo.index);
}
}
TEST_F(BufferHubQueueTest, TestRemoveBuffer) {
ASSERT_TRUE(CreateProducerQueue(config_builder_.Build(), UsagePolicy{}));
+ DvrNativeBufferMetadata mo;
// Allocate buffers.
const size_t kBufferCount = 4u;
@@ -203,7 +212,7 @@
for (size_t i = 0; i < kBufferCount; i++) {
Entry* entry = &buffers[i];
auto producer_status = producer_queue_->Dequeue(
- /*timeout_ms=*/100, &entry->slot, &entry->fence);
+ kTimeoutMs, &entry->slot, &mo, &entry->fence);
ASSERT_TRUE(producer_status.ok());
entry->buffer = producer_status.take();
ASSERT_NE(nullptr, entry->buffer);
@@ -223,7 +232,7 @@
buffers[0].buffer = nullptr;
// Now the consumer queue should know it's gone.
- EXPECT_FALSE(WaitAndHandleOnce(consumer_queue_.get(), /*timeout_ms=*/100));
+ EXPECT_FALSE(WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs));
ASSERT_EQ(kBufferCount - 1, consumer_queue_->capacity());
// Allocate a new buffer. This should take the first empty slot.
@@ -292,126 +301,154 @@
ASSERT_NE(nullptr, silent_queue);
// Check that silent queue doesn't import buffers on creation.
- EXPECT_EQ(0, silent_queue->capacity());
+ EXPECT_EQ(silent_queue->capacity(), 0U);
// Dequeue and post a buffer.
size_t slot;
LocalHandle fence;
+ DvrNativeBufferMetadata mi, mo;
auto producer_status =
- producer_queue_->Dequeue(/*timeout_ms=*/100, &slot, &fence);
- ASSERT_TRUE(producer_status.ok());
+ producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
+ EXPECT_TRUE(producer_status.ok());
auto producer_buffer = producer_status.take();
- ASSERT_NE(nullptr, producer_buffer);
- ASSERT_EQ(0, producer_buffer->Post<void>({}));
+ ASSERT_NE(producer_buffer, nullptr);
+ EXPECT_EQ(producer_buffer->PostAsync(&mi, {}), 0);
// After post, check the number of remaining available buffers.
- EXPECT_EQ(kBufferCount - 1, producer_queue_->count());
+ EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);
// Currently we expect no buffer to be available prior to calling
// WaitForBuffers/HandleQueueEvents.
// TODO(eieio): Note this behavior may change in the future.
- EXPECT_EQ(0u, silent_queue->count());
+ EXPECT_EQ(silent_queue->count(), 0U);
EXPECT_FALSE(silent_queue->HandleQueueEvents());
- EXPECT_EQ(0u, silent_queue->count());
+ EXPECT_EQ(silent_queue->count(), 0U);
// Build a new consumer queue to test multi-consumer queue features.
consumer_queue_ = silent_queue->CreateConsumerQueue();
- ASSERT_NE(nullptr, consumer_queue_);
+ ASSERT_NE(consumer_queue_, nullptr);
// Check that buffers are correctly imported on construction.
- EXPECT_EQ(kBufferCount, consumer_queue_->capacity());
- EXPECT_EQ(1u, consumer_queue_->count());
+ EXPECT_EQ(consumer_queue_->capacity(), kBufferCount);
+ EXPECT_EQ(consumer_queue_->count(), 1U);
// Reclaim released/ignored buffers.
- ASSERT_EQ(kBufferCount - 1, producer_queue_->count());
+ EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);
usleep(10000);
- WaitAndHandleOnce(producer_queue_.get(), /*timeout_ms=*/100);
- ASSERT_EQ(kBufferCount - 1, producer_queue_->count());
+ WaitAndHandleOnce(producer_queue_.get(), kTimeoutMs);
+ EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);
// Post another buffer.
- producer_status = producer_queue_->Dequeue(/*timeout_ms=*/100, &slot, &fence);
- ASSERT_TRUE(producer_status.ok());
+ producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
+ EXPECT_TRUE(producer_status.ok());
producer_buffer = producer_status.take();
- ASSERT_NE(nullptr, producer_buffer);
- ASSERT_EQ(0, producer_buffer->Post<void>({}));
+ ASSERT_NE(producer_buffer, nullptr);
+ EXPECT_EQ(producer_buffer->PostAsync(&mi, {}), 0);
// Verify that the consumer queue receives it.
size_t consumer_queue_count = consumer_queue_->count();
- WaitAndHandleOnce(consumer_queue_.get(), /*timeout_ms=*/100);
- EXPECT_LT(consumer_queue_count, consumer_queue_->count());
+ WaitAndHandleOnce(consumer_queue_.get(), kTimeoutMs);
+ EXPECT_GT(consumer_queue_->count(), consumer_queue_count);
// Save the current consumer queue buffer count to compare after the dequeue.
consumer_queue_count = consumer_queue_->count();
// Dequeue and acquire/release (discard) buffers on the consumer end.
auto consumer_status =
- consumer_queue_->Dequeue(/*timeout_ms=*/100, &slot, &fence);
- ASSERT_TRUE(consumer_status.ok());
+ consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
+ EXPECT_TRUE(consumer_status.ok());
auto consumer_buffer = consumer_status.take();
- ASSERT_NE(nullptr, consumer_buffer);
+ ASSERT_NE(consumer_buffer, nullptr);
consumer_buffer->Discard();
// Buffer should be returned to the producer queue without being handled by
// the silent consumer queue.
- EXPECT_GT(consumer_queue_count, consumer_queue_->count());
- EXPECT_EQ(kBufferCount - 2, producer_queue_->count());
- EXPECT_TRUE(producer_queue_->HandleQueueEvents());
- EXPECT_EQ(kBufferCount - 1, producer_queue_->count());
+ EXPECT_LT(consumer_queue_->count(), consumer_queue_count);
+ EXPECT_EQ(producer_queue_->count(), kBufferCount - 2);
+
+ WaitAndHandleOnce(producer_queue_.get(), kTimeoutMs);
+ EXPECT_EQ(producer_queue_->count(), kBufferCount - 1);
}
-struct TestMetadata {
+struct TestUserMetadata {
char a;
int32_t b;
int64_t c;
};
-TEST_F(BufferHubQueueTest, TestMetadata) {
- ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<TestMetadata>().Build(),
- UsagePolicy{}));
+constexpr uint64_t kUserMetadataSize =
+ static_cast<uint64_t>(sizeof(TestUserMetadata));
+
+TEST_F(BufferHubQueueTest, TestUserMetadata) {
+ ASSERT_TRUE(CreateQueues(
+ config_builder_.SetMetadata<TestUserMetadata>().Build(), UsagePolicy{}));
AllocateBuffer();
- std::vector<TestMetadata> ms = {
+ std::vector<TestUserMetadata> user_metadata_list = {
{'0', 0, 0}, {'1', 10, 3333}, {'@', 123, 1000000000}};
- for (auto mi : ms) {
+ for (auto user_metadata : user_metadata_list) {
size_t slot;
LocalHandle fence;
- auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
- ASSERT_TRUE(p1_status.ok());
+ DvrNativeBufferMetadata mi, mo;
+
+ auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
+ EXPECT_TRUE(p1_status.ok());
auto p1 = p1_status.take();
- ASSERT_NE(nullptr, p1);
- ASSERT_EQ(p1->Post(LocalHandle(-1), &mi, sizeof(mi)), 0);
- TestMetadata mo;
- auto c1_status = consumer_queue_->Dequeue(100, &slot, &mo, &fence);
- ASSERT_TRUE(c1_status.ok());
+ ASSERT_NE(p1, nullptr);
+
+ // TODO(b/69469185): Test against metadata from consumer once we implement
+ // release metadata properly.
+ // EXPECT_EQ(mo.user_metadata_ptr, 0U);
+ // EXPECT_EQ(mo.user_metadata_size, 0U);
+
+ mi.user_metadata_size = kUserMetadataSize;
+ mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
+ EXPECT_EQ(p1->PostAsync(&mi, {}), 0);
+ auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
+ EXPECT_TRUE(c1_status.ok());
auto c1 = c1_status.take();
- ASSERT_EQ(mi.a, mo.a);
- ASSERT_EQ(mi.b, mo.b);
- ASSERT_EQ(mi.c, mo.c);
- c1->Release(LocalHandle(-1));
+ ASSERT_NE(c1, nullptr);
+
+ EXPECT_EQ(mo.user_metadata_size, kUserMetadataSize);
+ auto out_user_metadata =
+ reinterpret_cast<TestUserMetadata*>(mo.user_metadata_ptr);
+ EXPECT_EQ(user_metadata.a, out_user_metadata->a);
+ EXPECT_EQ(user_metadata.b, out_user_metadata->b);
+ EXPECT_EQ(user_metadata.c, out_user_metadata->c);
+
+ // When releasing the buffer, empty metadata is also legitimate.
+ mi.user_metadata_size = 0U;
+ mi.user_metadata_ptr = 0U;
+ c1->ReleaseAsync(&mi, {});
}
}
-TEST_F(BufferHubQueueTest, TestMetadataMismatch) {
- ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
- UsagePolicy{}));
+TEST_F(BufferHubQueueTest, TestUserMetadataMismatch) {
+ ASSERT_TRUE(CreateQueues(
+ config_builder_.SetMetadata<TestUserMetadata>().Build(), UsagePolicy{}));
AllocateBuffer();
- int64_t mi = 3;
+ TestUserMetadata user_metadata;
size_t slot;
LocalHandle fence;
- auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
- ASSERT_TRUE(p1_status.ok());
+ DvrNativeBufferMetadata mi, mo;
+ auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
+ EXPECT_TRUE(p1_status.ok());
auto p1 = p1_status.take();
- ASSERT_NE(nullptr, p1);
- ASSERT_EQ(p1->Post(LocalHandle(-1), &mi, sizeof(mi)), 0);
+ ASSERT_NE(p1, nullptr);
- int32_t mo;
- // Acquire a buffer with mismatched metadata is not OK.
- auto c1_status = consumer_queue_->Dequeue(100, &slot, &mo, &fence);
- ASSERT_FALSE(c1_status.ok());
+ // Post with mismatched user metadata size will fail. But the producer buffer
+ // itself should stay untouched.
+ mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
+ mi.user_metadata_size = kUserMetadataSize + 1;
+ EXPECT_EQ(p1->PostAsync(&mi, {}), -E2BIG);
+ // Post with the exact same user metadata size will succeed.
+ mi.user_metadata_ptr = reinterpret_cast<uint64_t>(&user_metadata);
+ mi.user_metadata_size = kUserMetadataSize;
+ EXPECT_EQ(p1->PostAsync(&mi, {}), 0);
}
TEST_F(BufferHubQueueTest, TestEnqueue) {
@@ -421,32 +458,32 @@
size_t slot;
LocalHandle fence;
- auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
+ DvrNativeBufferMetadata mo;
+ auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(p1_status.ok());
auto p1 = p1_status.take();
ASSERT_NE(nullptr, p1);
- int64_t mo;
producer_queue_->Enqueue(p1, slot, 0ULL);
- auto c1_status = consumer_queue_->Dequeue(100, &slot, &mo, &fence);
+ auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_FALSE(c1_status.ok());
}
TEST_F(BufferHubQueueTest, TestAllocateBuffer) {
- ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
- UsagePolicy{}));
+ ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));
- size_t s1;
+ size_t ps1;
AllocateBuffer();
LocalHandle fence;
- auto p1_status = producer_queue_->Dequeue(100, &s1, &fence);
+ DvrNativeBufferMetadata mi, mo;
+ auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &ps1, &mo, &fence);
ASSERT_TRUE(p1_status.ok());
auto p1 = p1_status.take();
- ASSERT_NE(nullptr, p1);
+ ASSERT_NE(p1, nullptr);
// producer queue is exhausted
- size_t s2;
- auto p2_status = producer_queue_->Dequeue(100, &s2, &fence);
+ size_t ps2;
+ auto p2_status = producer_queue_->Dequeue(kTimeoutMs, &ps2, &mo, &fence);
ASSERT_FALSE(p2_status.ok());
ASSERT_EQ(ETIMEDOUT, p2_status.error());
@@ -456,41 +493,43 @@
ASSERT_EQ(producer_queue_->capacity(), 2U);
// now we can dequeue again
- p2_status = producer_queue_->Dequeue(100, &s2, &fence);
+ p2_status = producer_queue_->Dequeue(kTimeoutMs, &ps2, &mo, &fence);
ASSERT_TRUE(p2_status.ok());
auto p2 = p2_status.take();
- ASSERT_NE(nullptr, p2);
+ ASSERT_NE(p2, nullptr);
ASSERT_EQ(producer_queue_->count(), 0U);
// p1 and p2 should have different slot number
- ASSERT_NE(s1, s2);
+ ASSERT_NE(ps1, ps2);
// Consumer queue does not import buffers until |Dequeue| or |ImportBuffers|
// are called. So far consumer_queue_ should be empty.
ASSERT_EQ(consumer_queue_->count(), 0U);
int64_t seq = 1;
- ASSERT_EQ(p1->Post(LocalHandle(), seq), 0);
+ mi.index = seq;
+ ASSERT_EQ(p1->PostAsync(&mi, {}), 0);
+
size_t cs1, cs2;
- auto c1_status = consumer_queue_->Dequeue(100, &cs1, &seq, &fence);
+ auto c1_status = consumer_queue_->Dequeue(kTimeoutMs, &cs1, &mo, &fence);
ASSERT_TRUE(c1_status.ok());
auto c1 = c1_status.take();
- ASSERT_NE(nullptr, c1);
+ ASSERT_NE(c1, nullptr);
ASSERT_EQ(consumer_queue_->count(), 0U);
ASSERT_EQ(consumer_queue_->capacity(), 2U);
- ASSERT_EQ(cs1, s1);
+ ASSERT_EQ(cs1, ps1);
- ASSERT_EQ(p2->Post(LocalHandle(), seq), 0);
- auto c2_status = consumer_queue_->Dequeue(100, &cs2, &seq, &fence);
+ ASSERT_EQ(p2->PostAsync(&mi, {}), 0);
+ auto c2_status = consumer_queue_->Dequeue(kTimeoutMs, &cs2, &mo, &fence);
ASSERT_TRUE(c2_status.ok());
auto c2 = c2_status.take();
- ASSERT_NE(nullptr, c2);
- ASSERT_EQ(cs2, s2);
+ ASSERT_NE(c2, nullptr);
+ ASSERT_EQ(cs2, ps2);
}
TEST_F(BufferHubQueueTest, TestUsageSetMask) {
const uint32_t set_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
- ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
- UsagePolicy{set_mask, 0, 0, 0}));
+ ASSERT_TRUE(
+ CreateQueues(config_builder_.Build(), UsagePolicy{set_mask, 0, 0, 0}));
// When allocation, leave out |set_mask| from usage bits on purpose.
auto status = producer_queue_->AllocateBuffer(
@@ -500,7 +539,8 @@
LocalHandle fence;
size_t slot;
- auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
+ DvrNativeBufferMetadata mo;
+ auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(p1_status.ok());
auto p1 = p1_status.take();
ASSERT_EQ(p1->usage() & set_mask, set_mask);
@@ -508,8 +548,8 @@
TEST_F(BufferHubQueueTest, TestUsageClearMask) {
const uint32_t clear_mask = GRALLOC_USAGE_SW_WRITE_OFTEN;
- ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<int64_t>().Build(),
- UsagePolicy{0, clear_mask, 0, 0}));
+ ASSERT_TRUE(
+ CreateQueues(config_builder_.Build(), UsagePolicy{0, clear_mask, 0, 0}));
// When allocation, add |clear_mask| into usage bits on purpose.
auto status = producer_queue_->AllocateBuffer(
@@ -519,10 +559,11 @@
LocalHandle fence;
size_t slot;
- auto p1_status = producer_queue_->Dequeue(100, &slot, &fence);
+ DvrNativeBufferMetadata mo;
+ auto p1_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(p1_status.ok());
auto p1 = p1_status.take();
- ASSERT_EQ(0u, p1->usage() & clear_mask);
+ ASSERT_EQ(p1->usage() & clear_mask, 0U);
}
TEST_F(BufferHubQueueTest, TestUsageDenySetMask) {
@@ -600,16 +641,15 @@
EXPECT_EQ(producer_queue_->capacity(), num_buffers);
size_t slot;
- uint64_t seq;
LocalHandle fence;
pdx::Status<void> status;
pdx::Status<std::shared_ptr<BufferConsumer>> consumer_status;
pdx::Status<std::shared_ptr<BufferProducer>> producer_status;
std::shared_ptr<BufferConsumer> consumer_buffer;
std::shared_ptr<BufferProducer> producer_buffer;
+ DvrNativeBufferMetadata mi, mo;
- ASSERT_TRUE(CreateQueues(config_builder_.SetMetadata<uint64_t>().Build(),
- UsagePolicy{}));
+ ASSERT_TRUE(CreateQueues(config_builder_.Build(), UsagePolicy{}));
// Free all buffers when buffers are avaible for dequeue.
CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
@@ -618,7 +658,7 @@
// Free all buffers when one buffer is dequeued.
CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
- producer_status = producer_queue_->Dequeue(100, &slot, &fence);
+ producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(producer_status.ok());
status = producer_queue_->FreeAllBuffers();
EXPECT_TRUE(status.ok());
@@ -626,7 +666,7 @@
// Free all buffers when all buffers are dequeued.
CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
for (size_t i = 0; i < kBufferCount; i++) {
- producer_status = producer_queue_->Dequeue(100, &slot, &fence);
+ producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(producer_status.ok());
}
status = producer_queue_->FreeAllBuffers();
@@ -634,22 +674,22 @@
// Free all buffers when one buffer is posted.
CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
- producer_status = producer_queue_->Dequeue(100, &slot, &fence);
+ producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(producer_status.ok());
producer_buffer = producer_status.take();
ASSERT_NE(nullptr, producer_buffer);
- ASSERT_EQ(0, producer_buffer->Post(fence, &seq, sizeof(seq)));
+ ASSERT_EQ(0, producer_buffer->PostAsync(&mi, fence));
status = producer_queue_->FreeAllBuffers();
EXPECT_TRUE(status.ok());
// Free all buffers when all buffers are posted.
CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
for (size_t i = 0; i < kBufferCount; i++) {
- producer_status = producer_queue_->Dequeue(100, &slot, &fence);
+ producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(producer_status.ok());
producer_buffer = producer_status.take();
- ASSERT_NE(nullptr, producer_buffer);
- ASSERT_EQ(0, producer_buffer->Post(fence, &seq, sizeof(seq)));
+ ASSERT_NE(producer_buffer, nullptr);
+ ASSERT_EQ(producer_buffer->PostAsync(&mi, fence), 0);
}
status = producer_queue_->FreeAllBuffers();
EXPECT_TRUE(status.ok());
@@ -657,12 +697,12 @@
// Free all buffers when all buffers are acquired.
CHECK_NO_BUFFER_THEN_ALLOCATE(kBufferCount);
for (size_t i = 0; i < kBufferCount; i++) {
- producer_status = producer_queue_->Dequeue(100, &slot, &fence);
+ producer_status = producer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(producer_status.ok());
producer_buffer = producer_status.take();
- ASSERT_NE(nullptr, producer_buffer);
- ASSERT_EQ(0, producer_buffer->Post(fence, &seq, sizeof(seq)));
- consumer_status = consumer_queue_->Dequeue(100, &slot, &seq, &fence);
+ ASSERT_NE(producer_buffer, nullptr);
+ ASSERT_EQ(producer_buffer->PostAsync(&mi, fence), 0);
+ consumer_status = consumer_queue_->Dequeue(kTimeoutMs, &slot, &mo, &fence);
ASSERT_TRUE(consumer_status.ok());
}
@@ -750,18 +790,18 @@
EXPECT_TRUE(s3.ok());
std::shared_ptr<BufferProducer> p1 = s3.take();
- EXPECT_NE(p1, nullptr);
+ ASSERT_NE(p1, nullptr);
producer_meta.timestamp = 42;
EXPECT_EQ(p1->PostAsync(&producer_meta, LocalHandle()), 0);
// Make sure the buffer can be dequeued from consumer side.
- auto s4 = consumer_queue_->Dequeue(100, &slot, &consumer_meta, &fence);
+ auto s4 = consumer_queue_->Dequeue(kTimeoutMs, &slot, &consumer_meta, &fence);
EXPECT_TRUE(s4.ok());
EXPECT_EQ(consumer_queue_->capacity(), 1U);
auto consumer = s4.take();
- EXPECT_NE(consumer, nullptr);
+ ASSERT_NE(consumer, nullptr);
EXPECT_EQ(producer_meta.timestamp, consumer_meta.timestamp);
}
@@ -817,18 +857,18 @@
EXPECT_TRUE(s2.ok());
std::shared_ptr<BufferProducer> p1 = s2.take();
- EXPECT_NE(p1, nullptr);
+ ASSERT_NE(p1, nullptr);
producer_meta.timestamp = 42;
EXPECT_EQ(p1->PostAsync(&producer_meta, LocalHandle()), 0);
// Make sure the buffer can be dequeued from consumer side.
- auto s3 = consumer_queue_->Dequeue(100, &slot, &consumer_meta, &fence);
+ auto s3 = consumer_queue_->Dequeue(kTimeoutMs, &slot, &consumer_meta, &fence);
EXPECT_TRUE(s3.ok());
EXPECT_EQ(consumer_queue_->capacity(), 1U);
auto consumer = s3.take();
- EXPECT_NE(consumer, nullptr);
+ ASSERT_NE(consumer, nullptr);
EXPECT_EQ(producer_meta.timestamp, consumer_meta.timestamp);
}
diff --git a/libs/vr/libbufferhubqueue/tests/buffer_hub_queue_producer-test.cpp b/libs/vr/libbufferhubqueue/tests/buffer_hub_queue_producer-test.cpp
index 28cd63a..96f5404 100644
--- a/libs/vr/libbufferhubqueue/tests/buffer_hub_queue_producer-test.cpp
+++ b/libs/vr/libbufferhubqueue/tests/buffer_hub_queue_producer-test.cpp
@@ -3,12 +3,15 @@
#include <base/logging.h>
#include <gui/IProducerListener.h>
#include <gui/Surface.h>
+#include <pdx/default_transport/channel_parcelable.h>
#include <gtest/gtest.h>
namespace android {
namespace dvr {
+using pdx::LocalHandle;
+
namespace {
// Default dimensions before setDefaultBufferSize is called by the consumer.
@@ -92,7 +95,13 @@
ALOGD_IF(TRACE, "Begin test: %s.%s", testInfo->test_case_name(),
testInfo->name());
- mProducer = BufferHubQueueProducer::Create();
+ auto config = ProducerQueueConfigBuilder()
+ .SetMetadata<DvrNativeBufferMetadata>()
+ .Build();
+ auto queue = ProducerQueue::Create(config, UsagePolicy{});
+ ASSERT_TRUE(queue != nullptr);
+
+ mProducer = BufferHubQueueProducer::Create(std::move(queue));
ASSERT_TRUE(mProducer != nullptr);
mSurface = new Surface(mProducer, true);
ASSERT_TRUE(mSurface != nullptr);
@@ -546,6 +555,55 @@
EXPECT_EQ(NO_ERROR, mProducer->disconnect(kTestApi));
}
+TEST_F(BufferHubQueueProducerTest, TakeAsParcelable) {
+ // Connected producer cannot be taken out as a parcelable.
+ EXPECT_NO_FATAL_FAILURE(ConnectProducer());
+ ProducerQueueParcelable producer_parcelable;
+ EXPECT_EQ(mProducer->TakeAsParcelable(&producer_parcelable), BAD_VALUE);
+
+ // Create a valid dummy producer parcelable.
+ auto dummy_channel_parcelable =
+ std::make_unique<pdx::default_transport::ChannelParcelable>(
+ LocalHandle(0), LocalHandle(0), LocalHandle(0));
+ EXPECT_TRUE(dummy_channel_parcelable->IsValid());
+ ProducerQueueParcelable dummy_producer_parcelable(
+ std::move(dummy_channel_parcelable));
+ EXPECT_TRUE(dummy_producer_parcelable.IsValid());
+
+ // Disconnected producer can be taken out, but only to an invalid parcelable.
+ ASSERT_EQ(mProducer->disconnect(kTestApi), NO_ERROR);
+ EXPECT_EQ(mProducer->TakeAsParcelable(&dummy_producer_parcelable), BAD_VALUE);
+ EXPECT_FALSE(producer_parcelable.IsValid());
+ EXPECT_EQ(mProducer->TakeAsParcelable(&producer_parcelable), NO_ERROR);
+ EXPECT_TRUE(producer_parcelable.IsValid());
+
+ // Should still be able to query buffer dimension after disconnect.
+ int32_t value = -1;
+ EXPECT_EQ(NO_ERROR, mProducer->query(NATIVE_WINDOW_WIDTH, &value));
+ EXPECT_EQ(static_cast<uint32_t>(value), kDefaultWidth);
+
+ EXPECT_EQ(mProducer->query(NATIVE_WINDOW_HEIGHT, &value), NO_ERROR);
+ EXPECT_EQ(static_cast<uint32_t>(value), kDefaultHeight);
+
+ EXPECT_EQ(mProducer->query(NATIVE_WINDOW_FORMAT, &value), NO_ERROR);
+ EXPECT_EQ(value, kDefaultFormat);
+
+ // But connect to API will fail.
+ IGraphicBufferProducer::QueueBufferOutput output;
+ EXPECT_EQ(mProducer->connect(kDummyListener, kTestApi, kTestControlledByApp,
+ &output),
+ BAD_VALUE);
+
+ // Create a new producer from the parcelable and connect to kTestApi should
+ // succeed.
+ sp<BufferHubQueueProducer> new_producer =
+ BufferHubQueueProducer::Create(std::move(producer_parcelable));
+ ASSERT_TRUE(new_producer != nullptr);
+ EXPECT_EQ(new_producer->connect(kDummyListener, kTestApi,
+ kTestControlledByApp, &output),
+ NO_ERROR);
+}
+
} // namespace
} // namespace dvr
diff --git a/services/surfaceflinger/BufferLayer.cpp b/services/surfaceflinger/BufferLayer.cpp
index c477a3b..4e214d1 100644
--- a/services/surfaceflinger/BufferLayer.cpp
+++ b/services/surfaceflinger/BufferLayer.cpp
@@ -262,9 +262,6 @@
}
void BufferLayer::onLayerDisplayed(const sp<Fence>& releaseFence) {
- if (mHwcLayers.empty()) {
- return;
- }
mSurfaceFlingerConsumer->setReleaseFence(releaseFence);
}
diff --git a/services/surfaceflinger/DisplayDevice.cpp b/services/surfaceflinger/DisplayDevice.cpp
index ef7d482..6d6781e 100644
--- a/services/surfaceflinger/DisplayDevice.cpp
+++ b/services/surfaceflinger/DisplayDevice.cpp
@@ -325,6 +325,14 @@
return mVisibleLayersSortedByZ;
}
+void DisplayDevice::setLayersNeedingFences(const Vector< sp<Layer> >& layers) {
+ mLayersNeedingFences = layers;
+}
+
+const Vector< sp<Layer> >& DisplayDevice::getLayersNeedingFences() const {
+ return mLayersNeedingFences;
+}
+
Region DisplayDevice::getDirtyRegion(bool repaintEverything) const {
Region dirty;
if (repaintEverything) {
diff --git a/services/surfaceflinger/DisplayDevice.h b/services/surfaceflinger/DisplayDevice.h
index 50e30b2..e388a5b 100644
--- a/services/surfaceflinger/DisplayDevice.h
+++ b/services/surfaceflinger/DisplayDevice.h
@@ -113,6 +113,8 @@
void setVisibleLayersSortedByZ(const Vector< sp<Layer> >& layers);
const Vector< sp<Layer> >& getVisibleLayersSortedByZ() const;
+ void setLayersNeedingFences(const Vector< sp<Layer> >& layers);
+ const Vector< sp<Layer> >& getLayersNeedingFences() const;
Region getDirtyRegion(bool repaintEverything) const;
void setLayerStack(uint32_t stack);
@@ -214,6 +216,8 @@
// list of visible layers on that display
Vector< sp<Layer> > mVisibleLayersSortedByZ;
+ // list of layers needing fences
+ Vector< sp<Layer> > mLayersNeedingFences;
/*
* Transaction state
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 50e2c6f..58b5db0 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -217,9 +217,9 @@
return true;
}
-void Layer::destroyHwcLayer(int32_t hwcId) {
+bool Layer::destroyHwcLayer(int32_t hwcId) {
if (mHwcLayers.count(hwcId) == 0) {
- return;
+ return false;
}
auto& hwcInfo = mHwcLayers[hwcId];
LOG_ALWAYS_FATAL_IF(hwcInfo.layer == nullptr, "Attempt to destroy null layer");
@@ -228,6 +228,8 @@
// The layer destroyed listener should have cleared the entry from
// mHwcLayers. Verify that.
LOG_ALWAYS_FATAL_IF(mHwcLayers.count(hwcId) != 0, "Stale layer entry in mHwcLayers");
+
+ return true;
}
void Layer::destroyAllHwcLayers() {
@@ -1795,7 +1797,7 @@
LayerProtoHelper::writeToProto(transform, layerInfo->mutable_transform());
LayerProtoHelper::writeToProto(requestedTransform, layerInfo->mutable_requested_transform());
- auto parent = getParent();
+ auto parent = useDrawing ? mDrawingParent.promote() : mCurrentParent.promote();
if (parent != nullptr) {
layerInfo->set_parent(parent->sequence);
}
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 0fc5ad5..44b178a 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -414,7 +414,7 @@
// -----------------------------------------------------------------------
bool createHwcLayer(HWComposer* hwc, int32_t hwcId);
- void destroyHwcLayer(int32_t hwcId);
+ bool destroyHwcLayer(int32_t hwcId);
void destroyAllHwcLayers();
bool hasHwcLayer(int32_t hwcId) { return mHwcLayers.count(hwcId) > 0; }
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 1237ee4..dc178d7 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -1585,7 +1585,7 @@
ATRACE_CALL();
ATRACE_NAME(where);
if (CC_UNLIKELY(mTracing.isEnabled())) {
- mTracing.traceLayers(where, dumpProtoInfo());
+ mTracing.traceLayers(where, dumpProtoInfo(LayerVector::StateSet::Drawing));
}
}
@@ -1781,6 +1781,7 @@
Region opaqueRegion;
Region dirtyRegion;
Vector<sp<Layer>> layersSortedByZ;
+ Vector<sp<Layer>> layersNeedingFences;
const sp<DisplayDevice>& displayDevice(mDisplays[dpy]);
const Transform& tr(displayDevice->getTransform());
const Rect bounds(displayDevice->getBounds());
@@ -1788,6 +1789,7 @@
computeVisibleRegions(displayDevice, dirtyRegion, opaqueRegion);
mDrawingState.traverseInZOrder([&](Layer* layer) {
+ bool hwcLayerDestroyed = false;
if (layer->belongsToDisplay(displayDevice->getLayerStack(),
displayDevice->isPrimary())) {
Region drawRegion(tr.transform(
@@ -1798,18 +1800,32 @@
} else {
// Clear out the HWC layer if this layer was
// previously visible, but no longer is
- layer->destroyHwcLayer(
+ hwcLayerDestroyed = layer->destroyHwcLayer(
displayDevice->getHwcDisplayId());
}
} else {
// WM changes displayDevice->layerStack upon sleep/awake.
// Here we make sure we delete the HWC layers even if
// WM changed their layer stack.
- layer->destroyHwcLayer(displayDevice->getHwcDisplayId());
+ hwcLayerDestroyed = layer->destroyHwcLayer(
+ displayDevice->getHwcDisplayId());
+ }
+
+ // If a layer is not going to get a release fence because
+ // it is invisible, but it is also going to release its
+ // old buffer, add it to the list of layers needing
+ // fences.
+ if (hwcLayerDestroyed) {
+ auto found = std::find(mLayersWithQueuedFrames.cbegin(),
+ mLayersWithQueuedFrames.cend(), layer);
+ if (found != mLayersWithQueuedFrames.cend()) {
+ layersNeedingFences.add(layer);
+ }
}
});
}
displayDevice->setVisibleLayersSortedByZ(layersSortedByZ);
+ displayDevice->setLayersNeedingFences(layersNeedingFences);
displayDevice->undefinedRegion.set(bounds);
displayDevice->undefinedRegion.subtractSelf(
tr.transform(opaqueRegion));
@@ -2037,15 +2053,35 @@
displayDevice->onSwapBuffersCompleted();
displayDevice->makeCurrent(mEGLDisplay, mEGLContext);
for (auto& layer : displayDevice->getVisibleLayersSortedByZ()) {
- sp<Fence> releaseFence = Fence::NO_FENCE;
+ // The layer buffer from the previous frame (if any) is released
+ // by HWC only when the release fence from this frame (if any) is
+ // signaled. Always get the release fence from HWC first.
+ auto hwcLayer = layer->getHwcLayer(hwcId);
+ sp<Fence> releaseFence = mHwc->getLayerReleaseFence(hwcId, hwcLayer);
+
+ // If the layer was client composited in the previous frame, we
+ // need to merge with the previous client target acquire fence.
+ // Since we do not track that, always merge with the current
+ // client target acquire fence when it is available, even though
+ // this is suboptimal.
if (layer->getCompositionType(hwcId) == HWC2::Composition::Client) {
- releaseFence = displayDevice->getClientTargetAcquireFence();
- } else {
- auto hwcLayer = layer->getHwcLayer(hwcId);
- releaseFence = mHwc->getLayerReleaseFence(hwcId, hwcLayer);
+ releaseFence = Fence::merge("LayerRelease", releaseFence,
+ displayDevice->getClientTargetAcquireFence());
}
+
layer->onLayerDisplayed(releaseFence);
}
+
+ // We've got a list of layers needing fences, that are disjoint with
+ // displayDevice->getVisibleLayersSortedByZ. The best we can do is to
+ // supply them with the present fence.
+ if (!displayDevice->getLayersNeedingFences().isEmpty()) {
+ sp<Fence> presentFence = mHwc->getPresentFence(hwcId);
+ for (auto& layer : displayDevice->getLayersNeedingFences()) {
+ layer->onLayerDisplayed(presentFence);
+ }
+ }
+
if (hwcId >= 0) {
mHwc->clearReleaseFences(hwcId);
}
@@ -3540,7 +3576,7 @@
size_t numArgs = args.size();
if (asProto) {
- LayersProto layersProto = dumpProtoInfo();
+ LayersProto layersProto = dumpProtoInfo(LayerVector::StateSet::Current);
result.append(layersProto.SerializeAsString().c_str(), layersProto.ByteSize());
dumpAll = false;
}
@@ -3789,11 +3825,13 @@
result.append("\n");
}
-LayersProto SurfaceFlinger::dumpProtoInfo() const {
+LayersProto SurfaceFlinger::dumpProtoInfo(LayerVector::StateSet stateSet) const {
LayersProto layersProto;
- mCurrentState.traverseInZOrder([&](Layer* layer) {
+ const bool useDrawing = stateSet == LayerVector::StateSet::Drawing;
+ const State& state = useDrawing ? mDrawingState : mCurrentState;
+ state.traverseInZOrder([&](Layer* layer) {
LayerProto* layerProto = layersProto.add_layers();
- layer->writeToProto(layerProto, LayerVector::StateSet::Current);
+ layer->writeToProto(layerProto, stateSet);
});
return layersProto;
@@ -3864,7 +3902,7 @@
result.appendFormat("Visible layers (count = %zu)\n", mNumLayers);
colorizer.reset(result);
- LayersProto layersProto = dumpProtoInfo();
+ LayersProto layersProto = dumpProtoInfo(LayerVector::StateSet::Current);
auto layerTree = LayerProtoParser::generateLayerTree(layersProto);
result.append(LayerProtoParser::layersToString(layerTree).c_str());
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 67e4607..386d42b 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -602,7 +602,7 @@
std::vector<OccupancyTracker::Segment>&& history);
void dumpBufferingStats(String8& result) const;
void dumpWideColorInfo(String8& result) const;
- LayersProto dumpProtoInfo() const;
+ LayersProto dumpProtoInfo(LayerVector::StateSet stateSet) const;
bool isLayerTripleBufferingDisabled() const {
return this->mLayerTripleBufferingDisabled;
diff --git a/services/surfaceflinger/SurfaceTracing.h b/services/surfaceflinger/SurfaceTracing.h
index 9b21989..590ab96 100644
--- a/services/surfaceflinger/SurfaceTracing.h
+++ b/services/surfaceflinger/SurfaceTracing.h
@@ -37,7 +37,7 @@
void traceLayers(const char* where, LayersProto);
private:
- static constexpr auto DEFAULT_FILENAME = "/data/misc/trace/layerstrace.pb";
+ static constexpr auto DEFAULT_FILENAME = "/data/misc/wmtrace/layers_trace.pb";
status_t writeProtoFileLocked();
diff --git a/services/surfaceflinger/layerproto/LayerProtoParser.cpp b/services/surfaceflinger/layerproto/LayerProtoParser.cpp
index e6f2ce7..bf37e1e 100644
--- a/services/surfaceflinger/layerproto/LayerProtoParser.cpp
+++ b/services/surfaceflinger/layerproto/LayerProtoParser.cpp
@@ -29,9 +29,11 @@
uint32_t rs = rhs->layerStack;
if (ls != rs) return ls < rs;
- uint32_t lz = lhs->z;
- uint32_t rz = rhs->z;
- if (lz != rz) return lz < rz;
+ int32_t lz = lhs->z;
+ int32_t rz = rhs->z;
+ if (lz != rz) {
+ return (lz > rz) ? 1 : -1;
+ }
return lhs->id < rhs->id;
}
diff --git a/vulkan/include/vulkan/vulkan_loader_data.h b/vulkan/include/vulkan/vulkan_loader_data.h
deleted file mode 100644
index 8ea4666..0000000
--- a/vulkan/include/vulkan/vulkan_loader_data.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VULKAN_VULKAN_LOADER_DATA_H
-#define VULKAN_VULKAN_LOADER_DATA_H
-
-#include <string>
-
-struct android_namespace_t;
-
-namespace vulkan {
- struct LoaderData {
- std::string layer_path;
- android_namespace_t* app_namespace;
-
- __attribute__((visibility("default"))) static LoaderData& GetInstance();
- };
-}
-
-#endif
diff --git a/vulkan/libvulkan/Android.bp b/vulkan/libvulkan/Android.bp
index b55fa27..c9faf28 100644
--- a/vulkan/libvulkan/Android.bp
+++ b/vulkan/libvulkan/Android.bp
@@ -61,7 +61,6 @@
"layers_extensions.cpp",
"stubhal.cpp",
"swapchain.cpp",
- "vulkan_loader_data.cpp",
],
export_header_lib_headers: ["vulkan_headers"],
diff --git a/vulkan/libvulkan/api.cpp b/vulkan/libvulkan/api.cpp
index d840786..673a066 100644
--- a/vulkan/libvulkan/api.cpp
+++ b/vulkan/libvulkan/api.cpp
@@ -29,14 +29,17 @@
#include <new>
#include <utility>
+#include <android-base/strings.h>
#include <cutils/properties.h>
#include <log/log.h>
#include <vulkan/vk_layer_interface.h>
+#include <graphicsenv/GraphicsEnv.h>
#include "api.h"
#include "driver.h"
#include "layers_extensions.h"
+
namespace vulkan {
namespace api {
@@ -121,15 +124,33 @@
if (!is_instance_ || !driver::Debuggable())
return;
- ParseDebugVulkanLayers();
- property_list(ParseDebugVulkanLayer, this);
+ GetLayersFromSettings();
- // sort by priorities
- auto& arr = implicit_layers_;
- std::sort(arr.elements, arr.elements + arr.count,
- [](const ImplicitLayer& a, const ImplicitLayer& b) {
- return (a.priority < b.priority);
- });
+ // If no layers specified via Settings, check legacy properties
+ if (implicit_layers_.count <= 0) {
+ ParseDebugVulkanLayers();
+ property_list(ParseDebugVulkanLayer, this);
+
+ // sort by priorities
+ auto& arr = implicit_layers_;
+ std::sort(arr.elements, arr.elements + arr.count,
+ [](const ImplicitLayer& a, const ImplicitLayer& b) {
+ return (a.priority < b.priority);
+ });
+ }
+ }
+
+ void GetLayersFromSettings() {
+ // These will only be available if conditions are met in GraphicsEnvironment
+ // gpu_debug_layers = layer1:layer2:layerN
+ const std::string layers = android::GraphicsEnv::getInstance().getDebugLayers();
+ if (!layers.empty()) {
+ ALOGV("Debug layer list: %s", layers.c_str());
+ std::vector<std::string> paths = android::base::Split(layers, ":");
+ for (uint32_t i = 0; i < paths.size(); i++) {
+ AddImplicitLayer(int(i), paths[i].c_str(), paths[i].length());
+ }
+ }
}
void ParseDebugVulkanLayers() {
diff --git a/vulkan/libvulkan/layers_extensions.cpp b/vulkan/libvulkan/layers_extensions.cpp
index 05856d3..3a59208 100644
--- a/vulkan/libvulkan/layers_extensions.cpp
+++ b/vulkan/libvulkan/layers_extensions.cpp
@@ -29,11 +29,10 @@
#include <android/dlext.h>
#include <android-base/strings.h>
#include <cutils/properties.h>
+#include <graphicsenv/GraphicsEnv.h>
#include <log/log.h>
#include <ziparchive/zip_archive.h>
-#include <vulkan/vulkan_loader_data.h>
-
// TODO(jessehall): The whole way we deal with extensions is pretty hokey, and
// not a good long-term solution. Having a hard-coded enum of extensions is
// bad, of course. Representing sets of extensions (requested, supported, etc.)
@@ -69,11 +68,16 @@
class LayerLibrary {
public:
- explicit LayerLibrary(const std::string& path)
- : path_(path), dlhandle_(nullptr), refcount_(0) {}
+ explicit LayerLibrary(const std::string& path,
+ const std::string& filename)
+ : path_(path),
+ filename_(filename),
+ dlhandle_(nullptr),
+ refcount_(0) {}
LayerLibrary(LayerLibrary&& other)
: path_(std::move(other.path_)),
+ filename_(std::move(other.filename_)),
dlhandle_(other.dlhandle_),
refcount_(other.refcount_) {
other.dlhandle_ = nullptr;
@@ -94,9 +98,14 @@
const char* gpa_name,
size_t gpa_name_len) const;
+ const std::string GetFilename() { return filename_; }
+
private:
const std::string path_;
+ // Track the filename alone so we can detect duplicates
+ const std::string filename_;
+
std::mutex mutex_;
void* dlhandle_;
size_t refcount_;
@@ -111,7 +120,7 @@
// any symbol dependencies will be resolved by system libraries. They
// can't safely use libc++_shared, for example. Which is one reason
// (among several) we only allow them in non-user builds.
- auto app_namespace = LoaderData::GetInstance().app_namespace;
+ auto app_namespace = android::GraphicsEnv::getInstance().getAppNamespace();
if (app_namespace &&
!android::base::StartsWith(path_, kSystemLayerLibraryDir)) {
android_dlextinfo dlextinfo = {};
@@ -305,8 +314,8 @@
std::vector<LayerLibrary> g_layer_libraries;
std::vector<Layer> g_instance_layers;
-void AddLayerLibrary(const std::string& path) {
- LayerLibrary library(path);
+void AddLayerLibrary(const std::string& path, const std::string& filename) {
+ LayerLibrary library(path + "/" + filename, filename);
if (!library.Open())
return;
@@ -398,7 +407,25 @@
ForEachFileInPath(path, [&](const std::string& filename) {
if (android::base::StartsWith(filename, "libVkLayer") &&
android::base::EndsWith(filename, ".so")) {
- AddLayerLibrary(path + "/" + filename);
+
+ // Check to ensure we haven't seen this layer already
+ // Let the first instance of the shared object be enumerated
+ // We're searching for layers in following order:
+ // 1. system path
+ // 2. libraryPermittedPath (if enabled)
+ // 3. libraryPath
+
+ bool duplicate = false;
+ for (auto& layer : g_layer_libraries) {
+ if (layer.GetFilename() == filename) {
+ ALOGV("Skipping duplicate layer %s in %s",
+ filename.c_str(), path.c_str());
+ duplicate = true;
+ }
+ }
+
+ if (!duplicate)
+ AddLayerLibrary(path, filename);
}
});
}
@@ -428,8 +455,8 @@
prctl(PR_GET_DUMPABLE, 0, 0, 0, 0)) {
DiscoverLayersInPathList(kSystemLayerLibraryDir);
}
- if (!LoaderData::GetInstance().layer_path.empty())
- DiscoverLayersInPathList(LoaderData::GetInstance().layer_path);
+ if (!android::GraphicsEnv::getInstance().getLayerPaths().empty())
+ DiscoverLayersInPathList(android::GraphicsEnv::getInstance().getLayerPaths());
}
uint32_t GetLayerCount() {
diff --git a/vulkan/libvulkan/vulkan_loader_data.cpp b/vulkan/libvulkan/vulkan_loader_data.cpp
deleted file mode 100644
index 0eda0af..0000000
--- a/vulkan/libvulkan/vulkan_loader_data.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <vulkan/vulkan_loader_data.h>
-
-using namespace vulkan;
-
-LoaderData& LoaderData::GetInstance() {
- static LoaderData loader_data = {};
- return loader_data;
-}