Merge "Added protobuf to gather layer info."
diff --git a/cmds/servicemanager/Android.bp b/cmds/servicemanager/Android.bp
index d3d396f..428561b 100644
--- a/cmds/servicemanager/Android.bp
+++ b/cmds/servicemanager/Android.bp
@@ -46,6 +46,6 @@
cflags: [
"-DVENDORSERVICEMANAGER=1",
],
- shared_libs: ["libcutils", "libselinux_vendor"],
+ shared_libs: ["libcutils", "libselinux"],
init_rc: ["vndservicemanager.rc"],
}
diff --git a/libs/binder/Binder.cpp b/libs/binder/Binder.cpp
index 890ef30..a81f44e 100644
--- a/libs/binder/Binder.cpp
+++ b/libs/binder/Binder.cpp
@@ -249,6 +249,8 @@
if (resultReceiver != NULL) {
resultReceiver->send(INVALID_OPERATION);
}
+
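+            // Return explicitly so this case no longer falls through into
+            // SYSPROPS_TRANSACTION below.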
+ return NO_ERROR;
}
case SYSPROPS_TRANSACTION: {
diff --git a/libs/ui/Android.bp b/libs/ui/Android.bp
index a610a51..102964f 100644
--- a/libs/ui/Android.bp
+++ b/libs/ui/Android.bp
@@ -118,4 +118,7 @@
vendor_available: true,
}
-subdirs = ["tests"]
+subdirs = [
+ "tests",
+ "tools",
+]
diff --git a/libs/vr/libbufferhub/Android.bp b/libs/vr/libbufferhub/Android.bp
index f327200..e3cb494 100644
--- a/libs/vr/libbufferhub/Android.bp
+++ b/libs/vr/libbufferhub/Android.bp
@@ -61,10 +61,10 @@
cc_test {
tags: ["optional"],
- srcs: ["bufferhub_tests.cpp"],
+ srcs: ["buffer_hub-test.cpp"],
static_libs: ["libbufferhub"] + staticLibraries,
shared_libs: sharedLibraries,
header_libs: headerLibraries,
- name: "bufferhub_tests",
+ name: "buffer_hub-test",
}
diff --git a/libs/vr/libbufferhub/bufferhub_tests.cpp b/libs/vr/libbufferhub/buffer_hub-test.cpp
similarity index 68%
rename from libs/vr/libbufferhub/bufferhub_tests.cpp
rename to libs/vr/libbufferhub/buffer_hub-test.cpp
index c4b9a8c..3c99f99 100644
--- a/libs/vr/libbufferhub/bufferhub_tests.cpp
+++ b/libs/vr/libbufferhub/buffer_hub-test.cpp
@@ -20,6 +20,10 @@
using android::dvr::BufferConsumer;
using android::dvr::BufferHubDefs::kConsumerStateMask;
using android::dvr::BufferHubDefs::kProducerStateBit;
+using android::dvr::BufferHubDefs::IsBufferGained;
+using android::dvr::BufferHubDefs::IsBufferPosted;
+using android::dvr::BufferHubDefs::IsBufferAcquired;
+using android::dvr::BufferHubDefs::IsBufferReleased;
using android::dvr::BufferProducer;
using android::pdx::LocalHandle;
@@ -28,6 +32,7 @@
const int kFormat = HAL_PIXEL_FORMAT_RGBA_8888;
const int kUsage = 0;
const uint64_t kContext = 42;
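+// Note: a 64-bit buffer state word reserves one bit for the producer, leaving
+// at most 63 distinct consumer state bits per buffer.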
+const size_t kMaxConsumerCount = 63;
using LibBufferHubTest = ::testing::Test;
@@ -159,10 +164,10 @@
kWidth, kHeight, kFormat, kUsage, sizeof(uint64_t));
ASSERT_TRUE(p.get() != nullptr);
- // It's ok to create up to 63 consumer buffers.
+ // It's ok to create up to kMaxConsumerCount consumer buffers.
uint64_t buffer_state_bits = p->buffer_state_bit();
- std::array<std::unique_ptr<BufferConsumer>, 63> cs;
- for (size_t i = 0; i < 63; i++) {
+ std::array<std::unique_ptr<BufferConsumer>, kMaxConsumerCount> cs;
+ for (size_t i = 0; i < kMaxConsumerCount; i++) {
cs[i] = BufferConsumer::Import(p->CreateConsumer());
ASSERT_TRUE(cs[i].get() != nullptr);
// Expect all buffers have unique state mask.
@@ -176,7 +181,7 @@
EXPECT_EQ(state.error(), E2BIG);
// Release any consumer should allow us to re-create.
- for (size_t i = 0; i < 63; i++) {
+ for (size_t i = 0; i < kMaxConsumerCount; i++) {
buffer_state_bits &= ~cs[i]->buffer_state_bit();
cs[i] = nullptr;
cs[i] = BufferConsumer::Import(p->CreateConsumer());
@@ -240,6 +245,217 @@
EXPECT_EQ(-EALREADY, p->Gain(&fence));
}
+TEST_F(LibBufferHubTest, TestAsyncStateTransitions) {
+ std::unique_ptr<BufferProducer> p = BufferProducer::Create(
+ kWidth, kHeight, kFormat, kUsage, sizeof(uint64_t));
+ ASSERT_TRUE(p.get() != nullptr);
+ std::unique_ptr<BufferConsumer> c =
+ BufferConsumer::Import(p->CreateConsumer());
+ ASSERT_TRUE(c.get() != nullptr);
+
+ DvrNativeBufferMetadata metadata;
+ LocalHandle invalid_fence;
+
+ // The producer buffer starts in gained state.
+
+ // Acquire, release, and gain in gained state should fail.
+ EXPECT_EQ(-EBUSY, c->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+ EXPECT_EQ(-EBUSY, c->ReleaseAsync(&metadata, invalid_fence));
+ EXPECT_EQ(-EALREADY, p->GainAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+
+ // Post in gained state should succeed.
+ EXPECT_EQ(0, p->PostAsync(&metadata, invalid_fence));
+ EXPECT_EQ(p->buffer_state(), c->buffer_state());
+ EXPECT_TRUE(IsBufferPosted(p->buffer_state()));
+
+ // Post, release, and gain in posted state should fail.
+ EXPECT_EQ(-EBUSY, p->PostAsync(&metadata, invalid_fence));
+ EXPECT_EQ(-EBUSY, c->ReleaseAsync(&metadata, invalid_fence));
+ EXPECT_EQ(-EBUSY, p->GainAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+
+ // Acquire in posted state should succeed.
+ EXPECT_LT(0, RETRY_EINTR(c->Poll(10)));
+ EXPECT_EQ(0, c->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+ EXPECT_EQ(p->buffer_state(), c->buffer_state());
+ EXPECT_TRUE(IsBufferAcquired(p->buffer_state()));
+
+ // Acquire, post, and gain in acquired state should fail.
+ EXPECT_EQ(-EBUSY, c->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+ EXPECT_EQ(-EBUSY, p->PostAsync(&metadata, invalid_fence));
+ EXPECT_EQ(-EBUSY, p->GainAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+
+ // Release in acquired state should succeed.
+ EXPECT_EQ(0, c->ReleaseAsync(&metadata, invalid_fence));
+ EXPECT_LT(0, RETRY_EINTR(p->Poll(10)));
+ EXPECT_EQ(p->buffer_state(), c->buffer_state());
+ EXPECT_TRUE(IsBufferReleased(p->buffer_state()));
+
+ // Release, acquire, and post in released state should fail.
+ EXPECT_EQ(-EBUSY, c->ReleaseAsync(&metadata, invalid_fence));
+ EXPECT_EQ(-EBUSY, c->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+ EXPECT_EQ(-EBUSY, p->PostAsync(&metadata, invalid_fence));
+
+ // Gain in released state should succeed.
+ EXPECT_EQ(0, p->GainAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+ EXPECT_EQ(p->buffer_state(), c->buffer_state());
+ EXPECT_TRUE(IsBufferGained(p->buffer_state()));
+
+ // Acquire, release, and gain in gained state should fail.
+ EXPECT_EQ(-EBUSY, c->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+ EXPECT_EQ(-EBUSY, c->ReleaseAsync(&metadata, invalid_fence));
+ EXPECT_EQ(-EALREADY, p->GainAsync(&metadata, &invalid_fence));
+ EXPECT_FALSE(invalid_fence.IsValid());
+}
+
+TEST_F(LibBufferHubTest, TestZeroConsumer) {
+ std::unique_ptr<BufferProducer> p = BufferProducer::Create(
+ kWidth, kHeight, kFormat, kUsage, sizeof(uint64_t));
+ ASSERT_TRUE(p.get() != nullptr);
+
+ DvrNativeBufferMetadata metadata;
+ LocalHandle invalid_fence;
+
+ // Newly created.
+ EXPECT_TRUE(IsBufferGained(p->buffer_state()));
+ EXPECT_EQ(0, p->PostAsync(&metadata, invalid_fence));
+ EXPECT_TRUE(IsBufferPosted(p->buffer_state()));
+
+  // The buffer should stay in posted state until a consumer picks it up.
+ EXPECT_GE(0, RETRY_EINTR(p->Poll(100)));
+
+ // A new consumer should still be able to acquire the buffer immediately.
+ std::unique_ptr<BufferConsumer> c =
+ BufferConsumer::Import(p->CreateConsumer());
+ ASSERT_TRUE(c.get() != nullptr);
+ EXPECT_EQ(0, c->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_TRUE(IsBufferAcquired(c->buffer_state()));
+}
+
+TEST_F(LibBufferHubTest, TestMaxConsumers) {
+ std::unique_ptr<BufferProducer> p = BufferProducer::Create(
+ kWidth, kHeight, kFormat, kUsage, sizeof(uint64_t));
+ ASSERT_TRUE(p.get() != nullptr);
+
+ std::array<std::unique_ptr<BufferConsumer>, kMaxConsumerCount> cs;
+ for (size_t i = 0; i < kMaxConsumerCount; i++) {
+ cs[i] = BufferConsumer::Import(p->CreateConsumer());
+ ASSERT_TRUE(cs[i].get() != nullptr);
+ EXPECT_TRUE(IsBufferGained(cs[i]->buffer_state()));
+ }
+
+ DvrNativeBufferMetadata metadata;
+ LocalHandle invalid_fence;
+
+  // Posting the producer buffer should make it available to all consumers.
+ EXPECT_EQ(0, p->PostAsync(&metadata, invalid_fence));
+ EXPECT_TRUE(IsBufferPosted(p->buffer_state()));
+ for (size_t i = 0; i < kMaxConsumerCount; i++) {
+ EXPECT_TRUE(IsBufferPosted(cs[i]->buffer_state(),
+ cs[i]->buffer_state_bit()));
+ EXPECT_LT(0, RETRY_EINTR(cs[i]->Poll(10)));
+ EXPECT_EQ(0, cs[i]->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_TRUE(IsBufferAcquired(p->buffer_state()));
+ }
+
+ // All consumers have to release before the buffer is considered to be
+ // released.
+ for (size_t i = 0; i < kMaxConsumerCount; i++) {
+ EXPECT_FALSE(IsBufferReleased(p->buffer_state()));
+ EXPECT_EQ(0, cs[i]->ReleaseAsync(&metadata, invalid_fence));
+ }
+
+ EXPECT_LT(0, RETRY_EINTR(p->Poll(10)));
+ EXPECT_TRUE(IsBufferReleased(p->buffer_state()));
+
+  // Buffer state across all clients must be consistent.
+ for (size_t i = 0; i < kMaxConsumerCount; i++) {
+ EXPECT_EQ(p->buffer_state(), cs[i]->buffer_state());
+ }
+}
+
+TEST_F(LibBufferHubTest, TestCreateConsumerWhenBufferGained) {
+ std::unique_ptr<BufferProducer> p = BufferProducer::Create(
+ kWidth, kHeight, kFormat, kUsage, sizeof(uint64_t));
+ ASSERT_TRUE(p.get() != nullptr);
+ EXPECT_TRUE(IsBufferGained(p->buffer_state()));
+
+ std::unique_ptr<BufferConsumer> c =
+ BufferConsumer::Import(p->CreateConsumer());
+ ASSERT_TRUE(c.get() != nullptr);
+ EXPECT_TRUE(IsBufferGained(c->buffer_state()));
+
+ DvrNativeBufferMetadata metadata;
+ LocalHandle invalid_fence;
+
+  // Posting the gained buffer should signal the already-created consumer.
+ EXPECT_EQ(0, p->PostAsync(&metadata, invalid_fence));
+ EXPECT_TRUE(IsBufferPosted(p->buffer_state()));
+ EXPECT_LT(0, RETRY_EINTR(c->Poll(10)));
+ EXPECT_EQ(0, c->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_TRUE(IsBufferAcquired(c->buffer_state()));
+}
+
+TEST_F(LibBufferHubTest, TestCreateConsumerWhenBufferPosted) {
+ std::unique_ptr<BufferProducer> p = BufferProducer::Create(
+ kWidth, kHeight, kFormat, kUsage, sizeof(uint64_t));
+ ASSERT_TRUE(p.get() != nullptr);
+ EXPECT_TRUE(IsBufferGained(p->buffer_state()));
+
+ DvrNativeBufferMetadata metadata;
+ LocalHandle invalid_fence;
+
+ // Post the gained buffer before any consumer gets created.
+ EXPECT_EQ(0, p->PostAsync(&metadata, invalid_fence));
+ EXPECT_TRUE(IsBufferPosted(p->buffer_state()));
+
+  // A newly created consumer should be signalled automatically.
+ std::unique_ptr<BufferConsumer> c =
+ BufferConsumer::Import(p->CreateConsumer());
+ ASSERT_TRUE(c.get() != nullptr);
+ EXPECT_TRUE(IsBufferPosted(c->buffer_state()));
+ EXPECT_EQ(0, c->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_TRUE(IsBufferAcquired(c->buffer_state()));
+}
+
+TEST_F(LibBufferHubTest, TestCreateConsumerWhenBufferReleased) {
+ std::unique_ptr<BufferProducer> p = BufferProducer::Create(
+ kWidth, kHeight, kFormat, kUsage, sizeof(uint64_t));
+ ASSERT_TRUE(p.get() != nullptr);
+
+ std::unique_ptr<BufferConsumer> c1 =
+ BufferConsumer::Import(p->CreateConsumer());
+ ASSERT_TRUE(c1.get() != nullptr);
+
+ DvrNativeBufferMetadata metadata;
+ LocalHandle invalid_fence;
+
+  // Post, acquire, and release the buffer.
+ EXPECT_EQ(0, p->PostAsync(&metadata, invalid_fence));
+ EXPECT_LT(0, RETRY_EINTR(c1->Poll(10)));
+ EXPECT_EQ(0, c1->AcquireAsync(&metadata, &invalid_fence));
+ EXPECT_EQ(0, c1->ReleaseAsync(&metadata, invalid_fence));
+
+  // Creating another consumer immediately after the release should not revert
+  // the buffer to an un-released state. This is guaranteed by the IPC
+  // execution order in bufferhubd.
+ std::unique_ptr<BufferConsumer> c2 =
+ BufferConsumer::Import(p->CreateConsumer());
+ ASSERT_TRUE(c2.get() != nullptr);
+
+ EXPECT_LT(0, RETRY_EINTR(p->Poll(10)));
+ EXPECT_TRUE(IsBufferReleased(p->buffer_state()));
+ EXPECT_EQ(0, p->GainAsync(&metadata, &invalid_fence));
+ EXPECT_TRUE(IsBufferGained(p->buffer_state()));
+}
+
TEST_F(LibBufferHubTest, TestWithCustomMetadata) {
struct Metadata {
int64_t field1;
diff --git a/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h b/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h
index 1186f93..a356959 100644
--- a/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h
+++ b/libs/vr/libbufferhub/include/private/dvr/buffer_hub_client.h
@@ -94,6 +94,9 @@
int id() const { return id_; }
+  // Returns the buffer state.
+  uint64_t buffer_state() { return buffer_state_->load(); }
+
// A state mask which is unique to a buffer hub client among all its siblings
// sharing the same concrete graphic buffer.
uint64_t buffer_state_bit() const { return buffer_state_bit_; }
diff --git a/libs/vr/libbufferhubqueue/tests/Android.bp b/libs/vr/libbufferhubqueue/tests/Android.bp
index 8bd1ef1..c4ffb41 100644
--- a/libs/vr/libbufferhubqueue/tests/Android.bp
+++ b/libs/vr/libbufferhubqueue/tests/Android.bp
@@ -12,6 +12,7 @@
"libhardware",
"libui",
"libutils",
+ "libnativewindow",
]
static_libraries = [
@@ -20,6 +21,7 @@
"libchrome",
"libdvrcommon",
"libpdx_default_transport",
+ "libperformance",
]
cc_test {
@@ -51,3 +53,17 @@
name: "buffer_hub_queue_producer-test",
tags: ["optional"],
}
+
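+// Benchmark comparing Binder- and BufferHub-based buffer transport. It can be
+// built as a normal test module (e.g. "m buffer_transport_benchmark") and run
+// on device over adb; see buffer_transport_benchmark.cpp for usage.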
+cc_test {
+ srcs: ["buffer_transport_benchmark.cpp"],
+ static_libs: static_libraries,
+ shared_libs: shared_libraries,
+ header_libs: header_libraries,
+ cflags: [
+ "-DLOG_TAG=\"buffer_transport_benchmark\"",
+ "-DTRACE=0",
+ "-O2",
+ ],
+ name: "buffer_transport_benchmark",
+ tags: ["optional"],
+}
diff --git a/libs/vr/libbufferhubqueue/tests/buffer_transport_benchmark.cpp b/libs/vr/libbufferhubqueue/tests/buffer_transport_benchmark.cpp
new file mode 100644
index 0000000..5b580df
--- /dev/null
+++ b/libs/vr/libbufferhubqueue/tests/buffer_transport_benchmark.cpp
@@ -0,0 +1,619 @@
+#include <android/native_window.h>
+#include <base/logging.h>
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <dvr/dvr_api.h>
+#include <dvr/performance_client_api.h>
+#include <gtest/gtest.h>
+#include <gui/BufferItem.h>
+#include <gui/BufferItemConsumer.h>
+#include <gui/Surface.h>
+#include <private/dvr/buffer_hub_queue_producer.h>
+#include <utils/Trace.h>
+
+#include <functional>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+#include <poll.h>
+#include <sys/wait.h>
+#include <unistd.h> // for pipe
+
+// Use ALWAYS at the tag level. Control is performed manually during command
+// line processing.
+#ifdef ATRACE_TAG
+#undef ATRACE_TAG
+#endif
+#define ATRACE_TAG ATRACE_TAG_ALWAYS
+
+using namespace android;
+using namespace android::dvr;
+
+static const String16 kBinderService = String16("bufferTransport");
+static const uint32_t kBufferWidth = 100;
+static const uint32_t kBufferHeight = 1;
+static const uint32_t kBufferLayerCount = 1;
+static const uint32_t kBufferFormat = HAL_PIXEL_FORMAT_BLOB;
+static const uint64_t kBufferUsage =
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN;
+static const int kMaxAcquiredImages = 1;
+static const size_t kMaxQueueCounts = 128;
+
+static int gConcurrency = 1; // 1 writer at a time
+static int gIterations = 1000; // 1K times
+static int gSleepIntervalUs = 16 * 1000; // 16ms
+
+enum BufferTransportServiceCode {
+ CREATE_BUFFER_QUEUE = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+// A minimal cross-process helper class based on a bidirectional pipe pair. It
+// is used to signal that the Binder-based BufferTransportService has finished
+// initialization.
+class Pipe {
+ public:
+ static std::tuple<Pipe, Pipe> CreatePipePair() {
+ int a[2] = {-1, -1};
+ int b[2] = {-1, -1};
+
+ pipe(a);
+ pipe(b);
+
+ return std::make_tuple(Pipe(a[0], b[1]), Pipe(b[0], a[1]));
+ }
+
+ Pipe() = default;
+
+ Pipe(Pipe&& other) {
+ read_fd_ = other.read_fd_;
+ write_fd_ = other.write_fd_;
+ other.read_fd_ = 0;
+ other.write_fd_ = 0;
+ }
+
+ Pipe& operator=(Pipe&& other) {
+ Reset();
+ read_fd_ = other.read_fd_;
+ write_fd_ = other.write_fd_;
+ other.read_fd_ = 0;
+ other.write_fd_ = 0;
+ return *this;
+ }
+
+ ~Pipe() { Reset(); }
+
+ Pipe(const Pipe&) = delete;
+ Pipe& operator=(const Pipe&) = delete;
+ Pipe& operator=(const Pipe&&) = delete;
+
+ bool IsValid() { return read_fd_ > 0 && write_fd_ > 0; }
+
+ void Signal() {
+ bool val = true;
+ int error = write(write_fd_, &val, sizeof(val));
+ ASSERT_GE(error, 0);
+ };
+
+ void Wait() {
+ bool val = false;
+ int error = read(read_fd_, &val, sizeof(val));
+ ASSERT_GE(error, 0);
+ }
+
+ void Reset() {
+ if (read_fd_)
+ close(read_fd_);
+ if (write_fd_)
+ close(write_fd_);
+ }
+
+ private:
+ int read_fd_ = -1;
+ int write_fd_ = -1;
+ Pipe(int read_fd, int write_fd) : read_fd_{read_fd}, write_fd_{write_fd} {}
+};
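+
+// Illustrative use of the Pipe handshake (a sketch only, mirroring
+// CreateBinderServer() below; not executed as part of the benchmark):
+//
+//   std::tuple<Pipe, Pipe> pair = Pipe::CreatePipePair();
+//   if (fork() == 0) {
+//     Pipe service_pipe = std::move(std::get<1>(pair));
+//     service_pipe.Signal();  // Service: announce readiness.
+//     service_pipe.Wait();    // Service: block until told to exit.
+//     exit(EXIT_SUCCESS);
+//   }
+//   Pipe client_pipe = std::move(std::get<0>(pair));
+//   client_pipe.Wait();    // Client: wait for the service to come up.
+//   client_pipe.Signal();  // Client: later, request shutdown.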
+
+// A Binder service that mimics a compositor consuming buffers. It exposes one
+// Binder interface for creating a new Surface that a buffer producer can write
+// into, while the service itself performs a no-op consume by acquiring and
+// then immediately releasing each buffer.
+class BufferTransportService : public BBinder {
+ public:
+ BufferTransportService() = default;
+ ~BufferTransportService() = default;
+
+ virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags = 0) {
+ (void)flags;
+ (void)data;
+ switch (code) {
+ case CREATE_BUFFER_QUEUE: {
+ auto new_queue = std::make_shared<BufferQueueHolder>(this);
+ reply->writeStrongBinder(
+ IGraphicBufferProducer::asBinder(new_queue->producer_));
+ buffer_queues_.push_back(new_queue);
+ return NO_ERROR;
+ }
+ default:
+ return UNKNOWN_TRANSACTION;
+ };
+ }
+
+ private:
+ struct FrameListener : public ConsumerBase::FrameAvailableListener {
+ public:
+ FrameListener(BufferTransportService* service,
+ sp<BufferItemConsumer> buffer_item_consumer)
+ : service_(service),
+ buffer_item_consumer_(buffer_item_consumer) {}
+
+ void onFrameAvailable(const BufferItem& /*item*/) override {
+ std::unique_lock<std::mutex> autolock(service_->reader_mutex_);
+
+ BufferItem buffer;
+ status_t ret = 0;
+ {
+ ATRACE_NAME("AcquireBuffer");
+ ret = buffer_item_consumer_->acquireBuffer(&buffer, /*presentWhen=*/0,
+ /*waitForFence=*/false);
+ }
+
+ if (ret != NO_ERROR) {
+ LOG(ERROR) << "Failed to acquire next buffer.";
+ return;
+ }
+
+ {
+ ATRACE_NAME("ReleaseBuffer");
+ ret = buffer_item_consumer_->releaseBuffer(buffer);
+ }
+
+ if (ret != NO_ERROR) {
+ LOG(ERROR) << "Failed to release buffer.";
+ return;
+ }
+ }
+
+ private:
+ BufferTransportService* service_ = nullptr;
+ sp<BufferItemConsumer> buffer_item_consumer_;
+ };
+
+ struct BufferQueueHolder {
+ explicit BufferQueueHolder(BufferTransportService* service) {
+ BufferQueue::createBufferQueue(&producer_, &consumer_);
+
+ sp<BufferItemConsumer> buffer_item_consumer =
+ new BufferItemConsumer(consumer_, kBufferUsage, kMaxAcquiredImages,
+ /*controlledByApp=*/true);
+ buffer_item_consumer->setName(String8("BinderBufferTransport"));
+ frame_listener_ = new FrameListener(service, buffer_item_consumer);
+ buffer_item_consumer->setFrameAvailableListener(frame_listener_);
+ }
+
+ sp<IGraphicBufferProducer> producer_;
+ sp<IGraphicBufferConsumer> consumer_;
+ sp<FrameListener> frame_listener_;
+ };
+
+ std::mutex reader_mutex_;
+ std::vector<std::shared_ptr<BufferQueueHolder>> buffer_queues_;
+};
+
+// A virtual interface that abstracts the common BufferQueue operations, so
+// that the test suite can use the same test case to drive different types of
+// transport backends.
+class BufferTransport {
+ public:
+ virtual ~BufferTransport() {}
+
+ virtual int Start() = 0;
+ virtual sp<Surface> CreateSurface() = 0;
+};
+
+// Binder-based buffer transport backend.
+//
+// On Start() a new process is spawned to run a Binder server that actually
+// consumes the buffers.
+// On CreateSurface() a new Binder BufferQueue is created: the service holds
+// the concrete binder node of the IGraphicBufferProducer and sends the binder
+// proxy to the client. In other words, the producer-side operations run in the
+// client process while the consumer-side operations run within the
+// BufferTransportService's own process.
+class BinderBufferTransport : public BufferTransport {
+ public:
+ BinderBufferTransport() {}
+
+ ~BinderBufferTransport() {
+ if (client_pipe_.IsValid()) {
+ client_pipe_.Signal();
+ LOG(INFO) << "Client signals service to shut down.";
+ }
+ }
+
+ int Start() override {
+ // Fork a process to run a binder server. The parent process will return
+    // Fork a process to run a binder server. In the parent, this returns a
+    // pipe which is later used to signal the binder server to exit.
+
+    // Wait until the service is ready.
+    client_pipe_.Wait();
+    LOG(INFO) << "Service is ready for client.";
+ return 0;
+ }
+
+ sp<Surface> CreateSurface() override {
+ sp<IServiceManager> sm = defaultServiceManager();
+ service_ = sm->getService(kBinderService);
+ if (service_ == nullptr) {
+      LOG(ERROR) << "Failed to get the benchmark service.";
+ return nullptr;
+ }
+
+ Parcel data;
+ Parcel reply;
+ int error = service_->transact(CREATE_BUFFER_QUEUE, data, &reply);
+ if (error != NO_ERROR) {
+ LOG(ERROR) << "Failed to get buffer queue over binder.";
+ return nullptr;
+ }
+
+ sp<IBinder> binder;
+ error = reply.readNullableStrongBinder(&binder);
+ if (error != NO_ERROR) {
+ LOG(ERROR) << "Failed to get IGraphicBufferProducer over binder.";
+ return nullptr;
+ }
+
+ auto producer = interface_cast<IGraphicBufferProducer>(binder);
+ if (producer == nullptr) {
+ LOG(ERROR) << "Failed to get IGraphicBufferProducer over binder.";
+ return nullptr;
+ }
+
+ sp<Surface> surface = new Surface(producer, /*controlledByApp=*/true);
+
+ // Set buffer dimension.
+ ANativeWindow* window = static_cast<ANativeWindow*>(surface.get());
+ ANativeWindow_setBuffersGeometry(window, kBufferWidth, kBufferHeight,
+ kBufferFormat);
+
+ return surface;
+ }
+
+ private:
+ static Pipe CreateBinderServer() {
+ std::tuple<Pipe, Pipe> pipe_pair = Pipe::CreatePipePair();
+ pid_t pid = fork();
+ if (pid) {
+ // parent, i.e. the client side.
+ ProcessState::self()->startThreadPool();
+ LOG(INFO) << "Binder server pid: " << pid;
+ return std::move(std::get<0>(pipe_pair));
+ } else {
+ // child, i.e. the service side.
+ Pipe service_pipe = std::move(std::get<1>(pipe_pair));
+
+ ProcessState::self()->startThreadPool();
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<BufferTransportService> service = new BufferTransportService;
+ sm->addService(kBinderService, service, false);
+
+ LOG(INFO) << "Binder Service Running...";
+
+ service_pipe.Signal();
+ service_pipe.Wait();
+
+ LOG(INFO) << "Service Exiting...";
+ exit(EXIT_SUCCESS);
+
+ /* never get here */
+ return {};
+ }
+ }
+
+ sp<IBinder> service_;
+ Pipe client_pipe_;
+};
+
+// BufferHub/PDX-based buffer transport.
+//
+// On Start() a new thread is spawned to run an epoll polling loop which mimics
+// the behavior of a compositor. As with the Binder-based backend, the
+// buffer-available handler is a no-op: the buffer gets acquired and released
+// immediately.
+// On CreateSurface() a pair of dvr::ProducerQueue and dvr::ConsumerQueue is
+// created. The epoll thread holds onto the consumer queue and dequeues buffers
+// from it, while the producer queue is wrapped in a Surface and returned to
+// the test suite.
+class BufferHubTransport : public BufferTransport {
+ public:
+ virtual ~BufferHubTransport() {
+ stopped_.store(true);
+ if (reader_thread_.joinable()) {
+ reader_thread_.join();
+ }
+ }
+
+ int Start() override {
+ int ret = epoll_fd_.Create();
+ if (ret < 0) {
+      LOG(ERROR) << "Failed to create epoll fd: " << strerror(-ret);
+ return -1;
+ }
+
+ // Create the reader thread.
+ reader_thread_ = std::thread([this]() {
+ int ret = dvrSetSchedulerClass(0, "graphics");
+ if (ret < 0) {
+ LOG(ERROR) << "Failed to set thread priority";
+ return;
+ }
+
+ ret = dvrSetCpuPartition(0, "/system/performance");
+ if (ret < 0) {
+ LOG(ERROR) << "Failed to set thread cpu partition";
+ return;
+ }
+
+ stopped_.store(false);
+ LOG(INFO) << "Reader Thread Running...";
+
+ while (!stopped_.load()) {
+ std::array<epoll_event, kMaxQueueCounts> events;
+
+ // Don't sleep forever so that we will have a chance to wake up.
+ const int ret = epoll_fd_.Wait(events.data(), events.size(),
+ /*timeout=*/100);
+ if (ret < 0) {
+ LOG(ERROR) << "Error polling consumer queues.";
+ continue;
+ }
+ if (ret == 0) {
+ continue;
+ }
+
+ const int num_events = ret;
+ for (int i = 0; i < num_events; i++) {
+ uint32_t surface_index = events[i].data.u32;
+ // LOG(INFO) << "!!! handle queue events index: " << surface_index;
+ buffer_queues_[surface_index]->consumer_queue_->HandleQueueEvents();
+ }
+ }
+
+ LOG(INFO) << "Reader Thread Exiting...";
+ });
+
+ return 0;
+ }
+
+ sp<Surface> CreateSurface() override {
+ std::lock_guard<std::mutex> autolock(queue_mutex_);
+
+ auto new_queue = std::make_shared<BufferQueueHolder>();
+ if (new_queue->producer_ == nullptr) {
+ LOG(ERROR) << "Failed to create buffer producer.";
+ return nullptr;
+ }
+
+ sp<Surface> surface =
+ new Surface(new_queue->producer_, /*controlledByApp=*/true);
+
+ // Set buffer dimension.
+ ANativeWindow* window = static_cast<ANativeWindow*>(surface.get());
+ ANativeWindow_setBuffersGeometry(window, kBufferWidth, kBufferHeight,
+ kBufferFormat);
+
+ // Use the next position as buffer_queue index.
+ uint32_t index = buffer_queues_.size();
+ epoll_event event = {.events = EPOLLIN | EPOLLET, .data = {.u32 = index}};
+ const int ret = epoll_fd_.Control(
+ EPOLL_CTL_ADD, new_queue->consumer_queue_->queue_fd(), &event);
+ if (ret < 0) {
+ LOG(ERROR) << "Failed to track consumer queue: " << strerror(-ret)
+ << ", consumer queue fd: "
+ << new_queue->consumer_queue_->queue_fd();
+ return nullptr;
+ }
+
+ new_queue->queue_index_ = index;
+ buffer_queues_.push_back(new_queue);
+ return surface;
+ }
+
+ private:
+ struct BufferQueueHolder {
+ BufferQueueHolder() {
+ ProducerQueueConfigBuilder config_builder;
+ producer_queue_ =
+ ProducerQueue::Create(config_builder.SetDefaultWidth(kBufferWidth)
+ .SetDefaultHeight(kBufferHeight)
+ .SetDefaultFormat(kBufferFormat)
+ .SetMetadata<DvrNativeBufferMetadata>()
+ .Build(),
+ UsagePolicy{});
+ consumer_queue_ = producer_queue_->CreateConsumerQueue();
+ consumer_queue_->SetBufferAvailableCallback([this]() {
+ size_t index = 0;
+ pdx::LocalHandle fence;
+ DvrNativeBufferMetadata meta;
+ pdx::Status<std::shared_ptr<BufferConsumer>> status;
+
+ {
+ ATRACE_NAME("AcquireBuffer");
+ status = consumer_queue_->Dequeue(0, &index, &meta, &fence);
+ }
+ if (!status.ok()) {
+ LOG(ERROR) << "Failed to dequeue consumer buffer, error: "
+ << status.GetErrorMessage().c_str();
+ return;
+ }
+
+ auto buffer = status.take();
+
+ if (buffer) {
+ ATRACE_NAME("ReleaseBuffer");
+ buffer->ReleaseAsync();
+ }
+ });
+
+ producer_ = BufferHubQueueProducer::Create(producer_queue_);
+ }
+
+ int count_ = 0;
+ int queue_index_;
+ std::shared_ptr<ProducerQueue> producer_queue_;
+ std::shared_ptr<ConsumerQueue> consumer_queue_;
+ sp<IGraphicBufferProducer> producer_;
+ };
+
+ std::atomic<bool> stopped_;
+ std::thread reader_thread_;
+
+ // Mutex to guard epoll_fd_ and buffer_queues_.
+ std::mutex queue_mutex_;
+ EpollFileDescriptor epoll_fd_;
+ std::vector<std::shared_ptr<BufferQueueHolder>> buffer_queues_;
+};
+
+enum TransportType {
+ kBinderBufferTransport,
+ kBufferHubTransport,
+};
+
+// Main test suite, which supports two transport backends: 1) BinderBufferQueue,
+// 2) BufferHubQueue. The test case drives the producer end of both backends by
+// queuing buffers into the buffer queue through the ANativeWindow API.
+class BufferTransportBenchmark
+ : public ::testing::TestWithParam<TransportType> {
+ public:
+ void SetUp() override {
+ switch (GetParam()) {
+ case kBinderBufferTransport:
+ transport_.reset(new BinderBufferTransport);
+ break;
+ case kBufferHubTransport:
+ transport_.reset(new BufferHubTransport);
+ break;
+ default:
+ FAIL() << "Unknown test case.";
+ break;
+ }
+ }
+
+ protected:
+ void ProduceBuffers(sp<Surface> surface, int iterations, int sleep_usec) {
+ ANativeWindow* window = static_cast<ANativeWindow*>(surface.get());
+ ANativeWindow_Buffer buffer;
+ int32_t error = 0;
+
+ for (int i = 0; i < iterations; i++) {
+ usleep(sleep_usec);
+
+ {
+ ATRACE_NAME("GainBuffer");
+ error = ANativeWindow_lock(window, &buffer,
+ /*inOutDirtyBounds=*/nullptr);
+ }
+ ASSERT_EQ(error, 0);
+
+ {
+ ATRACE_NAME("PostBuffer");
+ error = ANativeWindow_unlockAndPost(window);
+ }
+ ASSERT_EQ(error, 0);
+ }
+ }
+
+ std::unique_ptr<BufferTransport> transport_;
+};
+
+TEST_P(BufferTransportBenchmark, ContinuousLoad) {
+ ASSERT_NE(transport_, nullptr);
+ const int ret = transport_->Start();
+ ASSERT_EQ(ret, 0);
+
+ LOG(INFO) << "Start Running.";
+
+ std::vector<std::thread> writer_threads;
+ for (int i = 0; i < gConcurrency; i++) {
+ std::thread writer_thread = std::thread([this]() {
+ sp<Surface> surface = transport_->CreateSurface();
+ ASSERT_NE(surface, nullptr);
+
+ ASSERT_NO_FATAL_FAILURE(
+ ProduceBuffers(surface, gIterations, gSleepIntervalUs));
+
+ usleep(1000 * 100);
+ });
+
+ writer_threads.push_back(std::move(writer_thread));
+ }
+
+ for (auto& writer_thread : writer_threads) {
+ writer_thread.join();
+ }
+
+ LOG(INFO) << "All done.";
+};
+
+INSTANTIATE_TEST_CASE_P(BufferTransportBenchmarkInstance,
+ BufferTransportBenchmark,
+ ::testing::ValuesIn({kBinderBufferTransport,
+ kBufferHubTransport}));
+
+// To run binder-based benchmark, use:
+// adb shell buffer_transport_benchmark \
+// --gtest_filter="BufferTransportBenchmark.ContinuousLoad/0"
+//
+// To run bufferhub-based benchmark, use:
+// adb shell buffer_transport_benchmark \
+// --gtest_filter="BufferTransportBenchmark.ContinuousLoad/1"
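+//
+// The extra flags parsed in main() below can be appended as well, for example
+// (values shown are only illustrative):
+// adb shell buffer_transport_benchmark -c 4 -i 5000 -s 8000 --trace \
+//     --gtest_filter="BufferTransportBenchmark.ContinuousLoad/1"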
+int main(int argc, char** argv) {
+ bool tracing_enabled = false;
+
+  // Parse arguments in addition to the "--gtest_filter" parameters.
+ for (int i = 1; i < argc; i++) {
+ if (std::string(argv[i]) == "--help") {
+      std::cout << "Usage: buffer_transport_benchmark [OPTIONS]" << std::endl;
+ std::cout << "\t-c N: Specify number of concurrent writer threads, "
+ "(default: 1, max: 128)."
+ << std::endl;
+ std::cout << "\t-i N: Specify number of iterations, (default: 1000)."
+ << std::endl;
+ std::cout << "\t-s N: Specify sleep interval in usec, (default: 16000)."
+ << std::endl;
+ std::cout << "\t--trace: Enable systrace logging."
+ << std::endl;
+ return 0;
+ }
+ if (std::string(argv[i]) == "-c") {
+ gConcurrency = atoi(argv[i + 1]);
+ i++;
+ continue;
+ }
+ if (std::string(argv[i]) == "-s") {
+ gSleepIntervalUs = atoi(argv[i + 1]);
+ i++;
+ continue;
+ }
+ if (std::string(argv[i]) == "-i") {
+ gIterations = atoi(argv[i + 1]);
+ i++;
+ continue;
+ }
+ if (std::string(argv[i]) == "--trace") {
+ tracing_enabled = true;
+ continue;
+ }
+ }
+
+ // Setup ATRACE/systrace based on command line.
+ atrace_setup();
+ atrace_set_tracing_enabled(tracing_enabled);
+
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/services/surfaceflinger/DisplayHardware/ComposerHal.cpp b/services/surfaceflinger/DisplayHardware/ComposerHal.cpp
index 7d6d988..c707e3c 100644
--- a/services/surfaceflinger/DisplayHardware/ComposerHal.cpp
+++ b/services/surfaceflinger/DisplayHardware/ComposerHal.cpp
@@ -223,6 +223,10 @@
mWriter.reset();
}
+Error Composer::executeCommands() {
+ return execute();
+}
+
uint32_t Composer::getMaxVirtualDisplayCount()
{
auto ret = mClient->getMaxVirtualDisplayCount();
@@ -750,6 +754,11 @@
}
}
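+    // Nothing to execute; skip the IPC round trip entirely.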
+ if (commandLength == 0) {
+ mWriter.reset();
+ return Error::NONE;
+ }
+
Error error = kDefaultError;
auto ret = mClient->executeCommands(commandLength, commandHandles,
[&](const auto& tmpError, const auto& tmpOutChanged,
diff --git a/services/surfaceflinger/DisplayHardware/ComposerHal.h b/services/surfaceflinger/DisplayHardware/ComposerHal.h
index 31a3c1d..104ca60 100644
--- a/services/surfaceflinger/DisplayHardware/ComposerHal.h
+++ b/services/surfaceflinger/DisplayHardware/ComposerHal.h
@@ -152,6 +152,9 @@
// skip a frame but have already queued some commands.
void resetCommands();
+ // Explicitly flush all pending commands in the command buffer.
+ Error executeCommands();
+
uint32_t getMaxVirtualDisplayCount();
bool isUsingVrComposer() const { return mIsUsingVrComposer; }
Error createVirtualDisplay(uint32_t width, uint32_t height,
diff --git a/services/surfaceflinger/DisplayHardware/HWC2.cpp b/services/surfaceflinger/DisplayHardware/HWC2.cpp
index 78c0c85..ab4a4b2 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWC2.cpp
@@ -221,6 +221,11 @@
}
}
+Error Device::flushCommands()
+{
+ return static_cast<Error>(mComposer->executeCommands());
+}
+
// Display methods
Display::Display(android::Hwc2::Composer& composer,
@@ -632,11 +637,6 @@
return error;
}
-void Display::discardCommands()
-{
- mComposer.resetCommands();
-}
-
// For use by Device
void Display::setConnected(bool connected) {
diff --git a/services/surfaceflinger/DisplayHardware/HWC2.h b/services/surfaceflinger/DisplayHardware/HWC2.h
index fbe4c7e..a15c6d9 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2.h
+++ b/services/surfaceflinger/DisplayHardware/HWC2.h
@@ -106,6 +106,11 @@
android::Hwc2::Composer* getComposer() { return mComposer.get(); }
+ // We buffer most state changes and flush them implicitly with
+ // Display::validate, Display::present, and Display::presentOrValidate.
+ // This method provides an explicit way to flush state changes to HWC.
+ Error flushCommands();
+
private:
// Initialization methods
@@ -244,12 +249,6 @@
uint32_t* outNumRequests,
android::sp<android::Fence>* outPresentFence, uint32_t* state);
- // Most methods in this class write a command to a command buffer. The
- // command buffer is implicitly submitted in validate, present, and
- // presentOrValidate. This method provides a way to discard the commands,
- // which can be used to discard stale commands.
- void discardCommands();
-
// Other Display methods
hwc2_display_t getId() const { return mId; }
diff --git a/services/surfaceflinger/DisplayHardware/HWComposer.cpp b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
index b096a3a..5328a22 100644
--- a/services/surfaceflinger/DisplayHardware/HWComposer.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
@@ -605,8 +605,11 @@
auto& hwcDisplay = displayData.hwcDisplay;
if (displayData.validateWasSkipped) {
- hwcDisplay->discardCommands();
- auto error = displayData.presentError;
+        // Explicitly flush all pending commands.
+ auto error = mHwcDevice->flushCommands();
+ if (displayData.presentError != HWC2::Error::None) {
+ error = displayData.presentError;
+ }
if (error != HWC2::Error::None) {
ALOGE("skipValidate: failed for display %d: %s (%d)",
displayId, to_string(error).c_str(), static_cast<int32_t>(error));
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 4a98252..54c040d 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -684,6 +684,12 @@
property_get("persist.sys.sf.color_saturation", value, "1.0");
mSaturation = atof(value);
ALOGV("Saturation is set to %.2f", mSaturation);
+
+ property_get("persist.sys.sf.native_mode", value, "0");
+ mForceNativeColorMode = atoi(value) == 1;
+ if (mForceNativeColorMode) {
+ ALOGV("Forcing native color mode");
+ }
}
void SurfaceFlinger::startBootAnim() {
@@ -1290,12 +1296,13 @@
break;
}
}
+ bool useWideColorMode = hasWideColorModes && hasWideColorDisplay && !mForceNativeColorMode;
sp<DisplayDevice> hw = new DisplayDevice(this, DisplayDevice::DISPLAY_PRIMARY, type, isSecure,
token, fbs, producer, mRenderEngine->getEGLConfig(),
- hasWideColorModes && hasWideColorDisplay);
+ useWideColorMode);
mDisplays.add(token, hw);
android_color_mode defaultColorMode = HAL_COLOR_MODE_NATIVE;
- if (hasWideColorModes && hasWideColorDisplay) {
+ if (useWideColorMode) {
defaultColorMode = HAL_COLOR_MODE_SRGB;
}
setActiveColorModeInternal(hw, defaultColorMode);
@@ -1812,6 +1819,10 @@
// pickColorMode translates a given dataspace into the best available color mode.
// Currently only support sRGB and Display-P3.
android_color_mode SurfaceFlinger::pickColorMode(android_dataspace dataSpace) const {
+ if (mForceNativeColorMode) {
+ return HAL_COLOR_MODE_NATIVE;
+ }
+
switch (dataSpace) {
// treat Unknown as regular SRGB buffer, since that's what the rest of the
// system expects.
@@ -2646,8 +2657,10 @@
ALOGV("hasClientComposition");
#ifdef USE_HWC2
- mRenderEngine->setWideColor(displayDevice->getWideColorSupport());
- mRenderEngine->setColorMode(displayDevice->getActiveColorMode());
+ mRenderEngine->setWideColor(
+ displayDevice->getWideColorSupport() && !mForceNativeColorMode);
+ mRenderEngine->setColorMode(mForceNativeColorMode ?
+ HAL_COLOR_MODE_NATIVE : displayDevice->getActiveColorMode());
#endif
if (!displayDevice->makeCurrent(mEGLDisplay, mEGLContext)) {
ALOGW("DisplayDevice::makeCurrent failed. Aborting surface composition for display %s",
@@ -3713,6 +3726,7 @@
void SurfaceFlinger::dumpWideColorInfo(String8& result) const {
result.appendFormat("hasWideColorDisplay: %d\n", hasWideColorDisplay);
+ result.appendFormat("forceNativeColorMode: %d\n", mForceNativeColorMode);
// TODO: print out if wide-color mode is active or not
@@ -4177,6 +4191,17 @@
repaintEverything();
return NO_ERROR;
}
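+            // These debug codes can be exercised from a root shell, e.g.
+            // (illustrative): adb shell service call SurfaceFlinger 1023 i32 1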
+ case 1023: { // Set native mode
+ mForceNativeColorMode = data.readInt32() == 1;
+
+ invalidateHwcGeometry();
+ repaintEverything();
+ return NO_ERROR;
+ }
+ case 1024: { // Is wide color gamut rendering/color management supported?
+ reply->writeBool(hasWideColorDisplay);
+ return NO_ERROR;
+ }
}
}
return err;
@@ -4334,8 +4359,9 @@
WindowDisconnector disconnector(window, NATIVE_WINDOW_API_EGL);
ANativeWindowBuffer* buffer = nullptr;
- result = getWindowBuffer(window, reqWidth, reqHeight, hasWideColorDisplay,
- getRenderEngine().usesWideColor(), &buffer);
+ result = getWindowBuffer(window, reqWidth, reqHeight,
+ hasWideColorDisplay && !mForceNativeColorMode,
+ getRenderEngine().usesWideColor(), &buffer);
if (result != NO_ERROR) {
return result;
}
@@ -4437,8 +4463,8 @@
}
#ifdef USE_HWC2
- engine.setWideColor(hw->getWideColorSupport());
- engine.setColorMode(hw->getActiveColorMode());
+ engine.setWideColor(hw->getWideColorSupport() && !mForceNativeColorMode);
+ engine.setColorMode(mForceNativeColorMode ? HAL_COLOR_MODE_NATIVE : hw->getActiveColorMode());
#endif
// make sure to clear all GL error flags
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 9030124..bd98c8f 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -835,6 +835,7 @@
#endif
float mSaturation = 1.0f;
+ bool mForceNativeColorMode = false;
};
}; // namespace android
diff --git a/vulkan/nulldrv/null_driver.cpp b/vulkan/nulldrv/null_driver.cpp
index c701d48..e2d5c83 100644
--- a/vulkan/nulldrv/null_driver.cpp
+++ b/vulkan/nulldrv/null_driver.cpp
@@ -344,9 +344,14 @@
VkResult EnumeratePhysicalDevices(VkInstance instance,
uint32_t* physical_device_count,
VkPhysicalDevice* physical_devices) {
- if (physical_devices && *physical_device_count >= 1)
+ if (!physical_devices)
+ *physical_device_count = 1;
+ else if (*physical_device_count == 0)
+ return VK_INCOMPLETE;
+ else {
physical_devices[0] = &instance->physical_device;
- *physical_device_count = 1;
+ *physical_device_count = 1;
+ }
return VK_SUCCESS;
}