media.c2 aidl: Use pipe() based fd for waitable object
Use a single pipe() based fd instead of a pair of eventfd() based fds
for the waitable object.
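For illustration only (not part of this change): a consumer of the fd
returned by getWaitableFd() could wait on it roughly as sketched below;
the helper name and signature are hypothetical. POLLIN means at least one
buffer can be dequeued/allocated, and POLLHUP once the pipe is drained
means the writing end was closed, i.e. the end of the life cycle.

    #include <poll.h>

    // Hypothetical helper: wait until a buffer is allocatable or the
    // tracker reached the end of its life cycle.
    bool waitForAllocatable(int pipeFd, int timeoutMs) {
        struct pollfd pfd = { pipeFd, POLLIN, 0 };
        int ret = ::poll(&pfd, 1, timeoutMs);
        if (ret <= 0) {
            return false;  // timeout (0) or error such as EINTR (-1)
        }
        if (pfd.revents & POLLIN) {
            return true;   // a byte is readable: a buffer is ready to allocate
        }
        // POLLHUP without POLLIN: the writing end was closed and the pipe
        // is drained, so no more allocation is possible.
        return false;
    }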
Bug: 254050314
Test: m
Change-Id: I4cfcba4b9a2ce52133eacb89e771f31a06d22d4b
diff --git a/media/codec2/hal/client/GraphicBufferAllocator.cpp b/media/codec2/hal/client/GraphicBufferAllocator.cpp
index bbef1b5..7045537 100644
--- a/media/codec2/hal/client/GraphicBufferAllocator.cpp
+++ b/media/codec2/hal/client/GraphicBufferAllocator.cpp
@@ -62,14 +62,12 @@
return ::ndk::ScopedAStatus::ok();
}
-::ndk::ScopedAStatus GraphicBufferAllocator::getWaitableFds(
- IGraphicBufferAllocator::WaitableFds* _aidl_return) {
- int allocFd;
- int statusFd;
- c2_status_t ret = mGraphicsTracker->getWaitableFds(&allocFd, &statusFd);
+::ndk::ScopedAStatus GraphicBufferAllocator::getWaitableFd(
+ ::ndk::ScopedFileDescriptor* _aidl_return) {
+ int pipeFd;
+ c2_status_t ret = mGraphicsTracker->getWaitableFd(&pipeFd);
if (ret == C2_OK) {
- _aidl_return->allocEvent.set(allocFd);
- _aidl_return->statusEvent.set(statusFd);
+ _aidl_return->set(pipeFd);
return ::ndk::ScopedAStatus::ok();
}
return ::ndk::ScopedAStatus::fromServiceSpecificError(ret);
diff --git a/media/codec2/hal/client/GraphicsTracker.cpp b/media/codec2/hal/client/GraphicsTracker.cpp
index 5a2cb86..2424f7b 100644
--- a/media/codec2/hal/client/GraphicsTracker.cpp
+++ b/media/codec2/hal/client/GraphicsTracker.cpp
@@ -13,7 +13,8 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include <sys/eventfd.h>
+#include <fcntl.h>
+#include <unistd.h>
#include <media/stagefright/foundation/ADebug.h>
#include <private/android/AHardwareBufferHelpers.h>
@@ -25,6 +26,9 @@
namespace {
+static constexpr int kMaxDequeueMin = 1;
+static constexpr int kMaxDequeueMax = ::android::BufferQueueDefs::NUM_BUFFER_SLOTS - 2;
+
c2_status_t retrieveAHardwareBufferId(const C2ConstGraphicBlock &blk, uint64_t *bid) {
// TODO
(void)blk;
@@ -139,21 +143,26 @@
mDequeueable{maxDequeueCount},
mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
mInConfig{false}, mStopped{false} {
- if (maxDequeueCount <= 0) {
- mMaxDequeue = kDefaultMaxDequeue;
- mMaxDequeueRequested = kDefaultMaxDequeue;
- mMaxDequeueCommitted = kDefaultMaxDequeue;
- mDequeueable = kDefaultMaxDequeue;
+ if (maxDequeueCount < kMaxDequeueMin) {
+ mMaxDequeue = kMaxDequeueMin;
+ mMaxDequeueRequested = kMaxDequeueMin;
+ mMaxDequeueCommitted = kMaxDequeueMin;
+ mDequeueable = kMaxDequeueMin;
+ } else if (maxDequeueCount > kMaxDequeueMax) {
+ mMaxDequeue = kMaxDequeueMax;
+ mMaxDequeueRequested = kMaxDequeueMax;
+ mMaxDequeueCommitted = kMaxDequeueMax;
+ mDequeueable = kMaxDequeueMax;
}
- int allocEventFd = ::eventfd(mDequeueable, EFD_CLOEXEC | EFD_NONBLOCK | EFD_SEMAPHORE);
- int statusEventFd = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
- mAllocEventFd.reset(allocEventFd);
- mStopEventFd.reset(statusEventFd);
+ int pipefd[2] = {-1, -1};
+ int ret = ::pipe2(pipefd, O_CLOEXEC | O_NONBLOCK);
+ mReadPipeFd.reset(pipefd[0]);
+ mWritePipeFd.reset(pipefd[1]);
mEventQueueThread = std::thread([this](){processEvent();});
- CHECK(allocEventFd >= 0 && statusEventFd >= 0);
+ CHECK(ret >= 0);
CHECK(mEventQueueThread.joinable());
}
@@ -161,7 +170,6 @@
stop();
if (mEventQueueThread.joinable()) {
std::unique_lock<std::mutex> l(mEventLock);
- mStopEventThread = true;
l.unlock();
mEventCv.notify_one();
mEventQueueThread.join();
@@ -231,6 +239,11 @@
c2_status_t GraphicsTracker::configureMaxDequeueCount(int maxDequeueCount) {
std::shared_ptr<BufferCache> cache;
+ if (maxDequeueCount < kMaxDequeueMin || maxDequeueCount > kMaxDequeueMax) {
+ ALOGE("max dequeue count %d is not valid", maxDequeueCount);
+ return C2_BAD_VALUE;
+ }
+
// max dequeue count which can be committed to IGBP.
// (Sometimes maxDequeueCount cannot be committed if the number of
// dequeued buffer count is bigger.)
@@ -347,89 +360,76 @@
void GraphicsTracker::stop() {
bool expected = false;
+ std::unique_lock<std::mutex> l(mEventLock);
bool updated = mStopped.compare_exchange_strong(expected, true);
if (updated) {
- uint64_t val = 1ULL;
- int ret = ::write(mStopEventFd.get(), &val, 8);
- if (ret < 0) {
- // EINTR maybe
- std::unique_lock<std::mutex> l(mEventLock);
- mStopRequest = true;
- l.unlock();
- mEventCv.notify_one();
- ALOGW("stop() status update pending");
- }
+ int writeFd = mWritePipeFd.release();
+ ::close(writeFd);
}
}
void GraphicsTracker::writeIncDequeueable(int inc) {
- uint64_t val = inc;
- int ret = ::write(mAllocEventFd.get(), &val, 8);
- if (ret < 0) {
- // EINTR due to signal handling maybe, this should be rare
+ CHECK(inc > 0 && inc < kMaxDequeueMax);
+ thread_local char buf[kMaxDequeueMax];
+ int diff = 0;
+ {
std::unique_lock<std::mutex> l(mEventLock);
- mIncDequeueable += inc;
- l.unlock();
- mEventCv.notify_one();
- ALOGW("updating dequeueable to eventfd pending");
+ if (mStopped) {
+ return;
+ }
+ CHECK(mWritePipeFd.get() >= 0);
+ int ret = ::write(mWritePipeFd.get(), buf, inc);
+ if (ret == inc) {
+ return;
+ }
+ diff = ret < 0 ? inc : inc - ret;
+
+ // Partial write or EINTR. This should not happen in practice.
+ mIncDequeueable += diff;
+ if (mIncDequeueable > 0) {
+ l.unlock();
+ mEventCv.notify_one();
+ ALOGW("updating dequeueable to pipefd pending");
+ }
}
}
void GraphicsTracker::processEvent() {
- // This is for write() failure of eventfds.
- // write() failure other than EINTR should not happen.
- int64_t acc = 0;
- bool stopRequest = false;
- bool stopCommitted = false;
-
+ // This handles partial or failed writes to the writing end,
+ // which should not happen in practice.
+ thread_local char buf[kMaxDequeueMax];
while (true) {
- {
- std::unique_lock<std::mutex> l(mEventLock);
- acc += mIncDequeueable;
- mIncDequeueable = 0;
- stopRequest |= mStopRequest;
- mStopRequest = false;
- if (acc == 0 && stopRequest == stopCommitted) {
- if (mStopEventThread) {
- break;
+ std::unique_lock<std::mutex> l(mEventLock);
+ if (mStopped) {
+ break;
+ }
+ if (mIncDequeueable > 0) {
+ int inc = mIncDequeueable > kMaxDequeueMax ? kMaxDequeueMax : mIncDequeueable;
+ int ret = ::write(mWritePipeFd.get(), buf, inc);
+ int written = ret <= 0 ? 0 : ret;
+ mIncDequeueable -= written;
+ if (mIncDequeueable > 0) {
+ l.unlock();
+ if (ret < 0) {
+ ALOGE("write to writing end failed %d", errno);
+ } else {
+ ALOGW("partial write %d(%d)", inc, written);
}
- mEventCv.wait(l);
continue;
}
}
-
- if (acc > 0) {
- int ret = ::write(mAllocEventFd.get(), &acc, 8);
- if (ret > 0) {
- acc = 0;
- }
- }
- if (stopRequest && !stopCommitted) {
- uint64_t val = 1ULL;
- int ret = ::write(mStopEventFd.get(), &val, 8);
- if (ret > 0) {
- stopCommitted = true;
- }
- }
- if (mStopEventThread) {
- break;
- }
+ mEventCv.wait(l);
}
}
-c2_status_t GraphicsTracker::getWaitableFds(int *allocFd, int *statusFd) {
- *allocFd = ::dup(mAllocEventFd.get());
- *statusFd = ::dup(mStopEventFd.get());
-
- if (*allocFd < 0 || *statusFd < 0) {
- if (*allocFd >= 0) {
- ::close(*allocFd);
- *allocFd = -1;
+c2_status_t GraphicsTracker::getWaitableFd(int *pipeFd) {
+ *pipeFd = ::dup(mReadPipeFd.get());
+ if (*pipeFd < 0) {
+ if (mReadPipeFd.get() < 0) {
+ return C2_BAD_STATE;
}
- if (*statusFd >= 0) {
- ::close(*statusFd);
- *statusFd = -1;
- }
+ // dup error
+ ALOGE("dup() for the reading end failed %d", errno);
return C2_NO_MEMORY;
}
return C2_OK;
@@ -438,8 +438,8 @@
c2_status_t GraphicsTracker::requestAllocate(std::shared_ptr<BufferCache> *cache) {
std::lock_guard<std::mutex> l(mLock);
if (mDequeueable > 0) {
- uint64_t val;
- int ret = ::read(mAllocEventFd.get(), &val, 8);
+ char buf[1];
+ int ret = ::read(mReadPipeFd.get(), buf, 1);
if (ret < 0) {
if (errno == EINTR) {
// Do we really need to care for cancel due to signal handling?
@@ -452,6 +452,10 @@
}
CHECK(errno != 0);
}
+ if (ret == 0) {
+ // writing end is closed
+ return C2_BAD_STATE;
+ }
mDequeueable--;
*cache = mBufferCache;
return C2_OK;
diff --git a/media/codec2/hal/client/include/codec2/aidl/GraphicBufferAllocator.h b/media/codec2/hal/client/include/codec2/aidl/GraphicBufferAllocator.h
index f9c8aca..902c53f 100644
--- a/media/codec2/hal/client/include/codec2/aidl/GraphicBufferAllocator.h
+++ b/media/codec2/hal/client/include/codec2/aidl/GraphicBufferAllocator.h
@@ -38,8 +38,8 @@
::ndk::ScopedAStatus deallocate(int64_t in_id, bool* _aidl_return) override;
- ::ndk::ScopedAStatus getWaitableFds(
- IGraphicBufferAllocator::WaitableFds* _aidl_return) override;
+ ::ndk::ScopedAStatus getWaitableFd(
+ ::ndk::ScopedFileDescriptor* _aidl_return) override;
/**
* Configuring Surface/BufferQueue for the interface.
diff --git a/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h b/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h
index 681b7e8..1fd9049 100644
--- a/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h
+++ b/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h
@@ -142,15 +142,15 @@
void onReleased(uint32_t generation);
/**
- * Get waitable fds for events.(allocate is ready, end of life cycle)
+ * Get a waitable fd for events. (allocate is ready, end of life cycle)
*
- * @param[out] allocFd eventFd which signals being ready to allocate
- * @param[out] statusFd eventFd which signals end of life cycle.
- * When signaled no more allocate is possible.
+ * @param[out] pipeFd a file descriptor created by pipe2(). It signals that
+ * a buffer is ready to allocate; POLLHUP on it signals the
+ * end of the life cycle.
* @return C2_OK
* C2_NO_MEMORY Max # of fd reached.(not really a memory issue)
*/
- c2_status_t getWaitableFds(int *allocFd, int *statusFd);
+ c2_status_t getWaitableFd(int *pipeFd);
/**
* Ends to use the class. after the call, allocate will fail.
@@ -158,8 +158,6 @@
void stop();
private:
- static constexpr int kDefaultMaxDequeue = 2;
-
struct BufferCache;
struct BufferItem {
@@ -246,21 +244,30 @@
std::mutex mLock; // locks for data synchronization
std::mutex mConfigLock; // locks for configuration change.
+ // NOTE: pipe2() creates a pair of file descriptors used to notify
+ // allocatable events and irrecoverable error events.
+ //
+ // A byte is written to the writing end whenever a buffer becomes ready to
+ // dequeue/allocate. A byte is read from the reading end whenever
+ // an allocate/dequeue event happens.
+ //
+ // The writing end is closed when the end-of-lifecycle event occurs.
+ //
+ // The reading end is shared with remote processes. Remote processes
+ // use ::poll() to check whether a buffer is ready to allocate/dequeue.
+ // ::poll() also reports the end-of-lifecycle event to remote processes
+ // by returning POLLHUP for the reading end.
+ ::android::base::unique_fd mReadPipeFd; // The reading end file descriptor
+ ::android::base::unique_fd mWritePipeFd; // The writing end file descriptor
+
std::atomic<bool> mStopped;
-
- ::android::base::unique_fd mAllocEventFd; // eventfd in semaphore mode which
- // mirrors mDqueueable.
- ::android::base::unique_fd mStopEventFd; // eventfd which indicates the life
- // cycle of the class being stopped.
-
std::thread mEventQueueThread; // Thread to handle interrupted
- // writes to eventfd{s}.
+ // writes to the writing end.
std::mutex mEventLock;
std::condition_variable mEventCv;
bool mStopEventThread;
- int mIncDequeueable; // pending # of write to increase dequeueable eventfd
+ int mIncDequeueable; // pending # of bytes to write to the writing end
- bool mStopRequest; // pending write to statusfd
private:
explicit GraphicsTracker(int maxDequeueCount);