Merge changes from topic "c2-aidl-fence" into main am: cec0166e49

Original change: https://android-review.googlesource.com/c/platform/frameworks/av/+/2752997

Change-Id: I013857db5f9500bebe9867ac5ecd246f0458dbeb
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
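
This topic replaces the two eventfds used by GraphicsTracker (an allocation counter and a stop signal) with a single pipe2() pipe: one byte is written per buffer that becomes dequeueable, the reader consumes one byte per allocation, and closing the writing end signals the end of the lifecycle (readers observe EOF/POLLHUP). Below is a minimal standalone sketch of that contract using plain POSIX calls; it is illustrative only and not code from this change.

    #include <fcntl.h>
    #include <poll.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
        // Create the pipe the same way GraphicsTracker does.
        int pipefd[2] = {-1, -1};
        if (::pipe2(pipefd, O_CLOEXEC | O_NONBLOCK) < 0) {
            return 1;
        }
        // Producer side: one byte per buffer that becomes dequeueable.
        char byte = 0;
        (void)::write(pipefd[1], &byte, 1);
        (void)::write(pipefd[1], &byte, 1);

        // Consumer side: poll the reading end; each 1-byte read consumes one credit.
        struct pollfd pfd = {pipefd[0], POLLIN, 0};
        while (::poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLIN)) {
            char c;
            if (::read(pipefd[0], &c, 1) == 1) {
                std::printf("one buffer is ready to allocate\n");
            }
            pfd.revents = 0;
        }

        // End of lifecycle: closing the writing end makes read() return 0 and
        // poll() report POLLHUP on the reading end.
        ::close(pipefd[1]);
        char c;
        if (::read(pipefd[0], &c, 1) == 0) {
            std::printf("writing end closed: no more allocations\n");
        }
        ::close(pipefd[0]);
        return 0;
    }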
diff --git a/media/codec2/hal/client/GraphicBufferAllocator.cpp b/media/codec2/hal/client/GraphicBufferAllocator.cpp
index bbef1b5..7045537 100644
--- a/media/codec2/hal/client/GraphicBufferAllocator.cpp
+++ b/media/codec2/hal/client/GraphicBufferAllocator.cpp
@@ -62,14 +62,12 @@
     return ::ndk::ScopedAStatus::ok();
 }
 
-::ndk::ScopedAStatus GraphicBufferAllocator::getWaitableFds(
-        IGraphicBufferAllocator::WaitableFds* _aidl_return) {
-    int allocFd;
-    int statusFd;
-    c2_status_t ret = mGraphicsTracker->getWaitableFds(&allocFd, &statusFd);
+::ndk::ScopedAStatus GraphicBufferAllocator::getWaitableFd(
+        ::ndk::ScopedFileDescriptor* _aidl_return) {
+    int pipeFd;
+    c2_status_t ret = mGraphicsTracker->getWaitableFd(&pipeFd);
     if (ret == C2_OK) {
-        _aidl_return->allocEvent.set(allocFd);
-        _aidl_return->statusEvent.set(statusFd);
+        _aidl_return->set(pipeFd);
         return ::ndk::ScopedAStatus::ok();
     }
     return ::ndk::ScopedAStatus::fromServiceSpecificError(ret);
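
On the caller's side of the AIDL interface, getWaitableFds() and the WaitableFds parcelable are replaced by getWaitableFd() returning a single ScopedFileDescriptor. The following is a hedged client-side sketch; the include path and the way the proxy is obtained are assumptions, and only the getWaitableFd() signature comes from this change.

    #include <memory>

    #include <aidl/android/hardware/media/c2/IGraphicBufferAllocator.h>
    #include <android/binder_auto_utils.h>

    using ::aidl::android::hardware::media::c2::IGraphicBufferAllocator;

    // Returns a dup of the pipe's reading end, or -1 on failure.
    int fetchWaitableFd(const std::shared_ptr<IGraphicBufferAllocator> &allocator) {
        ::ndk::ScopedFileDescriptor fd;
        ::ndk::ScopedAStatus status = allocator->getWaitableFd(&fd);
        if (!status.isOk()) {
            return -1;
        }
        // The fd can be handed to ::poll()/::ppoll() or wrapped in a C2Fence.
        return fd.release();
    }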
diff --git a/media/codec2/hal/client/GraphicsTracker.cpp b/media/codec2/hal/client/GraphicsTracker.cpp
index 5a2cb86..2424f7b 100644
--- a/media/codec2/hal/client/GraphicsTracker.cpp
+++ b/media/codec2/hal/client/GraphicsTracker.cpp
@@ -13,7 +13,8 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#include <sys/eventfd.h>
+#include <fcntl.h>
+#include <unistd.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <private/android/AHardwareBufferHelpers.h>
@@ -25,6 +26,9 @@
 
 namespace {
 
+static constexpr int kMaxDequeueMin = 1;
+static constexpr int kMaxDequeueMax = ::android::BufferQueueDefs::NUM_BUFFER_SLOTS - 2;
+
 c2_status_t retrieveAHardwareBufferId(const C2ConstGraphicBlock &blk, uint64_t *bid) {
     // TODO
     (void)blk;
@@ -139,21 +143,26 @@
     mDequeueable{maxDequeueCount},
     mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
     mInConfig{false}, mStopped{false} {
-    if (maxDequeueCount <= 0) {
-        mMaxDequeue = kDefaultMaxDequeue;
-        mMaxDequeueRequested = kDefaultMaxDequeue;
-        mMaxDequeueCommitted = kDefaultMaxDequeue;
-        mDequeueable = kDefaultMaxDequeue;
+    if (maxDequeueCount < kMaxDequeueMin) {
+        mMaxDequeue = kMaxDequeueMin;
+        mMaxDequeueRequested = kMaxDequeueMin;
+        mMaxDequeueCommitted = kMaxDequeueMin;
+        mDequeueable = kMaxDequeueMin;
+    } else if (maxDequeueCount > kMaxDequeueMax) {
+        mMaxDequeue = kMaxDequeueMax;
+        mMaxDequeueRequested = kMaxDequeueMax;
+        mMaxDequeueCommitted = kMaxDequeueMax;
+        mDequeueable = kMaxDequeueMax;
     }
-    int allocEventFd = ::eventfd(mDequeueable, EFD_CLOEXEC | EFD_NONBLOCK | EFD_SEMAPHORE);
-    int statusEventFd = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+    int pipefd[2] = { -1, -1};
+    int ret = ::pipe2(pipefd, O_CLOEXEC | O_NONBLOCK);
 
-    mAllocEventFd.reset(allocEventFd);
-    mStopEventFd.reset(statusEventFd);
+    mReadPipeFd.reset(pipefd[0]);
+    mWritePipeFd.reset(pipefd[1]);
 
     mEventQueueThread = std::thread([this](){processEvent();});
 
-    CHECK(allocEventFd >= 0 && statusEventFd >= 0);
+    CHECK(ret >= 0);
     CHECK(mEventQueueThread.joinable());
 }
 
@@ -161,7 +170,6 @@
     stop();
     if (mEventQueueThread.joinable()) {
         std::unique_lock<std::mutex> l(mEventLock);
-        mStopEventThread = true;
         l.unlock();
         mEventCv.notify_one();
         mEventQueueThread.join();
@@ -231,6 +239,11 @@
 c2_status_t GraphicsTracker::configureMaxDequeueCount(int maxDequeueCount) {
     std::shared_ptr<BufferCache> cache;
 
+    if (maxDequeueCount < kMaxDequeueMin || maxDequeueCount > kMaxDequeueMax) {
+        ALOGE("max dequeue count %d is not valid", maxDequeueCount);
+        return C2_BAD_VALUE;
+    }
+
     // max dequeue count which can be committed to IGBP.
     // (Sometimes maxDequeueCount cannot be committed if the number of
     // dequeued buffer count is bigger.)
@@ -347,89 +360,76 @@
 
 void GraphicsTracker::stop() {
     bool expected = false;
+    std::unique_lock<std::mutex> l(mEventLock);
     bool updated = mStopped.compare_exchange_strong(expected, true);
     if (updated) {
-        uint64_t val = 1ULL;
-        int ret = ::write(mStopEventFd.get(), &val, 8);
-        if (ret < 0) {
-            // EINTR maybe
-            std::unique_lock<std::mutex> l(mEventLock);
-            mStopRequest = true;
-            l.unlock();
-            mEventCv.notify_one();
-            ALOGW("stop() status update pending");
-        }
+        int writeFd = mWritePipeFd.release();
+        ::close(writeFd);
     }
 }
 
 void GraphicsTracker::writeIncDequeueable(int inc) {
-    uint64_t val = inc;
-    int ret = ::write(mAllocEventFd.get(), &val, 8);
-    if (ret < 0) {
-        // EINTR due to signal handling maybe, this should be rare
+    CHECK(inc > 0 && inc < kMaxDequeueMax);
+    thread_local char buf[kMaxDequeueMax];
+    int diff = 0;
+    {
         std::unique_lock<std::mutex> l(mEventLock);
-        mIncDequeueable += inc;
-        l.unlock();
-        mEventCv.notify_one();
-        ALOGW("updating dequeueable to eventfd pending");
+        if (mStopped) {
+            return;
+        }
+        CHECK(mWritePipeFd.get() >= 0);
+        int ret = ::write(mWritePipeFd.get(), buf, inc);
+        if (ret == inc) {
+            return;
+        }
+        diff = ret < 0 ? inc : inc - ret;
+
+        // Partial write or EINTR; this should not happen in practice.
+        mIncDequeueable += diff;
+        if (mIncDequeueable > 0) {
+            l.unlock();
+            mEventCv.notify_one();
+            ALOGW("updating dequeueable to pipefd pending");
+        }
     }
 }
 
 void GraphicsTracker::processEvent() {
-    // This is for write() failure of eventfds.
-    // write() failure other than EINTR should not happen.
-    int64_t acc = 0;
-    bool stopRequest = false;
-    bool stopCommitted = false;
-
+    // This handles partial or failed writes to the writing end.
+    // This should rarely, if ever, happen in practice.
+    thread_local char buf[kMaxDequeueMax];
     while (true) {
-        {
-            std::unique_lock<std::mutex> l(mEventLock);
-            acc += mIncDequeueable;
-            mIncDequeueable = 0;
-            stopRequest |= mStopRequest;
-            mStopRequest = false;
-            if (acc == 0 && stopRequest == stopCommitted) {
-                if (mStopEventThread) {
-                    break;
+        std::unique_lock<std::mutex> l(mEventLock);
+        if (mStopped) {
+            break;
+        }
+        if (mIncDequeueable > 0) {
+            int inc = mIncDequeueable > kMaxDequeueMax ? kMaxDequeueMax : mIncDequeueable;
+            int ret = ::write(mWritePipeFd.get(), buf, inc);
+            int written = ret <= 0 ? 0 : ret;
+            mIncDequeueable -= written;
+            if (mIncDequeueable > 0) {
+                l.unlock();
+                if (ret < 0) {
+                    ALOGE("write to writing end failed %d", errno);
+                } else {
+                    ALOGW("partial write %d(%d)", inc, written);
                 }
-                mEventCv.wait(l);
                 continue;
             }
         }
-
-        if (acc > 0) {
-            int ret = ::write(mAllocEventFd.get(), &acc, 8);
-            if (ret > 0) {
-                acc = 0;
-            }
-        }
-        if (stopRequest && !stopCommitted) {
-            uint64_t val = 1ULL;
-            int ret = ::write(mStopEventFd.get(), &val, 8);
-            if (ret > 0) {
-                stopCommitted = true;
-            }
-        }
-        if (mStopEventThread) {
-            break;
-        }
+        mEventCv.wait(l);
     }
 }
 
-c2_status_t GraphicsTracker::getWaitableFds(int *allocFd, int *statusFd) {
-    *allocFd = ::dup(mAllocEventFd.get());
-    *statusFd = ::dup(mStopEventFd.get());
-
-    if (*allocFd < 0 || *statusFd < 0) {
-        if (*allocFd >= 0) {
-            ::close(*allocFd);
-            *allocFd = -1;
+c2_status_t GraphicsTracker::getWaitableFd(int *pipeFd) {
+    *pipeFd = ::dup(mReadPipeFd.get());
+    if (*pipeFd < 0) {
+        if (mReadPipeFd.get() < 0) {
+            return C2_BAD_STATE;
         }
-        if (*statusFd >= 0) {
-            ::close(*statusFd);
-            *statusFd = -1;
-        }
+        // dup error
+        ALOGE("dup() for the reading end failed %d", errno);
         return C2_NO_MEMORY;
     }
     return C2_OK;
@@ -438,8 +438,8 @@
 c2_status_t GraphicsTracker::requestAllocate(std::shared_ptr<BufferCache> *cache) {
     std::lock_guard<std::mutex> l(mLock);
     if (mDequeueable > 0) {
-        uint64_t val;
-        int ret = ::read(mAllocEventFd.get(), &val, 8);
+        char buf[1];
+        int ret = ::read(mReadPipeFd.get(), buf, 1);
         if (ret < 0) {
             if (errno == EINTR) {
                 // Do we really need to care for cancel due to signal handling?
@@ -452,6 +452,10 @@
             }
             CHECK(errno != 0);
         }
+        if (ret == 0) {
+            // writing end is closed
+            return C2_BAD_STATE;
+        }
         mDequeueable--;
         *cache = mBufferCache;
         return C2_OK;
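
requestAllocate() now distinguishes three read() outcomes on the non-blocking reading end: a positive return consumes one readiness byte, 0 means the writing end was closed (the tracker stopped, hence C2_BAD_STATE), and -1 with EINTR or EAGAIN means the call was interrupted or no byte is pending. A small hedged helper capturing those cases (the helper name and enum are illustrative):

    #include <cerrno>
    #include <unistd.h>

    // Illustrative helper mirroring the read() handling in requestAllocate().
    enum class PipeReadResult { kConsumed, kWriterClosed, kWouldBlock, kInterrupted, kError };

    PipeReadResult consumeOneCredit(int readFd) {
        char c;
        ssize_t ret = ::read(readFd, &c, 1);
        if (ret == 1) {
            return PipeReadResult::kConsumed;      // one dequeueable buffer claimed
        }
        if (ret == 0) {
            return PipeReadResult::kWriterClosed;  // writing end closed -> C2_BAD_STATE
        }
        if (errno == EINTR) {
            return PipeReadResult::kInterrupted;   // interrupted by a signal; caller may retry
        }
        if (errno == EAGAIN) {
            return PipeReadResult::kWouldBlock;    // no readiness byte pending
        }
        return PipeReadResult::kError;             // unexpected; treated as fatal
    }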
diff --git a/media/codec2/hal/client/include/codec2/aidl/GraphicBufferAllocator.h b/media/codec2/hal/client/include/codec2/aidl/GraphicBufferAllocator.h
index f9c8aca..902c53f 100644
--- a/media/codec2/hal/client/include/codec2/aidl/GraphicBufferAllocator.h
+++ b/media/codec2/hal/client/include/codec2/aidl/GraphicBufferAllocator.h
@@ -38,8 +38,8 @@
 
     ::ndk::ScopedAStatus deallocate(int64_t in_id, bool* _aidl_return) override;
 
-    ::ndk::ScopedAStatus getWaitableFds(
-            IGraphicBufferAllocator::WaitableFds* _aidl_return) override;
+    ::ndk::ScopedAStatus getWaitableFd(
+            ::ndk::ScopedFileDescriptor* _aidl_return) override;
 
     /**
      * Configuring Surface/BufferQueue for the interface.
diff --git a/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h b/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h
index 681b7e8..1fd9049 100644
--- a/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h
+++ b/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h
@@ -142,15 +142,15 @@
     void onReleased(uint32_t generation);
 
     /**
-     * Get waitable fds for events.(allocate is ready, end of life cycle)
+     * Get a waitable fd for events (allocate is ready, end of lifecycle).
      *
-     * @param[out]  allocFd     eventFd which signals being ready to allocate
-     * @param[out]  statusFd    eventFd which signals end of life cycle.
-     *                          When signaled no more allocate is possible.
+     * @param[out]  pipeFd      a file descriptor created by pipe2(), used to
+     *                          notify that a buffer is ready to allocate
+     *
      * @return  C2_OK
      *          C2_NO_MEMORY    Max # of fd reached.(not really a memory issue)
      */
-    c2_status_t getWaitableFds(int *allocFd, int *statusFd);
+    c2_status_t getWaitableFd(int *pipeFd);
 
     /**
      *  Ends to use the class. after the call, allocate will fail.
@@ -158,8 +158,6 @@
     void stop();
 
 private:
-    static constexpr int kDefaultMaxDequeue = 2;
-
     struct BufferCache;
 
     struct BufferItem {
@@ -246,21 +244,30 @@
     std::mutex mLock; // locks for data synchronization
     std::mutex mConfigLock; // locks for configuration change.
 
+    // NOTE: pipe2() creates a pair of file descriptors used to notify
+    // allocatable events and irrecoverable error events.
+    //
+    // A byte is written to the writing end whenever a buffer becomes ready
+    // to dequeue/allocate. A byte is read from the reading end whenever
+    // an allocate/dequeue event happens.
+    //
+    // The writing end is closed when the end-of-lifecycle event occurs.
+    //
+    // The reading end is shared with remote processes. Remote processes
+    // use ::poll() to check whether a buffer is ready to allocate/dequeue.
+    // ::poll() also notifies remote processes of the end-of-lifecycle event
+    // by returning POLLHUP for the reading end.
+    ::android::base::unique_fd mReadPipeFd;   // The reading end file descriptor
+    ::android::base::unique_fd mWritePipeFd;  // The writing end file descriptor
+
     std::atomic<bool> mStopped;
-
-    ::android::base::unique_fd mAllocEventFd; // eventfd in semaphore mode which
-                                              // mirrors mDqueueable.
-    ::android::base::unique_fd mStopEventFd; // eventfd which indicates the life
-                                             // cycle of the class being stopped.
-
     std::thread mEventQueueThread; // Thread to handle interrupted
-                                   // writes to eventfd{s}.
+                                   // writes to the writing end.
     std::mutex mEventLock;
     std::condition_variable mEventCv;
 
     bool mStopEventThread;
     int mIncDequeueable; // pending # of write to increase dequeueable eventfd
-    bool mStopRequest; // pending write to statusfd
 
 private:
     explicit GraphicsTracker(int maxDequeueCount);
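
The NOTE above describes what a remote process holding the shared reading end is expected to do: poll for POLLIN to learn that a buffer can be allocated, and treat POLLHUP as the end-of-lifecycle signal. A hedged sketch of that consumer-side wait (the function name and return-value mapping are illustrative):

    #include <poll.h>

    // Returns 1 when a buffer can be allocated, 0 on timeout (EINTR is treated
    // as "not yet"), and -1 once the writing end was closed (end of lifecycle).
    int waitForAllocatable(int readFd, int timeoutMs) {
        struct pollfd pfd;
        pfd.fd = readFd;
        pfd.events = POLLIN;
        pfd.revents = 0;
        int ret = ::poll(&pfd, 1, timeoutMs);
        if (ret <= 0) {
            return 0;
        }
        if (pfd.revents & (POLLHUP | POLLERR)) {
            return -1;
        }
        return (pfd.revents & POLLIN) ? 1 : 0;
    }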
diff --git a/media/codec2/vndk/C2Fence.cpp b/media/codec2/vndk/C2Fence.cpp
index 0344fd3..d40b469 100644
--- a/media/codec2/vndk/C2Fence.cpp
+++ b/media/codec2/vndk/C2Fence.cpp
@@ -16,6 +16,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "C2FenceFactory"
+#include <poll.h>
+
+#include <android-base/unique_fd.h>
 #include <cutils/native_handle.h>
 #include <utils/Log.h>
 #include <ui/Fence.h>
@@ -32,6 +35,7 @@
         NULL_FENCE,
         SURFACE_FENCE,
         SYNC_FENCE,
+        PIPE_FENCE,
     };
 
     virtual c2_status_t wait(c2_nsecs_t timeoutNs) = 0;
@@ -353,6 +357,154 @@
     return C2Fence(p);
 }
 
+/**
+ * Fence implementation which reports the number of events available, based on
+ * file descriptors created by pipe()/pipe2(). The reading end is wrapped by
+ * the implementation; events are signaled by writing to, or closing, the
+ * writing end. The implementation supports the full C2Fence interface.
+ */
+class _C2FenceFactory::PipeFenceImpl: public C2Fence::Impl {
+private:
+    bool waitEvent(c2_nsecs_t timeoutNs, bool *hangUp, bool *event) const {
+        if (!mValid) {
+            *hangUp = true;
+            return true;
+        }
+
+        struct pollfd pfd;
+        pfd.fd = mPipeFd.get();
+        pfd.events = POLLIN;
+        pfd.revents = 0;
+        struct timespec ts;
+        if (timeoutNs >= 0) {
+            ts.tv_sec = int(timeoutNs / 1000000000);
+            ts.tv_nsec = timeoutNs % 1000000000;
+        } else {
+            ALOGD("polling for indefinite duration requested, but changed to wait for %d sec",
+                  kPipeFenceWaitLimitSecs);
+            ts.tv_sec = kPipeFenceWaitLimitSecs;
+            ts.tv_nsec = 0;
+        }
+        int ret = ::ppoll(&pfd, 1, &ts, nullptr);
+        if (ret >= 0) {
+            if (pfd.revents) {
+                if (pfd.revents & ~POLLIN) {
+                    // Mostly this means the writing end fd was closed.
+                    *hangUp = true;
+                    mValid = false;
+                    ALOGD("PipeFenceImpl: pipe fd hangup or err event returned");
+                }
+                *event = true;
+                return true;
+            }
+            // event not ready yet.
+            return true;
+        }
+        if (errno == EINTR) {
+            // ppoll() was interrupted by a signal.
+            return false;
+        }
+        // A poll error here is treated as irrecoverable.
+        ALOGE("PipeFenceImpl: poll() error %d", errno);
+        *hangUp = true;
+        mValid = false;
+        return true;
+    }
+
+public:
+    virtual c2_status_t wait(c2_nsecs_t timeoutNs) {
+        if (!mValid) {
+            return C2_BAD_STATE;
+        }
+        bool hangUp = false;
+        bool event = false;
+        if (waitEvent(timeoutNs, &hangUp, &event)) {
+            if (hangUp) {
+                return C2_BAD_STATE;
+            }
+            if (event) {
+                return C2_OK;
+            }
+            return C2_TIMED_OUT;
+        } else {
+            return C2_CANCELED;
+        }
+    }
+
+    virtual bool valid() const {
+        if (!mValid) {
+            return false;
+        }
+        bool hangUp = false;
+        bool event = false;
+        if (waitEvent(0, &hangUp, &event)) {
+            if (hangUp) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    virtual bool ready() const {
+        if (!mValid) {
+            return false;
+        }
+        bool hangUp = false;
+        bool event = false;
+        if (waitEvent(0, &hangUp, &event)) {
+            if (event) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    virtual int fd() const {
+        if (!mValid) {
+            return -1;
+        }
+        return ::dup(mPipeFd.get());
+    }
+
+    virtual bool isHW() const {
+        return false;
+    }
+
+    virtual type_t type() const {
+        return PIPE_FENCE;
+    }
+
+    virtual native_handle_t *createNativeHandle() const {
+        // This is not supported.
+        return nullptr;
+    }
+
+    virtual ~PipeFenceImpl() = default;
+
+    PipeFenceImpl(int fd) : mPipeFd(fd) {
+        mValid = (mPipeFd.get() >= 0);
+    }
+
+private:
+    friend struct _C2FenceFactory;
+    static constexpr int kPipeFenceWaitLimitSecs = 5;
+
+    mutable std::atomic<bool> mValid;
+    ::android::base::unique_fd mPipeFd;
+};
+
+C2Fence _C2FenceFactory::CreatePipeFence(int fd) {
+    std::shared_ptr<_C2FenceFactory::PipeFenceImpl> impl =
+        std::make_shared<_C2FenceFactory::PipeFenceImpl>(fd);
+    std::shared_ptr<C2Fence::Impl> p = std::static_pointer_cast<C2Fence::Impl>(impl);
+    if (!p) {
+        ALOGE("PipeFence creation failure");
+    } else if (!impl->mValid) {
+        p.reset();
+    }
+    return C2Fence(p);
+}
+
 native_handle_t* _C2FenceFactory::CreateNativeHandle(const C2Fence& fence) {
     return fence.mImpl? fence.mImpl->createNativeHandle() : nullptr;
 }
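
A consumer holding the dup'ed reading-end fd (for example, the fd returned by GraphicsTracker::getWaitableFd()) can wrap it with the new factory method and block on it; per the implementation above, wait() maps readiness to C2_OK, expiry to C2_TIMED_OUT, and a closed writing end to C2_BAD_STATE. A hedged usage sketch, with an illustrative function name and timeout:

    #include <C2FenceFactory.h>

    // Illustrative helper; CreatePipeFence() takes ownership of readFd.
    c2_status_t waitForBuffer(int readFd) {
        C2Fence fence = _C2FenceFactory::CreatePipeFence(readFd);
        if (!fence.valid()) {
            return C2_BAD_STATE;  // the fd was invalid, so no fence was created
        }
        // 1 ms in nanoseconds: C2_OK means a buffer is ready to allocate,
        // C2_BAD_STATE means the writing end was closed, C2_TIMED_OUT otherwise.
        return fence.wait(1000000);
    }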
diff --git a/media/codec2/vndk/include/C2FenceFactory.h b/media/codec2/vndk/include/C2FenceFactory.h
index ef25c47..9b09980 100644
--- a/media/codec2/vndk/include/C2FenceFactory.h
+++ b/media/codec2/vndk/include/C2FenceFactory.h
@@ -39,6 +39,7 @@
 
     class SurfaceFenceImpl;
     class SyncFenceImpl;
+    class PipeFenceImpl;
 
     /*
      * Create C2Fence for BufferQueueBased blockpool.
@@ -66,6 +67,15 @@
      */
     static C2Fence CreateMultipleFdSyncFence(const std::vector<int>& fenceFds);
 
+    /*
+     * Create a C2Fence from an fd created by the pipe()/pipe2() syscall.
+     *
+     * \param fd                An fd representing the reading end of a pair of
+     *                          file descriptors created by the pipe()/pipe2()
+     *                          syscall. The fence takes ownership of the fd.
+     */
+    static C2Fence CreatePipeFence(int fd);
+
     /**
      * Create a native handle from fence for marshalling
      *