Merge changes from topic "c2-aidl-test" into main

* changes:
  media.c2 aidl: Handle Gralloc buffer conversion from C2HandleAhwb
  media.c2 aidl: minor fixes
  c2 hal: handle C2_BAD_INDEX
  media.c2 VTS: modify video tests for AIDL
  media.c2 aidl: Add a test for GraphicsTracker
  media.c2 aidl GraphicsTracker: Synchronize mDequeueable and pipe fds
  media.c2 aidl: fix GraphicsTracker bugs and add logs
diff --git a/media/codec2/TEST_MAPPING b/media/codec2/TEST_MAPPING
index 8a894f3..b911e11 100644
--- a/media/codec2/TEST_MAPPING
+++ b/media/codec2/TEST_MAPPING
@@ -25,5 +25,8 @@
         }
       ]
     }
+  ],
+  "postsubmit": [
+    { "name": "c2aidl_gtracker_test"}
   ]
 }
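
The new postsubmit entry points at the c2aidl_gtracker_test module added by this change (see media/codec2/tests/aidl/Android.bp below). Once built, the test can also be run locally with the standard runner, for example:

    atest c2aidl_gtracker_test
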
diff --git a/media/codec2/hal/aidl/Configurable.cpp b/media/codec2/hal/aidl/Configurable.cpp
index 051e38a..2daaac2 100644
--- a/media/codec2/hal/aidl/Configurable.cpp
+++ b/media/codec2/hal/aidl/Configurable.cpp
@@ -19,6 +19,7 @@
 #include <android-base/logging.h>
 
 #include <android/binder_auto_utils.h>
+#include <android-base/hex.h>
 #include <codec2/aidl/Configurable.h>
 #include <codec2/aidl/ParamTypes.h>
 
@@ -135,8 +136,6 @@
                 LOG(WARNING) << "querySupportedParams -- invalid output params.";
                 break;
             }
-        } else {
-            res = Status::BAD_INDEX;
         }
     }
     paramDesc->resize(dstIx);
diff --git a/media/codec2/hal/client/GraphicsTracker.cpp b/media/codec2/hal/client/GraphicsTracker.cpp
index 0848fc6..01b0678 100644
--- a/media/codec2/hal/client/GraphicsTracker.cpp
+++ b/media/codec2/hal/client/GraphicsTracker.cpp
@@ -174,20 +174,16 @@
 
 GraphicsTracker::GraphicsTracker(int maxDequeueCount)
     : mBufferCache(new BufferCache()), mMaxDequeue{maxDequeueCount},
-    mMaxDequeueRequested{maxDequeueCount},
     mMaxDequeueCommitted{maxDequeueCount},
-    mMaxDequeueRequestedSeqId{0UL}, mMaxDequeueCommittedSeqId{0ULL},
     mDequeueable{maxDequeueCount},
     mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
     mInConfig{false}, mStopped{false} {
     if (maxDequeueCount < kMaxDequeueMin) {
         mMaxDequeue = kMaxDequeueMin;
-        mMaxDequeueRequested = kMaxDequeueMin;
         mMaxDequeueCommitted = kMaxDequeueMin;
         mDequeueable = kMaxDequeueMin;
     } else if(maxDequeueCount > kMaxDequeueMax) {
         mMaxDequeue = kMaxDequeueMax;
-        mMaxDequeueRequested = kMaxDequeueMax;
         mMaxDequeueCommitted = kMaxDequeueMax;
         mDequeueable = kMaxDequeueMax;
     }
@@ -197,34 +193,36 @@
     mReadPipeFd.reset(pipefd[0]);
     mWritePipeFd.reset(pipefd[1]);
 
-    mEventQueueThread = std::thread([this](){processEvent();});
-    writeIncDequeueable(mDequeueable);
+    // The ctor does not require the lock to be held.
+    writeIncDequeueableLocked(mDequeueable);
 
     CHECK(ret >= 0);
-    CHECK(mEventQueueThread.joinable());
 }
 
 GraphicsTracker::~GraphicsTracker() {
     stop();
-    if (mEventQueueThread.joinable()) {
-        mEventQueueThread.join();
-    }
 }
 
 bool GraphicsTracker::adjustDequeueConfLocked(bool *updateDequeue) {
     // TODO: can't we adjust during config? Not committing it may be safe.
     *updateDequeue = false;
-    if (!mInConfig && mMaxDequeueRequested < mMaxDequeue) {
-        int delta = mMaxDequeue - mMaxDequeueRequested;
+    if (!mInConfig && mMaxDequeueRequested.has_value() && mMaxDequeueRequested < mMaxDequeue) {
+        int delta = mMaxDequeue - mMaxDequeueRequested.value();
+        int drained = 0;
         // Since we are supposed to increase mDequeuable by one already
         int adjustable = mDequeueable + 1;
         if (adjustable >= delta) {
-            mMaxDequeue = mMaxDequeueRequested;
+            mMaxDequeue = mMaxDequeueRequested.value();
             mDequeueable -= (delta - 1);
+            drained = delta - 1;
         } else {
             mMaxDequeue -= adjustable;
+            drained = mDequeueable;
             mDequeueable = 0;
         }
+        if (drained > 0) {
+            drainDequeueableLocked(drained);
+        }
         if (mMaxDequeueRequested == mMaxDequeue && mMaxDequeueRequested != mMaxDequeueCommitted) {
             *updateDequeue = true;
         }
@@ -235,6 +233,7 @@
 
 c2_status_t GraphicsTracker::configureGraphics(
         const sp<IGraphicBufferProducer>& igbp, uint32_t generation) {
+    // TODO: wait until operations on the previous IGBP are completed.
     std::shared_ptr<BufferCache> prevCache;
     int prevDequeueCommitted;
 
@@ -254,14 +253,28 @@
     if (igbp) {
         ret = igbp->getUniqueId(&bqId);
     }
-    if (ret != ::android::OK || prevCache->mGeneration == generation || prevCache->mBqId == bqId) {
+    if (ret != ::android::OK ||
+            prevCache->mGeneration == generation) {
+        ALOGE("new surface configure fail due to wrong or same bqId or same generation:"
+              "igbp(%d:%llu -> %llu), gen(%lu -> %lu)", (bool)igbp,
+              (unsigned long long)prevCache->mBqId, (unsigned long long)bqId,
+              (unsigned long)prevCache->mGeneration, (unsigned long)generation);
+        std::unique_lock<std::mutex> l(mLock);
+        mInConfig = false;
         return C2_BAD_VALUE;
     }
-    ret = igbp->setMaxDequeuedBufferCount(prevDequeueCommitted);
-    if (ret != ::android::OK) {
-        // TODO: sort out the error from igbp and return an error accordingly.
-        return C2_CORRUPTED;
+    if (igbp) {
+        ret = igbp->setMaxDequeuedBufferCount(prevDequeueCommitted);
+        if (ret != ::android::OK) {
+            ALOGE("new surface maxDequeueBufferCount configure fail");
+            // TODO: sort out the error from igbp and return an error accordingly.
+            std::unique_lock<std::mutex> l(mLock);
+            mInConfig = false;
+            return C2_CORRUPTED;
+        }
     }
+    ALOGD("new surface configured with id:%llu gen:%lu maxDequeue:%d",
+          (unsigned long long)bqId, (unsigned long)generation, prevDequeueCommitted);
     std::shared_ptr<BufferCache> newCache = std::make_shared<BufferCache>(bqId, generation, igbp);
     {
         std::unique_lock<std::mutex> l(mLock);
@@ -283,59 +296,74 @@
     // (Sometimes maxDequeueCount cannot be committed if the number of
     // currently dequeued buffers is larger.)
     int maxDequeueToCommit;
-    // max dequeue count which is committed to IGBP currently
-    // (actually mMaxDequeueCommitted, but needs to be read outside lock.)
-    int curMaxDequeueCommitted;
     std::unique_lock<std::mutex> cl(mConfigLock);
     {
         std::unique_lock<std::mutex> l(mLock);
-        if (mMaxDequeueRequested == maxDequeueCount) {
+        if (mMaxDequeueRequested.has_value()) {
+            if (mMaxDequeueRequested == maxDequeueCount) {
+                ALOGD("maxDequeueCount requested with %d already", maxDequeueCount);
+                return C2_OK;
+            }
+        } else if (mMaxDequeue == maxDequeueCount) {
+            ALOGD("maxDequeueCount is already %d", maxDequeueCount);
             return C2_OK;
         }
         mInConfig = true;
         mMaxDequeueRequested = maxDequeueCount;
         cache = mBufferCache;
-        curMaxDequeueCommitted = mMaxDequeueCommitted;
         if (mMaxDequeue <= maxDequeueCount) {
             maxDequeueToCommit = maxDequeueCount;
         } else {
             // Since mDequeueable is decreasing,
             // a delivered ready-to-allocate event may not be fulfilled.
             // Another wait via a waitable object may be necessary in that case.
-            int delta = mMaxDequeue - maxDequeueCount;
-            if (delta <= mDequeueable) {
-                maxDequeueToCommit = maxDequeueCount;
-                mDequeueable -= delta;
-            } else {
-                maxDequeueToCommit = mMaxDequeue - mDequeueable;
-                mDequeueable = 0;
+            int delta = std::min(mMaxDequeue - maxDequeueCount, mDequeueable);
+            maxDequeueToCommit = mMaxDequeue - delta;
+            mDequeueable -= delta;
+            if (delta > 0) {
+                drainDequeueableLocked(delta);
             }
         }
     }
 
     bool committed = true;
-    if (cache->mIgbp && maxDequeueToCommit != curMaxDequeueCommitted) {
+    if (cache->mIgbp && maxDequeueToCommit != mMaxDequeueCommitted) {
         ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(maxDequeueToCommit);
         committed = (ret == ::android::OK);
-        if (!committed) {
+        if (committed) {
+            ALOGD("maxDequeueCount committed to IGBP: %d", maxDequeueToCommit);
+        } else {
             // This should not happen.
-            ALOGE("dequeueCount failed with error(%d)", (int)ret);
+            ALOGE("maxdequeueCount update to IGBP failed with error(%d)", (int)ret);
         }
     }
 
+    int oldMaxDequeue = 0;
+    int requested = 0;
     {
         std::unique_lock<std::mutex> l(mLock);
         mInConfig = false;
+        oldMaxDequeue = mMaxDequeue;
+        mMaxDequeue = maxDequeueToCommit; // we already drained dequeueable
         if (committed) {
+            clearCacheIfNecessaryLocked(cache, maxDequeueToCommit);
             mMaxDequeueCommitted = maxDequeueToCommit;
-            int delta = mMaxDequeueCommitted - mMaxDequeue;
+            if (mMaxDequeueRequested == mMaxDequeueCommitted &&
+                  mMaxDequeueRequested == mMaxDequeue) {
+                mMaxDequeueRequested.reset();
+            }
+            if (mMaxDequeueRequested.has_value()) {
+                requested = mMaxDequeueRequested.value();
+            }
+            int delta = mMaxDequeueCommitted - oldMaxDequeue;
             if (delta > 0) {
                 mDequeueable += delta;
-                l.unlock();
-                writeIncDequeueable(delta);
+                writeIncDequeueableLocked(delta);
             }
         }
     }
+    ALOGD("maxDqueueCount change %d -> %d: pending: %d",
+          oldMaxDequeue, maxDequeueToCommit, requested);
 
     if (!committed) {
         return C2_CORRUPTED;
@@ -350,48 +378,60 @@
     std::unique_lock<std::mutex> cl(mConfigLock);
     {
         std::unique_lock<std::mutex> l(mLock);
-        if (mMaxDequeue == mMaxDequeueRequested && mMaxDequeueCommitted != mMaxDequeueRequested) {
-            dequeueCommit = mMaxDequeue;
-            mInConfig = true;
-            cache = mBufferCache;
-        } else {
+        if (!mMaxDequeueRequested.has_value() || mMaxDequeue != mMaxDequeueRequested) {
             return;
         }
+        if (mMaxDequeueCommitted == mMaxDequeueRequested) {
+            // Already committed; this should not happen.
+            mMaxDequeueRequested.reset();
+            return;
+        }
+        dequeueCommit = mMaxDequeue;
+        mInConfig = true;
+        cache = mBufferCache;
     }
     bool committed = true;
     if (cache->mIgbp) {
         ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(dequeueCommit);
         committed = (ret == ::android::OK);
-        if (!committed) {
+        if (committed) {
+            ALOGD("delayed maxDequeueCount update to IGBP: %d", dequeueCommit);
+        } else {
             // This should not happen.
-            ALOGE("dequeueCount failed with error(%d)", (int)ret);
+            ALOGE("delayed maxdequeueCount update to IGBP failed with error(%d)", (int)ret);
         }
     }
-    int cleared = 0;
     {
         // cache == mCache here, since we locked config.
         std::unique_lock<std::mutex> l(mLock);
         mInConfig = false;
         if (committed) {
-            if (cache->mIgbp && dequeueCommit < mMaxDequeueCommitted) {
-                // we are shrinking # of buffers, so clearing the cache.
-                for (auto it = cache->mBuffers.begin(); it != cache->mBuffers.end();) {
-                    uint64_t bid = it->second->mId;
-                    if (mDequeued.count(bid) == 0 || mDeallocating.count(bid) > 0) {
-                        ++cleared;
-                        it = cache->mBuffers.erase(it);
-                    } else {
-                        ++it;
-                    }
-                }
-            }
+            clearCacheIfNecessaryLocked(cache, dequeueCommit);
             mMaxDequeueCommitted = dequeueCommit;
         }
+        mMaxDequeueRequested.reset();
     }
-    if (cleared > 0) {
-        ALOGD("%d buffers are cleared from cache, due to IGBP capacity change", cleared);
-    }
+}
 
+void GraphicsTracker::clearCacheIfNecessaryLocked(const std::shared_ptr<BufferCache> &cache,
+                                            int maxDequeueCommitted) {
+    int cleared = 0;
+    size_t origCacheSize = cache->mBuffers.size();
+    if (cache->mIgbp && maxDequeueCommitted < mMaxDequeueCommitted) {
+        // we are shrinking # of buffers in this case, so evict the previously
+        // cached buffers.
+        for (auto it = cache->mBuffers.begin(); it != cache->mBuffers.end();) {
+            uint64_t bid = it->second->mId;
+            if (mDequeued.count(bid) == 0 || mDeallocating.count(bid) > 0) {
+                ++cleared;
+                it = cache->mBuffers.erase(it);
+            } else {
+                ++it;
+            }
+        }
+    }
+    ALOGD("Cache size %zu -> %zu: maybe_cleared(%d), dequeued(%zu)",
+          origCacheSize, cache->mBuffers.size(), cleared, mDequeued.size());
 }
 
 int GraphicsTracker::getCurDequeueable() {
@@ -400,70 +440,58 @@
 }
 
 void GraphicsTracker::stop() {
-    bool expected = false;
-    std::unique_lock<std::mutex> l(mEventLock);
-    bool updated = mStopped.compare_exchange_strong(expected, true);
-    if (updated) {
-        int writeFd = mWritePipeFd.release();
+    // TODO: wait until all operations on the current IGBP
+    // are completed.
+    std::unique_lock<std::mutex> l(mLock);
+    if (mStopped) {
+        return;
+    }
+    mStopped = true;
+    int writeFd = mWritePipeFd.release();
+    if (writeFd >= 0) {
         ::close(writeFd);
-        int readFd = mReadPipeFd.release();
-        ::close(readFd);
-        mEventCv.notify_one();
     }
 }
 
-void GraphicsTracker::writeIncDequeueable(int inc) {
+void GraphicsTracker::writeIncDequeueableLocked(int inc) {
     CHECK(inc > 0 && inc < kMaxDequeueMax);
     thread_local char buf[kMaxDequeueMax];
-    int diff = 0;
-    {
-        std::unique_lock<std::mutex> l(mEventLock);
-        if (mStopped) {
-            return;
-        }
-        CHECK(mWritePipeFd.get() >= 0);
-        int ret = ::write(mWritePipeFd.get(), buf, inc);
-        if (ret == inc) {
-            return;
-        }
-        diff = ret < 0 ? inc : inc - ret;
-
-        // Partial write or EINTR. This will not happen in a real scenario.
-        mIncDequeueable += diff;
-        if (mIncDequeueable > 0) {
-            l.unlock();
-            mEventCv.notify_one();
-            ALOGW("updating dequeueable to pipefd pending");
-        }
+    if (mStopped) { // the reading end is closed
+        return;
     }
+    int writeFd = mWritePipeFd.get();
+    if (writeFd < 0) {
+        // initialization failed, so the fd is not valid.
+        return;
+    }
+    int ret = ::write(writeFd, buf, inc);
+    // Since this is non-blocking i/o, it never returns EINTR.
+    //
+    // ::write() to a pipe is guaranteed to succeed atomically if it writes less than
+    // PIPE_BUF bytes. The buffer size of a pipe/fifo is at least 4K and our total
+    // max pending buffer count is 64, so it never returns EAGAIN here either.
+    // See pipe(7) for further information.
+    //
+    // Other errors are serious errors and we cannot synchronize mDequeueable to
+    // length of pending buffer in pipe/fifo anymore. So better to abort here.
+    // TODO: do not abort here. (b/318717399)
+    CHECK(ret == inc);
 }
 
-void GraphicsTracker::processEvent() {
-    // This is for partial/failed writes to the writing end.
-    // This may not happen in the real scenario.
+void GraphicsTracker::drainDequeueableLocked(int dec) {
+    CHECK(dec > 0 && dec < kMaxDequeueMax);
     thread_local char buf[kMaxDequeueMax];
-    while (true) {
-        std::unique_lock<std::mutex> l(mEventLock);
-        if (mStopped) {
-            break;
-        }
-        if (mIncDequeueable > 0) {
-            int inc = mIncDequeueable > kMaxDequeueMax ? kMaxDequeueMax : mIncDequeueable;
-            int ret = ::write(mWritePipeFd.get(), buf, inc);
-            int written = ret <= 0 ? 0 : ret;
-            mIncDequeueable -= written;
-            if (mIncDequeueable > 0) {
-                l.unlock();
-                if (ret < 0) {
-                    ALOGE("write to writing end failed %d", errno);
-                } else {
-                    ALOGW("partial write %d(%d)", inc, written);
-                }
-                continue;
-            }
-        }
-        mEventCv.wait(l);
+    if (mStopped) {
+        return;
     }
+    int readFd = mReadPipeFd.get();
+    if (readFd < 0) {
+        // initialization failed, so the fd is not valid.
+        return;
+    }
+    int ret = ::read(readFd, buf, dec);
+    // TODO: do not abort here. (b/318717399)
+    CHECK(ret == dec);
 }
 
 c2_status_t GraphicsTracker::getWaitableFd(int *pipeFd) {
@@ -539,8 +567,7 @@
             return;
         }
         mDequeueable++;
-        l.unlock();
-        writeIncDequeueable(1);
+        writeIncDequeueableLocked(1);
     }
 }
 
@@ -715,14 +742,13 @@
             return C2_OK;
         }
         mDequeueable++;
-        l.unlock();
-        writeIncDequeueable(1);
+        writeIncDequeueableLocked(1);
     }
     return C2_OK;
 }
 
 void GraphicsTracker::commitDeallocate(
-        std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid) {
+        std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid, bool *updateDequeue) {
     std::unique_lock<std::mutex> l(mLock);
     size_t del1 = mDequeued.erase(bid);
     size_t del2 = mDeallocating.erase(bid);
@@ -730,9 +756,11 @@
     if (cache) {
         cache->unblockSlot(slotId);
     }
+    if (adjustDequeueConfLocked(updateDequeue)) {
+        return;
+    }
     mDequeueable++;
-    l.unlock();
-    writeIncDequeueable(1);
+    writeIncDequeueableLocked(1);
 }
 
 
@@ -758,7 +786,10 @@
     // cache->mIgbp is not null, if completed is false.
     (void)cache->mIgbp->cancelBuffer(slotId, rFence);
 
-    commitDeallocate(cache, slotId, bid);
+    commitDeallocate(cache, slotId, bid, &updateDequeue);
+    if (updateDequeue) {
+        updateDequeueConf();
+    }
     return C2_OK;
 }
 
@@ -785,8 +816,7 @@
             return C2_BAD_STATE;
         }
         mDequeueable++;
-        l.unlock();
-        writeIncDequeueable(1);
+        writeIncDequeueableLocked(1);
         return C2_BAD_STATE;
     }
     std::shared_ptr<BufferItem> buffer = it->second;
@@ -828,8 +858,7 @@
             return;
         }
         mDequeueable++;
-        l.unlock();
-        writeIncDequeueable(1);
+        writeIncDequeueableLocked(1);
         return;
     }
 }
@@ -843,6 +872,9 @@
         ALOGE("retrieving AHB-ID for GraphicBlock failed");
         return C2_CORRUPTED;
     }
+    std::shared_ptr<_C2BlockPoolData> poolData =
+            _C2BlockFactory::GetGraphicBlockPoolData(blk);
+    _C2BlockFactory::DisownIgbaBlock(poolData);
     std::shared_ptr<BufferCache> cache;
     std::shared_ptr<BufferItem> buffer;
     std::shared_ptr<BufferItem> oldBuffer;
@@ -870,13 +902,19 @@
         if (!gb) {
             ALOGE("render: realloc-ing a new buffer for migration failed");
             std::shared_ptr<BufferCache> nullCache;
-            commitDeallocate(nullCache, -1, bid);
+            commitDeallocate(nullCache, -1, bid, &updateDequeue);
+            if (updateDequeue) {
+                updateDequeueConf();
+            }
             return C2_REFUSED;
         }
         if (cache->mIgbp->attachBuffer(&(newBuffer->mSlot), gb) != ::android::OK) {
             ALOGE("render: attaching a new buffer to IGBP failed");
             std::shared_ptr<BufferCache> nullCache;
-            commitDeallocate(nullCache, -1, bid);
+            commitDeallocate(nullCache, -1, bid, &updateDequeue);
+            if (updateDequeue) {
+                updateDequeueConf();
+            }
             return C2_REFUSED;
         }
         cache->waitOnSlot(newBuffer->mSlot);
@@ -890,11 +928,13 @@
         CHECK(renderRes != ::android::BAD_VALUE);
         ALOGE("render: failed to queueBuffer() err = %d", renderRes);
         (void) cache->mIgbp->cancelBuffer(buffer->mSlot, input.fence);
-        commitDeallocate(cache, buffer->mSlot, bid);
+        commitDeallocate(cache, buffer->mSlot, bid, &updateDequeue);
+        if (updateDequeue) {
+            updateDequeueConf();
+        }
         return C2_REFUSED;
     }
 
-    updateDequeue = false;
     commitRender(cache, buffer, oldBuffer, output->bufferReplaced, &updateDequeue);
     if (updateDequeue) {
         updateDequeueConf();
@@ -909,8 +949,7 @@
         if (mBufferCache->mGeneration == generation) {
             if (!adjustDequeueConfLocked(&updateDequeue)) {
                 mDequeueable++;
-                l.unlock();
-                writeIncDequeueable(1);
+                writeIncDequeueableLocked(1);
             }
         }
     }
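
The GraphicsTracker rework above removes the event thread entirely: mDequeueable and the pipe are now kept in sync while mLock is held, with one pending byte in the non-blocking pipe per dequeueable slot. writeIncDequeueableLocked() adds bytes, drainDequeueableLocked() consumes them when the budget shrinks, and stop() closes the write end so waiters wake up. The following standalone sketch illustrates that counting scheme; it is not part of this patch, and the names and sizes in it are only illustrative.

    // Illustrative sketch only -- not part of this patch.
    #include <fcntl.h>
    #include <poll.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
        constexpr int kMaxPending = 64;  // plays the role of kMaxDequeueMax
        int fds[2];
        if (::pipe(fds) != 0) {
            return 1;
        }
        int readFd = fds[0];
        int writeFd = fds[1];
        // Both ends are non-blocking, as the tracker's pipe is.
        ::fcntl(readFd, F_SETFL, O_NONBLOCK);
        ::fcntl(writeFd, F_SETFL, O_NONBLOCK);
        char buf[kMaxPending] = {};

        // "Three buffers became dequeueable": write three bytes.
        // Writes of less than PIPE_BUF bytes are atomic, so they never split.
        ssize_t wrote = ::write(writeFd, buf, 3);

        // A waiter polls the read end; POLLIN means at least one slot is free.
        struct pollfd pfd;
        pfd.fd = readFd;
        pfd.events = POLLIN;
        int readable = ::poll(&pfd, 1, 0);

        // "Max dequeue count shrank by two": drain two bytes back out.
        ssize_t drained = ::read(readFd, buf, 2);

        // One byte is left, so one slot is still dequeueable. Closing the
        // write end (what stop() does) makes waiters observe POLLHUP instead
        // of blocking forever.
        std::printf("wrote=%zd readable=%d drained=%zd still_dequeueable=%d\n",
                    wrote, readable, drained, ::poll(&pfd, 1, 0) == 1);
        ::close(readFd);
        ::close(writeFd);
        return 0;
    }

Because each write stays far below PIPE_BUF, a short write or EAGAIN cannot occur in practice, which is why the patched code can simply CHECK the return values.
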
diff --git a/media/codec2/hal/client/client.cpp b/media/codec2/hal/client/client.cpp
index b872cea..85b5ec8 100644
--- a/media/codec2/hal/client/client.cpp
+++ b/media/codec2/hal/client/client.cpp
@@ -649,8 +649,8 @@
         return C2_CORRUPTED;
     }
     size_t i = 0;
-    for (auto it = paramPointers.begin();
-            it != paramPointers.end(); ) {
+    size_t numUpdatedStackParams = 0;
+    for (auto it = paramPointers.begin(); it != paramPointers.end(); ) {
         C2Param* paramPointer = *it;
         if (numStackIndices > 0) {
             --numStackIndices;
@@ -677,7 +677,9 @@
                 status = C2_BAD_INDEX;
                 continue;
             }
-            if (!stackParams[i++]->updateFrom(*paramPointer)) {
+            if (stackParams[i++]->updateFrom(*paramPointer)) {
+                ++numUpdatedStackParams;
+            } else {
                 LOG(WARNING) << "query -- param update failed: "
                                 "index = "
                              << paramPointer->index() << ".";
@@ -697,6 +699,13 @@
         }
         ++it;
     }
+    size_t numQueried = numUpdatedStackParams;
+    if (heapParams) {
+        numQueried += heapParams->size();
+    }
+    if (status == C2_OK && indices.size() != numQueried) {
+        status = C2_BAD_INDEX;
+    }
     return status;
 }
 
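
With the query() change above, the client now counts how many of the requested indices actually came back (updated stack params plus returned heap params) and reports C2_BAD_INDEX when some were dropped by the HAL, instead of returning C2_OK with silently missing entries. Below is a self-contained sketch of just that accounting rule, using stand-in types rather than the real c2_status_t/C2Param machinery:

    // Stand-in types for illustration; the real code uses c2_status_t,
    // C2Param, and the Codec2Client query() path shown above.
    #include <cstddef>
    #include <cstdio>

    enum class QueryStatus { kOk, kBadIndex };

    QueryStatus finalizeQueryStatus(QueryStatus status,
                                    size_t numRequestedIndices,
                                    size_t numUpdatedStackParams,
                                    size_t numReturnedHeapParams) {
        size_t numQueried = numUpdatedStackParams + numReturnedHeapParams;
        if (status == QueryStatus::kOk && numQueried != numRequestedIndices) {
            // Some requested indices were not answered: partial result.
            status = QueryStatus::kBadIndex;
        }
        return status;
    }

    int main() {
        // 3 indices requested, 2 stack params updated, 0 heap params returned.
        QueryStatus s = finalizeQueryStatus(QueryStatus::kOk, 3, 2, 0);
        std::printf("partial query -> %s\n",
                    s == QueryStatus::kBadIndex ? "C2_BAD_INDEX" : "C2_OK");
        return 0;
    }
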
diff --git a/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h b/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h
index 4640243..dd6c869 100644
--- a/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h
+++ b/media/codec2/hal/client/include/codec2/aidl/GraphicsTracker.h
@@ -27,6 +27,7 @@
 #include <mutex>
 #include <set>
 #include <thread>
+#include <optional>
 
 #include <C2Buffer.h>
 
@@ -234,12 +235,14 @@
     std::map<uint64_t, std::shared_ptr<BufferItem>> mDequeued;
     std::set<uint64_t> mDeallocating;
 
+    // These member variables are read and modified only in one of two ways:
+    // 1. with mConfigLock held:
+    //    set mInConfig to true (under mLock) at the beginning and
+    //    clear mInConfig (under mLock) at the end
+    // 2. with mLock held while mInConfig is false.
     int mMaxDequeue;
-    int mMaxDequeueRequested;
     int mMaxDequeueCommitted;
-
-    uint32_t mMaxDequeueRequestedSeqId;
-    uint32_t mMaxDequeueCommittedSeqId;
+    std::optional<int> mMaxDequeueRequested;
 
     int mDequeueable;
 
@@ -271,13 +274,6 @@
     ::android::base::unique_fd mWritePipeFd;  // The writing end file descriptor
 
     std::atomic<bool> mStopped;
-    std::thread mEventQueueThread; // Thread to handle interrupted
-                                   // writes to the writing end.
-    std::mutex mEventLock;
-    std::condition_variable mEventCv;
-
-    bool mStopEventThread;
-    int mIncDequeueable; // pending # of write to increase dequeueable eventfd
 
 private:
     explicit GraphicsTracker(int maxDequeueCount);
@@ -289,6 +285,9 @@
     bool adjustDequeueConfLocked(bool *updateDequeueConf);
 
     void updateDequeueConf();
+    void clearCacheIfNecessaryLocked(
+            const std::shared_ptr<BufferCache> &cache,
+            int maxDequeueCommitted);
 
     c2_status_t requestAllocate(std::shared_ptr<BufferCache> *cache);
     c2_status_t requestDeallocate(uint64_t bid, const sp<Fence> &fence,
@@ -305,7 +304,9 @@
                         bool cached, int slotId, const sp<Fence> &fence,
                         std::shared_ptr<BufferItem> *buffer,
                         bool *updateDequeue);
-    void commitDeallocate(std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid);
+    void commitDeallocate(std::shared_ptr<BufferCache> &cache,
+                          int slotId, uint64_t bid,
+                          bool *updateDequeue);
     void commitRender(const std::shared_ptr<BufferCache> &cache,
                       const std::shared_ptr<BufferItem> &buffer,
                       const std::shared_ptr<BufferItem> &oldBuffer,
@@ -318,8 +319,8 @@
             bool *cached, int *rSlotId, sp<Fence> *rFence,
             std::shared_ptr<BufferItem> *buffer);
 
-    void writeIncDequeueable(int inc);
-    void processEvent();
+    void writeIncDequeueableLocked(int inc);
+    void drainDequeueableLocked(int dec);
 };
 
 } // namespace aidl::android::hardware::media::c2::implementation
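
In the header above, the old requested/committed sequence-id bookkeeping is replaced by a single std::optional<int> mMaxDequeueRequested: a value is present only while a max-dequeue change is pending, and it is cleared once the requested, committed, and current counts all agree. A standalone sketch of that pattern under the same locking idea (class and method names here are illustrative, not the tracker's):

    // Illustrative only; names do not match the tracker exactly.
    #include <mutex>
    #include <optional>
    #include <cstdio>

    class MaxDequeueState {
    public:
        explicit MaxDequeueState(int initial)
            : mMaxDequeue(initial), mMaxDequeueCommitted(initial) {}

        // Record a new request; returns false if it is already satisfied
        // or identical to the pending one.
        bool request(int count) {
            std::lock_guard<std::mutex> l(mLock);
            int current = mMaxDequeueRequested.value_or(mMaxDequeue);
            if (current == count) {
                return false;
            }
            mMaxDequeueRequested = count;
            return true;
        }

        // Called once the count has been committed to the producer.
        void commit(int count) {
            std::lock_guard<std::mutex> l(mLock);
            mMaxDequeue = count;
            mMaxDequeueCommitted = count;
            if (mMaxDequeueRequested == mMaxDequeueCommitted) {
                mMaxDequeueRequested.reset();  // nothing pending anymore
            }
        }

        bool hasPendingRequest() {
            std::lock_guard<std::mutex> l(mLock);
            return mMaxDequeueRequested.has_value();
        }

    private:
        std::mutex mLock;
        int mMaxDequeue;
        int mMaxDequeueCommitted;
        std::optional<int> mMaxDequeueRequested;  // set only while a change is pending
    };

    int main() {
        MaxDequeueState state(10);
        state.request(14);   // pending: 14
        state.commit(14);    // committed, pending cleared
        std::printf("pending after commit: %d\n", state.hasPendingRequest());
        return 0;
    }
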
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index fdb28f4..df89510 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -29,6 +29,7 @@
 #include <C2BufferPriv.h>
 #include <C2Config.h>
 #include <C2Debug.h>
+#include <codec2/common/HalSelection.h>
 #include <codec2/hidl/client.h>
 #include <gui/BufferQueue.h>
 #include <gui/IConsumerListener.h>
@@ -407,30 +408,45 @@
                       surfaceMode_t surfMode) {
     using namespace android;
     sp<IGraphicBufferProducer> producer = nullptr;
+    sp<IGraphicBufferConsumer> consumer = nullptr;
+    sp<GLConsumer> texture = nullptr;
+    sp<ANativeWindow> surface = nullptr;
     static std::atomic_uint32_t surfaceGeneration{0};
     uint32_t generation =
             (getpid() << 10) |
             ((surfaceGeneration.fetch_add(1, std::memory_order_relaxed) + 1) & ((1 << 10) - 1));
     int32_t maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
+    C2BlockPool::local_id_t poolId = C2BlockPool::BASIC_GRAPHIC;
+    std::shared_ptr<Codec2Client::Configurable> configurable;
+    bool aidl = ::android::IsCodec2AidlHalSelected();
+    if (aidl) {
+        // AIDL does not support blockpool-less mode.
+        c2_status_t poolRet = component->createBlockPool(
+                C2PlatformAllocatorStore::IGBA, &poolId, &configurable);
+        ASSERT_EQ(poolRet, C2_OK) << "createBlockPool failed";
+    }
+
     if (surfMode == SURFACE) {
-        sp<IGraphicBufferConsumer> consumer = nullptr;
         BufferQueue::createBufferQueue(&producer, &consumer);
         ASSERT_NE(producer, nullptr) << "createBufferQueue returned invalid producer";
         ASSERT_NE(consumer, nullptr) << "createBufferQueue returned invalid consumer";
 
-        sp<GLConsumer> texture =
+        texture =
                 new GLConsumer(consumer, 0 /* tex */, GLConsumer::TEXTURE_EXTERNAL,
                                true /* useFenceSync */, false /* isControlledByApp */);
 
-        sp<ANativeWindow> gSurface = new Surface(producer);
-        ASSERT_NE(gSurface, nullptr) << "getSurface failed";
+        surface = new Surface(producer);
+        ASSERT_NE(surface, nullptr) << "failed to create Surface object";
 
         producer->setGenerationNumber(generation);
     }
 
-    c2_status_t err = component->setOutputSurface(C2BlockPool::BASIC_GRAPHIC, producer, generation,
+    c2_status_t err = component->setOutputSurface(poolId, producer, generation,
                                                   maxDequeueBuffers);
-    ASSERT_EQ(err, C2_OK) << "setOutputSurface failed";
+    std::string surfStr = surfMode == NO_SURFACE ? "NO_SURFACE" :
+            (surfMode == NULL_SURFACE ? "NULL_SURFACE" : "WITH_SURFACE");
+
+    ASSERT_EQ(err, C2_OK) << "setOutputSurface failed, surfMode: " << surfStr;
 }
 
 void decodeNFrames(const std::shared_ptr<android::Codec2Client::Component>& component,
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index fbb4f18..8ecb9c0 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -19,6 +19,7 @@
 
 #include <android-base/logging.h>
 #include <android/binder_process.h>
+#include <codec2/common/HalSelection.h>
 #include <gtest/gtest.h>
 #include <hidl/GtestPrinter.h>
 #include <stdio.h>
@@ -71,7 +72,9 @@
         std::shared_ptr<C2AllocatorStore> store = android::GetCodec2PlatformAllocatorStore();
         CHECK_EQ(store->fetchAllocator(C2AllocatorStore::DEFAULT_GRAPHIC, &mGraphicAllocator),
                  C2_OK);
-        mGraphicPool = std::make_shared<C2PooledBlockPool>(mGraphicAllocator, mBlockPoolId++);
+        C2PooledBlockPool::BufferPoolVer ver = ::android::IsCodec2AidlHalSelected() ?
+                C2PooledBlockPool::VER_AIDL2 : C2PooledBlockPool::VER_HIDL;
+        mGraphicPool = std::make_shared<C2PooledBlockPool>(mGraphicAllocator, mBlockPoolId++, ver);
         ASSERT_NE(mGraphicPool, nullptr);
 
         std::vector<std::unique_ptr<C2Param>> queried;
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 3bb6593..5c1755e 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -1049,7 +1049,11 @@
         // Unwrap raw buffer handle from the C2Handle
         native_handle_t *nh = UnwrapNativeCodec2GrallocHandle(handle);
         if (!nh) {
-            return;
+            nh = UnwrapNativeCodec2AhwbHandle(handle);
+            if (!nh) {
+                ALOGE("handle is not compatible to neither C2HandleGralloc nor C2HandleAhwb");
+                return;
+            }
         }
         // Import the raw handle so IMapper can use the buffer. The imported
         // handle must be freed when the client is done with the buffer.
diff --git a/media/codec2/tests/aidl/Android.bp b/media/codec2/tests/aidl/Android.bp
new file mode 100644
index 0000000..2ad245c
--- /dev/null
+++ b/media/codec2/tests/aidl/Android.bp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "frameworks_av_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_test {
+    name: "c2aidl_gtracker_test",
+    test_suites: ["device-tests"],
+    defaults: [
+        "libcodec2-aidl-client-defaults",
+    ],
+
+    header_libs: [
+        "libcodec2_client_headers",
+        "libcodec2_internal",
+        "libcodec2_vndk_headers",
+    ],
+
+    srcs: [
+        "GraphicsTracker_test.cpp",
+    ],
+
+    shared_libs: [
+        "libbinder",
+        "libcodec2_client",
+        "libgui",
+        "libnativewindow",
+        "libui",
+    ],
+}
diff --git a/media/codec2/tests/aidl/GraphicsTracker_test.cpp b/media/codec2/tests/aidl/GraphicsTracker_test.cpp
new file mode 100644
index 0000000..9008086
--- /dev/null
+++ b/media/codec2/tests/aidl/GraphicsTracker_test.cpp
@@ -0,0 +1,820 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "GraphicsTracker_test"
+#include <unistd.h>
+
+#include <android/hardware_buffer.h>
+#include <codec2/aidl/GraphicsTracker.h>
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <binder/ProcessState.h>
+#include <gtest/gtest.h>
+#include <gui/BufferQueue.h>
+#include <gui/IProducerListener.h>
+#include <gui/IConsumerListener.h>
+#include <gui/Surface.h>
+#include <private/android/AHardwareBufferHelpers.h>
+
+#include <C2BlockInternal.h>
+#include <C2FenceFactory.h>
+
+#include <atomic>
+#include <memory>
+#include <iostream>
+#include <thread>
+
+using ::aidl::android::hardware::media::c2::implementation::GraphicsTracker;
+using ::android::BufferItem;
+using ::android::BufferQueue;
+using ::android::Fence;
+using ::android::GraphicBuffer;
+using ::android::IGraphicBufferProducer;
+using ::android::IGraphicBufferConsumer;
+using ::android::IProducerListener;
+using ::android::IConsumerListener;
+using ::android::OK;
+using ::android::sp;
+using ::android::wp;
+
+namespace {
+struct BqStatistics {
+    std::atomic<int> mDequeued;
+    std::atomic<int> mQueued;
+    std::atomic<int> mBlocked;
+    std::atomic<int> mDropped;
+    std::atomic<int> mDiscarded;
+    std::atomic<int> mReleased;
+
+    void log() {
+        ALOGD("Dequeued: %d, Queued: %d, Blocked: %d, "
+              "Dropped: %d, Discarded %d, Released %d",
+              (int)mDequeued, (int)mQueued, (int)mBlocked,
+              (int)mDropped, (int)mDiscarded, (int)mReleased);
+    }
+
+    void clear() {
+        mDequeued = 0;
+        mQueued = 0;
+        mBlocked = 0;
+        mDropped = 0;
+        mDiscarded = 0;
+        mReleased = 0;
+    }
+};
+
+struct DummyConsumerListener : public android::BnConsumerListener {
+    void onFrameAvailable(const BufferItem& /* item */) override {}
+    void onBuffersReleased() override {}
+    void onSidebandStreamChanged() override {}
+};
+
+struct TestConsumerListener : public android::BnConsumerListener {
+    TestConsumerListener(const sp<IGraphicBufferConsumer> &consumer)
+            : BnConsumerListener(), mConsumer(consumer) {}
+    void onFrameAvailable(const BufferItem&) override {
+        constexpr static int kRenderDelayUs = 1000000/30; // 30fps
+        BufferItem buffer;
+        // consume buffer
+        sp<IGraphicBufferConsumer> consumer = mConsumer.promote();
+        if (consumer != nullptr && consumer->acquireBuffer(&buffer, 0) == android::NO_ERROR) {
+            ::usleep(kRenderDelayUs);
+            consumer->releaseBuffer(buffer.mSlot, buffer.mFrameNumber,
+                                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, buffer.mFence);
+        }
+    }
+    void onBuffersReleased() override {}
+    void onSidebandStreamChanged() override {}
+
+    wp<IGraphicBufferConsumer> mConsumer;
+};
+
+struct TestProducerListener : public android::BnProducerListener {
+    TestProducerListener(std::shared_ptr<GraphicsTracker> tracker,
+                         std::shared_ptr<BqStatistics> &stat,
+                         uint32_t generation) : BnProducerListener(),
+        mTracker(tracker), mStat(stat), mGeneration(generation) {}
+    virtual void onBufferReleased() override {
+        auto tracker = mTracker.lock();
+        if (tracker) {
+            mStat->mReleased++;
+            tracker->onReleased(mGeneration);
+        }
+    }
+    virtual bool needsReleaseNotify() override { return true; }
+    virtual void onBuffersDiscarded(const std::vector<int32_t>&) override {}
+
+    std::weak_ptr<GraphicsTracker> mTracker;
+    std::shared_ptr<BqStatistics> mStat;
+    uint32_t mGeneration;
+};
+
+struct Frame {
+    AHardwareBuffer *buffer_;
+    sp<Fence> fence_;
+
+    Frame() : buffer_{nullptr}, fence_{nullptr} {}
+    Frame(AHardwareBuffer *buffer, sp<Fence> fence)
+            : buffer_(buffer), fence_(fence) {}
+    ~Frame() {
+        if (buffer_) {
+            AHardwareBuffer_release(buffer_);
+        }
+    }
+};
+
+struct FrameQueue {
+    bool mStopped;
+    bool mDrain;
+    std::queue<std::shared_ptr<Frame>> mQueue;
+    std::mutex mMutex;
+    std::condition_variable mCond;
+
+    FrameQueue() : mStopped{false}, mDrain{false} {}
+
+    bool queueItem(AHardwareBuffer *buffer, sp<Fence> fence) {
+        std::shared_ptr<Frame> frame = std::make_shared<Frame>(buffer, fence);
+        if (mStopped) {
+            return false;
+        }
+        if (!frame) {
+            return false;
+        }
+        std::unique_lock<std::mutex> l(mMutex);
+        mQueue.emplace(frame);
+        l.unlock();
+        mCond.notify_all();
+        return true;
+    }
+
+    void stop(bool drain = false) {
+        bool stopped = false;
+        {
+            std::unique_lock<std::mutex> l(mMutex);
+            if (!mStopped) {
+                mStopped = true;
+                mDrain = drain;
+                stopped = true;
+            }
+            l.unlock();
+            if (stopped) {
+                mCond.notify_all();
+            }
+        }
+    }
+
+    bool waitItem(std::shared_ptr<Frame> *frame) {
+        while(true) {
+            std::unique_lock<std::mutex> l(mMutex);
+            if (!mDrain && mStopped) {
+                // stop without consuming the queue.
+                return false;
+            }
+            if (!mQueue.empty()) {
+                *frame = mQueue.front();
+                mQueue.pop();
+                return true;
+            } else if (mStopped) {
+                // stop after consuming the queue.
+                return false;
+            }
+            mCond.wait(l);
+        }
+    }
+};
+
+} // anonymous namespace
+
+class GraphicsTrackerTest : public ::testing::Test {
+public:
+    const uint64_t kTestUsageFlag = GRALLOC_USAGE_SW_WRITE_OFTEN;
+
+    void queueBuffer(FrameQueue *queue) {
+        while (true) {
+            std::shared_ptr<Frame> frame;
+            if (!queue->waitItem(&frame)) {
+                break;
+            }
+            uint64_t bid;
+            if (__builtin_available(android __ANDROID_API_T__, *)) {
+                if (AHardwareBuffer_getId(frame->buffer_, &bid) !=
+                        android::NO_ERROR) {
+                    break;
+                }
+            } else {
+                break;
+            }
+            android::status_t ret = frame->fence_->wait(-1);
+            if (ret != android::NO_ERROR) {
+                mTracker->deallocate(bid, frame->fence_);
+                mBqStat->mDiscarded++;
+                continue;
+            }
+
+            std::shared_ptr<C2GraphicBlock> blk =
+                    _C2BlockFactory::CreateGraphicBlock(frame->buffer_);
+            if (!blk) {
+                mTracker->deallocate(bid, Fence::NO_FENCE);
+                mBqStat->mDiscarded++;
+                continue;
+            }
+            IGraphicBufferProducer::QueueBufferInput input(
+                    0, false,
+                    HAL_DATASPACE_UNKNOWN, android::Rect(0, 0, 1, 1),
+                    NATIVE_WINDOW_SCALING_MODE_FREEZE, 0, Fence::NO_FENCE);
+            IGraphicBufferProducer::QueueBufferOutput output{};
+            c2_status_t res = mTracker->render(
+                    blk->share(C2Rect(1, 1), C2Fence()),
+                    input, &output);
+            if (res != C2_OK) {
+                mTracker->deallocate(bid, Fence::NO_FENCE);
+                mBqStat->mDiscarded++;
+                continue;
+            }
+            if (output.bufferReplaced) {
+                mBqStat->mDropped++;
+            }
+            mBqStat->mQueued++;
+        }
+    }
+
+    void stopTrackerAfterUs(int us) {
+        ::usleep(us);
+        mTracker->stop();
+    }
+
+protected:
+    bool init(int maxDequeueCount) {
+        mTracker = GraphicsTracker::CreateGraphicsTracker(maxDequeueCount);
+        if (!mTracker) {
+            return false;
+        }
+        BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+        if (!mProducer || !mConsumer) {
+            return false;
+        }
+        return true;
+    }
+    bool configure(sp<IProducerListener> producerListener,
+                   sp<IConsumerListener> consumerListener,
+                   int maxAcquiredCount = 1, bool controlledByApp = true) {
+        if (mConsumer->consumerConnect(
+                consumerListener, controlledByApp) != ::android::NO_ERROR) {
+            return false;
+        }
+        if (mConsumer->setMaxAcquiredBufferCount(maxAcquiredCount) != ::android::NO_ERROR) {
+            return false;
+        }
+        IGraphicBufferProducer::QueueBufferOutput qbo{};
+        if (mProducer->connect(producerListener,
+                          NATIVE_WINDOW_API_MEDIA, true, &qbo) != ::android::NO_ERROR) {
+            return false;
+        }
+        if (mProducer->setDequeueTimeout(0) != ::android::NO_ERROR) {
+            return false;
+        }
+        return true;
+    }
+
+    virtual void TearDown() override {
+        mBqStat->log();
+        mBqStat->clear();
+
+        if (mTracker) {
+            mTracker->stop();
+            mTracker.reset();
+        }
+        if (mProducer) {
+            mProducer->disconnect(NATIVE_WINDOW_API_MEDIA);
+        }
+        mProducer.clear();
+        mConsumer.clear();
+    }
+
+protected:
+    std::shared_ptr<BqStatistics> mBqStat = std::make_shared<BqStatistics>();
+    sp<IGraphicBufferProducer> mProducer;
+    sp<IGraphicBufferConsumer> mConsumer;
+    std::shared_ptr<GraphicsTracker> mTracker;
+};
+
+
+TEST_F(GraphicsTrackerTest, AllocateAndBlockedTest) {
+    uint32_t generation = 1;
+    const int maxDequeueCount = 10;
+
+    ASSERT_TRUE(init(maxDequeueCount));
+    ASSERT_TRUE(configure(new TestProducerListener(mTracker, mBqStat, generation),
+                          new DummyConsumerListener()));
+
+    ASSERT_EQ(OK, mProducer->setGenerationNumber(generation));
+    c2_status_t ret = mTracker->configureGraphics(mProducer, generation);
+    ASSERT_EQ(C2_OK, ret);
+    ASSERT_EQ(maxDequeueCount, mTracker->getCurDequeueable());
+
+    AHardwareBuffer *buf;
+    sp<Fence> fence;
+    uint64_t bid;
+
+    // Allocate and check dequeueable
+    if (__builtin_available(android __ANDROID_API_T__, *)) {
+        for (int i = 0; i < maxDequeueCount; ++i) {
+            ret = mTracker->allocate(0, 0, 0, kTestUsageFlag, &buf, &fence);
+            ASSERT_EQ(C2_OK, ret);
+            mBqStat->mDequeued++;
+            ASSERT_EQ(maxDequeueCount - (i + 1), mTracker->getCurDequeueable());
+            ASSERT_EQ(OK, AHardwareBuffer_getId(buf, &bid));
+            ALOGD("alloced : bufferId: %llu", (unsigned long long)bid);
+            AHardwareBuffer_release(buf);
+        }
+    } else {
+        GTEST_SKIP();
+    }
+
+    // Allocate should be blocked
+    ret = mTracker->allocate(0, 0, 0, kTestUsageFlag, &buf, &fence);
+    ALOGD("alloc : err(%d, %d)", ret, C2_BLOCKING);
+    ASSERT_EQ(C2_BLOCKING, ret);
+    mBqStat->mBlocked++;
+    ASSERT_EQ(0, mTracker->getCurDequeueable());
+}
+
+TEST_F(GraphicsTrackerTest, AllocateAndDeallocateTest) {
+    uint32_t generation = 1;
+    const int maxDequeueCount = 10;
+
+    ASSERT_TRUE(init(maxDequeueCount));
+    ASSERT_TRUE(configure(new TestProducerListener(mTracker, mBqStat, generation),
+                          new DummyConsumerListener()));
+
+    ASSERT_EQ(OK, mProducer->setGenerationNumber(generation));
+    c2_status_t ret = mTracker->configureGraphics(mProducer, generation);
+    ASSERT_EQ(C2_OK, ret);
+
+    ASSERT_EQ(maxDequeueCount, mTracker->getCurDequeueable());
+    AHardwareBuffer *buf;
+    sp<Fence> fence;
+    uint64_t bid;
+    std::vector<uint64_t> bids;
+
+    // Allocate and store buffer id
+    if (__builtin_available(android __ANDROID_API_T__, *)) {
+        for (int i = 0; i < maxDequeueCount; ++i) {
+            ret = mTracker->allocate(0, 0, 0, kTestUsageFlag, &buf, &fence);
+            ASSERT_EQ(C2_OK, ret);
+            mBqStat->mDequeued++;
+            ASSERT_EQ(OK, AHardwareBuffer_getId(buf, &bid));
+            bids.push_back(bid);
+            ALOGD("alloced : bufferId: %llu", (unsigned long long)bid);
+            AHardwareBuffer_release(buf);
+        }
+    } else {
+        GTEST_SKIP();
+    }
+
+    // Deallocate and check dequeueable
+    for (int i = 0; i < maxDequeueCount; ++i) {
+        ALOGD("dealloc : bufferId: %llu", (unsigned long long)bids[i]);
+        ret = mTracker->deallocate(bids[i], Fence::NO_FENCE);
+        ASSERT_EQ(C2_OK, ret);
+        ASSERT_EQ(i + 1, mTracker->getCurDequeueable());
+        mBqStat->mDiscarded++;
+    }
+}
+
+TEST_F(GraphicsTrackerTest, DropAndReleaseTest) {
+    uint32_t generation = 1;
+    const int maxDequeueCount = 10;
+
+    ASSERT_TRUE(init(maxDequeueCount));
+    ASSERT_TRUE(configure(new TestProducerListener(mTracker, mBqStat, generation),
+                          new DummyConsumerListener()));
+
+    ASSERT_EQ(OK, mProducer->setGenerationNumber(generation));
+    c2_status_t ret = mTracker->configureGraphics(mProducer, generation);
+    ASSERT_EQ(C2_OK, ret);
+
+    ASSERT_EQ(maxDequeueCount, mTracker->getCurDequeueable());
+
+    FrameQueue frameQueue;
+    std::thread queueThread(&GraphicsTrackerTest::queueBuffer, this, &frameQueue);
+    AHardwareBuffer *buf1, *buf2;
+    sp<Fence> fence1, fence2;
+
+    ret = mTracker->allocate(0, 0, 0, kTestUsageFlag, &buf1, &fence1);
+    ASSERT_EQ(C2_OK, ret);
+    mBqStat->mDequeued++;
+    ASSERT_EQ(maxDequeueCount - 1, mTracker->getCurDequeueable());
+
+    ret = mTracker->allocate(0, 0, 0, kTestUsageFlag, &buf2, &fence2);
+    ASSERT_EQ(C2_OK, ret);
+    mBqStat->mDequeued++;
+    ASSERT_EQ(maxDequeueCount - 2, mTracker->getCurDequeueable());
+
+    // Queue two buffers without consuming, one should be dropped
+    ASSERT_TRUE(frameQueue.queueItem(buf1, fence1));
+    ASSERT_TRUE(frameQueue.queueItem(buf2, fence2));
+
+    frameQueue.stop(true);
+    if (queueThread.joinable()) {
+        queueThread.join();
+    }
+
+    ASSERT_EQ(maxDequeueCount - 1, mTracker->getCurDequeueable());
+
+    // Consume one buffer and release
+    BufferItem item;
+    ASSERT_EQ(OK, mConsumer->acquireBuffer(&item, 0));
+    ASSERT_EQ(OK, mConsumer->releaseBuffer(item.mSlot, item.mFrameNumber,
+            EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence));
+    // Nothing to consume
+    ASSERT_NE(OK, mConsumer->acquireBuffer(&item, 0));
+
+    ASSERT_EQ(maxDequeueCount, mTracker->getCurDequeueable());
+    ASSERT_EQ(1, mBqStat->mReleased);
+    ASSERT_EQ(1, mBqStat->mDropped);
+}
+
+TEST_F(GraphicsTrackerTest, RenderTest) {
+    uint32_t generation = 1;
+    const int maxDequeueCount = 10;
+    const int maxNumAlloc = 20;
+
+    ASSERT_TRUE(init(maxDequeueCount));
+    ASSERT_TRUE(configure(new TestProducerListener(mTracker, mBqStat, generation),
+                          new TestConsumerListener(mConsumer), 1, false));
+
+    ASSERT_EQ(OK, mProducer->setGenerationNumber(generation));
+
+    ASSERT_EQ(C2_OK, mTracker->configureGraphics(mProducer, generation));
+    ASSERT_EQ(C2_OK, mTracker->configureMaxDequeueCount(maxDequeueCount));
+
+    int waitFd = -1;
+    ASSERT_EQ(C2_OK, mTracker->getWaitableFd(&waitFd));
+    C2Fence waitFence = _C2FenceFactory::CreatePipeFence(waitFd);
+
+
+    FrameQueue frameQueue;
+    std::thread queueThread(&GraphicsTrackerTest::queueBuffer, this, &frameQueue);
+
+    int numAlloc = 0;
+
+    while (numAlloc < maxNumAlloc) {
+        AHardwareBuffer *buf;
+        sp<Fence> fence;
+        c2_status_t ret = mTracker->allocate(0, 0, 0, kTestUsageFlag, &buf, &fence);
+        if (ret == C2_BLOCKING) {
+            mBqStat->mBlocked++;
+            c2_status_t waitRes = waitFence.wait(3000000000);
+            if (waitRes == C2_TIMED_OUT || waitRes == C2_OK) {
+                continue;
+            }
+            ALOGE("alloc wait failed: c2_err(%d)", waitRes);
+            break;
+        }
+        if (ret != C2_OK) {
+            ALOGE("alloc error: c2_err(%d)", ret);
+            break;
+        }
+        mBqStat->mDequeued++;
+        if (!frameQueue.queueItem(buf, fence)) {
+            ALOGE("queue to render failed");
+            break;
+        }
+        ++numAlloc;
+    }
+
+    frameQueue.stop(true);
+    // Wait more than enough time (1 sec) to make sure all queued frames are rendered.
+    ::usleep(1000000);
+
+    if (queueThread.joinable()) {
+        queueThread.join();
+    }
+    ASSERT_EQ(numAlloc, maxNumAlloc);
+    ASSERT_EQ(numAlloc, mBqStat->mDequeued);
+    ASSERT_EQ(mBqStat->mDequeued, mBqStat->mQueued);
+    ASSERT_EQ(mBqStat->mDequeued, mBqStat->mReleased + mBqStat->mDropped);
+}
+
+TEST_F(GraphicsTrackerTest, StopAndWaitTest) {
+    uint32_t generation = 1;
+    const int maxDequeueCount = 2;
+
+    ASSERT_TRUE(init(maxDequeueCount));
+    ASSERT_TRUE(configure(new TestProducerListener(mTracker, mBqStat, generation),
+                          new TestConsumerListener(mConsumer), 1, false));
+
+    ASSERT_EQ(OK, mProducer->setGenerationNumber(generation));
+
+    ASSERT_EQ(C2_OK, mTracker->configureGraphics(mProducer, generation));
+    ASSERT_EQ(C2_OK, mTracker->configureMaxDequeueCount(maxDequeueCount));
+
+    int waitFd = -1;
+    ASSERT_EQ(C2_OK, mTracker->getWaitableFd(&waitFd));
+    C2Fence waitFence = _C2FenceFactory::CreatePipeFence(waitFd);
+
+    AHardwareBuffer *buf1, *buf2;
+    sp<Fence> fence;
+
+    ASSERT_EQ(C2_OK, mTracker->allocate(0, 0, 0, kTestUsageFlag, &buf1, &fence));
+    mBqStat->mDequeued++;
+    AHardwareBuffer_release(buf1);
+
+    ASSERT_EQ(C2_OK, mTracker->allocate(0, 0, 0, kTestUsageFlag, &buf2, &fence));
+    mBqStat->mDequeued++;
+    AHardwareBuffer_release(buf2);
+
+    ASSERT_EQ(0, mTracker->getCurDequeueable());
+    ASSERT_EQ(C2_TIMED_OUT, waitFence.wait(3000000000));
+
+    std::thread stopThread(&GraphicsTrackerTest::stopTrackerAfterUs, this, 500000);
+    ASSERT_EQ(C2_BAD_STATE, waitFence.wait(3000000000));
+
+    if (stopThread.joinable()) {
+        stopThread.join();
+    }
+}
+
+TEST_F(GraphicsTrackerTest, SurfaceChangeTest) {
+    uint32_t generation = 1;
+    const int maxDequeueCount = 10;
+
+    const int maxNumAlloc = 20;
+
+    const int firstPassAlloc = 12;
+    const int firstPassRender = 8;
+
+    ASSERT_TRUE(init(maxDequeueCount));
+    ASSERT_TRUE(configure(new TestProducerListener(mTracker, mBqStat, generation),
+                          new TestConsumerListener(mConsumer), 1, false));
+
+    ASSERT_EQ(OK, mProducer->setGenerationNumber(generation));
+
+    ASSERT_EQ(C2_OK, mTracker->configureGraphics(mProducer, generation));
+    ASSERT_EQ(C2_OK, mTracker->configureMaxDequeueCount(maxDequeueCount));
+
+    int waitFd = -1;
+    ASSERT_EQ(C2_OK, mTracker->getWaitableFd(&waitFd));
+    C2Fence waitFence = _C2FenceFactory::CreatePipeFence(waitFd);
+
+    AHardwareBuffer *bufs[maxNumAlloc];
+    sp<Fence> fences[maxNumAlloc];
+
+    FrameQueue frameQueue;
+    std::thread queueThread(&GraphicsTrackerTest::queueBuffer, this, &frameQueue);
+    int numAlloc = 0;
+
+    for (int i = 0; i < firstPassRender; ++i) {
+        ASSERT_EQ(C2_OK, mTracker->allocate(
+                0, 0, 0, kTestUsageFlag, &bufs[i], &fences[i]));
+        mBqStat->mDequeued++;
+        numAlloc++;
+        ASSERT_EQ(true, frameQueue.queueItem(bufs[i], fences[i]));
+    }
+
+    while (numAlloc < firstPassAlloc) {
+        c2_status_t ret = mTracker->allocate(
+                0, 0, 0, kTestUsageFlag, &bufs[numAlloc], &fences[numAlloc]);
+        if (ret == C2_BLOCKING) {
+            mBqStat->mBlocked++;
+            c2_status_t waitRes = waitFence.wait(3000000000);
+            if (waitRes == C2_TIMED_OUT || waitRes == C2_OK) {
+                continue;
+            }
+            ALOGE("alloc wait failed: c2_err(%d)", waitRes);
+            break;
+        }
+        if (ret != C2_OK) {
+            ALOGE("alloc error: c2_err(%d)", ret);
+            break;
+        }
+        mBqStat->mDequeued++;
+        numAlloc++;
+    }
+    ASSERT_EQ(numAlloc, firstPassAlloc);
+
+    // switching surface
+    sp<IGraphicBufferProducer> oldProducer = mProducer;
+    sp<IGraphicBufferConsumer> oldConsumer = mConsumer;
+    mProducer.clear();
+    mConsumer.clear();
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+    ASSERT_TRUE((bool)mProducer && (bool)mConsumer);
+
+    generation += 1;
+
+    ASSERT_TRUE(configure(new TestProducerListener(mTracker, mBqStat, generation),
+                          new TestConsumerListener(mConsumer), 1, false));
+    ASSERT_EQ(OK, mProducer->setGenerationNumber(generation));
+    ASSERT_EQ(C2_OK, mTracker->configureGraphics(mProducer, generation));
+    ASSERT_EQ(C2_OK, mTracker->configureMaxDequeueCount(maxDequeueCount));
+
+    ASSERT_EQ(OK, oldProducer->disconnect(NATIVE_WINDOW_API_MEDIA));
+    oldProducer.clear();
+    oldConsumer.clear();
+
+    for (int i = firstPassRender ; i < firstPassAlloc; ++i) {
+        ASSERT_EQ(true, frameQueue.queueItem(bufs[i], fences[i]));
+    }
+
+    while (numAlloc < maxNumAlloc) {
+        AHardwareBuffer *buf;
+        sp<Fence> fence;
+        c2_status_t ret = mTracker->allocate(0, 0, 0, kTestUsageFlag, &buf, &fence);
+        if (ret == C2_BLOCKING) {
+            mBqStat->mBlocked++;
+            c2_status_t waitRes = waitFence.wait(3000000000);
+            if (waitRes == C2_TIMED_OUT || waitRes == C2_OK) {
+                continue;
+            }
+            ALOGE("alloc wait failed: c2_err(%d)", waitRes);
+            break;
+        }
+        if (ret != C2_OK) {
+            ALOGE("alloc error: c2_err(%d)", ret);
+            break;
+        }
+        mBqStat->mDequeued++;
+        if (!frameQueue.queueItem(buf, fence)) {
+            ALOGE("queue to render failed");
+            break;
+        }
+        ++numAlloc;
+    }
+
+    ASSERT_EQ(numAlloc, maxNumAlloc);
+
+    frameQueue.stop(true);
+    // Wait more than enough time (1 sec) to make sure all queued frames are rendered.
+    ::usleep(1000000);
+
+    if (queueThread.joinable()) {
+        queueThread.join();
+    }
+    // mReleased should not be checked: IProducerListener::onBufferReleased()
+    // from the previous Surface could be missing after a new Surface was
+    // configured. Instead, check the # of dequeueable buffers and queueBuffer() calls.
+    ASSERT_EQ(numAlloc, mBqStat->mQueued);
+    ASSERT_EQ(maxDequeueCount, mTracker->getCurDequeueable());
+
+    for (int i = 0; i < maxDequeueCount; ++i) {
+        AHardwareBuffer *buf;
+        sp<Fence> fence;
+
+        ASSERT_EQ(C2_OK, mTracker->allocate(
+                0, 0, 0, kTestUsageFlag, &buf, &fence));
+        AHardwareBuffer_release(buf);
+        mBqStat->mDequeued++;
+        numAlloc++;
+    }
+    ASSERT_EQ(C2_BLOCKING, mTracker->allocate(
+            0, 0, 0, kTestUsageFlag, &bufs[0], &fences[0]));
+}
+
+TEST_F(GraphicsTrackerTest, maxDequeueIncreaseTest) {
+    uint32_t generation = 1;
+    int maxDequeueCount = 10;
+    int dequeueIncrease = 4;
+
+    int numAlloc = 0;
+
+    ASSERT_TRUE(init(maxDequeueCount));
+    ASSERT_TRUE(configure(new TestProducerListener(mTracker, mBqStat, generation),
+                          new TestConsumerListener(mConsumer), 1, false));
+
+    ASSERT_EQ(OK, mProducer->setGenerationNumber(generation));
+    ASSERT_EQ(C2_OK, mTracker->configureGraphics(mProducer, generation));
+
+    int waitFd = -1;
+    ASSERT_EQ(C2_OK, mTracker->getWaitableFd(&waitFd));
+    C2Fence waitFence = _C2FenceFactory::CreatePipeFence(waitFd);
+
+    AHardwareBuffer *buf;
+    sp<Fence> fence;
+    uint64_t bids[maxDequeueCount];
+    if (__builtin_available(android __ANDROID_API_T__, *)) {
+        for (int i = 0; i < maxDequeueCount; ++i) {
+            ASSERT_EQ(C2_OK, waitFence.wait(1000000000));
+            ASSERT_EQ(C2_OK, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+            ASSERT_EQ(OK, AHardwareBuffer_getId(buf, &bids[i]));
+            AHardwareBuffer_release(buf);
+            mBqStat->mDequeued++;
+            numAlloc++;
+        }
+    } else {
+        GTEST_SKIP();
+    }
+    ASSERT_EQ(C2_TIMED_OUT, waitFence.wait(1000000000));
+    ASSERT_EQ(C2_BLOCKING, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+
+    ASSERT_EQ(C2_OK, mTracker->deallocate(bids[0], Fence::NO_FENCE));
+    mBqStat->mDiscarded++;
+
+    maxDequeueCount += dequeueIncrease;
+    ASSERT_EQ(C2_OK, mTracker->configureMaxDequeueCount(maxDequeueCount));
+    for (int i = 0; i < dequeueIncrease + 1; ++i) {
+        ASSERT_EQ(C2_OK, waitFence.wait(1000000000));
+        ASSERT_EQ(C2_OK, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+        AHardwareBuffer_release(buf);
+        mBqStat->mDequeued++;
+        numAlloc++;
+    }
+    ASSERT_EQ(C2_TIMED_OUT, waitFence.wait(1000000000));
+    ASSERT_EQ(C2_BLOCKING, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+
+    ASSERT_EQ(C2_OK, mTracker->deallocate(bids[1], Fence::NO_FENCE));
+    mBqStat->mDiscarded++;
+
+    maxDequeueCount += dequeueIncrease;
+    ASSERT_EQ(C2_OK, mTracker->configureMaxDequeueCount(maxDequeueCount));
+    for (int i = 0; i < dequeueIncrease + 1; ++i) {
+        ASSERT_EQ(C2_OK, waitFence.wait(1000000000));
+        ASSERT_EQ(C2_OK, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+        AHardwareBuffer_release(buf);
+        mBqStat->mDequeued++;
+        numAlloc++;
+    }
+    ASSERT_EQ(C2_TIMED_OUT, waitFence.wait(1000000000));
+    ASSERT_EQ(C2_BLOCKING, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+}
+
+TEST_F(GraphicsTrackerTest, maxDequeueDecreaseTest) {
+    uint32_t generation = 1;
+    int maxDequeueCount = 12;
+    int dequeueDecrease = 4;
+
+    int numAlloc = 0;
+
+    ASSERT_TRUE(init(maxDequeueCount));
+    ASSERT_TRUE(configure(new TestProducerListener(mTracker, mBqStat, generation),
+                          new TestConsumerListener(mConsumer), 1, false));
+
+    ASSERT_EQ(OK, mProducer->setGenerationNumber(generation));
+    ASSERT_EQ(C2_OK, mTracker->configureGraphics(mProducer, generation));
+
+    int waitFd = -1;
+    ASSERT_EQ(C2_OK, mTracker->getWaitableFd(&waitFd));
+    C2Fence waitFence = _C2FenceFactory::CreatePipeFence(waitFd);
+
+    AHardwareBuffer *buf;
+    sp<Fence> fence;
+    uint64_t bids[maxDequeueCount];
+    if (__builtin_available(android __ANDROID_API_T__, *)) {
+        for (int i = 0; i < maxDequeueCount; ++i) {
+            ASSERT_EQ(C2_OK, waitFence.wait(1000000000));
+            ASSERT_EQ(C2_OK, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+            ASSERT_EQ(OK, AHardwareBuffer_getId(buf, &bids[i]));
+            AHardwareBuffer_release(buf);
+            mBqStat->mDequeued++;
+            numAlloc++;
+        }
+    } else {
+        GTEST_SKIP();
+    }
+    ASSERT_EQ(C2_TIMED_OUT, waitFence.wait(1000000000));
+    ASSERT_EQ(C2_BLOCKING, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+
+    int discardIdx = 0;
+    maxDequeueCount -= dequeueDecrease;
+    ASSERT_EQ(C2_OK, mTracker->configureMaxDequeueCount(maxDequeueCount));
+    for (int i = 0; i < dequeueDecrease + 1; ++i) {
+        ASSERT_EQ(C2_TIMED_OUT, waitFence.wait(1000000000));
+        ASSERT_EQ(C2_BLOCKING, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+        ASSERT_EQ(C2_OK, mTracker->deallocate(bids[discardIdx++], Fence::NO_FENCE));
+        mBqStat->mDiscarded++;
+    }
+    ASSERT_EQ(C2_OK, waitFence.wait(1000000000));
+    ASSERT_EQ(C2_OK, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+    mBqStat->mDequeued++;
+
+    ASSERT_EQ(C2_OK, mTracker->deallocate(bids[discardIdx++], Fence::NO_FENCE));
+    mBqStat->mDiscarded++;
+    ASSERT_EQ(C2_OK, mTracker->deallocate(bids[discardIdx++], Fence::NO_FENCE));
+    mBqStat->mDiscarded++;
+    maxDequeueCount -= dequeueDecrease;
+
+    ASSERT_EQ(C2_OK, mTracker->configureMaxDequeueCount(maxDequeueCount));
+    for (int i = 0; i < dequeueDecrease - 1; ++i) {
+        ASSERT_EQ(C2_TIMED_OUT, waitFence.wait(1000000000));
+        ASSERT_EQ(C2_BLOCKING, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+        ASSERT_EQ(C2_OK, mTracker->deallocate(bids[discardIdx++], Fence::NO_FENCE));
+        mBqStat->mDiscarded++;
+    }
+    ASSERT_EQ(C2_OK, waitFence.wait(1000000000));
+    ASSERT_EQ(C2_OK, mTracker->allocate( 0, 0, 0, kTestUsageFlag, &buf, &fence));
+    mBqStat->mDequeued++;
+}
diff --git a/media/codec2/vndk/C2Fence.cpp b/media/codec2/vndk/C2Fence.cpp
index 52ebe25..5d50fc3 100644
--- a/media/codec2/vndk/C2Fence.cpp
+++ b/media/codec2/vndk/C2Fence.cpp
@@ -335,7 +335,8 @@
             p.reset();
         }
     } else {
-        ALOGE("Create sync fence from invalid fd");
+        ALOGV("Create sync fence from invalid fd");
+        return C2Fence();
     }
     return C2Fence(p);
 }
@@ -531,7 +532,9 @@
             p = SyncFenceImpl::CreateFromNativeHandle(handle);
             break;
         default:
-            ALOGD("Unsupported fence type %d", type);
+            ALOGV("Unsupported fence type %d", type);
+            // If this is a malformed handle, close it here.
+            (void) native_handle_close(handle);
             // return a null-fence in this case
             break;
     }
diff --git a/media/codec2/vndk/platform/C2IgbaBuffer.cpp b/media/codec2/vndk/platform/C2IgbaBuffer.cpp
index 2051e8f..eafdb22 100644
--- a/media/codec2/vndk/platform/C2IgbaBuffer.cpp
+++ b/media/codec2/vndk/platform/C2IgbaBuffer.cpp
@@ -193,7 +193,7 @@
             width, height, format, usage, kBlockingFetchTimeoutNs, &origId, block, &fence);
 
     if (res == C2_BLOCKING) {
-        return C2_TIMED_OUT;
+        return C2_BLOCKING;
     }
     if (res != C2_OK) {
         return res;