/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
//#define LOG_NDEBUG 0
#define LOG_TAG "GraphicsTracker"
#include <fcntl.h>
#include <unistd.h>

#include <media/stagefright/foundation/ADebug.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <vndk/hardware_buffer.h>

#include <C2BlockInternal.h>
#include <codec2/aidl/GraphicsTracker.h>

namespace aidl::android::hardware::media::c2::implementation {

namespace {

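// Bounds for the max dequeued buffer count that can be configured on this
// tracker. The upper bound stays two below the total BufferQueue slot count,
// presumably to leave headroom for buffers held on the consumer side.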
static constexpr int kMaxDequeueMin = 1;
static constexpr int kMaxDequeueMax = ::android::BufferQueueDefs::NUM_BUFFER_SLOTS - 2;

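// Retrieves the unique AHardwareBuffer id backing an AHardwareBuffer-based
// graphic block. Returns C2_OMITTED on releases older than Android T, where
// AHardwareBuffer_getId() is not available.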
c2_status_t retrieveAHardwareBufferId(const C2ConstGraphicBlock &blk, uint64_t *bid) {
    std::shared_ptr<const _C2BlockPoolData> bpData = _C2BlockFactory::GetGraphicBlockPoolData(blk);
    if (bpData->getType() != _C2BlockPoolData::TYPE_AHWBUFFER) {
        return C2_BAD_VALUE;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf;
        if (!_C2BlockFactory::GetAHardwareBuffer(bpData, &pBuf)) {
            return C2_CORRUPTED;
        }
        int ret = AHardwareBuffer_getId(pBuf, bid);
        if (ret != ::android::OK) {
            return C2_CORRUPTED;
        }
        return C2_OK;
    } else {
        return C2_OMITTED;
    }
}

} // anonymous namespace

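// BufferItem wraps a single AHardwareBuffer handed out by this tracker,
// together with the producer generation, the IGBP slot it came from (-1 when
// allocated directly), the usage bits and the latest fence. The wrapped
// buffer is acquired on construction and released on destruction.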
GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, int slot, const sp<GraphicBuffer>& buf, const sp<Fence>& fence) :
        mInit{false}, mGeneration{generation}, mSlot{slot} {
    if (!buf) {
        return;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf = AHardwareBuffer_from_GraphicBuffer(buf.get());
        int ret = AHardwareBuffer_getId(pBuf, &mId);
        if (ret != ::android::OK) {
            return;
        }
        mUsage = buf->getUsage();
        AHardwareBuffer_acquire(pBuf);
        mBuf = pBuf;
        mFence = fence;
        mInit = true;
    }
}

GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, AHardwareBuffer *pBuf, uint64_t usage) :
        mInit{true}, mGeneration{generation}, mSlot{-1},
        mBuf{pBuf}, mUsage{usage},
        mFence{Fence::NO_FENCE} {
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        int ret = AHardwareBuffer_getId(mBuf, &mId);
        if (ret != ::android::OK) {
            mInit = false;
            mBuf = nullptr;
            return;
        }
    }
    AHardwareBuffer_acquire(mBuf);
}

GraphicsTracker::BufferItem::~BufferItem() {
    if (mInit) {
        AHardwareBuffer_release(mBuf);
    }
}

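// Clones this buffer into a new BufferItem carrying the given generation and
// the union of the old and requested usage bits, so it can be attached to a
// different (newer) surface. The clone shares the underlying native handle.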
std::shared_ptr<GraphicsTracker::BufferItem> GraphicsTracker::BufferItem::migrateBuffer(
        uint64_t newUsage, uint32_t newGeneration) {
    if (!mInit) {
        return nullptr;
    }
    newUsage |= mUsage;
    uint64_t ahbUsage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(newUsage);
    AHardwareBuffer_Desc desc;
    AHardwareBuffer_describe(mBuf, &desc);
    // TODO: we need well-established buffer migration features from graphics.
    // (b/273776738)
    desc.usage = ahbUsage;
    const native_handle_t *handle = AHardwareBuffer_getNativeHandle(mBuf);
    if (!handle) {
        return nullptr;
    }

    AHardwareBuffer *newBuf;
    int err = AHardwareBuffer_createFromHandle(&desc, handle,
                                               AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
                                               &newBuf);
    if (err != ::android::NO_ERROR) {
        return nullptr;
    }

    std::shared_ptr<BufferItem> newBuffer =
            std::make_shared<BufferItem>(newGeneration, newBuf, newUsage);
    AHardwareBuffer_release(newBuf);
    return newBuffer;
}

sp<GraphicBuffer> GraphicsTracker::BufferItem::getGraphicBuffer() {
    if (!mInit) {
        return nullptr;
    }
    GraphicBuffer *gb = ::android::AHardwareBuffer_to_GraphicBuffer(mBuf);
    if (!gb) {
        return nullptr;
    }
    gb->setGenerationNumber(mGeneration);
    return gb;
}

GraphicsTracker::BufferCache::~BufferCache() {
    ALOGV("BufferCache destruction: generation(%d), igbp(%d)", mGeneration, (bool)mIgbp);
}

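// Each cached IGBP slot has a BlockedSlot guard. A slot is blocked while the
// buffer occupying it is being rendered or deallocated, and waitOnSlot()
// makes a new dequeue from that slot wait until the pending operation
// completes and unblockSlot() is called.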
void GraphicsTracker::BufferCache::waitOnSlot(int slot) {
    // TODO: log
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    while (p->blocked) {
        p->cv.wait(l);
    }
}

void GraphicsTracker::BufferCache::blockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    ALOGV("block slot %d", slot);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = true;
}

void GraphicsTracker::BufferCache::unblockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    ALOGV("unblock slot %d", slot);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = false;
    l.unlock();
    p->cv.notify_one();
}

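// Dequeueability is exposed to clients through a non-blocking pipe: each
// dequeueable buffer corresponds to one byte in the pipe, so the read end can
// be handed out (via getWaitableFd()) and polled as a waitable object. The
// constructor seeds the pipe with the initial dequeueable count.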
GraphicsTracker::GraphicsTracker(int maxDequeueCount)
    : mBufferCache(new BufferCache()), mMaxDequeue{maxDequeueCount},
      mMaxDequeueRequested{maxDequeueCount},
      mMaxDequeueCommitted{maxDequeueCount},
      mMaxDequeueRequestedSeqId{0UL}, mMaxDequeueCommittedSeqId{0ULL},
      mDequeueable{maxDequeueCount},
      mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
      mInConfig{false}, mStopped{false} {
    if (maxDequeueCount < kMaxDequeueMin) {
        mMaxDequeue = kMaxDequeueMin;
        mMaxDequeueRequested = kMaxDequeueMin;
        mMaxDequeueCommitted = kMaxDequeueMin;
        mDequeueable = kMaxDequeueMin;
    } else if (maxDequeueCount > kMaxDequeueMax) {
        mMaxDequeue = kMaxDequeueMax;
        mMaxDequeueRequested = kMaxDequeueMax;
        mMaxDequeueCommitted = kMaxDequeueMax;
        mDequeueable = kMaxDequeueMax;
    }
    int pipefd[2] = { -1, -1};
    int ret = ::pipe2(pipefd, O_CLOEXEC | O_NONBLOCK);

    mReadPipeFd.reset(pipefd[0]);
    mWritePipeFd.reset(pipefd[1]);

    mEventQueueThread = std::thread([this](){processEvent();});
    writeIncDequeueable(mDequeueable);

    CHECK(ret >= 0);
    CHECK(mEventQueueThread.joinable());
}

GraphicsTracker::~GraphicsTracker() {
    stop();
    if (mEventQueueThread.joinable()) {
        mEventQueueThread.join();
    }
}

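// Shrinks mMaxDequeue toward a smaller mMaxDequeueRequested as dequeued
// buffers are returned. Must be called with mLock held, at a point where
// mDequeueable is about to be incremented by one; sets *updateDequeue when
// the shrunken count still has to be committed to the IGBP.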
bool GraphicsTracker::adjustDequeueConfLocked(bool *updateDequeue) {
    // TODO: can't we adjust during config? not committing it may be safe?
    *updateDequeue = false;
    if (!mInConfig && mMaxDequeueRequested < mMaxDequeue) {
        int delta = mMaxDequeue - mMaxDequeueRequested;
        // Since we are supposed to increase mDequeueable by one already
        int adjustable = mDequeueable + 1;
        if (adjustable >= delta) {
            mMaxDequeue = mMaxDequeueRequested;
            mDequeueable -= (delta - 1);
        } else {
            mMaxDequeue -= adjustable;
            mDequeueable = 0;
        }
        if (mMaxDequeueRequested == mMaxDequeue && mMaxDequeueRequested != mMaxDequeueCommitted) {
            *updateDequeue = true;
        }
        return true;
    }
    return false;
}

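// Switches the tracker to a new surface (IGBP) and generation. The currently
// committed max dequeue count is re-applied to the new surface, and a fresh
// BufferCache replaces the old one; buffers dequeued from the previous
// surface keep their old generation and are migrated on render().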
c2_status_t GraphicsTracker::configureGraphics(
        const sp<IGraphicBufferProducer>& igbp, uint32_t generation) {
    std::shared_ptr<BufferCache> prevCache;
    int prevDequeueCommitted;

    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = true;
        prevCache = mBufferCache;
        prevDequeueCommitted = mMaxDequeueCommitted;
    }
    // NOTE: Switching to the same surface is blocked by MediaCodec.
    // Even if attempted, switching to the same surface might not work, since
    // the disconnect() from the old surface in MediaCodec and the allocation
    // from the new surface in GraphicsTracker cannot be synchronized properly.
    uint64_t bqId{0ULL};
    ::android::status_t ret = ::android::OK;
    if (igbp) {
        ret = igbp->getUniqueId(&bqId);
    }
    if (ret != ::android::OK || prevCache->mGeneration == generation || prevCache->mBqId == bqId) {
        return C2_BAD_VALUE;
    }
    ret = igbp->setMaxDequeuedBufferCount(prevDequeueCommitted);
    if (ret != ::android::OK) {
        // TODO: sort out the error from igbp and return an error accordingly.
        return C2_CORRUPTED;
    }
    std::shared_ptr<BufferCache> newCache = std::make_shared<BufferCache>(bqId, generation, igbp);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        mBufferCache = newCache;
    }
    return C2_OK;
}

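// Applies a new max dequeued buffer count requested by the client. If buffers
// currently dequeued prevent the full reduction, only the achievable part is
// committed to the IGBP now and the rest is applied later through
// adjustDequeueConfLocked()/updateDequeueConf() as buffers come back.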
c2_status_t GraphicsTracker::configureMaxDequeueCount(int maxDequeueCount) {
    std::shared_ptr<BufferCache> cache;

    if (maxDequeueCount < kMaxDequeueMin || maxDequeueCount > kMaxDequeueMax) {
        ALOGE("max dequeue count %d is not valid", maxDequeueCount);
        return C2_BAD_VALUE;
    }

    // max dequeue count which can be committed to IGBP.
    // (Sometimes maxDequeueCount cannot be committed if the number of
    // currently dequeued buffers is larger.)
    int maxDequeueToCommit;
    // max dequeue count which is committed to IGBP currently
    // (actually mMaxDequeueCommitted, but needs to be read outside the lock.)
    int curMaxDequeueCommitted;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeueRequested == maxDequeueCount) {
            return C2_OK;
        }
        mInConfig = true;
        mMaxDequeueRequested = maxDequeueCount;
        cache = mBufferCache;
        curMaxDequeueCommitted = mMaxDequeueCommitted;
        if (mMaxDequeue <= maxDequeueCount) {
            maxDequeueToCommit = maxDequeueCount;
        } else {
            // Since mDequeueable is decreasing,
            // a delivered ready-to-allocate event may not be fulfilled.
            // Another wait via the waitable object may be necessary in that case.
            int delta = mMaxDequeue - maxDequeueCount;
            if (delta <= mDequeueable) {
                maxDequeueToCommit = maxDequeueCount;
                mDequeueable -= delta;
            } else {
                maxDequeueToCommit = mMaxDequeue - mDequeueable;
                mDequeueable = 0;
            }
        }
    }

    bool committed = true;
    if (cache->mIgbp && maxDequeueToCommit != curMaxDequeueCommitted) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(maxDequeueToCommit);
        committed = (ret == ::android::OK);
        if (!committed) {
            // This should not happen.
            ALOGE("dequeueCount failed with error(%d)", (int)ret);
        }
    }

    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            mMaxDequeueCommitted = maxDequeueToCommit;
            int delta = mMaxDequeueCommitted - mMaxDequeue;
            if (delta > 0) {
                mDequeueable += delta;
                l.unlock();
                writeIncDequeueable(delta);
            }
        }
    }

    if (!committed) {
        return C2_CORRUPTED;
    }
    return C2_OK;
}

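// Called when a pending decrease of the max dequeue count (recorded by
// adjustDequeueConfLocked()) can finally be committed to the IGBP. Cached
// slots that are no longer backed by dequeued buffers are dropped when the
// IGBP capacity shrinks.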
void GraphicsTracker::updateDequeueConf() {
    std::shared_ptr<BufferCache> cache;
    int dequeueCommit;
    ALOGV("trying to update max dequeue count");
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeue == mMaxDequeueRequested && mMaxDequeueCommitted != mMaxDequeueRequested) {
            dequeueCommit = mMaxDequeue;
            mInConfig = true;
            cache = mBufferCache;
        } else {
            return;
        }
    }
    bool committed = true;
    if (cache->mIgbp) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(dequeueCommit);
        committed = (ret == ::android::OK);
        if (!committed) {
            // This should not happen.
            ALOGE("dequeueCount failed with error(%d)", (int)ret);
        }
    }
    int cleared = 0;
    {
        // cache == mCache here, since we locked config.
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            if (cache->mIgbp && dequeueCommit < mMaxDequeueCommitted) {
                // we are shrinking the number of buffers, so clear the cache.
                for (auto it = cache->mBuffers.begin(); it != cache->mBuffers.end();) {
                    uint64_t bid = it->second->mId;
                    if (mDequeued.count(bid) == 0 || mDeallocating.count(bid) > 0) {
                        ++cleared;
                        it = cache->mBuffers.erase(it);
                    } else {
                        ++it;
                    }
                }
            }
            mMaxDequeueCommitted = dequeueCommit;
        }
    }
    if (cleared > 0) {
        ALOGD("%d buffers are cleared from cache, due to IGBP capacity change", cleared);
    }
}

int GraphicsTracker::getCurDequeueable() {
    std::unique_lock<std::mutex> l(mLock);
    return mDequeueable;
}

void GraphicsTracker::stop() {
    bool expected = false;
    std::unique_lock<std::mutex> l(mEventLock);
    bool updated = mStopped.compare_exchange_strong(expected, true);
    if (updated) {
        int writeFd = mWritePipeFd.release();
        ::close(writeFd);
        int readFd = mReadPipeFd.release();
        ::close(readFd);
        mEventCv.notify_one();
    }
}

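// Adds `inc` dequeueable credits by writing that many bytes to the pipe. On a
// partial write or write error (not expected in practice), the remainder is
// recorded in mIncDequeueable and handed off to the event thread to retry.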
void GraphicsTracker::writeIncDequeueable(int inc) {
    CHECK(inc > 0 && inc < kMaxDequeueMax);
    thread_local char buf[kMaxDequeueMax];
    int diff = 0;
    {
        std::unique_lock<std::mutex> l(mEventLock);
        if (mStopped) {
            return;
        }
        CHECK(mWritePipeFd.get() >= 0);
        int ret = ::write(mWritePipeFd.get(), buf, inc);
        if (ret == inc) {
            return;
        }
        diff = ret < 0 ? inc : inc - ret;

        // Partial write or EINTR. This will not happen in a real scenario.
        mIncDequeueable += diff;
        if (mIncDequeueable > 0) {
            l.unlock();
            mEventCv.notify_one();
            ALOGW("updating dequeueable to pipefd pending");
        }
    }
}

void GraphicsTracker::processEvent() {
    // This is for partial/failed writes to the writing end.
    // This may not happen in a real scenario.
    thread_local char buf[kMaxDequeueMax];
    while (true) {
        std::unique_lock<std::mutex> l(mEventLock);
        if (mStopped) {
            break;
        }
        if (mIncDequeueable > 0) {
            int inc = mIncDequeueable > kMaxDequeueMax ? kMaxDequeueMax : mIncDequeueable;
            int ret = ::write(mWritePipeFd.get(), buf, inc);
            int written = ret <= 0 ? 0 : ret;
            mIncDequeueable -= written;
            if (mIncDequeueable > 0) {
                l.unlock();
                if (ret < 0) {
                    ALOGE("write to writing end failed %d", errno);
                } else {
                    ALOGW("partial write %d(%d)", inc, written);
                }
                continue;
            }
        }
        mEventCv.wait(l);
    }
}

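// Returns a dup of the pipe's read end as the waitable object for the client.
// When the fd polls readable, at least one buffer should be dequeueable.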
c2_status_t GraphicsTracker::getWaitableFd(int *pipeFd) {
    *pipeFd = ::dup(mReadPipeFd.get());
    if (*pipeFd < 0) {
        if (mReadPipeFd.get() < 0) {
            return C2_BAD_STATE;
        }
        // dup error
        ALOGE("dup() for the reading end failed %d", errno);
        return C2_NO_MEMORY;
    }
    return C2_OK;
}

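// Allocation is split into three steps: requestAllocate() consumes one
// dequeueable credit (a byte from the pipe), _allocate() obtains the actual
// buffer (from the IGBP or by direct AHardwareBuffer allocation), and
// commitAllocate() records the result, caching IGBP-backed buffers or
// returning the credit on failure.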
c2_status_t GraphicsTracker::requestAllocate(std::shared_ptr<BufferCache> *cache) {
    std::lock_guard<std::mutex> l(mLock);
    if (mDequeueable > 0) {
        char buf[1];
        int ret = ::read(mReadPipeFd.get(), buf, 1);
        if (ret < 0) {
            if (errno == EINTR) {
                // Do we really need to care about cancellation due to signal handling?
                return C2_CANCELED;
            }
            if (errno == EAGAIN) {
                // proper usage of the waitable object should not return this,
                // but there could be alloc requests from the HAL ignoring the internal status.
                return C2_BLOCKING;
            }
            CHECK(errno != 0);
        }
        if (ret == 0) {
            // writing end is closed
            ALOGE("writing end for the waitable object seems to be closed");
            return C2_BAD_STATE;
        }
        mDequeueable--;
        *cache = mBufferCache;
        return C2_OK;
    }
    return C2_BLOCKING;
}

// If {@code cached} is {@code true}, {@code pBuffer} should be read from the
// current cache. Otherwise, {@code pBuffer} should be written to the
// current cache.
void GraphicsTracker::commitAllocate(c2_status_t res, const std::shared_ptr<BufferCache> &cache,
                                     bool cached, int slot, const sp<Fence> &fence,
                                     std::shared_ptr<BufferItem> *pBuffer, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (res == C2_OK) {
        if (cached) {
            auto it = cache->mBuffers.find(slot);
            CHECK(it != cache->mBuffers.end());
            it->second->mFence = fence;
            *pBuffer = it->second;
            ALOGV("an allocated buffer already cached, updated Fence");
        } else if (cache.get() == mBufferCache.get() && mBufferCache->mIgbp) {
            // Cache the buffer if it is allocated from the current IGBP
            CHECK(slot >= 0);
            auto ret = mBufferCache->mBuffers.emplace(slot, *pBuffer);
            if (!ret.second) {
                ret.first->second = *pBuffer;
            }
            ALOGV("an allocated buffer not cached from the current IGBP");
        }
        uint64_t bid = (*pBuffer)->mId;
        auto mapRet = mDequeued.emplace(bid, *pBuffer);
        CHECK(mapRet.second);
    } else {
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
    }
}

// If a buffer is newly allocated, {@code cached} is {@code false} and the
// buffer is returned in {@code buffer}; otherwise {@code cached} is
// {@code true} and the buffer should be retrieved from the cache by
// commitAllocate().
c2_status_t GraphicsTracker::_allocate(const std::shared_ptr<BufferCache> &cache,
                                       uint32_t width, uint32_t height, PixelFormat format,
                                       uint64_t usage,
                                       bool *cached,
                                       int *rSlotId,
                                       sp<Fence> *rFence,
                                       std::shared_ptr<BufferItem> *buffer) {
    ::android::sp<IGraphicBufferProducer> igbp = cache->mIgbp;
    uint32_t generation = cache->mGeneration;
    if (!igbp) {
        // allocate directly
        AHardwareBuffer_Desc desc;
        desc.width = width;
        desc.height = height;
        desc.layers = 1u;
        desc.format = ::android::AHardwareBuffer_convertFromPixelFormat(format);
        desc.usage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(usage);
        desc.rfu0 = 0;
        desc.rfu1 = 0;

        AHardwareBuffer *buf;
        int ret = AHardwareBuffer_allocate(&desc, &buf);
        if (ret != ::android::OK) {
            ALOGE("direct allocation of AHB failed(%d)", ret);
            return ret == ::android::NO_MEMORY ? C2_NO_MEMORY : C2_CORRUPTED;
        }
        *cached = false;
        *rSlotId = -1;
        *rFence = Fence::NO_FENCE;
        *buffer = std::make_shared<BufferItem>(generation, buf, usage);
        AHardwareBuffer_release(buf); // remove the acquire count added by AHardwareBuffer_allocate().
        if (!*buffer) {
            ALOGE("direct allocation of AHB successful, but failed to create BufferItem");
            return C2_NO_MEMORY;
        }
        if (!(*buffer)->mInit) {
            ALOGE("direct allocation of AHB successful, but BufferItem init failed");
            buffer->reset();
            return C2_CORRUPTED;
        }
        ALOGV("allocate: direct allocate without igbp");
        return C2_OK;
    }

    int slotId;
    uint64_t outBufferAge;
    ::android::FrameEventHistoryDelta outTimestamps;
    sp<Fence> fence;

    ::android::status_t status = igbp->dequeueBuffer(
            &slotId, &fence, width, height, format, usage, &outBufferAge, &outTimestamps);
    if (status < ::android::OK) {
        ALOGE("dequeueBuffer() error %d", (int)status);
        return C2_CORRUPTED;
    }
    cache->waitOnSlot(slotId);
    bool exists = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (cache.get() == mBufferCache.get() &&
            cache->mBuffers.find(slotId) != cache->mBuffers.end()) {
            exists = true;
        }
    }
    bool needsRealloc = status & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
    if (needsRealloc || !exists) {
        sp<GraphicBuffer> realloced;
        status = igbp->requestBuffer(slotId, &realloced);
        if (status != ::android::OK) {
            ALOGE("allocate by dequeueBuffer() successful, but requestBuffer() failed %d",
                  status);
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *buffer = std::make_shared<BufferItem>(generation, slotId, realloced, fence);
        if (!*buffer) {
            ALOGE("allocate by dequeueBuffer() successful, but creating BufferItem failed");
            igbp->cancelBuffer(slotId, fence);
            return C2_NO_MEMORY;
        }
        if (!(*buffer)->mInit) {
            ALOGE("allocate by dequeueBuffer() successful, but BufferItem init failed");
            buffer->reset();
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *cached = false;
    } else {
        *cached = true;
    }
    ALOGV("allocate: a new allocated buffer from igbp cached %d, slot: %d",
          *cached, slotId);
    *rSlotId = slotId;
    *rFence = fence;
    return C2_OK;
}

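// A rough sketch of how a client of this tracker is expected to drive
// allocation (illustrative only; `tracker`, `w`, `h`, `fmt` and `usage` are
// placeholders, and error handling is omitted):
//
//     int fd = -1;
//     tracker->getWaitableFd(&fd);          // poll(fd, POLLIN, ...) until readable
//     AHardwareBuffer *ahb = nullptr;
//     sp<Fence> fence;
//     if (tracker->allocate(w, h, fmt, usage, &ahb, &fence) == C2_OK) {
//         // ... use the buffer, then render() or deallocate() it by its id ...
//         AHardwareBuffer_release(ahb);     // drop the extra ref taken for the caller
//     }
//     ::close(fd);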
c2_status_t GraphicsTracker::allocate(
        uint32_t width, uint32_t height, PixelFormat format, uint64_t usage,
        AHardwareBuffer **buf, sp<Fence> *rFence) {
    if (mStopped.load() == true) {
        ALOGE("cannot allocate due to being stopped");
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferCache> cache;
    c2_status_t res = requestAllocate(&cache);
    if (res != C2_OK) {
        return res;
    }
    ALOGV("allocatable or dequeueable");

    bool cached = false;
    int slotId;
    sp<Fence> fence;
    std::shared_ptr<BufferItem> buffer;
    bool updateDequeue = false;
    res = _allocate(cache, width, height, format, usage, &cached, &slotId, &fence, &buffer);
    commitAllocate(res, cache, cached, slotId, fence, &buffer, &updateDequeue);
    if (res == C2_OK) {
        ALOGV("allocated a buffer width:%u height:%u pixelformat:%d usage:%llu",
              width, height, format, (unsigned long long)usage);
        *buf = buffer->mBuf;
        *rFence = buffer->mFence;
        // *buf should be valid even if buffer is dtor-ed.
        AHardwareBuffer_acquire(*buf);
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
    return res;
}

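// Deallocation mirrors allocation: requestDeallocate() validates the buffer
// and marks it as deallocating, the caller cancels the buffer back to the
// IGBP if needed, and commitDeallocate() clears the bookkeeping and returns
// the dequeueable credit.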
c2_status_t GraphicsTracker::requestDeallocate(uint64_t bid, const sp<Fence> &fence,
                                               bool *completed, bool *updateDequeue,
                                               std::shared_ptr<BufferCache> *cache, int *slotId,
                                               sp<Fence> *rFence) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tries to deallocate a buffer which is already deallocating or rendering");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to deallocate non dequeued buffer");
        return C2_NOT_FOUND;
    }

    std::shared_ptr<BufferItem> buffer = it->second;
    if (buffer->mGeneration == mBufferCache->mGeneration && mBufferCache->mIgbp) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        *cache = mBufferCache;
        *slotId = buffer->mSlot;
        *rFence = (fence == Fence::NO_FENCE) ? buffer->mFence : fence;
        // mark this deallocating
        mDeallocating.emplace(bid);
        mBufferCache->blockSlot(buffer->mSlot);
        *completed = false;
    } else { // buffer is not from the current underlying Graphics.
        mDequeued.erase(bid);
        *completed = true;
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_OK;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
    }
    return C2_OK;
}

void GraphicsTracker::commitDeallocate(
        std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid) {
    std::unique_lock<std::mutex> l(mLock);
    size_t del1 = mDequeued.erase(bid);
    size_t del2 = mDeallocating.erase(bid);
    CHECK(del1 > 0 && del2 > 0);
    if (cache) {
        cache->unblockSlot(slotId);
    }
    mDequeueable++;
    l.unlock();
    writeIncDequeueable(1);
}

c2_status_t GraphicsTracker::deallocate(uint64_t bid, const sp<Fence> &fence) {
    bool completed;
    bool updateDequeue;
    std::shared_ptr<BufferCache> cache;
    int slotId;
    sp<Fence> rFence;
    c2_status_t res = requestDeallocate(bid, fence, &completed, &updateDequeue,
                                        &cache, &slotId, &rFence);
    if (res != C2_OK) {
        return res;
    }
    if (completed == true) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_OK;
    }

    // ignore return value since IGBP could be already stale.
    // cache->mIgbp is not null, if completed is false.
    (void)cache->mIgbp->cancelBuffer(slotId, rFence);

    commitDeallocate(cache, slotId, bid);
    return C2_OK;
}

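// Rendering also follows the request/commit pattern: requestRender() marks
// the buffer busy and tells the caller whether it belongs to the current
// cache; render() migrates and attaches the buffer to the current surface if
// it does not, then queues it to the IGBP; commitRender() finalizes the
// bookkeeping. The dequeue slot is reclaimed immediately only when the
// surface changed or the queued buffer was replaced; otherwise it is returned
// later via onReleased().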
c2_status_t GraphicsTracker::requestRender(uint64_t bid, std::shared_ptr<BufferCache> *cache,
                                           std::shared_ptr<BufferItem> *pBuffer,
                                           bool *fromCache,
                                           bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tries to render a buffer which is already deallocating or rendering");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to render non dequeued buffer");
        return C2_NOT_FOUND;
    }
    if (!mBufferCache->mIgbp) {
        // Render requested without a surface.
        // Reclaim the buffer for dequeue.
        // TODO: is this correct API-wise?
        mDequeued.erase(it);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_BAD_STATE;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferItem> buffer = it->second;
    *cache = mBufferCache;
    if (buffer->mGeneration == mBufferCache->mGeneration) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        mBufferCache->blockSlot(buffer->mSlot);
        *fromCache = true;
    } else {
        *fromCache = false;
    }
    *pBuffer = buffer;
    mDeallocating.emplace(bid);
    return C2_OK;
}

void GraphicsTracker::commitRender(const std::shared_ptr<BufferCache> &cache,
                                   const std::shared_ptr<BufferItem> &buffer,
                                   const std::shared_ptr<BufferItem> &oldBuffer,
                                   bool bufferReplaced,
                                   bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    uint64_t origBid = oldBuffer ? oldBuffer->mId : buffer->mId;

    if (cache) {
        cache->unblockSlot(buffer->mSlot);
        if (oldBuffer) {
            // migrated, register the new buffer to the cache.
            cache->mBuffers.emplace(buffer->mSlot, buffer);
        }
    }
    mDeallocating.erase(origBid);
    mDequeued.erase(origBid);

    if (cache.get() != mBufferCache.get() || bufferReplaced) {
        // Surface changed, no need to wait for the buffer to be released.
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
        return;
    }
}

c2_status_t GraphicsTracker::render(const C2ConstGraphicBlock& blk,
                                    const IGraphicBufferProducer::QueueBufferInput &input,
                                    IGraphicBufferProducer::QueueBufferOutput *output) {
    uint64_t bid;
    c2_status_t res = retrieveAHardwareBufferId(blk, &bid);
    if (res != C2_OK) {
        ALOGE("retrieving AHB-ID for GraphicBlock failed");
        return C2_CORRUPTED;
    }
    std::shared_ptr<BufferCache> cache;
    std::shared_ptr<BufferItem> buffer;
    std::shared_ptr<BufferItem> oldBuffer;
    bool updateDequeue = false;
    bool fromCache = false;
    res = requestRender(bid, &cache, &buffer, &fromCache, &updateDequeue);
    if (res != C2_OK) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return res;
    }
    int cacheSlotId = fromCache ? buffer->mSlot : -1;
    ALOGV("render prepared: igbp(%d) slot(%d)", bool(cache->mIgbp), cacheSlotId);
    if (!fromCache) {
        // The buffer does not come from the current cache.
        // The buffer needs to be migrated (attached).
        uint64_t newUsage = 0ULL;

        (void) cache->mIgbp->getConsumerUsage(&newUsage);
        std::shared_ptr<BufferItem> newBuffer =
                buffer->migrateBuffer(newUsage, cache->mGeneration);
        sp<GraphicBuffer> gb = newBuffer ? newBuffer->getGraphicBuffer() : nullptr;

        if (!gb) {
            ALOGE("render: realloc-ing a new buffer for migration failed");
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid);
            return C2_REFUSED;
        }
        if (cache->mIgbp->attachBuffer(&(newBuffer->mSlot), gb) != ::android::OK) {
            ALOGE("render: attaching a new buffer to IGBP failed");
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid);
            return C2_REFUSED;
        }
        cache->waitOnSlot(newBuffer->mSlot);
        cache->blockSlot(newBuffer->mSlot);
        oldBuffer = buffer;
        buffer = newBuffer;
    }
    ::android::status_t renderRes = cache->mIgbp->queueBuffer(buffer->mSlot, input, output);
    ALOGV("render done: migration(%d), render(err = %d)", !fromCache, renderRes);
    if (renderRes != ::android::OK) {
        CHECK(renderRes != ::android::BAD_VALUE);
        ALOGE("render: failed to queueBuffer() err = %d", renderRes);
        (void) cache->mIgbp->cancelBuffer(buffer->mSlot, input.fence);
        commitDeallocate(cache, buffer->mSlot, bid);
        return C2_REFUSED;
    }

    updateDequeue = false;
    commitRender(cache, buffer, oldBuffer, output->bufferReplaced, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    return C2_OK;
}

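// Called when a buffer queued to the surface has been released by the
// consumer (presumably via the IGBP's buffer-released path). A release
// against the current generation frees one dequeue slot or absorbs a pending
// max-dequeue decrease.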
void GraphicsTracker::onReleased(uint32_t generation) {
    bool updateDequeue = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mBufferCache->mGeneration == generation) {
            if (!adjustDequeueConfLocked(&updateDequeue)) {
                mDequeueable++;
                l.unlock();
                writeIncDequeueable(1);
            }
        }
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
}

} // namespace aidl::android::hardware::media::c2::implementation