/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define LOG_TAG "GraphicsTracker"

#include <sys/eventfd.h>

#include <log/log.h>
#include <media/stagefright/foundation/ADebug.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <vndk/hardware_buffer.h>

#include <codec2/aidl/GraphicsTracker.h>

namespace aidl::android::hardware::media::c2::implementation {

namespace {

c2_status_t retrieveAHardwareBufferId(const C2ConstGraphicBlock &blk, uint64_t *bid) {
    // TODO
    (void)blk;
    (void)bid;
    // Not implemented yet: report "omitted" instead of returning C2_OK with
    // an unset |bid|, which callers would treat as a valid id.
    return C2_OMITTED;
}

} // anonymous namespace

GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, int slot, const sp<GraphicBuffer>& buf, const sp<Fence>& fence) :
        mInit{false}, mGeneration{generation}, mSlot{slot} {
    if (!buf) {
        return;
    }
    AHardwareBuffer *pBuf = AHardwareBuffer_from_GraphicBuffer(buf.get());
    int ret = AHardwareBuffer_getId(pBuf, &mId);
    if (ret != ::android::OK) {
        return;
    }
    mUsage = buf->getUsage();
    AHardwareBuffer_acquire(pBuf);
    mBuf = pBuf;
    mFence = fence;
    mInit = true;
}

GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation,
        AHardwareBuffer_Desc *desc, AHardwareBuffer *pBuf) :
        mInit{true}, mGeneration{generation}, mSlot{-1},
        mBuf{pBuf}, mUsage{::android::AHardwareBuffer_convertToGrallocUsageBits(desc->usage)},
        mFence{Fence::NO_FENCE} {
}

GraphicsTracker::BufferItem::~BufferItem() {
    if (mInit) {
        AHardwareBuffer_release(mBuf);
    }
}

sp<GraphicBuffer> GraphicsTracker::BufferItem::updateBuffer(
        uint64_t newUsage, uint32_t newGeneration) {
    if (!mInit) {
        return nullptr;
    }
    newUsage |= mUsage;
    uint64_t ahbUsage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(newUsage);
    AHardwareBuffer_Desc desc;
    AHardwareBuffer_describe(mBuf, &desc);
    // TODO: we need well-established buffer migration features from graphics.
    // (b/273776738)
    desc.usage = ahbUsage;
    const native_handle_t *handle = AHardwareBuffer_getNativeHandle(mBuf);
    if (!handle) {
        return nullptr;
    }

    AHardwareBuffer *newBuf;
    int err = AHardwareBuffer_createFromHandle(&desc, handle,
            AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &newBuf);
    if (err != ::android::NO_ERROR) {
        return nullptr;
    }

    GraphicBuffer *gb = ::android::AHardwareBuffer_to_GraphicBuffer(newBuf);
    if (!gb) {
        AHardwareBuffer_release(newBuf);
        return nullptr;
    }

    gb->setGenerationNumber(newGeneration);
    mUsage = newUsage;
    mGeneration = newGeneration;
    AHardwareBuffer_release(mBuf);
    // acquire is already done when creating.
    mBuf = newBuf;
    return gb;
}
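
// Migration note (for reference): updateBuffer() clones the underlying
// native handle into a new AHardwareBuffer carrying the merged usage bits
// and the new generation number, so that the clone can be attached to a new
// surface (see render()). The cached buffer id (mId) is not refreshed for
// the clone here; full migration bookkeeping is still pending (b/273776738).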

void GraphicsTracker::BufferCache::waitOnSlot(int slot) {
    // TODO: log
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    while (p->blocked) {
        p->cv.wait(l);
    }
}

void GraphicsTracker::BufferCache::blockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = true;
}

void GraphicsTracker::BufferCache::unblockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = false;
    l.unlock();
    p->cv.notify_one();
}
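
// Slot blocking protocol (for reference): blockSlot() marks a slot busy
// while its buffer is being queued or cancelled (render()/deallocate()
// paths), and unblockSlot() wakes waiters once that operation is committed.
// _allocate() calls waitOnSlot() right after dequeueBuffer(), so a freshly
// dequeued slot is not handed out while a cancel/queue on the same slot is
// still in flight.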
GraphicsTracker::GraphicsTracker(int maxDequeueCount)
    : mMaxDequeue{maxDequeueCount}, mMaxDequeueRequested{maxDequeueCount},
    mMaxDequeueCommitted{maxDequeueCount},
    mMaxDequeueRequestedSeqId{0UL}, mMaxDequeueCommittedSeqId{0ULL},
    mDequeueable{maxDequeueCount},
    mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
    mInConfig{false}, mStopped{false} {
    if (maxDequeueCount <= 0) {
        mMaxDequeue = kDefaultMaxDequeue;
        mMaxDequeueRequested = kDefaultMaxDequeue;
        mMaxDequeueCommitted = kDefaultMaxDequeue;
        mDequeueable = kDefaultMaxDequeue;
    }
    // mAllocEventFd mirrors mDequeueable as a semaphore: it starts at the
    // initial dequeueable count and each read() consumes exactly one count.
    int allocEventFd = ::eventfd(mDequeueable, EFD_CLOEXEC | EFD_NONBLOCK | EFD_SEMAPHORE);
    int statusEventFd = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

    mAllocEventFd.reset(allocEventFd);
    mStopEventFd.reset(statusEventFd);

    // Abort before the event thread can touch invalid fds.
    CHECK(allocEventFd >= 0 && statusEventFd >= 0);

    mEventQueueThread = std::thread([this](){processEvent();});
    CHECK(mEventQueueThread.joinable());
}

GraphicsTracker::~GraphicsTracker() {
    stop();
    if (mEventQueueThread.joinable()) {
        std::unique_lock<std::mutex> l(mEventLock);
        mStopEventThread = true;
        l.unlock();
        mEventCv.notify_one();
        mEventQueueThread.join();
    }
}

bool GraphicsTracker::adjustDequeueConfLocked(bool *updateDequeue) {
    // TODO: can we adjust during config? Not committing it may be safe.
    *updateDequeue = false;
    if (!mInConfig && mMaxDequeueRequested < mMaxDequeue) {
        int delta = mMaxDequeue - mMaxDequeueRequested;
        // Since we are supposed to increase mDequeueable by one already
        int adjustable = mDequeueable + 1;
        if (adjustable >= delta) {
            mMaxDequeue = mMaxDequeueRequested;
            mDequeueable -= (delta - 1);
        } else {
            mMaxDequeue -= adjustable;
            mDequeueable = 0;
        }
        if (mMaxDequeueRequested == mMaxDequeue && mMaxDequeueRequested != mMaxDequeueCommitted) {
            *updateDequeue = true;
        }
        return true;
    }
    return false;
}
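
// Worked example of the gradual shrink above (illustrative numbers):
//   mMaxDequeue = 10, mMaxDequeueRequested = 6, mDequeueable = 1, and a
//   buffer release is about to do mDequeueable++.
//   delta = 4 and adjustable = 1 + 1 = 2 < 4, so mMaxDequeue becomes 8 and
//   mDequeueable becomes 0: the released slot is absorbed by the shrink
//   instead of becoming dequeueable. Two further releases bring mMaxDequeue
//   down to 6, at which point *updateDequeue asks the caller to commit the
//   new count to IGBP via updateDequeueConf().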

c2_status_t GraphicsTracker::configureGraphics(
        const sp<IGraphicBufferProducer>& igbp, uint32_t generation) {
    std::shared_ptr<BufferCache> prevCache;
    int prevDequeueCommitted;

    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = true;
        prevCache = mBufferCache;
        prevDequeueCommitted = mMaxDequeueCommitted;
    }
    // NOTE: Switching to the same surface is blocked by MediaCodec.
    // Even if attempted, it might not work, since disconnect() from the old
    // surface in MediaCodec and allocation from the new surface in
    // GraphicsTracker cannot be synchronized properly.
    uint64_t bqId{0ULL};
    ::android::status_t ret = ::android::OK;
    if (igbp) {
        ret = igbp->getUniqueId(&bqId);
    }
    if (ret != ::android::OK || prevCache->mGeneration == generation || prevCache->mBqId == bqId) {
        // Clear the in-config flag before bailing out.
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        return C2_BAD_VALUE;
    }
    if (igbp) {
        ret = igbp->setMaxDequeuedBufferCount(prevDequeueCommitted);
        if (ret != ::android::OK) {
            // TODO: sort out the error from igbp and return an error accordingly.
            std::unique_lock<std::mutex> l(mLock);
            mInConfig = false;
            return C2_CORRUPTED;
        }
    }
    std::shared_ptr<BufferCache> newCache = std::make_shared<BufferCache>(bqId, generation, igbp);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        mBufferCache = newCache;
    }
    return C2_OK;
}

c2_status_t GraphicsTracker::configureMaxDequeueCount(int maxDequeueCount) {
    std::shared_ptr<BufferCache> cache;

    // max dequeue count which can be committed to IGBP.
    // (Sometimes maxDequeueCount cannot be committed if the number of
    // dequeued buffers is bigger.)
    int maxDequeueToCommit;
    // max dequeue count which is currently committed to IGBP
    // (actually mMaxDequeueCommitted, but it needs to be read outside of the lock.)
    int curMaxDequeueCommitted;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeueRequested == maxDequeueCount) {
            return C2_OK;
        }
        mInConfig = true;
        mMaxDequeueRequested = maxDequeueCount;
        cache = mBufferCache;
        curMaxDequeueCommitted = mMaxDequeueCommitted;
        if (mMaxDequeue <= maxDequeueCount) {
            maxDequeueToCommit = maxDequeueCount;
        } else {
            // Since mDequeueable is decreasing,
            // a delivered ready-to-allocate event may not be fulfilled.
            // Another wait via a waitable object may be necessary in that case.
            int delta = mMaxDequeue - maxDequeueCount;
            if (delta <= mDequeueable) {
                maxDequeueToCommit = maxDequeueCount;
                mDequeueable -= delta;
            } else {
                maxDequeueToCommit = mMaxDequeue - mDequeueable;
                mDequeueable = 0;
            }
        }
    }

    bool committed = true;
    if (cache->mIgbp && maxDequeueToCommit != curMaxDequeueCommitted) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(maxDequeueToCommit);
        committed = (ret == ::android::OK);
        if (!committed) {
            // This should not happen.
            ALOGE("setMaxDequeuedBufferCount failed with error(%d)", (int)ret);
        }
    }

    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            mMaxDequeueCommitted = maxDequeueToCommit;
            int delta = mMaxDequeueCommitted - mMaxDequeue;
            if (mMaxDequeueRequested == maxDequeueToCommit) {
                // The request was fully committed; adopt it as the effective
                // max so that later adjustments do not deduct it again.
                mMaxDequeue = maxDequeueToCommit;
            }
            if (delta > 0) {
                mDequeueable += delta;
                l.unlock();
                writeIncDequeueable(delta);
            }
        }
    }

    if (!committed) {
        return C2_CORRUPTED;
    }
    return C2_OK;
}
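
// Worked example of a decrease (illustrative numbers):
//   mMaxDequeue = 10, mDequeueable = 3, and the new maxDequeueCount is 4.
//   delta = 6 > mDequeueable, so only 10 - 3 = 7 can be committed to IGBP
//   for now; mDequeueable drops to 0 and the remaining shrink from 7 toward
//   4 happens gradually in adjustDequeueConfLocked() as buffers get
//   released, after which updateDequeueConf() commits the final count.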

void GraphicsTracker::updateDequeueConf() {
    std::shared_ptr<BufferCache> cache;
    int dequeueCommit;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeue == mMaxDequeueRequested && mMaxDequeueCommitted != mMaxDequeueRequested) {
            dequeueCommit = mMaxDequeue;
            mInConfig = true;
            cache = mBufferCache;
        } else {
            return;
        }
    }
    bool committed = true;
    if (cache->mIgbp) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(dequeueCommit);
        committed = (ret == ::android::OK);
        if (!committed) {
            // This should not happen.
            ALOGE("setMaxDequeuedBufferCount failed with error(%d)", (int)ret);
        }
    }
    int cleared = 0;
    {
        // cache == mBufferCache here, since we hold mConfigLock.
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            if (cache->mIgbp && dequeueCommit < mMaxDequeueCommitted) {
                // We are shrinking the number of buffers, so clear the cache.
                for (auto it = cache->mBuffers.begin(); it != cache->mBuffers.end();) {
                    uint64_t bid = it->second->mId;
                    if (mDequeued.count(bid) == 0 || mDeallocating.count(bid) > 0) {
                        ++cleared;
                        it = cache->mBuffers.erase(it);
                    } else {
                        ++it;
                    }
                }
            }
            mMaxDequeueCommitted = dequeueCommit;
        }
    }
    if (cleared > 0) {
        ALOGD("%d buffers are cleared from cache due to IGBP capacity change", cleared);
    }
}

void GraphicsTracker::stop() {
    bool expected = false;
    bool updated = mStopped.compare_exchange_strong(expected, true);
    if (updated) {
        uint64_t val = 1ULL;
        int ret = ::write(mStopEventFd.get(), &val, 8);
        if (ret < 0) {
            // Probably EINTR; hand the stop signal over to the event thread.
            std::unique_lock<std::mutex> l(mEventLock);
            mStopRequest = true;
            l.unlock();
            mEventCv.notify_one();
            ALOGW("stop() status update pending");
        }
    }
}

void GraphicsTracker::writeIncDequeueable(int inc) {
    uint64_t val = inc;
    int ret = ::write(mAllocEventFd.get(), &val, 8);
    if (ret < 0) {
        // Probably EINTR due to signal handling; this should be rare.
        std::unique_lock<std::mutex> l(mEventLock);
        mIncDequeueable += inc;
        l.unlock();
        mEventCv.notify_one();
        ALOGW("updating dequeueable to eventfd pending");
    }
}
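
// Note on the eventfd protocol: a write() adds its 8-byte value to the
// counter, and because mAllocEventFd is created with EFD_SEMAPHORE each
// read() returns 1 and decrements the counter by one. The counter therefore
// stays in sync with mDequeueable as long as every increment of
// mDequeueable is paired with a writeIncDequeueable() call.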

void GraphicsTracker::processEvent() {
    // This thread compensates for write() failures on the eventfds.
    // write() failures other than EINTR should not happen.
    int64_t acc = 0;
    bool stopRequest = false;
    bool stopCommitted = false;

    while (true) {
        {
            std::unique_lock<std::mutex> l(mEventLock);
            acc += mIncDequeueable;
            mIncDequeueable = 0;
            stopRequest |= mStopRequest;
            mStopRequest = false;
            if (acc == 0 && stopRequest == stopCommitted) {
                if (mStopEventThread) {
                    break;
                }
                mEventCv.wait(l);
                continue;
            }
        }

        if (acc > 0) {
            int ret = ::write(mAllocEventFd.get(), &acc, 8);
            if (ret > 0) {
                acc = 0;
            }
        }
        if (stopRequest && !stopCommitted) {
            uint64_t val = 1ULL;
            int ret = ::write(mStopEventFd.get(), &val, 8);
            if (ret > 0) {
                stopCommitted = true;
            }
        }
        if (mStopEventThread) {
            break;
        }
    }
}

c2_status_t GraphicsTracker::getWaitableFds(int *allocFd, int *statusFd) {
    *allocFd = ::dup(mAllocEventFd.get());
    *statusFd = ::dup(mStopEventFd.get());

    if (*allocFd < 0 || *statusFd < 0) {
        if (*allocFd >= 0) {
            ::close(*allocFd);
            *allocFd = -1;
        }
        if (*statusFd >= 0) {
            ::close(*statusFd);
            *statusFd = -1;
        }
        return C2_NO_MEMORY;
    }
    return C2_OK;
}
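
// Illustrative sketch of how a client could wait on the duplicated fds
// returned above (the poll()-based loop is an assumption, not part of this
// file's contract):
//
//   int allocFd, statusFd;
//   if (tracker->getWaitableFds(&allocFd, &statusFd) == C2_OK) {
//       struct pollfd pfd[2] = {{allocFd, POLLIN, 0}, {statusFd, POLLIN, 0}};
//       ::poll(pfd, 2, -1);
//       if (pfd[1].revents & POLLIN) { /* the tracker was stopped */ }
//       else if (pfd[0].revents & POLLIN) { /* a buffer is dequeueable */ }
//       ::close(allocFd);
//       ::close(statusFd);
//   }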

c2_status_t GraphicsTracker::requestAllocate(std::shared_ptr<BufferCache> *cache) {
    std::lock_guard<std::mutex> l(mLock);
    if (mDequeueable > 0) {
        uint64_t val;
        int ret = ::read(mAllocEventFd.get(), &val, 8);
        if (ret < 0) {
            if (errno == EINTR) {
                // Do we really need to care about cancellation due to signal handling?
                return C2_CANCELED;
            }
            if (errno == EAGAIN) {
                // Proper use of the waitable object should not return this,
                // but there could be alloc requests from the HAL which ignore
                // the internal status.
                return C2_BLOCKING;
            }
            CHECK(errno != 0);
        }
        mDequeueable--;
        *cache = mBufferCache;
        return C2_OK;
    }
    return C2_BLOCKING;
}

// If {@code cached} is {@code true}, {@code pBuffer} should be read from the
// current cache. Otherwise, {@code pBuffer} should be written to the
// current cache.
void GraphicsTracker::commitAllocate(c2_status_t res, const std::shared_ptr<BufferCache> &cache,
                    bool cached, int slot, const sp<Fence> &fence,
                    std::shared_ptr<BufferItem> *pBuffer, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (res == C2_OK) {
        if (cached) {
            auto it = cache->mBuffers.find(slot);
            CHECK(it != cache->mBuffers.end());
            it->second->mFence = fence;
            *pBuffer = it->second;
        } else if (cache.get() == mBufferCache.get() && mBufferCache->mIgbp) {
            // Cache the buffer if it is allocated from the current IGBP.
            CHECK(slot >= 0);
            auto ret = mBufferCache->mBuffers.emplace(slot, *pBuffer);
            if (!ret.second) {
                ret.first->second = *pBuffer;
            }
        }
        uint64_t bid = (*pBuffer)->mId;
        auto mapRet = mDequeued.emplace(bid, *pBuffer);
        CHECK(mapRet.second);
    } else {
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
    }
}


// If a buffer is newly allocated, {@code cached} is {@code false},
// and the buffer is returned in {@code buffer};
// otherwise, {@code cached} is {@code true} and the buffer should be
// retrieved from the cache via commitAllocate().
c2_status_t GraphicsTracker::_allocate(const std::shared_ptr<BufferCache> &cache,
                                      uint32_t width, uint32_t height, PixelFormat format,
                                      int64_t usage,
                                      bool *cached,
                                      int *rSlotId,
                                      sp<Fence> *rFence,
                                      std::shared_ptr<BufferItem> *buffer) {
    ::android::sp<IGraphicBufferProducer> igbp = cache->mIgbp;
    uint32_t generation = cache->mGeneration;
    if (!igbp) {
        // allocate directly
        AHardwareBuffer_Desc desc;
        desc.width = width;
        desc.height = height;
        desc.layers = 1u;
        desc.format = ::android::AHardwareBuffer_convertFromPixelFormat(format);
        desc.usage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(usage);
        desc.rfu0 = 0;
        desc.rfu1 = 0;

        AHardwareBuffer *buf;
        int ret = AHardwareBuffer_allocate(&desc, &buf);
        if (ret != ::android::OK) {
            ALOGE("direct allocation of AHB failed(%d)", ret);
            return ret == ::android::NO_MEMORY ? C2_NO_MEMORY : C2_CORRUPTED;
        }
        *cached = false;
        *buffer = std::make_shared<BufferItem>(generation, &desc, buf);
        if (!*buffer) {
            AHardwareBuffer_release(buf);
            return C2_NO_MEMORY;
        }
        return C2_OK;
    }

    int slotId;
    uint64_t outBufferAge;
    ::android::FrameEventHistoryDelta outTimestamps;
    sp<Fence> fence;

    ::android::status_t status = igbp->dequeueBuffer(
            &slotId, &fence, width, height, format, usage, &outBufferAge, &outTimestamps);
    if (status < ::android::OK) {
        ALOGE("dequeueBuffer() error %d", (int)status);
        return C2_CORRUPTED;
    }
    cache->waitOnSlot(slotId);
    bool exists = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (cache.get() == mBufferCache.get() &&
            cache->mBuffers.find(slotId) != cache->mBuffers.end()) {
            exists = true;
        }
    }
    bool needsRealloc = status & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
    if (needsRealloc || !exists) {
        sp<GraphicBuffer> realloced;
        status = igbp->requestBuffer(slotId, &realloced);
        if (status != ::android::OK) {
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *buffer = std::make_shared<BufferItem>(generation, slotId, realloced, fence);
        if (!(*buffer)->mInit) {
            buffer->reset();
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *cached = false;
        return C2_OK;
    }
    *cached = true;
    *rSlotId = slotId;
    *rFence = fence;
    return C2_OK;
}

c2_status_t GraphicsTracker::allocate(
        uint32_t width, uint32_t height, PixelFormat format, uint64_t usage,
        AHardwareBuffer **buf, sp<Fence> *rFence) {
    if (mStopped.load() == true) {
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferCache> cache;
    c2_status_t res = requestAllocate(&cache);
    if (res != C2_OK) {
        return res;
    }

    bool cached = false;
    int slotId;
    sp<Fence> fence;
    std::shared_ptr<BufferItem> buffer;
    // Initialize to false: commitAllocate() only sets this on failure paths.
    bool updateDequeue = false;
    res = _allocate(cache, width, height, format, usage, &cached, &slotId, &fence, &buffer);
    commitAllocate(res, cache, cached, slotId, fence, &buffer, &updateDequeue);
    if (res == C2_OK) {
        *buf = buffer->mBuf;
        *rFence = buffer->mFence;
        // *buf should stay valid even if |buffer| is destructed, so add a
        // ref for the caller.
        AHardwareBuffer_acquire(*buf);
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
    return res;
}
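
// Illustrative caller flow for allocate() (the retry-on-C2_BLOCKING step and
// the variable names are assumptions, not part of this file):
//
//   AHardwareBuffer *ahb = nullptr;
//   sp<Fence> fence;
//   c2_status_t res = tracker->allocate(w, h, format, usage, &ahb, &fence);
//   if (res == C2_BLOCKING) { /* wait on the fds from getWaitableFds() */ }
//   if (res == C2_OK) {
//       // ... use ahb; allocate() acquired an extra ref for the caller ...
//       AHardwareBuffer_release(ahb);
//   }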

c2_status_t GraphicsTracker::requestDeallocate(uint64_t bid, const sp<Fence> &fence,
                                              bool *completed, bool *updateDequeue,
                                              std::shared_ptr<BufferCache> *cache, int *slotId,
                                              sp<Fence> *rFence) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to deallocate a buffer which is already being deallocated or rendered");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to deallocate a non-dequeued buffer");
        return C2_NOT_FOUND;
    }

    std::shared_ptr<BufferItem> buffer = it->second;
    if (buffer->mGeneration == mBufferCache->mGeneration && mBufferCache->mIgbp) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        *cache = mBufferCache;
        *slotId = buffer->mSlot;
        *rFence = (fence == Fence::NO_FENCE) ? buffer->mFence : fence;
        // mark this deallocating
        mDeallocating.emplace(bid);
        mBufferCache->blockSlot(buffer->mSlot);
        *completed = false;
    } else { // buffer is not from the current underlying surface.
        mDequeued.erase(bid);
        *completed = true;
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_OK;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
    }
    return C2_OK;
}

void GraphicsTracker::commitDeallocate(
        std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid) {
    std::unique_lock<std::mutex> l(mLock);
    size_t del1 = mDequeued.erase(bid);
    size_t del2 = mDeallocating.erase(bid);
    CHECK(del1 > 0 && del2 > 0);
    mDequeueable++;
    l.unlock();
    // Keep the alloc eventfd in sync with mDequeueable so waiters wake up.
    writeIncDequeueable(1);
    if (cache) {
        cache->unblockSlot(slotId);
    }
}


c2_status_t GraphicsTracker::deallocate(uint64_t bid, const sp<Fence> &fence) {
    bool completed;
    bool updateDequeue = false;
    std::shared_ptr<BufferCache> cache;
    int slotId;
    sp<Fence> rFence;
    c2_status_t res = requestDeallocate(bid, fence, &completed, &updateDequeue,
                                        &cache, &slotId, &rFence);
    if (res != C2_OK) {
        return res;
    }
    if (completed == true) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_OK;
    }

    // Ignore the return value since the IGBP could already be stale.
    // cache->mIgbp is not null if |completed| is false.
    (void)cache->mIgbp->cancelBuffer(slotId, rFence);

    commitDeallocate(cache, slotId, bid);
    return C2_OK;
}

c2_status_t GraphicsTracker::requestRender(uint64_t bid, std::shared_ptr<BufferCache> *cache,
                                          std::shared_ptr<BufferItem> *pBuffer,
                                          bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to render a buffer which is already being deallocated or rendered");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to render a non-dequeued buffer");
        return C2_NOT_FOUND;
    }
    if (!mBufferCache->mIgbp) {
        // Render was requested without a surface;
        // reclaim the buffer for dequeue.
        // TODO: is this correct API-wise?
        mDequeued.erase(it);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_BAD_STATE;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferItem> buffer = it->second;
    *cache = mBufferCache;
    if (buffer->mGeneration == mBufferCache->mGeneration) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        mBufferCache->blockSlot(buffer->mSlot);
    }
    *pBuffer = buffer;
    mDeallocating.emplace(bid);
    return C2_OK;
}

void GraphicsTracker::commitRender(uint64_t origBid,
                                  const std::shared_ptr<BufferCache> &cache,
                                  const std::shared_ptr<BufferItem> &buffer,
                                  bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    uint64_t bid = buffer->mId;

    if (cache.get() != mBufferCache.get()) {
        // Surface changed; no need to wait for the buffer to be released.
        mDeallocating.erase(bid);
        mDequeued.erase(bid);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
        return;
    }

    if (origBid != bid) {
        // Migration happened, so register the migrated buffer to the cache.
        mBufferCache->mBuffers.emplace(buffer->mSlot, buffer);
    }
    mDeallocating.erase(bid);
    mDequeued.erase(bid);
}
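
// render() below queues a dequeued buffer to the current surface:
//  1. look the buffer up by its AHardwareBuffer id,
//  2. migrate it (updateBuffer() + attachBuffer()) if it was dequeued under
//     a previous surface generation,
//  3. queueBuffer() it to IGBP, rolling the bookkeeping back via
//     commitDeallocate() if migration or queueing fails.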

c2_status_t GraphicsTracker::render(const C2ConstGraphicBlock& blk,
                                   const IGraphicBufferProducer::QueueBufferInput &input,
                                   IGraphicBufferProducer::QueueBufferOutput *output) {
    uint64_t bid;
    c2_status_t res = retrieveAHardwareBufferId(blk, &bid);
    if (res != C2_OK) {
        ALOGE("retrieving AHB-ID for GraphicBlock failed");
        return C2_CORRUPTED;
    }
    std::shared_ptr<BufferCache> cache;
    std::shared_ptr<BufferItem> buffer;
    bool updateDequeue = false;
    res = requestRender(bid, &cache, &buffer, &updateDequeue);
    if (res != C2_OK) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return res;
    }
    ::android::status_t migrateRes = ::android::OK;
    ::android::status_t renderRes = ::android::OK;
    if (cache->mGeneration != buffer->mGeneration) {
        uint64_t newUsage = 0ULL;

        (void) cache->mIgbp->getConsumerUsage(&newUsage);
        sp<GraphicBuffer> gb = buffer->updateBuffer(newUsage, cache->mGeneration);
        if (gb) {
            migrateRes = cache->mIgbp->attachBuffer(&(buffer->mSlot), gb);
        } else {
            ALOGW("realloc-ing a new buffer for migration failed");
            migrateRes = ::android::INVALID_OPERATION;
        }
    }
    if (migrateRes == ::android::OK) {
        renderRes = cache->mIgbp->queueBuffer(buffer->mSlot, input, output);
        if (renderRes != ::android::OK) {
            CHECK(renderRes != ::android::BAD_VALUE);
        }
    }
    if (migrateRes != ::android::OK || renderRes != ::android::OK) {
        // Since the buffer is not renderable, just deallocate it.
        if (migrateRes != ::android::OK) {
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid);
        } else {
            (void) cache->mIgbp->cancelBuffer(buffer->mSlot, input.fence);
            commitDeallocate(cache, buffer->mSlot, bid);
        }
        ALOGE("migration error(%d), render error(%d)", (int)migrateRes, (int)renderRes);
        return C2_REFUSED;
    }

    updateDequeue = false;
    commitRender(bid, cache, buffer, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    if (output->bufferReplaced) {
        // A buffer was dropped during render; one slot is released back.
        onReleased(cache->mGeneration);
    }
    return C2_OK;
}

void GraphicsTracker::onReleased(uint32_t generation) {
    bool updateDequeue = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mBufferCache->mGeneration == generation) {
            if (!adjustDequeueConfLocked(&updateDequeue)) {
                mDequeueable++;
                l.unlock();
                writeIncDequeueable(1);
            }
        }
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
}

} // namespace aidl::android::hardware::media::c2::implementation