/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <fcntl.h>
#include <unistd.h>

#include <media/stagefright/foundation/ADebug.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <vndk/hardware_buffer.h>

#include <codec2/aidl/GraphicsTracker.h>

namespace aidl::android::hardware::media::c2::implementation {

namespace {

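// Bounds for the max dequeued buffer count. The upper bound leaves two of
// the BufferQueue slots unused (presumably as headroom for buffers held on
// the consumer side; the exact rationale is an assumption, not documented
// here).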
static constexpr int kMaxDequeueMin = 1;
static constexpr int kMaxDequeueMax = ::android::BufferQueueDefs::NUM_BUFFER_SLOTS - 2;

c2_status_t retrieveAHardwareBufferId(const C2ConstGraphicBlock &blk, uint64_t *bid) {
    // TODO: implement. This is currently a stub that reports success without
    // retrieving the AHardwareBuffer ID from the block.
    (void)blk;
    (void)bid;
    return C2_OK;
}

} // anonymous namespace

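// A BufferItem wraps a buffer dequeued from an IGBP slot as an
// AHardwareBuffer and holds a reference to it. mInit remains false (item
// invalid) when the GraphicBuffer is null or its unique ID cannot be queried.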
GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, int slot, const sp<GraphicBuffer>& buf, const sp<Fence>& fence) :
        mInit{false}, mGeneration{generation}, mSlot{slot} {
    if (!buf) {
        return;
    }
    AHardwareBuffer *pBuf = AHardwareBuffer_from_GraphicBuffer(buf.get());
    int ret = AHardwareBuffer_getId(pBuf, &mId);
    if (ret != ::android::OK) {
        return;
    }
    mUsage = buf->getUsage();
    AHardwareBuffer_acquire(pBuf);
    mBuf = pBuf;
    mFence = fence;
    mInit = true;
}

GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation,
        AHardwareBuffer_Desc *desc, AHardwareBuffer *pBuf) :
        mInit{true}, mGeneration{generation}, mSlot{-1},
        mBuf{pBuf}, mUsage{::android::AHardwareBuffer_convertToGrallocUsageBits(desc->usage)},
        mFence{Fence::NO_FENCE} {
}

GraphicsTracker::BufferItem::~BufferItem() {
    if (mInit) {
        AHardwareBuffer_release(mBuf);
    }
}

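// Migrates this buffer to a new usage/generation by cloning the underlying
// native handle into a newly created AHardwareBuffer, which then replaces
// mBuf. The returned GraphicBuffer carries the new generation number so that
// it can be attached to a different surface.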
sp<GraphicBuffer> GraphicsTracker::BufferItem::updateBuffer(
        uint64_t newUsage, uint32_t newGeneration) {
    if (!mInit) {
        return nullptr;
    }
    newUsage |= mUsage;
    uint64_t ahbUsage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(newUsage);
    AHardwareBuffer_Desc desc;
    AHardwareBuffer_describe(mBuf, &desc);
    // TODO: we need well-established buffer migration features from graphics.
    // (b/273776738)
    desc.usage = ahbUsage;
    const native_handle_t *handle = AHardwareBuffer_getNativeHandle(mBuf);
    if (!handle) {
        return nullptr;
    }

    AHardwareBuffer *newBuf;
    int err = AHardwareBuffer_createFromHandle(&desc, handle,
                                       AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
                                       &newBuf);
    if (err != ::android::NO_ERROR) {
        return nullptr;
    }

    GraphicBuffer *gb = ::android::AHardwareBuffer_to_GraphicBuffer(newBuf);
    if (!gb) {
        AHardwareBuffer_release(newBuf);
        return nullptr;
    }

    gb->setGenerationNumber(newGeneration);
    mUsage = newUsage;
    mGeneration = newGeneration;
    AHardwareBuffer_release(mBuf);
    // acquire is already done when creating.
    mBuf = newBuf;
    return gb;
}

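// Slot blocking protocol: a slot is blocked (blockSlot()) while its buffer
// is being cancelled or queued back to the IGBP, and unblocked
// (unblockSlot()) once that completes. _allocate() waits on the slot
// (waitOnSlot()) after dequeueing it, so a dequeue does not race with a
// concurrent return of the same slot.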
void GraphicsTracker::BufferCache::waitOnSlot(int slot) {
    // TODO: log
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    while (p->blocked) {
        p->cv.wait(l);
    }
}

void GraphicsTracker::BufferCache::blockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = true;
}

void GraphicsTracker::BufferCache::unblockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = false;
    l.unlock();
    p->cv.notify_one();
}

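// The dequeueable count is mirrored into a non-blocking pipe: one byte in
// the pipe per dequeueable buffer. getWaitableFd() hands out a dup() of the
// read end so clients can poll for "ready to allocate", and requestAllocate()
// consumes one byte per reservation. maxDequeueCount is clamped to
// [kMaxDequeueMin, kMaxDequeueMax].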
GraphicsTracker::GraphicsTracker(int maxDequeueCount)
    : mMaxDequeue{maxDequeueCount}, mMaxDequeueRequested{maxDequeueCount},
      mMaxDequeueCommitted{maxDequeueCount},
      mMaxDequeueRequestedSeqId{0UL}, mMaxDequeueCommittedSeqId{0ULL},
      mDequeueable{maxDequeueCount},
      mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
      mInConfig{false}, mStopped{false} {
    if (maxDequeueCount < kMaxDequeueMin) {
        mMaxDequeue = kMaxDequeueMin;
        mMaxDequeueRequested = kMaxDequeueMin;
        mMaxDequeueCommitted = kMaxDequeueMin;
        mDequeueable = kMaxDequeueMin;
    } else if (maxDequeueCount > kMaxDequeueMax) {
        mMaxDequeue = kMaxDequeueMax;
        mMaxDequeueRequested = kMaxDequeueMax;
        mMaxDequeueCommitted = kMaxDequeueMax;
        mDequeueable = kMaxDequeueMax;
    }
    int pipefd[2] = { -1, -1 };
    int ret = ::pipe2(pipefd, O_CLOEXEC | O_NONBLOCK);
    CHECK(ret >= 0);

    mReadPipeFd.reset(pipefd[0]);
    mWritePipeFd.reset(pipefd[1]);

    mEventQueueThread = std::thread([this](){processEvent();});
    CHECK(mEventQueueThread.joinable());
}

GraphicsTracker::~GraphicsTracker() {
    stop();
    if (mEventQueueThread.joinable()) {
        // Take and drop mEventLock so that processEvent() is either waiting
        // on mEventCv or will observe mStopped before waiting; the notify
        // cannot be missed.
        std::unique_lock<std::mutex> l(mEventLock);
        l.unlock();
        mEventCv.notify_one();
        mEventQueueThread.join();
    }
}

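// Called with mLock held when one dequeued buffer slot is being given back
// while a smaller max dequeue count has been requested: instead of
// increasing mDequeueable, shrink mMaxDequeue toward mMaxDequeueRequested.
// Returns true if the freed slot was absorbed by the shrink, in which case
// the caller must not increment mDequeueable; *updateDequeue signals that
// the new count still has to be committed to the IGBP via
// updateDequeueConf().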
bool GraphicsTracker::adjustDequeueConfLocked(bool *updateDequeue) {
    // TODO: can't we adjust during config? not committing it may be safe?
    *updateDequeue = false;
    if (!mInConfig && mMaxDequeueRequested < mMaxDequeue) {
        int delta = mMaxDequeue - mMaxDequeueRequested;
        // Since we are supposed to increase mDequeueable by one already
        int adjustable = mDequeueable + 1;
        if (adjustable >= delta) {
            mMaxDequeue = mMaxDequeueRequested;
            mDequeueable -= (delta - 1);
        } else {
            mMaxDequeue -= adjustable;
            mDequeueable = 0;
        }
        if (mMaxDequeueRequested == mMaxDequeue && mMaxDequeueRequested != mMaxDequeueCommitted) {
            *updateDequeue = true;
        }
        return true;
    }
    return false;
}

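// Switches the tracker to a new surface (IGBP) and generation. The
// previously committed max dequeue count is committed to the new IGBP before
// it replaces mBufferCache.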
c2_status_t GraphicsTracker::configureGraphics(
        const sp<IGraphicBufferProducer>& igbp, uint32_t generation) {
    std::shared_ptr<BufferCache> prevCache;
    int prevDequeueCommitted;

    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = true;
        prevCache = mBufferCache;
        prevDequeueCommitted = mMaxDequeueCommitted;
    }
    // NOTE: Switching to the same surface is blocked by MediaCodec.
    // Switching to the same surface might not work if tried, since
    // disconnect() from the old surface in MediaCodec and allocation from the
    // new surface in GraphicsTracker cannot be synchronized properly.
    uint64_t bqId{0ULL};
    ::android::status_t ret = ::android::OK;
    if (igbp) {
        ret = igbp->getUniqueId(&bqId);
    }
    if (ret != ::android::OK || prevCache->mGeneration == generation || prevCache->mBqId == bqId) {
        return C2_BAD_VALUE;
    }
    ret = igbp->setMaxDequeuedBufferCount(prevDequeueCommitted);
    if (ret != ::android::OK) {
        // TODO: sort out the error from the IGBP and return an error accordingly.
        return C2_CORRUPTED;
    }
    std::shared_ptr<BufferCache> newCache = std::make_shared<BufferCache>(bqId, generation, igbp);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        mBufferCache = newCache;
    }
    return C2_OK;
}

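// Requests a new max dequeue count. The count actually committed to the IGBP
// may be larger when buffers are still dequeued: e.g. with mMaxDequeue == 10,
// mDequeueable == 2 and a request of 7, only 10 - 2 == 8 can be committed
// immediately; the remaining shrink to 7 happens in adjustDequeueConfLocked()
// as dequeued buffers are returned, followed by updateDequeueConf().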
c2_status_t GraphicsTracker::configureMaxDequeueCount(int maxDequeueCount) {
    std::shared_ptr<BufferCache> cache;

    if (maxDequeueCount < kMaxDequeueMin || maxDequeueCount > kMaxDequeueMax) {
        ALOGE("max dequeue count %d is not valid", maxDequeueCount);
        return C2_BAD_VALUE;
    }

    // Max dequeue count which can be committed to the IGBP.
    // (Sometimes maxDequeueCount cannot be committed if the currently
    // dequeued buffer count is bigger.)
    int maxDequeueToCommit;
    // Max dequeue count which is currently committed to the IGBP.
    // (Actually mMaxDequeueCommitted, but it needs to be read outside the lock.)
    int curMaxDequeueCommitted;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeueRequested == maxDequeueCount) {
            return C2_OK;
        }
        mInConfig = true;
        mMaxDequeueRequested = maxDequeueCount;
        cache = mBufferCache;
        curMaxDequeueCommitted = mMaxDequeueCommitted;
        if (mMaxDequeue <= maxDequeueCount) {
            maxDequeueToCommit = maxDequeueCount;
        } else {
            // Since mDequeueable is decreasing, an already-delivered
            // ready-to-allocate event may not be fulfilled. Another wait via
            // the waitable object may be necessary in that case.
            int delta = mMaxDequeue - maxDequeueCount;
            if (delta <= mDequeueable) {
                maxDequeueToCommit = maxDequeueCount;
                mDequeueable -= delta;
            } else {
                maxDequeueToCommit = mMaxDequeue - mDequeueable;
                mDequeueable = 0;
            }
        }
    }

    bool committed = true;
    if (cache->mIgbp && maxDequeueToCommit != curMaxDequeueCommitted) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(maxDequeueToCommit);
        committed = (ret == ::android::OK);
        if (!committed) {
            // This should not happen.
            ALOGE("setMaxDequeuedBufferCount failed with error(%d)", (int)ret);
        }
    }

    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            mMaxDequeueCommitted = maxDequeueToCommit;
            int delta = mMaxDequeueCommitted - mMaxDequeue;
            if (delta > 0) {
                mDequeueable += delta;
                l.unlock();
                writeIncDequeueable(delta);
            }
        }
    }

    if (!committed) {
        return C2_CORRUPTED;
    }
    return C2_OK;
}

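// Commits mMaxDequeue to the IGBP once it has shrunk to the requested value
// (see adjustDequeueConfLocked()). Cached buffers that are not currently
// dequeued are dropped from the cache, since the IGBP capacity change can
// invalidate their slots.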
void GraphicsTracker::updateDequeueConf() {
    std::shared_ptr<BufferCache> cache;
    int dequeueCommit;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeue == mMaxDequeueRequested && mMaxDequeueCommitted != mMaxDequeueRequested) {
            dequeueCommit = mMaxDequeue;
            mInConfig = true;
            cache = mBufferCache;
        } else {
            return;
        }
    }
    bool committed = true;
    if (cache->mIgbp) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(dequeueCommit);
        committed = (ret == ::android::OK);
        if (!committed) {
            // This should not happen.
            ALOGE("setMaxDequeuedBufferCount failed with error(%d)", (int)ret);
        }
    }
    int cleared = 0;
    {
        // cache == mBufferCache here, since we hold the config lock.
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            if (cache->mIgbp && dequeueCommit < mMaxDequeueCommitted) {
                // We are shrinking the number of buffers, so clear
                // no-longer-usable entries from the cache.
                for (auto it = cache->mBuffers.begin(); it != cache->mBuffers.end();) {
                    uint64_t bid = it->second->mId;
                    if (mDequeued.count(bid) == 0 || mDeallocating.count(bid) > 0) {
                        ++cleared;
                        it = cache->mBuffers.erase(it);
                    } else {
                        ++it;
                    }
                }
            }
            mMaxDequeueCommitted = dequeueCommit;
        }
    }
    if (cleared > 0) {
        ALOGD("%d buffers are cleared from cache due to IGBP capacity change", cleared);
    }
}

void GraphicsTracker::stop() {
    bool expected = false;
    std::unique_lock<std::mutex> l(mEventLock);
    bool updated = mStopped.compare_exchange_strong(expected, true);
    if (updated) {
        int writeFd = mWritePipeFd.release();
        ::close(writeFd);
    }
}

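// Reflects an increment of mDequeueable into the pipe by writing `inc`
// bytes. On a partial write or EINTR, the remainder is handed off to the
// event thread (processEvent()) via mIncDequeueable.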
void GraphicsTracker::writeIncDequeueable(int inc) {
    CHECK(inc > 0 && inc < kMaxDequeueMax);
    // The pipe carries one byte per dequeueable buffer; the content of buf is
    // irrelevant, only the byte count matters.
    thread_local char buf[kMaxDequeueMax];
    int diff = 0;
    {
        std::unique_lock<std::mutex> l(mEventLock);
        if (mStopped) {
            return;
        }
        CHECK(mWritePipeFd.get() >= 0);
        int ret = ::write(mWritePipeFd.get(), buf, inc);
        if (ret == inc) {
            return;
        }
        diff = ret < 0 ? inc : inc - ret;

        // Partial write or EINTR. This should not happen in a real scenario.
        mIncDequeueable += diff;
        if (mIncDequeueable > 0) {
            l.unlock();
            mEventCv.notify_one();
            ALOGW("deferred writing dequeueable increment to pipefd");
        }
    }
}

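// Event thread loop: retries the deferred pipe writes that
// writeIncDequeueable() could not complete, then sleeps on mEventCv until
// stop() closes the write end or more deferred increments arrive.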
void GraphicsTracker::processEvent() {
    // This handles partial/failed writes to the write end of the pipe, which
    // should not happen in a real scenario.
    thread_local char buf[kMaxDequeueMax];
    while (true) {
        std::unique_lock<std::mutex> l(mEventLock);
        if (mStopped) {
            break;
        }
        if (mIncDequeueable > 0) {
            int inc = mIncDequeueable > kMaxDequeueMax ? kMaxDequeueMax : mIncDequeueable;
            int ret = ::write(mWritePipeFd.get(), buf, inc);
            int written = ret <= 0 ? 0 : ret;
            mIncDequeueable -= written;
            if (mIncDequeueable > 0) {
                l.unlock();
                if (ret < 0) {
                    ALOGE("write to the write end failed %d", errno);
                } else {
                    ALOGW("partial write %d(%d)", inc, written);
                }
                continue;
            }
        }
        mEventCv.wait(l);
    }
}

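// Returns a dup() of the read end of the pipe. The fd becomes readable
// whenever at least one buffer is dequeueable.
//
// Usage sketch (hypothetical caller; error handling elided):
//   int fd = -1;
//   if (tracker->getWaitableFd(&fd) == C2_OK) {
//       struct pollfd pfd = { fd, POLLIN, 0 };
//       (void)::poll(&pfd, 1, -1);  // wakes up once a buffer is dequeueable
//       ::close(fd);
//   }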
c2_status_t GraphicsTracker::getWaitableFd(int *pipeFd) {
    *pipeFd = ::dup(mReadPipeFd.get());
    if (*pipeFd < 0) {
        if (mReadPipeFd.get() < 0) {
            return C2_BAD_STATE;
        }
        // dup error
        ALOGE("dup() for the reading end failed %d", errno);
        return C2_NO_MEMORY;
    }
    return C2_OK;
}

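// Reserves one dequeue slot: consumes one byte from the pipe and decrements
// mDequeueable, returning the cache the allocation should go to. Returns
// C2_BLOCKING when no slot is available.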
c2_status_t GraphicsTracker::requestAllocate(std::shared_ptr<BufferCache> *cache) {
    std::lock_guard<std::mutex> l(mLock);
    if (mDequeueable > 0) {
        char buf[1];
        int ret = ::read(mReadPipeFd.get(), buf, 1);
        if (ret < 0) {
            if (errno == EINTR) {
                // Do we really need to care about cancellation due to signal
                // handling?
                return C2_CANCELED;
            }
            if (errno == EAGAIN) {
                // Proper usage of the waitable object should not reach here,
                // but there could be alloc requests from the HAL that ignore
                // the internal status.
                return C2_BLOCKING;
            }
            CHECK(errno != 0);
        }
        if (ret == 0) {
            // the write end is closed
            return C2_BAD_STATE;
        }
        mDequeueable--;
        *cache = mBufferCache;
        return C2_OK;
    }
    return C2_BLOCKING;
}

// If {@code cached} is {@code true}, {@code pBuffer} should be read from the
// current cached status. Otherwise, {@code pBuffer} should be written to the
// current cached status.
void GraphicsTracker::commitAllocate(c2_status_t res, const std::shared_ptr<BufferCache> &cache,
                    bool cached, int slot, const sp<Fence> &fence,
                    std::shared_ptr<BufferItem> *pBuffer, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (res == C2_OK) {
        if (cached) {
            auto it = cache->mBuffers.find(slot);
            CHECK(it != cache->mBuffers.end());
            it->second->mFence = fence;
            *pBuffer = it->second;
        } else if (cache.get() == mBufferCache.get() && mBufferCache->mIgbp) {
            // Cache the buffer if it is allocated from the current IGBP
            CHECK(slot >= 0);
            auto ret = mBufferCache->mBuffers.emplace(slot, *pBuffer);
            if (!ret.second) {
                ret.first->second = *pBuffer;
            }
        }
        uint64_t bid = (*pBuffer)->mId;
        auto mapRet = mDequeued.emplace(bid, *pBuffer);
        CHECK(mapRet.second);
    } else {
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
    }
}

// If a buffer is newly allocated, {@code cached} is {@code false} and the
// buffer is returned in {@code buffer}; otherwise, {@code cached} is
// {@code true} and the buffer should be retrieved via commitAllocate().
c2_status_t GraphicsTracker::_allocate(const std::shared_ptr<BufferCache> &cache,
                  uint32_t width, uint32_t height, PixelFormat format,
                  int64_t usage,
                  bool *cached,
                  int *rSlotId,
                  sp<Fence> *rFence,
                  std::shared_ptr<BufferItem> *buffer) {
    ::android::sp<IGraphicBufferProducer> igbp = cache->mIgbp;
    uint32_t generation = cache->mGeneration;
    if (!igbp) {
        // allocate directly
        AHardwareBuffer_Desc desc;
        desc.width = width;
        desc.height = height;
        desc.layers = 1u;
        desc.format = ::android::AHardwareBuffer_convertFromPixelFormat(format);
        desc.usage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(usage);
        desc.rfu0 = 0;
        desc.rfu1 = 0;

        AHardwareBuffer *buf;
        int ret = AHardwareBuffer_allocate(&desc, &buf);
        if (ret != ::android::OK) {
            ALOGE("direct allocation of AHB failed(%d)", ret);
            return ret == ::android::NO_MEMORY ? C2_NO_MEMORY : C2_CORRUPTED;
        }
        *cached = false;
        *buffer = std::make_shared<BufferItem>(generation, &desc, buf);
        if (!*buffer) {
            AHardwareBuffer_release(buf);
            return C2_NO_MEMORY;
        }
        return C2_OK;
    }

    int slotId;
    uint64_t outBufferAge;
    ::android::FrameEventHistoryDelta outTimestamps;
    sp<Fence> fence;

    ::android::status_t status = igbp->dequeueBuffer(
            &slotId, &fence, width, height, format, usage, &outBufferAge, &outTimestamps);
    if (status < ::android::OK) {
        ALOGE("dequeueBuffer() error %d", (int)status);
        return C2_CORRUPTED;
    }
    cache->waitOnSlot(slotId);
    bool exists = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (cache.get() == mBufferCache.get() &&
            cache->mBuffers.find(slotId) != cache->mBuffers.end()) {
            exists = true;
        }
    }
    bool needsRealloc = status & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
    if (needsRealloc || !exists) {
        sp<GraphicBuffer> realloced;
        status = igbp->requestBuffer(slotId, &realloced);
        if (status != ::android::OK) {
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *buffer = std::make_shared<BufferItem>(generation, slotId, realloced, fence);
        if (!(*buffer)->mInit) {
            buffer->reset();
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *cached = false;
        return C2_OK;
    }
    *cached = true;
    *rSlotId = slotId;
    *rFence = fence;
    return C2_OK;
}

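// Public entry point for allocating/dequeuing a buffer.
//
// Usage sketch (hypothetical caller; error handling elided):
//   AHardwareBuffer *ahb = nullptr;
//   sp<Fence> fence;
//   if (tracker->allocate(width, height, format, usage, &ahb, &fence) == C2_OK) {
//       // ... use ahb after waiting on fence ...
//       AHardwareBuffer_release(ahb);  // drop the extra ref acquired here
//   }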
c2_status_t GraphicsTracker::allocate(
        uint32_t width, uint32_t height, PixelFormat format, uint64_t usage,
        AHardwareBuffer **buf, sp<Fence> *rFence) {
    if (mStopped.load()) {
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferCache> cache;
    c2_status_t res = requestAllocate(&cache);
    if (res != C2_OK) {
        return res;
    }

    bool cached = false;
    int slotId;
    sp<Fence> fence;
    std::shared_ptr<BufferItem> buffer;
    bool updateDequeue = false;
    res = _allocate(cache, width, height, format, usage, &cached, &slotId, &fence, &buffer);
    commitAllocate(res, cache, cached, slotId, fence, &buffer, &updateDequeue);
    if (res == C2_OK) {
        *buf = buffer->mBuf;
        *rFence = buffer->mFence;
        // *buf should remain valid even after the BufferItem is destroyed.
        AHardwareBuffer_acquire(*buf);
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
    return res;
}

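// First phase of deallocation: validates the buffer, marks it as
// deallocating, and blocks its slot if it belongs to the current cache.
// *completed is set to true when the buffer does not belong to the current
// IGBP and the bookkeeping is already finished here.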
c2_status_t GraphicsTracker::requestDeallocate(uint64_t bid, const sp<Fence> &fence,
                          bool *completed, bool *updateDequeue,
                          std::shared_ptr<BufferCache> *cache, int *slotId,
                          sp<Fence> *rFence) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to deallocate a buffer that is already being deallocated or rendered");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to deallocate a non-dequeued buffer");
        return C2_NOT_FOUND;
    }

    std::shared_ptr<BufferItem> buffer = it->second;
    if (buffer->mGeneration == mBufferCache->mGeneration && mBufferCache->mIgbp) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        *cache = mBufferCache;
        *slotId = buffer->mSlot;
        *rFence = (fence == Fence::NO_FENCE) ? buffer->mFence : fence;
        // mark this deallocating
        mDeallocating.emplace(bid);
        mBufferCache->blockSlot(buffer->mSlot);
        *completed = false;
    } else { // buffer is not from the current underlying Graphics.
        mDequeued.erase(bid);
        *completed = true;
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_OK;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
    }
    return C2_OK;
}

void GraphicsTracker::commitDeallocate(
        std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid) {
    std::unique_lock<std::mutex> l(mLock);
    size_t del1 = mDequeued.erase(bid);
    size_t del2 = mDeallocating.erase(bid);
    CHECK(del1 > 0 && del2 > 0);
    mDequeueable++;
    l.unlock();
    // Keep the pipe in sync with mDequeueable, as every other increment of it
    // does; otherwise the waitable fd would miss a wakeup.
    writeIncDequeueable(1);
    if (cache) {
        cache->unblockSlot(slotId);
    }
}

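// Gives a dequeued buffer back. If the buffer belongs to the current IGBP,
// it is returned via cancelBuffer(); otherwise only the local bookkeeping is
// updated.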
c2_status_t GraphicsTracker::deallocate(uint64_t bid, const sp<Fence> &fence) {
    bool completed;
    bool updateDequeue;
    std::shared_ptr<BufferCache> cache;
    int slotId;
    sp<Fence> rFence;
    c2_status_t res = requestDeallocate(bid, fence, &completed, &updateDequeue,
                                        &cache, &slotId, &rFence);
    if (res != C2_OK) {
        return res;
    }
    if (completed) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_OK;
    }

    // Ignore the return value since the IGBP could already be stale.
    // cache->mIgbp is not null if completed is false.
    (void)cache->mIgbp->cancelBuffer(slotId, rFence);

    commitDeallocate(cache, slotId, bid);
    return C2_OK;
}

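// First phase of rendering: validates the buffer and marks it as in-flight
// (mDeallocating). When no surface is attached, the buffer is reclaimed for
// dequeue instead and C2_BAD_STATE is returned.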
c2_status_t GraphicsTracker::requestRender(uint64_t bid, std::shared_ptr<BufferCache> *cache,
                      std::shared_ptr<BufferItem> *pBuffer,
                      bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to render a buffer that is already being deallocated or rendered");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to render a non-dequeued buffer");
        return C2_NOT_FOUND;
    }
    if (!mBufferCache->mIgbp) {
        // Render was requested without a surface;
        // reclaim the buffer for dequeue.
        // TODO: is this correct API-wise?
        mDequeued.erase(it);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_BAD_STATE;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferItem> buffer = it->second;
    *cache = mBufferCache;
    if (buffer->mGeneration == mBufferCache->mGeneration) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        mBufferCache->blockSlot(buffer->mSlot);
    }
    *pBuffer = buffer;
    mDeallocating.emplace(bid);
    return C2_OK;
}

void GraphicsTracker::commitRender(uint64_t origBid,
                      const std::shared_ptr<BufferCache> &cache,
                      const std::shared_ptr<BufferItem> &buffer,
                      bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    uint64_t bid = buffer->mId;

    if (cache.get() != mBufferCache.get()) {
        // Surface changed; no need to wait for the buffer to be released.
        mDeallocating.erase(bid);
        mDequeued.erase(bid);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
        return;
    }

    if (origBid != bid) {
        // Migration happened; register the new buffer to the cache.
        mBufferCache->mBuffers.emplace(buffer->mSlot, buffer);
    }
    mDeallocating.erase(bid);
    mDequeued.erase(bid);
}

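// Queues a buffer to the surface. If the buffer was allocated for an older
// generation, it is first migrated (updateBuffer() + attachBuffer()) to the
// current one. On failure the buffer is deallocated instead, since it is not
// renderable.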
c2_status_t GraphicsTracker::render(const C2ConstGraphicBlock& blk,
                       const IGraphicBufferProducer::QueueBufferInput &input,
                       IGraphicBufferProducer::QueueBufferOutput *output) {
    uint64_t bid;
    c2_status_t res = retrieveAHardwareBufferId(blk, &bid);
    if (res != C2_OK) {
        ALOGE("retrieving AHB-ID for GraphicBlock failed");
        return C2_CORRUPTED;
    }
    std::shared_ptr<BufferCache> cache;
    std::shared_ptr<BufferItem> buffer;
    bool updateDequeue = false;
    res = requestRender(bid, &cache, &buffer, &updateDequeue);
    if (res != C2_OK) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return res;
    }
    ::android::status_t migrateRes = ::android::OK;
    ::android::status_t renderRes = ::android::OK;
    if (cache->mGeneration != buffer->mGeneration) {
        uint64_t newUsage = 0ULL;

        (void)cache->mIgbp->getConsumerUsage(&newUsage);
        sp<GraphicBuffer> gb = buffer->updateBuffer(newUsage, cache->mGeneration);
        if (gb) {
            migrateRes = cache->mIgbp->attachBuffer(&(buffer->mSlot), gb);
        } else {
            ALOGW("realloc-ing a new buffer for migration failed");
            migrateRes = ::android::INVALID_OPERATION;
        }
    }
    if (migrateRes == ::android::OK) {
        renderRes = cache->mIgbp->queueBuffer(buffer->mSlot, input, output);
        if (renderRes != ::android::OK) {
            CHECK(renderRes != ::android::BAD_VALUE);
        }
    }
    if (migrateRes != ::android::OK || renderRes != ::android::OK) {
        // Since it is not renderable, just deallocate.
        if (migrateRes != ::android::OK) {
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid);
        } else {
            (void)cache->mIgbp->cancelBuffer(buffer->mSlot, input.fence);
            commitDeallocate(cache, buffer->mSlot, bid);
        }
        ALOGE("migration error(%d), render error(%d)", (int)migrateRes, (int)renderRes);
        return C2_REFUSED;
    }

    updateDequeue = false;
    commitRender(bid, cache, buffer, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    if (output->bufferReplaced) {
        // in case of a buffer drop during render
        onReleased(cache->mGeneration);
    }
    return C2_OK;
}

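// Called when a buffer of the given generation is released back to the
// producer (e.g. on a buffer drop reported via bufferReplaced during
// render()); frees up one dequeue slot for the current cache.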
void GraphicsTracker::onReleased(uint32_t generation) {
    bool updateDequeue = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mBufferCache->mGeneration == generation) {
            if (!adjustDequeueConfLocked(&updateDequeue)) {
                mDequeueable++;
                l.unlock();
                writeIncDequeueable(1);
            }
        }
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
}

} // namespace aidl::android::hardware::media::c2::implementation