/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <media/stagefright/foundation/ADebug.h>
#include <private/android/AHardwareBufferHelpers.h>
#include <vndk/hardware_buffer.h>

#include <C2BlockInternal.h>
#include <codec2/aidl/GraphicsTracker.h>

namespace aidl::android::hardware::media::c2::implementation {

namespace {

static constexpr int kMaxDequeueMin = 1;
static constexpr int kMaxDequeueMax = ::android::BufferQueueDefs::NUM_BUFFER_SLOTS - 2;

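// Retrieves the unique id of the AHardwareBuffer backing the given graphic
// block. Fails with C2_BAD_VALUE unless the block comes from an
// AHardwareBuffer-based block pool, and with C2_OMITTED before Android T.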
c2_status_t retrieveAHardwareBufferId(const C2ConstGraphicBlock &blk, uint64_t *bid) {
    std::shared_ptr<const _C2BlockPoolData> bpData = _C2BlockFactory::GetGraphicBlockPoolData(blk);
    if (!bpData || bpData->getType() != _C2BlockPoolData::TYPE_AHWBUFFER) {
        return C2_BAD_VALUE;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf;
        if (!_C2BlockFactory::GetAHardwareBuffer(bpData, &pBuf)) {
            return C2_CORRUPTED;
        }
        int ret = AHardwareBuffer_getId(pBuf, bid);
        if (ret != ::android::OK) {
            return C2_CORRUPTED;
        }
        return C2_OK;
    } else {
        return C2_OMITTED;
    }
}

} // anonymous namespace

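// Wraps a GraphicBuffer dequeued from an IGBP slot. On success the item
// holds an extra reference to the underlying AHardwareBuffer and caches its
// id and usage bits; mInit stays false on any failure.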
GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation, int slot, const sp<GraphicBuffer>& buf, const sp<Fence>& fence) :
        mInit{false}, mGeneration{generation}, mSlot{slot} {
    if (!buf) {
        return;
    }
    if (__builtin_available(android __ANDROID_API_T__, *)) {
        AHardwareBuffer *pBuf = AHardwareBuffer_from_GraphicBuffer(buf.get());
        int ret = AHardwareBuffer_getId(pBuf, &mId);
        if (ret != ::android::OK) {
            return;
        }
        mUsage = buf->getUsage();
        AHardwareBuffer_acquire(pBuf);
        mBuf = pBuf;
        mFence = fence;
        mInit = true;
    }
}

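// Wraps a directly allocated AHardwareBuffer with no IGBP slot. The
// reference acquired at allocation time is handed over to this item and
// released in the destructor.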
GraphicsTracker::BufferItem::BufferItem(
        uint32_t generation,
        AHardwareBuffer_Desc *desc, AHardwareBuffer *pBuf) :
        mInit{true}, mGeneration{generation}, mSlot{-1},
        mBuf{pBuf}, mUsage{::android::AHardwareBuffer_convertToGrallocUsageBits(desc->usage)},
        mFence{Fence::NO_FENCE} {
}

GraphicsTracker::BufferItem::~BufferItem() {
    if (mInit) {
        AHardwareBuffer_release(mBuf);
    }
}

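// Clones the underlying buffer handle with (possibly expanded) usage bits
// and stamps the clone with a new generation number, so that a buffer
// dequeued from a previous surface can be attached to the current one.
// Returns the migrated GraphicBuffer, or nullptr on failure.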
sp<GraphicBuffer> GraphicsTracker::BufferItem::updateBuffer(
        uint64_t newUsage, uint32_t newGeneration) {
    if (!mInit) {
        return nullptr;
    }
    newUsage |= mUsage;
    uint64_t ahbUsage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(newUsage);
    AHardwareBuffer_Desc desc;
    AHardwareBuffer_describe(mBuf, &desc);
    // TODO: we need well-established buffer migration features from graphics.
    // (b/273776738)
    desc.usage = ahbUsage;
    const native_handle_t *handle = AHardwareBuffer_getNativeHandle(mBuf);
    if (!handle) {
        return nullptr;
    }

    AHardwareBuffer *newBuf;
    int err = AHardwareBuffer_createFromHandle(&desc, handle,
                                               AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
                                               &newBuf);
    if (err != ::android::NO_ERROR) {
        return nullptr;
    }

    GraphicBuffer *gb = ::android::AHardwareBuffer_to_GraphicBuffer(newBuf);
    if (!gb) {
        AHardwareBuffer_release(newBuf);
        return nullptr;
    }

    gb->setGenerationNumber(newGeneration);
    mUsage = newUsage;
    mGeneration = newGeneration;
    AHardwareBuffer_release(mBuf);
    // acquire() was already done when the clone was created.
    mBuf = newBuf;
    return gb;
}

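// A slot is blocked while a cancelBuffer()/queueBuffer() for its buffer is
// in flight, so that a dequeueBuffer() returning the same slot cannot race
// with the ongoing deallocation or render. waitOnSlot() parks allocators
// until the slot is unblocked again.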
void GraphicsTracker::BufferCache::waitOnSlot(int slot) {
    // TODO: log
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    while (p->blocked) {
        p->cv.wait(l);
    }
}

void GraphicsTracker::BufferCache::blockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = true;
}

void GraphicsTracker::BufferCache::unblockSlot(int slot) {
    CHECK(0 <= slot && slot < kNumSlots);
    BlockedSlot *p = &mBlockedSlots[slot];
    std::unique_lock<std::mutex> l(p->l);
    p->blocked = false;
    l.unlock();
    p->cv.notify_one();
}

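// The tracker keeps the number of currently dequeueable buffers both in
// mDequeueable and as readable bytes in a non-blocking pipe; the read end
// of the pipe is exposed to clients as a waitable object (see
// getWaitableFd()). maxDequeueCount is clamped to
// [kMaxDequeueMin, kMaxDequeueMax].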
GraphicsTracker::GraphicsTracker(int maxDequeueCount)
    : mMaxDequeue{maxDequeueCount}, mMaxDequeueRequested{maxDequeueCount},
    mMaxDequeueCommitted{maxDequeueCount},
    mMaxDequeueRequestedSeqId{0UL}, mMaxDequeueCommittedSeqId{0ULL},
    mDequeueable{maxDequeueCount},
    mTotalDequeued{0}, mTotalCancelled{0}, mTotalDropped{0}, mTotalReleased{0},
    mInConfig{false}, mStopped{false} {
    if (maxDequeueCount < kMaxDequeueMin) {
        mMaxDequeue = kMaxDequeueMin;
        mMaxDequeueRequested = kMaxDequeueMin;
        mMaxDequeueCommitted = kMaxDequeueMin;
        mDequeueable = kMaxDequeueMin;
    } else if (maxDequeueCount > kMaxDequeueMax) {
        mMaxDequeue = kMaxDequeueMax;
        mMaxDequeueRequested = kMaxDequeueMax;
        mMaxDequeueCommitted = kMaxDequeueMax;
        mDequeueable = kMaxDequeueMax;
    }
    int pipefd[2] = { -1, -1 };
    int ret = ::pipe2(pipefd, O_CLOEXEC | O_NONBLOCK);

    mReadPipeFd.reset(pipefd[0]);
    mWritePipeFd.reset(pipefd[1]);

    mEventQueueThread = std::thread([this](){processEvent();});

    CHECK(ret >= 0);
    CHECK(mEventQueueThread.joinable());
}

GraphicsTracker::~GraphicsTracker() {
    stop();
    if (mEventQueueThread.joinable()) {
        std::unique_lock<std::mutex> l(mEventLock);
        l.unlock();
        mEventCv.notify_one();
        mEventQueueThread.join();
    }
}

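// Shrinks mMaxDequeue toward a pending, smaller mMaxDequeueRequested by
// consuming the dequeueable credit that is about to be returned. Returns
// true if the credit was consumed by the adjustment (so the caller must not
// increment mDequeueable); sets *updateDequeue when the new count still has
// to be committed to the IGBP via updateDequeueConf().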
bool GraphicsTracker::adjustDequeueConfLocked(bool *updateDequeue) {
    // TODO: could we adjust this during config? Not committing it might be safe.
    *updateDequeue = false;
    if (!mInConfig && mMaxDequeueRequested < mMaxDequeue) {
        int delta = mMaxDequeue - mMaxDequeueRequested;
        // Since we are supposed to increase mDequeueable by one already
        int adjustable = mDequeueable + 1;
        if (adjustable >= delta) {
            mMaxDequeue = mMaxDequeueRequested;
            mDequeueable -= (delta - 1);
        } else {
            mMaxDequeue -= adjustable;
            mDequeueable = 0;
        }
        if (mMaxDequeueRequested == mMaxDequeue && mMaxDequeueRequested != mMaxDequeueCommitted) {
            *updateDequeue = true;
        }
        return true;
    }
    return false;
}

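// Replaces the underlying surface (IGBP) with a new one, carrying over the
// currently committed max dequeued buffer count. The old cache is simply
// dropped; buffers dequeued from it are migrated lazily when rendered.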
c2_status_t GraphicsTracker::configureGraphics(
        const sp<IGraphicBufferProducer>& igbp, uint32_t generation) {
    std::shared_ptr<BufferCache> prevCache;
    int prevDequeueCommitted;

    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = true;
        prevCache = mBufferCache;
        prevDequeueCommitted = mMaxDequeueCommitted;
    }
    // NOTE: Switching to the same surface is blocked by MediaCodec.
    // Even if attempted, switching to the same surface might not work, since
    // disconnect() from the old surface in MediaCodec and allocation from the
    // new surface in GraphicsTracker cannot be synchronized properly.
    uint64_t bqId{0ULL};
    ::android::status_t ret = ::android::OK;
    if (igbp) {
        ret = igbp->getUniqueId(&bqId);
    }
    if (ret != ::android::OK || prevCache->mGeneration == generation || prevCache->mBqId == bqId) {
        return C2_BAD_VALUE;
    }
    ret = igbp->setMaxDequeuedBufferCount(prevDequeueCommitted);
    if (ret != ::android::OK) {
        // TODO: sort out the error from igbp and return an error accordingly.
        return C2_CORRUPTED;
    }
    std::shared_ptr<BufferCache> newCache = std::make_shared<BufferCache>(bqId, generation, igbp);
    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        mBufferCache = newCache;
    }
    return C2_OK;
}

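// Applies a new max dequeued buffer count requested by the component.
// Growing takes effect immediately; shrinking may only be partially
// committed when too many buffers are still dequeued, and the remainder is
// applied as buffers are returned (see adjustDequeueConfLocked()).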
c2_status_t GraphicsTracker::configureMaxDequeueCount(int maxDequeueCount) {
    std::shared_ptr<BufferCache> cache;

    if (maxDequeueCount < kMaxDequeueMin || maxDequeueCount > kMaxDequeueMax) {
        ALOGE("max dequeue count %d is not valid", maxDequeueCount);
        return C2_BAD_VALUE;
    }

    // Max dequeue count which can be committed to IGBP.
    // (Sometimes maxDequeueCount cannot be committed if the number of
    // currently dequeued buffers is bigger.)
    int maxDequeueToCommit;
    // Max dequeue count which is currently committed to IGBP.
    // (This is actually mMaxDequeueCommitted, but it needs to be read outside the lock.)
    int curMaxDequeueCommitted;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeueRequested == maxDequeueCount) {
            return C2_OK;
        }
        mInConfig = true;
        mMaxDequeueRequested = maxDequeueCount;
        cache = mBufferCache;
        curMaxDequeueCommitted = mMaxDequeueCommitted;
        if (mMaxDequeue <= maxDequeueCount) {
            maxDequeueToCommit = maxDequeueCount;
        } else {
            // Since mDequeueable is decreasing, an already delivered
            // ready-to-allocate event may not be fulfilled. Another wait via
            // the waitable object may be necessary in that case.
            int delta = mMaxDequeue - maxDequeueCount;
            if (delta <= mDequeueable) {
                maxDequeueToCommit = maxDequeueCount;
                mDequeueable -= delta;
            } else {
                maxDequeueToCommit = mMaxDequeue - mDequeueable;
                mDequeueable = 0;
            }
        }
    }

    bool committed = true;
    if (cache->mIgbp && maxDequeueToCommit != curMaxDequeueCommitted) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(maxDequeueToCommit);
        committed = (ret == ::android::OK);
        if (!committed) {
            // This should not happen.
            ALOGE("setMaxDequeuedBufferCount failed with error(%d)", (int)ret);
        }
    }

    {
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            mMaxDequeueCommitted = maxDequeueToCommit;
            int delta = mMaxDequeueCommitted - mMaxDequeue;
            if (delta > 0) {
                mDequeueable += delta;
                l.unlock();
                writeIncDequeueable(delta);
            }
        }
    }

    if (!committed) {
        return C2_CORRUPTED;
    }
    return C2_OK;
}

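// Commits a fully adjusted max dequeue count to the IGBP once all pending
// shrinking has been absorbed. When the capacity shrinks, cached buffers
// that are not dequeued (or are already being deallocated) are evicted.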
void GraphicsTracker::updateDequeueConf() {
    std::shared_ptr<BufferCache> cache;
    int dequeueCommit;
    std::unique_lock<std::mutex> cl(mConfigLock);
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mMaxDequeue == mMaxDequeueRequested && mMaxDequeueCommitted != mMaxDequeueRequested) {
            dequeueCommit = mMaxDequeue;
            mInConfig = true;
            cache = mBufferCache;
        } else {
            return;
        }
    }
    bool committed = true;
    if (cache->mIgbp) {
        ::android::status_t ret = cache->mIgbp->setMaxDequeuedBufferCount(dequeueCommit);
        committed = (ret == ::android::OK);
        if (!committed) {
            // This should not happen.
            ALOGE("setMaxDequeuedBufferCount failed with error(%d)", (int)ret);
        }
    }
    int cleared = 0;
    {
        // cache == mBufferCache here, since config is locked.
        std::unique_lock<std::mutex> l(mLock);
        mInConfig = false;
        if (committed) {
            if (cache->mIgbp && dequeueCommit < mMaxDequeueCommitted) {
                // We are shrinking the number of buffers, so clear the cache.
                for (auto it = cache->mBuffers.begin(); it != cache->mBuffers.end();) {
                    uint64_t bid = it->second->mId;
                    if (mDequeued.count(bid) == 0 || mDeallocating.count(bid) > 0) {
                        ++cleared;
                        it = cache->mBuffers.erase(it);
                    } else {
                        ++it;
                    }
                }
            }
            mMaxDequeueCommitted = dequeueCommit;
        }
    }
    if (cleared > 0) {
        ALOGD("%d buffers cleared from the cache due to IGBP capacity change", cleared);
    }
}

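// Stops the tracker. Closing the write end of the event pipe permanently
// un-blocks clients waiting on the read end; they observe EOF and get
// C2_BAD_STATE from requestAllocate().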
void GraphicsTracker::stop() {
    bool expected = false;
    std::unique_lock<std::mutex> l(mEventLock);
    bool updated = mStopped.compare_exchange_strong(expected, true);
    if (updated) {
        int writeFd = mWritePipeFd.release();
        ::close(writeFd);
    }
}

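// Adds `inc` dequeueable credits by writing that many bytes to the event
// pipe. If the pipe write fails or is partial (which should not happen in
// practice), the shortfall is recorded in mIncDequeueable and retried from
// the processEvent() thread.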
387void GraphicsTracker::writeIncDequeueable(int inc) {
Sungtak Leef6fe5f72023-09-17 23:53:50 +0000388 CHECK(inc > 0 && inc < kMaxDequeueMax);
389 thread_local char buf[kMaxDequeueMax];
390 int diff = 0;
391 {
Sungtak Leef075f712023-07-20 23:37:45 +0000392 std::unique_lock<std::mutex> l(mEventLock);
Sungtak Leef6fe5f72023-09-17 23:53:50 +0000393 if (mStopped) {
394 return;
395 }
396 CHECK(mWritePipeFd.get() >= 0);
397 int ret = ::write(mWritePipeFd.get(), buf, inc);
398 if (ret == inc) {
399 return;
400 }
401 diff = ret < 0 ? inc : inc - ret;
402
403 // Partial write or EINTR. This will not happen in a real scenario.
404 mIncDequeueable += diff;
405 if (mIncDequeueable > 0) {
406 l.unlock();
407 mEventCv.notify_one();
408 ALOGW("updating dequeueable to pipefd pending");
409 }
Sungtak Leef075f712023-07-20 23:37:45 +0000410 }
411}
412
413void GraphicsTracker::processEvent() {
Sungtak Leef6fe5f72023-09-17 23:53:50 +0000414 // This is for partial/failed writes to the writing end.
415 // This may not happen in the real scenario.
416 thread_local char buf[kMaxDequeueMax];
Sungtak Leef075f712023-07-20 23:37:45 +0000417 while (true) {
Sungtak Leef6fe5f72023-09-17 23:53:50 +0000418 std::unique_lock<std::mutex> l(mEventLock);
419 if (mStopped) {
420 break;
421 }
422 if (mIncDequeueable > 0) {
423 int inc = mIncDequeueable > kMaxDequeueMax ? kMaxDequeueMax : mIncDequeueable;
424 int ret = ::write(mWritePipeFd.get(), buf, inc);
425 int written = ret <= 0 ? 0 : ret;
426 mIncDequeueable -= written;
427 if (mIncDequeueable > 0) {
428 l.unlock();
429 if (ret < 0) {
430 ALOGE("write to writing end failed %d", errno);
431 } else {
432 ALOGW("partial write %d(%d)", inc, written);
Sungtak Leef075f712023-07-20 23:37:45 +0000433 }
Sungtak Leef075f712023-07-20 23:37:45 +0000434 continue;
435 }
436 }
Sungtak Leef6fe5f72023-09-17 23:53:50 +0000437 mEventCv.wait(l);
Sungtak Leef075f712023-07-20 23:37:45 +0000438 }
439}
440
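// Returns a dup of the read end of the event pipe. A client would
// typically poll this fd and, once it is readable, call allocate(); each
// readable byte corresponds to one dequeueable buffer credit.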
c2_status_t GraphicsTracker::getWaitableFd(int *pipeFd) {
    *pipeFd = ::dup(mReadPipeFd.get());
    if (*pipeFd < 0) {
        if (mReadPipeFd.get() < 0) {
            return C2_BAD_STATE;
        }
        // dup() error
        ALOGE("dup() for the reading end failed %d", errno);
        return C2_NO_MEMORY;
    }
    return C2_OK;
}

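// First half of allocate(): consumes one dequeueable credit (one byte from
// the event pipe) and snapshots the current buffer cache. The allocation
// itself happens outside the lock and is finalized in commitAllocate().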
c2_status_t GraphicsTracker::requestAllocate(std::shared_ptr<BufferCache> *cache) {
    std::lock_guard<std::mutex> l(mLock);
    if (mDequeueable > 0) {
        char buf[1];
        int ret = ::read(mReadPipeFd.get(), buf, 1);
        if (ret < 0) {
            if (errno == EINTR) {
                // Do we really need to care about cancellation due to signal handling?
                return C2_CANCELED;
            }
            if (errno == EAGAIN) {
                // Proper usage of the waitable object should not return this,
                // but there could be alloc requests from the HAL which ignore
                // the internal status.
                return C2_BLOCKING;
            }
            CHECK(errno != 0);
        }
        if (ret == 0) {
            // The write end is closed.
            return C2_BAD_STATE;
        }
        mDequeueable--;
        *cache = mBufferCache;
        return C2_OK;
    }
    return C2_BLOCKING;
}

// If {@code cached} is {@code true}, {@code pBuffer} should be read from the
// current cached status. Otherwise, {@code pBuffer} should be written to the
// current cache status.
void GraphicsTracker::commitAllocate(c2_status_t res, const std::shared_ptr<BufferCache> &cache,
                    bool cached, int slot, const sp<Fence> &fence,
                    std::shared_ptr<BufferItem> *pBuffer, bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (res == C2_OK) {
        if (cached) {
            auto it = cache->mBuffers.find(slot);
            CHECK(it != cache->mBuffers.end());
            it->second->mFence = fence;
            *pBuffer = it->second;
        } else if (cache.get() == mBufferCache.get() && mBufferCache->mIgbp) {
            // Cache the buffer if it is allocated from the current IGBP.
            CHECK(slot >= 0);
            auto ret = mBufferCache->mBuffers.emplace(slot, *pBuffer);
            if (!ret.second) {
                ret.first->second = *pBuffer;
            }
        }
        uint64_t bid = (*pBuffer)->mId;
        auto mapRet = mDequeued.emplace(bid, *pBuffer);
        CHECK(mapRet.second);
    } else {
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
    }
}


// If a buffer is newly allocated, {@code cached} is {@code false} and the
// buffer is returned in {@code buffer}; otherwise {@code cached} is
// {@code true} and the buffer should be retrieved via commitAllocate().
c2_status_t GraphicsTracker::_allocate(const std::shared_ptr<BufferCache> &cache,
                    uint32_t width, uint32_t height, PixelFormat format,
                    int64_t usage,
                    bool *cached,
                    int *rSlotId,
                    sp<Fence> *rFence,
                    std::shared_ptr<BufferItem> *buffer) {
    ::android::sp<IGraphicBufferProducer> igbp = cache->mIgbp;
    uint32_t generation = cache->mGeneration;
    if (!igbp) {
        // Allocate directly.
        AHardwareBuffer_Desc desc;
        desc.width = width;
        desc.height = height;
        desc.layers = 1u;
        desc.format = ::android::AHardwareBuffer_convertFromPixelFormat(format);
        desc.usage = ::android::AHardwareBuffer_convertFromGrallocUsageBits(usage);
        desc.rfu0 = 0;
        desc.rfu1 = 0;

        AHardwareBuffer *buf;
        int ret = AHardwareBuffer_allocate(&desc, &buf);
        if (ret != ::android::OK) {
            ALOGE("direct allocation of AHB failed(%d)", ret);
            return ret == ::android::NO_MEMORY ? C2_NO_MEMORY : C2_CORRUPTED;
        }
        *cached = false;
        *buffer = std::make_shared<BufferItem>(generation, &desc, buf);
        if (!*buffer) {
            AHardwareBuffer_release(buf);
            return C2_NO_MEMORY;
        }
        return C2_OK;
    }

    int slotId;
    uint64_t outBufferAge;
    ::android::FrameEventHistoryDelta outTimestamps;
    sp<Fence> fence;

    ::android::status_t status = igbp->dequeueBuffer(
            &slotId, &fence, width, height, format, usage, &outBufferAge, &outTimestamps);
    if (status < ::android::OK) {
        ALOGE("dequeueBuffer() error %d", (int)status);
        return C2_CORRUPTED;
    }
    cache->waitOnSlot(slotId);
    bool exists = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (cache.get() == mBufferCache.get() &&
            cache->mBuffers.find(slotId) != cache->mBuffers.end()) {
            exists = true;
        }
    }
    bool needsRealloc = status & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
    if (needsRealloc || !exists) {
        sp<GraphicBuffer> realloced;
        status = igbp->requestBuffer(slotId, &realloced);
        if (status != ::android::OK) {
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *buffer = std::make_shared<BufferItem>(generation, slotId, realloced, fence);
        if (!(*buffer)->mInit) {
            buffer->reset();
            igbp->cancelBuffer(slotId, fence);
            return C2_CORRUPTED;
        }
        *cached = false;
        return C2_OK;
    }
    *cached = true;
    *rSlotId = slotId;
    *rFence = fence;
    return C2_OK;
}

c2_status_t GraphicsTracker::allocate(
        uint32_t width, uint32_t height, PixelFormat format, uint64_t usage,
        AHardwareBuffer **buf, sp<Fence> *rFence) {
    if (mStopped.load() == true) {
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferCache> cache;
    c2_status_t res = requestAllocate(&cache);
    if (res != C2_OK) {
        return res;
    }

    bool cached = false;
    int slotId;
    sp<Fence> fence;
    std::shared_ptr<BufferItem> buffer;
    // Initialize to false: commitAllocate() only writes this on failure.
    bool updateDequeue = false;
    res = _allocate(cache, width, height, format, usage, &cached, &slotId, &fence, &buffer);
    commitAllocate(res, cache, cached, slotId, fence, &buffer, &updateDequeue);
    if (res == C2_OK) {
        *buf = buffer->mBuf;
        *rFence = buffer->mFence;
        // *buf should be valid even after buffer is destructed.
        AHardwareBuffer_acquire(*buf);
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
    return res;
}

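// First half of deallocate(): validates that the buffer is dequeued and not
// already being deallocated or rendered. If it belongs to the current
// cache, its slot is blocked and cancelBuffer() still has to be issued;
// otherwise the deallocation completes here and a credit is returned.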
c2_status_t GraphicsTracker::requestDeallocate(uint64_t bid, const sp<Fence> &fence,
                                               bool *completed, bool *updateDequeue,
                                               std::shared_ptr<BufferCache> *cache, int *slotId,
                                               sp<Fence> *rFence) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to deallocate a buffer which is already being deallocated or rendered");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to deallocate a non-dequeued buffer");
        return C2_NOT_FOUND;
    }

    std::shared_ptr<BufferItem> buffer = it->second;
    if (buffer->mGeneration == mBufferCache->mGeneration && mBufferCache->mIgbp) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        *cache = mBufferCache;
        *slotId = buffer->mSlot;
        *rFence = (fence == Fence::NO_FENCE) ? buffer->mFence : fence;
        // Mark this buffer as deallocating.
        mDeallocating.emplace(bid);
        mBufferCache->blockSlot(buffer->mSlot);
        *completed = false;
    } else { // The buffer is not from the current underlying surface.
        mDequeued.erase(bid);
        *completed = true;
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_OK;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
    }
    return C2_OK;
}

void GraphicsTracker::commitDeallocate(
        std::shared_ptr<BufferCache> &cache, int slotId, uint64_t bid) {
    std::lock_guard<std::mutex> l(mLock);
    size_t del1 = mDequeued.erase(bid);
    size_t del2 = mDeallocating.erase(bid);
    CHECK(del1 > 0 && del2 > 0);
    mDequeueable++;
    if (cache) {
        cache->unblockSlot(slotId);
    }
}


c2_status_t GraphicsTracker::deallocate(uint64_t bid, const sp<Fence> &fence) {
    bool completed;
    bool updateDequeue = false;
    std::shared_ptr<BufferCache> cache;
    int slotId;
    sp<Fence> rFence;
    c2_status_t res = requestDeallocate(bid, fence, &completed, &updateDequeue,
                                        &cache, &slotId, &rFence);
    if (res != C2_OK) {
        return res;
    }
    if (completed) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return C2_OK;
    }

    // Ignore the return value, since the IGBP could already be stale.
    // cache->mIgbp is not null if completed is false.
    (void)cache->mIgbp->cancelBuffer(slotId, rFence);

    commitDeallocate(cache, slotId, bid);
    return C2_OK;
}

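// First half of render(): validates the buffer and blocks its slot. When no
// surface is attached, rendering is refused and the buffer is simply
// reclaimed as dequeueable again.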
c2_status_t GraphicsTracker::requestRender(uint64_t bid, std::shared_ptr<BufferCache> *cache,
                                           std::shared_ptr<BufferItem> *pBuffer,
                                           bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    if (mDeallocating.find(bid) != mDeallocating.end()) {
        ALOGE("Tried to render a buffer which is already being deallocated or rendered");
        return C2_DUPLICATE;
    }
    auto it = mDequeued.find(bid);
    if (it == mDequeued.end()) {
        ALOGE("Tried to render a non-dequeued buffer");
        return C2_NOT_FOUND;
    }
    if (!mBufferCache->mIgbp) {
        // Render was requested without a surface;
        // reclaim the buffer for dequeue.
        // TODO: is this correct API-wise?
        mDequeued.erase(it);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return C2_BAD_STATE;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
        return C2_BAD_STATE;
    }
    std::shared_ptr<BufferItem> buffer = it->second;
    *cache = mBufferCache;
    if (buffer->mGeneration == mBufferCache->mGeneration) {
        auto it = mBufferCache->mBuffers.find(buffer->mSlot);
        CHECK(it != mBufferCache->mBuffers.end() && it->second.get() == buffer.get());
        mBufferCache->blockSlot(buffer->mSlot);
    }
    *pBuffer = buffer;
    mDeallocating.emplace(bid);
    return C2_OK;
}

void GraphicsTracker::commitRender(uint64_t origBid,
                                   const std::shared_ptr<BufferCache> &cache,
                                   const std::shared_ptr<BufferItem> &buffer,
                                   bool *updateDequeue) {
    std::unique_lock<std::mutex> l(mLock);
    uint64_t bid = buffer->mId;

    if (cache.get() != mBufferCache.get()) {
        // Surface changed; no need to wait for the buffer to be released.
        mDeallocating.erase(bid);
        mDequeued.erase(bid);
        if (adjustDequeueConfLocked(updateDequeue)) {
            return;
        }
        mDequeueable++;
        l.unlock();
        writeIncDequeueable(1);
        return;
    }

    if (origBid != bid) {
        // Migration happened; register the migrated buffer to the cache.
        mBufferCache->mBuffers.emplace(buffer->mSlot, buffer);
    }
    mDeallocating.erase(bid);
    mDequeued.erase(bid);
}

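// Renders a graphic block to the current surface. If the buffer was
// dequeued from an older generation (a previous surface), it is first
// migrated with updateBuffer() and attached to the current IGBP; on any
// migration or queueBuffer() failure the buffer is deallocated instead.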
c2_status_t GraphicsTracker::render(const C2ConstGraphicBlock& blk,
                                    const IGraphicBufferProducer::QueueBufferInput &input,
                                    IGraphicBufferProducer::QueueBufferOutput *output) {
    uint64_t bid;
    c2_status_t res = retrieveAHardwareBufferId(blk, &bid);
    if (res != C2_OK) {
        ALOGE("retrieving AHB-ID for GraphicBlock failed");
        return C2_CORRUPTED;
    }
    std::shared_ptr<BufferCache> cache;
    std::shared_ptr<BufferItem> buffer;
    bool updateDequeue = false;
    res = requestRender(bid, &cache, &buffer, &updateDequeue);
    if (res != C2_OK) {
        if (updateDequeue) {
            updateDequeueConf();
        }
        return res;
    }
    ::android::status_t migrateRes = ::android::OK;
    ::android::status_t renderRes = ::android::OK;
    if (cache->mGeneration != buffer->mGeneration) {
        uint64_t newUsage = 0ULL;

        (void)cache->mIgbp->getConsumerUsage(&newUsage);
        sp<GraphicBuffer> gb = buffer->updateBuffer(newUsage, cache->mGeneration);
        if (gb) {
            migrateRes = cache->mIgbp->attachBuffer(&(buffer->mSlot), gb);
        } else {
            ALOGW("realloc-ing a new buffer for migration failed");
            migrateRes = ::android::INVALID_OPERATION;
        }
    }
    if (migrateRes == ::android::OK) {
        renderRes = cache->mIgbp->queueBuffer(buffer->mSlot, input, output);
        if (renderRes != ::android::OK) {
            CHECK(renderRes != ::android::BAD_VALUE);
        }
    }
    if (migrateRes != ::android::OK || renderRes != ::android::OK) {
        // Since the buffer is not renderable, just deallocate it.
        if (migrateRes != ::android::OK) {
            std::shared_ptr<BufferCache> nullCache;
            commitDeallocate(nullCache, -1, bid);
        } else {
            (void)cache->mIgbp->cancelBuffer(buffer->mSlot, input.fence);
            commitDeallocate(cache, buffer->mSlot, bid);
        }
        ALOGE("migration error(%d), render error(%d)", (int)migrateRes, (int)renderRes);
        return C2_REFUSED;
    }

    updateDequeue = false;
    commitRender(bid, cache, buffer, &updateDequeue);
    if (updateDequeue) {
        updateDequeueConf();
    }
    if (output->bufferReplaced) {
        // A buffer was dropped during render.
        onReleased(cache->mGeneration);
    }
    return C2_OK;
}

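// Called when the consumer has released a buffer of the given generation;
// returns one dequeueable credit unless it is absorbed by a pending max
// dequeue shrink.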
void GraphicsTracker::onReleased(uint32_t generation) {
    bool updateDequeue = false;
    {
        std::unique_lock<std::mutex> l(mLock);
        if (mBufferCache->mGeneration == generation) {
            if (!adjustDequeueConfLocked(&updateDequeue)) {
                mDequeueable++;
                l.unlock();
                writeIncDequeueable(1);
            }
        }
    }
    if (updateDequeue) {
        updateDequeueConf();
    }
}

} // namespace aidl::android::hardware::media::c2::implementation