/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "BLASTBufferQueue"

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#include <cutils/atomic.h>
#include <gui/BLASTBufferQueue.h>
#include <gui/BufferItemConsumer.h>
#include <gui/BufferQueueConsumer.h>
#include <gui/BufferQueueCore.h>
#include <gui/BufferQueueProducer.h>
#include <gui/GLConsumer.h>
#include <gui/IProducerListener.h>
#include <gui/Surface.h>
#include <gui/TraceUtils.h>
#include <utils/Singleton.h>
#include <utils/Trace.h>

#include <private/gui/ComposerService.h>
#include <private/gui/ComposerServiceAIDL.h>

#include <android-base/thread_annotations.h>
#include <chrono>

using namespace std::chrono_literals;

namespace {
inline const char* boolToString(bool b) {
    return b ? "true" : "false";
}
} // namespace

namespace android {

// Macros to include adapter info in log messages
#define BQA_LOGD(x, ...) \
    ALOGD("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGV(x, ...) \
    ALOGV("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
// enable logs for a single layer
//#define BQA_LOGV(x, ...) \
//        ALOGV_IF((strstr(mName.c_str(), "SurfaceView") != nullptr), "[%s](f:%u,a:%u) " x, \
//                 mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGE(x, ...) \
    ALOGE("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)

#define BBQ_TRACE(x, ...)                                                                  \
    ATRACE_FORMAT("%s - %s(f:%u,a:%u)" x, __FUNCTION__, mName.c_str(), mNumFrameAvailable, \
                  mNumAcquired, ##__VA_ARGS__)

#define UNIQUE_LOCK_WITH_ASSERTION(mutex) \
    std::unique_lock _lock{mutex};        \
    base::ScopedLockAssertion assumeLocked(mutex);

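// BLASTBufferItemConsumer tracks producer connect/disconnect events and the frame event history
// so that timestamps reported back by SurfaceFlinger can be forwarded to the client-side producer.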
void BLASTBufferItemConsumer::onDisconnect() {
    Mutex::Autolock lock(mMutex);
    mPreviouslyConnected = mCurrentlyConnected;
    mCurrentlyConnected = false;
    if (mPreviouslyConnected) {
        mDisconnectEvents.push(mCurrentFrameNumber);
    }
    mFrameEventHistory.onDisconnect();
}

void BLASTBufferItemConsumer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                                       FrameEventHistoryDelta* outDelta) {
    Mutex::Autolock lock(mMutex);
    if (newTimestamps) {
        // BufferQueueProducer only adds a new timestamp on
        // queueBuffer
        mCurrentFrameNumber = newTimestamps->frameNumber;
        mFrameEventHistory.addQueue(*newTimestamps);
    }
    if (outDelta) {
        // frame event histories will be processed
        // only after the producer connects and requests
        // deltas for the first time. Forward this intent
        // to SF-side to turn event processing back on
        mPreviouslyConnected = mCurrentlyConnected;
        mCurrentlyConnected = true;
        mFrameEventHistory.getAndResetDelta(outDelta);
    }
}

void BLASTBufferItemConsumer::updateFrameTimestamps(uint64_t frameNumber, nsecs_t refreshStartTime,
                                                    const sp<Fence>& glDoneFence,
                                                    const sp<Fence>& presentFence,
                                                    const sp<Fence>& prevReleaseFence,
                                                    CompositorTiming compositorTiming,
                                                    nsecs_t latchTime, nsecs_t dequeueReadyTime) {
    Mutex::Autolock lock(mMutex);

    // if the producer is not connected, don't bother updating,
    // the next producer that connects won't access this frame event
    if (!mCurrentlyConnected) return;
    std::shared_ptr<FenceTime> glDoneFenceTime = std::make_shared<FenceTime>(glDoneFence);
    std::shared_ptr<FenceTime> presentFenceTime = std::make_shared<FenceTime>(presentFence);
    std::shared_ptr<FenceTime> releaseFenceTime = std::make_shared<FenceTime>(prevReleaseFence);

    mFrameEventHistory.addLatch(frameNumber, latchTime);
    mFrameEventHistory.addRelease(frameNumber, dequeueReadyTime, std::move(releaseFenceTime));
    mFrameEventHistory.addPreComposition(frameNumber, refreshStartTime);
    mFrameEventHistory.addPostComposition(frameNumber, glDoneFenceTime, presentFenceTime,
                                          compositorTiming);
}

void BLASTBufferItemConsumer::getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) {
    bool disconnect = false;
    Mutex::Autolock lock(mMutex);
    while (!mDisconnectEvents.empty() && mDisconnectEvents.front() <= frameNumber) {
        disconnect = true;
        mDisconnectEvents.pop();
    }
    if (needsDisconnect != nullptr) *needsDisconnect = disconnect;
}

void BLASTBufferItemConsumer::onSidebandStreamChanged() {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        sp<NativeHandle> stream = getSidebandStream();
        bbq->setSidebandStream(stream);
    }
}

void BLASTBufferItemConsumer::resizeFrameEventHistory(size_t newSize) {
    Mutex::Autolock lock(mMutex);
    mFrameEventHistory.resize(newSize);
}

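// Wires up the client-side BufferQueue: creates the producer/consumer pair, attaches a
// BLASTBufferItemConsumer, and registers this object as the frame-available listener so queued
// buffers can be forwarded to SurfaceFlinger through transactions.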
BLASTBufferQueue::BLASTBufferQueue(const std::string& name, bool updateDestinationFrame)
      : mSurfaceControl(nullptr),
        mSize(1, 1),
        mRequestedSize(mSize),
        mFormat(PIXEL_FORMAT_RGBA_8888),
        mTransactionReadyCallback(nullptr),
        mSyncTransaction(nullptr),
        mUpdateDestinationFrame(updateDestinationFrame) {
    createBufferQueue(&mProducer, &mConsumer);
    // since the adapter is in the client process, set dequeue timeout
    // explicitly so that dequeueBuffer will block
    mProducer->setDequeueTimeout(std::numeric_limits<int64_t>::max());

    // safe default, most producers are expected to override this
    mProducer->setMaxDequeuedBufferCount(2);
    mBufferItemConsumer = new BLASTBufferItemConsumer(mConsumer,
                                                      GraphicBuffer::USAGE_HW_COMPOSER |
                                                              GraphicBuffer::USAGE_HW_TEXTURE,
                                                      1, false, this);
    static std::atomic<uint32_t> nextId = 0;
    mProducerId = nextId++;
    mName = name + "#" + std::to_string(mProducerId);
    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(mProducerId);
    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(mProducerId);
    mBufferItemConsumer->setName(String8(consumerName.c_str()));
    mBufferItemConsumer->setFrameAvailableListener(this);

    ComposerServiceAIDL::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
    mBufferItemConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBuffers);
    mCurrentMaxAcquiredBufferCount = mMaxAcquiredBuffers;
    mNumAcquired = 0;
    mNumFrameAvailable = 0;

    TransactionCompletedListener::getInstance()->addQueueStallListener(
            [&](const std::string& reason) {
                std::function<void(const std::string&)> callbackCopy;
                {
                    std::unique_lock _lock{mMutex};
                    callbackCopy = mTransactionHangCallback;
                }
                if (callbackCopy) callbackCopy(reason);
            },
            this);

    BQA_LOGV("BLASTBufferQueue created");
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, const sp<SurfaceControl>& surface,
                                   int width, int height, int32_t format)
      : BLASTBufferQueue(name) {
    update(surface, width, height, format);
}

BLASTBufferQueue::~BLASTBufferQueue() {
    TransactionCompletedListener::getInstance()->removeQueueStallListener(this);
    if (mPendingTransactions.empty()) {
        return;
    }
    BQA_LOGE("Applying pending transactions on dtor %d",
             static_cast<uint32_t>(mPendingTransactions.size()));
    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
    // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
    t.setApplyToken(mApplyToken).apply(false, true);

    if (mTransactionReadyCallback) {
        mTransactionReadyCallback(mSyncTransaction);
    }
}

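// Updates the SurfaceControl, requested size, and format used for subsequently acquired buffers,
// without recreating the adapter or its BufferQueue.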
void BLASTBufferQueue::update(const sp<SurfaceControl>& surface, uint32_t width, uint32_t height,
                              int32_t format) {
    LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");

    std::lock_guard _lock{mMutex};
    if (mFormat != format) {
        mFormat = format;
        mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
    }

    const bool surfaceControlChanged = !SurfaceControl::isSameSurface(mSurfaceControl, surface);
    if (surfaceControlChanged && mSurfaceControl != nullptr) {
        BQA_LOGD("Updating SurfaceControl without recreating BBQ");
    }
    bool applyTransaction = false;

    // Always update the native object even though they might have the same layer handle, so we can
    // get the updated transform hint from WM.
    mSurfaceControl = surface;
    SurfaceComposerClient::Transaction t;
    if (surfaceControlChanged) {
        t.setFlags(mSurfaceControl, layer_state_t::eEnableBackpressure,
                   layer_state_t::eEnableBackpressure);
        applyTransaction = true;
    }
    mTransformHint = mSurfaceControl->getTransformHint();
    mBufferItemConsumer->setTransformHint(mTransformHint);
    BQA_LOGV("update width=%d height=%d format=%d mTransformHint=%d", width, height, format,
             mTransformHint);

    ui::Size newSize(width, height);
    if (mRequestedSize != newSize) {
        mRequestedSize.set(newSize);
        mBufferItemConsumer->setDefaultBufferSize(mRequestedSize.width, mRequestedSize.height);
        if (mLastBufferInfo.scalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
            // If the buffer supports scaling, update the frame immediately since the client may
            // want to scale the existing buffer to the new size.
            mSize = mRequestedSize;
            if (mUpdateDestinationFrame) {
                t.setDestinationFrame(mSurfaceControl, Rect(newSize));
                applyTransaction = true;
            }
        }
    }
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t.setApplyToken(mApplyToken).apply(false, true);
    }
}

static std::optional<SurfaceControlStats> findMatchingStat(
        const std::vector<SurfaceControlStats>& stats, const sp<SurfaceControl>& sc) {
    for (auto stat : stats) {
        if (SurfaceControl::isSameSurface(sc, stat.surfaceControl)) {
            return stat;
        }
    }
    return std::nullopt;
}

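// The transaction callback thunks receive the BLASTBufferQueue as a raw void* context. The object
// is kept alive across the callback by incStrong((void*)thunk) before the transaction is applied
// and the matching decStrong in the callback itself.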
static void transactionCommittedCallbackThunk(void* context, nsecs_t latchTime,
                                              const sp<Fence>& presentFence,
                                              const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCommittedCallback(latchTime, presentFence, stats);
}

void BLASTBufferQueue::transactionCommittedCallback(nsecs_t /*latchTime*/,
                                                    const sp<Fence>& /*presentFence*/,
                                                    const std::vector<SurfaceControlStats>& stats) {
    {
        std::lock_guard _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCommittedCallback");
        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            std::optional<SurfaceControlStats> stat = findMatchingStat(stats, pendingSC);
            if (stat) {
                uint64_t currFrameNumber = stat->frameEventStats.frameNumber;

                // We need to check if we were waiting for a transaction callback in order to
                // process any pending buffers and unblock. It's possible to get transaction
                // callbacks for previous requests so we need to ensure that there are no pending
                // frame numbers that were in a sync. We remove the frame from mSyncedFrameNumbers
                // set and then check if it's empty. If there are no more pending syncs, we can
                // proceed with flushing the shadow queue.
                // We also want to check if mSyncTransaction is null because it's possible another
                // sync request came in while waiting, but it hasn't started processing yet. In that
                // case, we don't actually want to flush the frames in between since they will get
                // processed and merged with the sync transaction and released earlier than if they
                // were sent to SF
                mSyncedFrameNumbers.erase(currFrameNumber);
                if (mSyncedFrameNumbers.empty() && mSyncTransaction == nullptr) {
                    flushShadowQueue();
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCommittedCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }
        decStrong((void*)transactionCommittedCallbackThunk);
    }
}

static void transactionCallbackThunk(void* context, nsecs_t latchTime,
                                     const sp<Fence>& presentFence,
                                     const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCallback(latchTime, presentFence, stats);
}

void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                           const std::vector<SurfaceControlStats>& stats) {
    {
        std::lock_guard _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCallback");

        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            mSurfaceControlsWithPendingCallback.pop();
            std::optional<SurfaceControlStats> statsOptional = findMatchingStat(stats, pendingSC);
            if (statsOptional) {
                SurfaceControlStats stat = *statsOptional;
                if (stat.transformHint) {
                    mTransformHint = *stat.transformHint;
                    mBufferItemConsumer->setTransformHint(mTransformHint);
                    BQA_LOGV("updated mTransformHint=%d", mTransformHint);
                }
                // Update frametime stamps if the frame was latched and presented, indicated by a
                // valid latch time.
                if (stat.latchTime > 0) {
                    mBufferItemConsumer
                            ->updateFrameTimestamps(stat.frameEventStats.frameNumber,
                                                    stat.frameEventStats.refreshStartTime,
                                                    stat.frameEventStats.gpuCompositionDoneFence,
                                                    stat.presentFence, stat.previousReleaseFence,
                                                    stat.frameEventStats.compositorTiming,
                                                    stat.latchTime,
                                                    stat.frameEventStats.dequeueReadyTime);
                }
                auto currFrameNumber = stat.frameEventStats.frameNumber;
                std::vector<ReleaseCallbackId> staleReleases;
                for (const auto& [key, value] : mSubmitted) {
                    if (currFrameNumber > key.framenumber) {
                        staleReleases.push_back(key);
                    }
                }
                for (const auto& staleRelease : staleReleases) {
                    releaseBufferCallbackLocked(staleRelease,
                                                stat.previousReleaseFence
                                                        ? stat.previousReleaseFence
                                                        : Fence::NO_FENCE,
                                                stat.currentMaxAcquiredBufferCount,
                                                true /* fakeRelease */);
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }

        decStrong((void*)transactionCallbackThunk);
    }
}

// Unlike transactionCallbackThunk the release buffer callback does not extend the life of the
// BBQ. This is because if the BBQ is destroyed, then the buffers will be released by the client.
// So we pass in a weak pointer to the BBQ and if it is still alive, then we release the buffer.
// Otherwise, this is a no-op.
static void releaseBufferCallbackThunk(wp<BLASTBufferQueue> context, const ReleaseCallbackId& id,
                                       const sp<Fence>& releaseFence,
                                       std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    sp<BLASTBufferQueue> blastBufferQueue = context.promote();
    if (blastBufferQueue) {
        blastBufferQueue->releaseBufferCallback(id, releaseFence, currentMaxAcquiredBufferCount);
    } else {
        ALOGV("releaseBufferCallbackThunk %s blastBufferQueue is dead", id.to_string().c_str());
    }
}

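// Acquires and presents every frame currently tracked in the shadow queue (mNumFrameAvailable).
// Called from the transaction-committed path once no syncs are pending so normal buffer flow can
// resume.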
void BLASTBufferQueue::flushShadowQueue() {
    BQA_LOGV("flushShadowQueue");
    int numFramesToFlush = mNumFrameAvailable;
    while (numFramesToFlush > 0) {
        acquireNextBufferLocked(std::nullopt);
        numFramesToFlush--;
    }
}

void BLASTBufferQueue::releaseBufferCallback(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    std::lock_guard _lock{mMutex};
    BBQ_TRACE();
    releaseBufferCallbackLocked(id, releaseFence, currentMaxAcquiredBufferCount,
                                false /* fakeRelease */);
}

void BLASTBufferQueue::releaseBufferCallbackLocked(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount, bool fakeRelease) {
    ATRACE_CALL();
    BQA_LOGV("releaseBufferCallback %s", id.to_string().c_str());

    // Calculate how many buffers we need to hold before we release them back
    // to the buffer queue. This will prevent higher latency when we are running
    // on a lower refresh rate than the max supported. We only do that for EGL
    // clients as others don't care about latency
    const auto it = mSubmitted.find(id);
    const bool isEGL = it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;

    if (currentMaxAcquiredBufferCount) {
        mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
    }

    const uint32_t numPendingBuffersToHold =
            isEGL ? std::max(0, mMaxAcquiredBuffers - (int32_t)mCurrentMaxAcquiredBufferCount) : 0;

    auto rb = ReleasedBuffer{id, releaseFence};
    if (std::find(mPendingRelease.begin(), mPendingRelease.end(), rb) == mPendingRelease.end()) {
        mPendingRelease.emplace_back(rb);
        if (fakeRelease) {
            BQA_LOGE("Faking releaseBufferCallback from transactionCompleteCallback %" PRIu64,
                     id.framenumber);
            BBQ_TRACE("FakeReleaseCallback");
        }
    }

    // Release all buffers that are beyond the ones that we need to hold
    while (mPendingRelease.size() > numPendingBuffersToHold) {
        const auto releasedBuffer = mPendingRelease.front();
        mPendingRelease.pop_front();
        releaseBuffer(releasedBuffer.callbackId, releasedBuffer.releaseFence);
        // Don't process the transactions here if mSyncedFrameNumbers is not empty. That means
        // there are still transactions that have sync buffers in them that have not been applied
        // or dropped. Instead, let onFrameAvailable handle processing them since it will merge
        // with the syncTransaction.
        if (mSyncedFrameNumbers.empty()) {
            acquireNextBufferLocked(std::nullopt);
        }
    }

    ATRACE_INT("PendingRelease", mPendingRelease.size());
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());
    mCallbackCV.notify_all();
}

void BLASTBufferQueue::releaseBuffer(const ReleaseCallbackId& callbackId,
                                     const sp<Fence>& releaseFence) {
    auto it = mSubmitted.find(callbackId);
    if (it == mSubmitted.end()) {
        BQA_LOGE("ERROR: releaseBufferCallback without corresponding submitted buffer %s",
                 callbackId.to_string().c_str());
        return;
    }
    mNumAcquired--;
    BBQ_TRACE("frame=%" PRIu64, callbackId.framenumber);
    BQA_LOGV("released %s", callbackId.to_string().c_str());
    mBufferItemConsumer->releaseBuffer(it->second, releaseFence);
    mSubmitted.erase(it);
    // Remove the frame number from mSyncedFrameNumbers since we can get a release callback
    // without getting a transaction committed if the buffer was dropped.
    mSyncedFrameNumbers.erase(callbackId.framenumber);
}

static ui::Size getBufferSize(const BufferItem& item) {
    uint32_t bufWidth = item.mGraphicBuffer->getWidth();
    uint32_t bufHeight = item.mGraphicBuffer->getHeight();

    // Take the buffer's orientation into account
    if (item.mTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }
    return ui::Size(bufWidth, bufHeight);
}

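// Acquires the next buffer from the BufferQueue and stages it on a SurfaceComposerClient
// transaction (buffer, fence, crop, transform, metadata, callbacks). The transaction is applied
// immediately unless a caller-provided transaction is passed in, in which case the caller is
// responsible for applying it.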
status_t BLASTBufferQueue::acquireNextBufferLocked(
        const std::optional<SurfaceComposerClient::Transaction*> transaction) {
    // Check if we have frames available and we have not acquired the maximum number of buffers.
    // Even with this check, the consumer can fail to acquire an additional buffer if the consumer
    // has already acquired (mMaxAcquiredBuffers + 1) and the new buffer is not droppable. In this
    // case mBufferItemConsumer->acquireBuffer will return with NO_BUFFER_AVAILABLE.
    if (mNumFrameAvailable == 0) {
        BQA_LOGV("Can't acquire next buffer. No available frames");
        return BufferQueue::NO_BUFFER_AVAILABLE;
    }

    if (mNumAcquired >= (mMaxAcquiredBuffers + 2)) {
        BQA_LOGV("Can't acquire next buffer. Already acquired max frames %d max:%d + 2",
                 mNumAcquired, mMaxAcquiredBuffers);
        return BufferQueue::NO_BUFFER_AVAILABLE;
    }

    if (mSurfaceControl == nullptr) {
        BQA_LOGE("ERROR : surface control is null");
        return NAME_NOT_FOUND;
    }

    SurfaceComposerClient::Transaction localTransaction;
    bool applyTransaction = true;
    SurfaceComposerClient::Transaction* t = &localTransaction;
    if (transaction) {
        t = *transaction;
        applyTransaction = false;
    }

    BufferItem bufferItem;

    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status == BufferQueue::NO_BUFFER_AVAILABLE) {
        BQA_LOGV("Failed to acquire a buffer, err=NO_BUFFER_AVAILABLE");
        return status;
    } else if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer, err=%s", statusToString(status).c_str());
        return status;
    }

    auto buffer = bufferItem.mGraphicBuffer;
    mNumFrameAvailable--;
    BBQ_TRACE("frame=%" PRIu64, bufferItem.mFrameNumber);

    if (buffer == nullptr) {
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        BQA_LOGE("Buffer was empty");
        return BAD_VALUE;
    }

    if (rejectBuffer(bufferItem)) {
        BQA_LOGE("rejecting buffer:active_size=%dx%d, requested_size=%dx%d "
                 "buffer{size=%dx%d transform=%d}",
                 mSize.width, mSize.height, mRequestedSize.width, mRequestedSize.height,
                 buffer->getWidth(), buffer->getHeight(), bufferItem.mTransform);
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        return acquireNextBufferLocked(transaction);
    }

    mNumAcquired++;
    mLastAcquiredFrameNumber = bufferItem.mFrameNumber;
    ReleaseCallbackId releaseCallbackId(buffer->getId(), mLastAcquiredFrameNumber);
    mSubmitted[releaseCallbackId] = bufferItem;

    bool needsDisconnect = false;
    mBufferItemConsumer->getConnectionEvents(bufferItem.mFrameNumber, &needsDisconnect);

    // if producer disconnected before, notify SurfaceFlinger
    if (needsDisconnect) {
        t->notifyProducerDisconnect(mSurfaceControl);
    }

    // Ensure BLASTBufferQueue stays alive until we receive the transaction complete callback.
    incStrong((void*)transactionCallbackThunk);

    // Only update mSize for destination bounds if the incoming buffer matches the requested size.
    // Otherwise, it could cause stretching since the destination bounds will update before the
    // buffer with the new size is acquired.
    if (mRequestedSize == getBufferSize(bufferItem)) {
        mSize = mRequestedSize;
    }
    Rect crop = computeCrop(bufferItem);
    mLastBufferInfo.update(true /* hasBuffer */, bufferItem.mGraphicBuffer->getWidth(),
                           bufferItem.mGraphicBuffer->getHeight(), bufferItem.mTransform,
                           bufferItem.mScalingMode, crop);

    auto releaseBufferCallback =
            std::bind(releaseBufferCallbackThunk, wp<BLASTBufferQueue>(this) /* callbackContext */,
                      std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
    sp<Fence> fence = bufferItem.mFence ? new Fence(bufferItem.mFence->dup()) : Fence::NO_FENCE;
    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, mProducerId,
                 releaseBufferCallback);
    t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
    t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
    t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
    t->addTransactionCompletedCallback(transactionCallbackThunk, static_cast<void*>(this));

    mSurfaceControlsWithPendingCallback.push(mSurfaceControl);

    if (mUpdateDestinationFrame) {
        t->setDestinationFrame(mSurfaceControl, Rect(mSize));
    } else {
        const bool ignoreDestinationFrame =
                bufferItem.mScalingMode == NATIVE_WINDOW_SCALING_MODE_FREEZE;
        t->setFlags(mSurfaceControl,
                    ignoreDestinationFrame ? layer_state_t::eIgnoreDestinationFrame : 0,
                    layer_state_t::eIgnoreDestinationFrame);
    }
    t->setBufferCrop(mSurfaceControl, crop);
    t->setTransform(mSurfaceControl, bufferItem.mTransform);
    t->setTransformToDisplayInverse(mSurfaceControl, bufferItem.mTransformToDisplayInverse);
    t->setAutoRefresh(mSurfaceControl, bufferItem.mAutoRefresh);
    if (!bufferItem.mIsAutoTimestamp) {
        t->setDesiredPresentTime(bufferItem.mTimestamp);
    }

    // Drop stale frame timeline infos
    while (!mPendingFrameTimelines.empty() &&
           mPendingFrameTimelines.front().first < bufferItem.mFrameNumber) {
        ATRACE_FORMAT_INSTANT("dropping stale frameNumber: %" PRIu64 " vsyncId: %" PRId64,
                              mPendingFrameTimelines.front().first,
                              mPendingFrameTimelines.front().second.vsyncId);
        mPendingFrameTimelines.pop();
    }

    if (!mPendingFrameTimelines.empty() &&
        mPendingFrameTimelines.front().first == bufferItem.mFrameNumber) {
        ATRACE_FORMAT_INSTANT("Transaction::setFrameTimelineInfo frameNumber: %" PRIu64
                              " vsyncId: %" PRId64,
                              bufferItem.mFrameNumber,
                              mPendingFrameTimelines.front().second.vsyncId);
        t->setFrameTimelineInfo(mPendingFrameTimelines.front().second);
        mPendingFrameTimelines.pop();
    }

    {
        std::lock_guard _lock{mTimestampMutex};
        auto dequeueTime = mDequeueTimestamps.find(buffer->getId());
        if (dequeueTime != mDequeueTimestamps.end()) {
            Parcel p;
            p.writeInt64(dequeueTime->second);
            t->setMetadata(mSurfaceControl, gui::METADATA_DEQUEUE_TIME, p);
            mDequeueTimestamps.erase(dequeueTime);
        }
    }

    mergePendingTransactions(t, bufferItem.mFrameNumber);
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t->setApplyToken(mApplyToken).apply(false, true);
        mAppliedLastTransaction = true;
        mLastAppliedFrameNumber = bufferItem.mFrameNumber;
    } else {
        t->setBufferHasBarrier(mSurfaceControl, mLastAppliedFrameNumber);
        mAppliedLastTransaction = false;
    }

    BQA_LOGV("acquireNextBufferLocked size=%dx%d mFrameNumber=%" PRIu64
             " applyTransaction=%s mTimestamp=%" PRId64 "%s mPendingTransactions.size=%d"
             " graphicBufferId=%" PRIu64 "%s transform=%d",
             mSize.width, mSize.height, bufferItem.mFrameNumber, boolToString(applyTransaction),
             bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp ? "(auto)" : "",
             static_cast<uint32_t>(mPendingTransactions.size()), bufferItem.mGraphicBuffer->getId(),
             bufferItem.mAutoRefresh ? " mAutoRefresh" : "", bufferItem.mTransform);
    return OK;
}

Rect BLASTBufferQueue::computeCrop(const BufferItem& item) {
    if (item.mScalingMode == NATIVE_WINDOW_SCALING_MODE_SCALE_CROP) {
        return GLConsumer::scaleDownCrop(item.mCrop, mSize.width, mSize.height);
    }
    return item.mCrop;
}

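// Drains one frame from the shadow queue by acquiring it and immediately releasing it back to the
// BufferQueue without sending it to SurfaceFlinger.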
void BLASTBufferQueue::acquireAndReleaseBuffer() {
    BBQ_TRACE();
    BufferItem bufferItem;
    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer in acquireAndReleaseBuffer, err=%s",
                 statusToString(status).c_str());
        return;
    }
    mNumFrameAvailable--;
    mBufferItemConsumer->releaseBuffer(bufferItem, bufferItem.mFence);
}

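// BufferItemConsumer callback invoked when the producer queues a buffer. In the normal path the
// buffer is acquired and sent to SurfaceFlinger right away; when a sync transaction has been
// requested via syncNextTransaction, the buffer is instead staged on mSyncTransaction and the
// registered callback is notified once the buffer has been set.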
void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;

    {
        UNIQUE_LOCK_WITH_ASSERTION(mMutex);
        BBQ_TRACE();
        bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();

        const bool syncTransactionSet = mTransactionReadyCallback != nullptr;
        BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            // If we are going to re-use the same mSyncTransaction, release the buffer that may
            // already be set in the Transaction. This is to allow us a free slot early to continue
            // processing a new buffer.
            if (!mAcquireSingleBuffer) {
                auto bufferData = mSyncTransaction->getAndClearBuffer(mSurfaceControl);
                if (bufferData) {
                    BQA_LOGD("Releasing previous buffer when syncing: framenumber=%" PRIu64,
                             bufferData->frameNumber);
                    releaseBuffer(bufferData->generateReleaseCallbackId(),
                                  bufferData->acquireFence);
                }
            }

            if (waitForTransactionCallback) {
                // We are waiting on a previous sync's transaction callback so allow another sync
                // transaction to proceed.
                //
                // We need to first flush out the transactions that were in between the two syncs.
                // We do this by merging them into mSyncTransaction so any buffer merging will get
                // a release callback invoked.
                while (mNumFrameAvailable > 0) {
                    // flush out the shadow queue
                    acquireAndReleaseBuffer();
                }
            } else {
                // Make sure the frame available count is 0 before proceeding with a sync to ensure
                // the correct frame is used for the sync. The only way mNumFrameAvailable would be
                // greater than 0 is if we already ran out of buffers previously. This means we
                // need to flush the buffers before proceeding with the sync.
                while (mNumFrameAvailable > 0) {
                    BQA_LOGD("waiting until no queued buffers");
                    mCallbackCV.wait(_lock);
                }
            }
        }

        // add to shadow queue
        mNumFrameAvailable++;
        if (waitForTransactionCallback && mNumFrameAvailable >= 2) {
            acquireAndReleaseBuffer();
        }
        ATRACE_INT(mQueuedBufferTrace.c_str(),
                   mNumFrameAvailable + mNumAcquired - mPendingRelease.size());

        BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " syncTransactionSet=%s",
                 item.mFrameNumber, boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            // Add to mSyncedFrameNumbers before waiting in case any buffers are released
            // while waiting for a free buffer. The release and commit callback will try to
            // acquire buffers if there are any available, but we don't want it to acquire
            // in the case where a sync transaction wants the buffer.
            mSyncedFrameNumbers.emplace(item.mFrameNumber);
            // If there's no available buffer and we're in a sync transaction, we need to wait
            // instead of returning since we guarantee a buffer will be acquired for the sync.
            while (acquireNextBufferLocked(mSyncTransaction) == BufferQueue::NO_BUFFER_AVAILABLE) {
                BQA_LOGD("waiting for available buffer");
                mCallbackCV.wait(_lock);
            }

            // Only need a commit callback when syncing to ensure the buffer that's synced has been
            // sent to SF
            incStrong((void*)transactionCommittedCallbackThunk);
            mSyncTransaction->addTransactionCommittedCallback(transactionCommittedCallbackThunk,
                                                              static_cast<void*>(this));
            if (mAcquireSingleBuffer) {
                prevCallback = mTransactionReadyCallback;
                prevTransaction = mSyncTransaction;
                mTransactionReadyCallback = nullptr;
                mSyncTransaction = nullptr;
            }
        } else if (!waitForTransactionCallback) {
            acquireNextBufferLocked(std::nullopt);
        }
    }
    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::onFrameReplaced(const BufferItem& item) {
    BQA_LOGV("onFrameReplaced framenumber=%" PRIu64, item.mFrameNumber);
    // Do nothing since we are not storing unacquired buffer items locally.
}

void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
    std::lock_guard _lock{mTimestampMutex};
    mDequeueTimestamps[bufferId] = systemTime();
};

void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
    std::lock_guard _lock{mTimestampMutex};
    mDequeueTimestamps.erase(bufferId);
};

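// Requests that the next queued buffer be placed on a sync transaction instead of being applied
// directly, and returns false if a sync is already pending. A minimal, hypothetical caller-side
// sketch (not taken from this file):
//
//     bbq->syncNextTransaction([](SurfaceComposerClient::Transaction* t) {
//         // take over *t here, e.g. merge it with other state and apply it
//     });
//
// With acquireSingleBuffer == false the same transaction keeps collecting buffers until
// stopContinuousSyncTransaction() is called.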
bool BLASTBufferQueue::syncNextTransaction(
        std::function<void(SurfaceComposerClient::Transaction*)> callback,
        bool acquireSingleBuffer) {
    LOG_ALWAYS_FATAL_IF(!callback,
                        "BLASTBufferQueue: callback passed in to syncNextTransaction must not be "
                        "NULL");

    std::lock_guard _lock{mMutex};
    BBQ_TRACE();
    if (mTransactionReadyCallback) {
        ALOGW("Attempting to overwrite transaction callback in syncNextTransaction");
        return false;
    }

    mTransactionReadyCallback = callback;
    mSyncTransaction = new SurfaceComposerClient::Transaction();
    mAcquireSingleBuffer = acquireSingleBuffer;
    return true;
}

void BLASTBufferQueue::stopContinuousSyncTransaction() {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;
    {
        std::lock_guard _lock{mMutex};
        if (mAcquireSingleBuffer || !mTransactionReadyCallback) {
            ALOGW("Attempting to stop continuous sync when none are active");
            return;
        }

        prevCallback = mTransactionReadyCallback;
        prevTransaction = mSyncTransaction;

        mTransactionReadyCallback = nullptr;
        mSyncTransaction = nullptr;
        mAcquireSingleBuffer = true;
    }

    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::clearSyncTransaction() {
    std::lock_guard _lock{mMutex};
    if (!mAcquireSingleBuffer) {
        ALOGW("Attempting to clear sync transaction when none are active");
        return;
    }

    mTransactionReadyCallback = nullptr;
    mSyncTransaction = nullptr;
}

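// Returns true if the buffer should be dropped because its size no longer matches the active size
// while the scaling mode is FREEZE (i.e. the buffer cannot be scaled to fit).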
bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
    if (item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        // Only reject buffers if scaling mode is freeze.
        return false;
    }

    ui::Size bufferSize = getBufferSize(item);
    if (mRequestedSize != mSize && mRequestedSize == bufferSize) {
        return false;
    }

    // reject buffers if the buffer size doesn't match.
    return mSize != bufferSize;
}

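// Surface subclass returned by getSurface(). It forwards frame rate and frame timeline calls to
// the owning BLASTBufferQueue and guards against use after destroy().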
class BBQSurface : public Surface {
private:
    std::mutex mMutex;
    sp<BLASTBufferQueue> mBbq GUARDED_BY(mMutex);
    bool mDestroyed GUARDED_BY(mMutex) = false;

public:
    BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
               const sp<IBinder>& scHandle, const sp<BLASTBufferQueue>& bbq)
          : Surface(igbp, controlledByApp, scHandle), mBbq(bbq) {}

    void allocateBuffers() override {
        uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
        uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
        auto gbp = getIGraphicBufferProducer();
        std::thread ([reqWidth, reqHeight, gbp=getIGraphicBufferProducer(),
                      reqFormat=mReqFormat, reqUsage=mReqUsage] () {
            gbp->allocateBuffers(reqWidth, reqHeight,
                                 reqFormat, reqUsage);

        }).detach();
    }

    status_t setFrameRate(float frameRate, int8_t compatibility,
                          int8_t changeFrameRateStrategy) override {
        std::lock_guard _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
                               "BBQSurface::setFrameRate")) {
            return BAD_VALUE;
        }
        return mBbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
    }

    status_t setFrameTimelineInfo(uint64_t frameNumber,
                                  const FrameTimelineInfo& frameTimelineInfo) override {
        std::lock_guard _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        return mBbq->setFrameTimelineInfo(frameNumber, frameTimelineInfo);
    }

    void destroy() override {
        Surface::destroy();

        std::lock_guard _lock{mMutex};
        mDestroyed = true;
        mBbq = nullptr;
    }
};

// TODO: Can we coalesce this with frame updates? Need to confirm
// no timing issues.
status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
                                        bool shouldBeSeamless) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
}

status_t BLASTBufferQueue::setFrameTimelineInfo(uint64_t frameNumber,
                                                const FrameTimelineInfo& frameTimelineInfo) {
    ATRACE_FORMAT("%s(%s) frameNumber: %" PRIu64 " vsyncId: %" PRId64, __func__, mName.c_str(),
                  frameNumber, frameTimelineInfo.vsyncId);
    std::lock_guard _lock{mMutex};
    mPendingFrameTimelines.push({frameNumber, frameTimelineInfo});
    return OK;
}

void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    t.setSidebandStream(mSurfaceControl, stream).apply();
}

sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
    std::lock_guard _lock{mMutex};
    sp<IBinder> scHandle = nullptr;
    if (includeSurfaceControlHandle && mSurfaceControl) {
        scHandle = mSurfaceControl->getHandle();
    }
    return new BBQSurface(mProducer, true, scHandle, this);
}

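// Defers the given transaction so it is applied together with the buffer for frameNumber. If that
// frame has already been acquired, the transaction is applied immediately; otherwise it is stored
// in mPendingTransactions (and the caller's transaction cleared) and merged when that frame is
// acquired.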
Vishnu Nairc4a40c12020-12-23 09:14:32 -0800961void BLASTBufferQueue::mergeWithNextTransaction(SurfaceComposerClient::Transaction* t,
962 uint64_t frameNumber) {
963 std::lock_guard _lock{mMutex};
964 if (mLastAcquiredFrameNumber >= frameNumber) {
965 // Apply the transaction since we have already acquired the desired frame.
966 t->apply();
967 } else {
chaviwaad6cf52021-03-23 17:27:20 -0500968 mPendingTransactions.emplace_back(frameNumber, *t);
969 // Clear the transaction so it can't be applied elsewhere.
970 t->clear();
Vishnu Nairc4a40c12020-12-23 09:14:32 -0800971 }
972}
973
chaviw6a195272021-09-03 16:14:25 -0500974void BLASTBufferQueue::applyPendingTransactions(uint64_t frameNumber) {
975 std::lock_guard _lock{mMutex};
976
977 SurfaceComposerClient::Transaction t;
978 mergePendingTransactions(&t, frameNumber);
Robert Carr79dc06a2022-02-22 15:28:59 -0800979 // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
980 t.setApplyToken(mApplyToken).apply(false, true);
chaviw6a195272021-09-03 16:14:25 -0500981}
982
983void BLASTBufferQueue::mergePendingTransactions(SurfaceComposerClient::Transaction* t,
984 uint64_t frameNumber) {
985 auto mergeTransaction =
986 [&t, currentFrameNumber = frameNumber](
987 std::tuple<uint64_t, SurfaceComposerClient::Transaction> pendingTransaction) {
988 auto& [targetFrameNumber, transaction] = pendingTransaction;
989 if (currentFrameNumber < targetFrameNumber) {
990 return false;
991 }
992 t->merge(std::move(transaction));
993 return true;
994 };
995
996 mPendingTransactions.erase(std::remove_if(mPendingTransactions.begin(),
997 mPendingTransactions.end(), mergeTransaction),
998 mPendingTransactions.end());
999}
1000
chaviwd84085a2022-02-08 11:07:04 -06001001SurfaceComposerClient::Transaction* BLASTBufferQueue::gatherPendingTransactions(
1002 uint64_t frameNumber) {
1003 std::lock_guard _lock{mMutex};
1004 SurfaceComposerClient::Transaction* t = new SurfaceComposerClient::Transaction();
1005 mergePendingTransactions(t, frameNumber);
1006 return t;
1007}

// Maintains a single worker thread per process that services a list of runnables.
class AsyncWorker : public Singleton<AsyncWorker> {
private:
    std::thread mThread;
    bool mDone = false;
    std::deque<std::function<void()>> mRunnables;
    std::mutex mMutex;
    std::condition_variable mCv;
    void run() {
        std::unique_lock<std::mutex> lock(mMutex);
        while (!mDone) {
            while (!mRunnables.empty()) {
                std::deque<std::function<void()>> runnables = std::move(mRunnables);
                mRunnables.clear();
                lock.unlock();
                // Run outside the lock since the runnable might trigger another
                // post to the async worker.
                execute(runnables);
                lock.lock();
            }
            mCv.wait(lock);
        }
    }

    void execute(std::deque<std::function<void()>>& runnables) {
        while (!runnables.empty()) {
            std::function<void()> runnable = runnables.front();
            runnables.pop_front();
            runnable();
        }
    }

public:
    AsyncWorker() : Singleton<AsyncWorker>() { mThread = std::thread(&AsyncWorker::run, this); }

    ~AsyncWorker() {
        {
            // Set mDone while holding the lock so the worker cannot miss the wakeup between
            // re-checking mDone and calling wait().
            std::unique_lock<std::mutex> lock(mMutex);
            mDone = true;
        }
        mCv.notify_all();
        if (mThread.joinable()) {
            mThread.join();
        }
    }

    void post(std::function<void()> runnable) {
        std::unique_lock<std::mutex> lock(mMutex);
        mRunnables.emplace_back(std::move(runnable));
        mCv.notify_one();
    }
};
ANDROID_SINGLETON_STATIC_INSTANCE(AsyncWorker);
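// Example usage (illustrative sketch): any code in this process can defer work onto the shared
// worker thread instead of blocking the calling (often binder) thread.
//
//     AsyncWorker::getInstance().post([]() {
//         ALOGV("runs later, in FIFO order, on the AsyncWorker thread");
//     });
//
// Runnables execute outside the worker's lock, so a runnable may safely post() follow-up work.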

// Asynchronously calls ProducerListener functions so we can emulate one way binder calls.
class AsyncProducerListener : public BnProducerListener {
private:
    const sp<IProducerListener> mListener;

public:
    AsyncProducerListener(const sp<IProducerListener>& listener) : mListener(listener) {}

    void onBufferReleased() override {
        AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferReleased(); });
    }

    void onBuffersDiscarded(const std::vector<int32_t>& slots) override {
        AsyncWorker::getInstance().post(
                [listener = mListener, slots = slots]() { listener->onBuffersDiscarded(slots); });
    }
};
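// With this wrapper, a release callback arriving on a binder thread is queued to the AsyncWorker:
// onBufferReleased() returns immediately, and the client's IProducerListener runs later on the
// worker thread, so a listener that re-enters the BufferQueue cannot deadlock against locks held
// by the caller.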

// Extends the BufferQueueProducer to create a wrapper around the listener so the listener calls
// can be non-blocking when the producer is in the client process.
class BBQBufferQueueProducer : public BufferQueueProducer {
public:
    BBQBufferQueueProducer(const sp<BufferQueueCore>& core, wp<BLASTBufferQueue> bbq)
          : BufferQueueProducer(core, false /* consumerIsSurfaceFlinger */),
            mBLASTBufferQueue(std::move(bbq)) {}

    status_t connect(const sp<IProducerListener>& listener, int api, bool producerControlledByApp,
                     QueueBufferOutput* output) override {
        if (!listener) {
            return BufferQueueProducer::connect(listener, api, producerControlledByApp, output);
        }

        return BufferQueueProducer::connect(new AsyncProducerListener(listener), api,
                                            producerControlledByApp, output);
    }

    // Resize the frame history whenever the size of the buffer queue changes.
    status_t setMaxDequeuedBufferCount(int maxDequeuedBufferCount) override {
        int maxBufferCount;
        status_t status = BufferQueueProducer::setMaxDequeuedBufferCount(maxDequeuedBufferCount,
                                                                         &maxBufferCount);
        // If we can't determine the new max buffer count, just skip growing the history size.
        if (status == OK) {
            size_t newFrameHistorySize = maxBufferCount + 2; // +2 for triple-buffered rendering
            // Skip the resize unless the history would actually grow.
            if (newFrameHistorySize > FrameEventHistory::INITIAL_MAX_FRAME_HISTORY) {
                sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
                if (bbq != nullptr) {
                    ALOGV("increasing frame history size to %zu", newFrameHistorySize);
                    bbq->resizeFrameEventHistory(newFrameHistorySize);
                }
            }
        }
        return status;
    }

    int query(int what, int* value) override {
        if (what == NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER) {
            *value = 1;
            return NO_ERROR;
        }
        return BufferQueueProducer::query(what, value);
    }

private:
    const wp<BLASTBufferQueue> mBLASTBufferQueue;
};
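// Worked example (values assumed for illustration): if raising maxDequeuedBufferCount yields a
// total maxBufferCount of 6, newFrameHistorySize is 6 + 2 = 8. The history is resized only when
// that value exceeds FrameEventHistory::INITIAL_MAX_FRAME_HISTORY, so small queues keep the
// default-sized history while larger queues get enough slots that timestamps for in-flight
// frames are not evicted before clients query them.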

// Similar to BufferQueue::createBufferQueue but creates an adapter-specific BufferQueueProducer.
// This producer wraps client-specified ProducerListeners and invokes them asynchronously,
// emulating one-way binder call behavior. Without this, a listener that calls back into the queue
// can deadlock.
void BLASTBufferQueue::createBufferQueue(sp<IGraphicBufferProducer>* outProducer,
                                         sp<IGraphicBufferConsumer>* outConsumer) {
    LOG_ALWAYS_FATAL_IF(outProducer == nullptr, "BLASTBufferQueue: outProducer must not be NULL");
    LOG_ALWAYS_FATAL_IF(outConsumer == nullptr, "BLASTBufferQueue: outConsumer must not be NULL");

    sp<BufferQueueCore> core(new BufferQueueCore());
    LOG_ALWAYS_FATAL_IF(core == nullptr, "BLASTBufferQueue: failed to create BufferQueueCore");

    sp<IGraphicBufferProducer> producer(new BBQBufferQueueProducer(core, this));
    LOG_ALWAYS_FATAL_IF(producer == nullptr,
                        "BLASTBufferQueue: failed to create BBQBufferQueueProducer");

    sp<BufferQueueConsumer> consumer(new BufferQueueConsumer(core));
    LOG_ALWAYS_FATAL_IF(consumer == nullptr,
                        "BLASTBufferQueue: failed to create BufferQueueConsumer");
    consumer->setAllowExtraAcquire(true);

    *outProducer = producer;
    *outConsumer = consumer;
}
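// Illustrative sketch of how the pair is expected to be wired up by the adapter's constructor
// (member names assumed):
//
//     sp<IGraphicBufferProducer> producer;
//     sp<IGraphicBufferConsumer> consumer;
//     createBufferQueue(&producer, &consumer);
//     // The producer is exposed to clients via getSurface(); the consumer backs
//     // mBufferItemConsumer, whose acquired buffers are forwarded to SurfaceFlinger in
//     // transactions.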

void BLASTBufferQueue::resizeFrameEventHistory(size_t newSize) {
    // This can be null during creation of the buffer queue, but resizing won't do anything at that
    // point in time, so just ignore. This can go away once the class relationships and lifetimes of
    // objects are cleaned up with a major refactor of BufferQueue as a whole.
    if (mBufferItemConsumer != nullptr) {
        std::unique_lock _lock{mMutex};
        mBufferItemConsumer->resizeFrameEventHistory(newSize);
    }
}

PixelFormat BLASTBufferQueue::convertBufferFormat(PixelFormat& format) {
    PixelFormat convertedFormat = format;
    switch (format) {
        case PIXEL_FORMAT_TRANSPARENT:
        case PIXEL_FORMAT_TRANSLUCENT:
            convertedFormat = PIXEL_FORMAT_RGBA_8888;
            break;
        case PIXEL_FORMAT_OPAQUE:
            convertedFormat = PIXEL_FORMAT_RGBX_8888;
            break;
    }
    return convertedFormat;
}
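// Worked example: a client requesting PIXEL_FORMAT_OPAQUE gets RGBX_8888 buffers, and one
// requesting PIXEL_FORMAT_TRANSLUCENT or PIXEL_FORMAT_TRANSPARENT gets RGBA_8888, since those
// request values describe blending intent rather than a concrete pixel layout an allocator can
// satisfy.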

uint32_t BLASTBufferQueue::getLastTransformHint() const {
    std::lock_guard _lock{mMutex};
    if (mSurfaceControl != nullptr) {
        return mSurfaceControl->getTransformHint();
    } else {
        return 0;
    }
}

uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
    std::lock_guard _lock{mMutex};
    return mLastAcquiredFrameNumber;
}

bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
    std::lock_guard _lock{mMutex};
    return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
}

void BLASTBufferQueue::setTransactionHangCallback(
        std::function<void(const std::string&)> callback) {
    std::lock_guard _lock{mMutex};
    mTransactionHangCallback = callback;
}

} // namespace android