/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "BLASTBufferQueue"

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#include <gui/BLASTBufferQueue.h>
#include <gui/BufferItemConsumer.h>
#include <gui/BufferQueueConsumer.h>
#include <gui/BufferQueueCore.h>
#include <gui/BufferQueueProducer.h>
#include <gui/GLConsumer.h>
#include <gui/IProducerListener.h>
#include <gui/Surface.h>
#include <gui/TraceUtils.h>
#include <utils/Singleton.h>
#include <utils/Trace.h>

#include <private/gui/ComposerService.h>
#include <private/gui/ComposerServiceAIDL.h>

#include <chrono>

using namespace std::chrono_literals;

namespace {
inline const char* boolToString(bool b) {
    return b ? "true" : "false";
}
} // namespace

namespace android {

// Macros to include adapter info in log messages
#define BQA_LOGD(x, ...) \
    ALOGD("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGV(x, ...) \
    ALOGV("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
// enable logs for a single layer
//#define BQA_LOGV(x, ...) \
//    ALOGV_IF((strstr(mName.c_str(), "SurfaceView") != nullptr), "[%s](f:%u,a:%u) " x, \
//              mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGE(x, ...) \
    ALOGE("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)

#define BBQ_TRACE(x, ...)                                                                  \
    ATRACE_FORMAT("%s - %s(f:%u,a:%u)" x, __FUNCTION__, mName.c_str(), mNumFrameAvailable, \
                  mNumAcquired, ##__VA_ARGS__)

void BLASTBufferItemConsumer::onDisconnect() {
    Mutex::Autolock lock(mMutex);
    mPreviouslyConnected = mCurrentlyConnected;
    mCurrentlyConnected = false;
    if (mPreviouslyConnected) {
        mDisconnectEvents.push(mCurrentFrameNumber);
    }
    mFrameEventHistory.onDisconnect();
}

void BLASTBufferItemConsumer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                                       FrameEventHistoryDelta* outDelta) {
    Mutex::Autolock lock(mMutex);
    if (newTimestamps) {
        // BufferQueueProducer only adds a new timestamp on
        // queueBuffer
        mCurrentFrameNumber = newTimestamps->frameNumber;
        mFrameEventHistory.addQueue(*newTimestamps);
    }
    if (outDelta) {
        // frame event histories will be processed
        // only after the producer connects and requests
        // deltas for the first time. Forward this intent
        // to SF-side to turn event processing back on
        mPreviouslyConnected = mCurrentlyConnected;
        mCurrentlyConnected = true;
        mFrameEventHistory.getAndResetDelta(outDelta);
    }
}

void BLASTBufferItemConsumer::updateFrameTimestamps(uint64_t frameNumber, nsecs_t refreshStartTime,
                                                    const sp<Fence>& glDoneFence,
                                                    const sp<Fence>& presentFence,
                                                    const sp<Fence>& prevReleaseFence,
                                                    CompositorTiming compositorTiming,
                                                    nsecs_t latchTime, nsecs_t dequeueReadyTime) {
    Mutex::Autolock lock(mMutex);

    // if the producer is not connected, don't bother updating,
    // the next producer that connects won't access this frame event
    if (!mCurrentlyConnected) return;
    std::shared_ptr<FenceTime> glDoneFenceTime = std::make_shared<FenceTime>(glDoneFence);
    std::shared_ptr<FenceTime> presentFenceTime = std::make_shared<FenceTime>(presentFence);
    std::shared_ptr<FenceTime> releaseFenceTime = std::make_shared<FenceTime>(prevReleaseFence);

    mFrameEventHistory.addLatch(frameNumber, latchTime);
    mFrameEventHistory.addRelease(frameNumber, dequeueReadyTime, std::move(releaseFenceTime));
    mFrameEventHistory.addPreComposition(frameNumber, refreshStartTime);
    mFrameEventHistory.addPostComposition(frameNumber, glDoneFenceTime, presentFenceTime,
                                          compositorTiming);
}

void BLASTBufferItemConsumer::getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) {
    bool disconnect = false;
    Mutex::Autolock lock(mMutex);
    while (!mDisconnectEvents.empty() && mDisconnectEvents.front() <= frameNumber) {
        disconnect = true;
        mDisconnectEvents.pop();
    }
    if (needsDisconnect != nullptr) *needsDisconnect = disconnect;
}

void BLASTBufferItemConsumer::onSidebandStreamChanged() {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        sp<NativeHandle> stream = getSidebandStream();
        bbq->setSidebandStream(stream);
    }
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, bool updateDestinationFrame)
      : mSurfaceControl(nullptr),
        mSize(1, 1),
        mRequestedSize(mSize),
        mFormat(PIXEL_FORMAT_RGBA_8888),
        mTransactionReadyCallback(nullptr),
        mSyncTransaction(nullptr),
        mUpdateDestinationFrame(updateDestinationFrame) {
    createBufferQueue(&mProducer, &mConsumer);
    // since the adapter is in the client process, set dequeue timeout
    // explicitly so that dequeueBuffer will block
    mProducer->setDequeueTimeout(std::numeric_limits<int64_t>::max());

    // safe default, most producers are expected to override this
    mProducer->setMaxDequeuedBufferCount(2);
    mBufferItemConsumer = new BLASTBufferItemConsumer(mConsumer,
                                                      GraphicBuffer::USAGE_HW_COMPOSER |
                                                              GraphicBuffer::USAGE_HW_TEXTURE,
                                                      1, false, this);
    static int32_t id = 0;
    mName = name + "#" + std::to_string(id);
    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(id);
    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(id);
    id++;
    mBufferItemConsumer->setName(String8(consumerName.c_str()));
    mBufferItemConsumer->setFrameAvailableListener(this);

    ComposerServiceAIDL::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
    mBufferItemConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBuffers);
    mCurrentMaxAcquiredBufferCount = mMaxAcquiredBuffers;
    mNumAcquired = 0;
    mNumFrameAvailable = 0;

    TransactionCompletedListener::getInstance()->addQueueStallListener(
            [&](const std::string& reason) {
                std::function<void(const std::string&)> callbackCopy;
                {
                    std::unique_lock _lock{mMutex};
                    callbackCopy = mTransactionHangCallback;
                }
                if (callbackCopy) callbackCopy(reason);
            },
            this);

    BQA_LOGV("BLASTBufferQueue created");
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, const sp<SurfaceControl>& surface,
                                   int width, int height, int32_t format)
      : BLASTBufferQueue(name) {
    update(surface, width, height, format);
}
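// Illustrative usage sketch (not part of this file; names are hypothetical): a client
// typically constructs the adapter with its SurfaceControl, obtains a Surface from it, and
// queues buffers through that Surface. Each queued buffer is then acquired below and
// forwarded to SurfaceFlinger in a SurfaceComposerClient::Transaction.
//
//     sp<BLASTBufferQueue> bbq = new BLASTBufferQueue("MyLayer", surfaceControl,
//                                                     width, height, PIXEL_FORMAT_RGBA_8888);
//     sp<Surface> surface = bbq->getSurface(false /* includeSurfaceControlHandle */);
//     // dequeue/queue buffers via `surface` (e.g. through ANativeWindow or EGL)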

BLASTBufferQueue::~BLASTBufferQueue() {
    TransactionCompletedListener::getInstance()->removeQueueStallListener(this);
    if (mPendingTransactions.empty()) {
        return;
    }
    BQA_LOGE("Applying pending transactions on dtor %d",
             static_cast<uint32_t>(mPendingTransactions.size()));
    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
    // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
    t.setApplyToken(mApplyToken).apply(false, true);

    if (mTransactionReadyCallback) {
        mTransactionReadyCallback(mSyncTransaction);
    }
}

void BLASTBufferQueue::update(const sp<SurfaceControl>& surface, uint32_t width, uint32_t height,
                              int32_t format) {
    LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");

    std::unique_lock _lock{mMutex};
    if (mFormat != format) {
        mFormat = format;
        mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
    }

    const bool surfaceControlChanged = !SurfaceControl::isSameSurface(mSurfaceControl, surface);
    if (surfaceControlChanged && mSurfaceControl != nullptr) {
        BQA_LOGD("Updating SurfaceControl without recreating BBQ");
    }
    bool applyTransaction = false;

    // Always update the native object even though they might have the same layer handle, so we can
    // get the updated transform hint from WM.
    mSurfaceControl = surface;
    SurfaceComposerClient::Transaction t;
    if (surfaceControlChanged) {
        t.setFlags(mSurfaceControl, layer_state_t::eEnableBackpressure,
                   layer_state_t::eEnableBackpressure);
        applyTransaction = true;
    }
    mTransformHint = mSurfaceControl->getTransformHint();
    mBufferItemConsumer->setTransformHint(mTransformHint);
    BQA_LOGV("update width=%d height=%d format=%d mTransformHint=%d", width, height, format,
             mTransformHint);

    ui::Size newSize(width, height);
    if (mRequestedSize != newSize) {
        mRequestedSize.set(newSize);
        mBufferItemConsumer->setDefaultBufferSize(mRequestedSize.width, mRequestedSize.height);
        if (mLastBufferInfo.scalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
            // If the buffer supports scaling, update the frame immediately since the client may
            // want to scale the existing buffer to the new size.
            mSize = mRequestedSize;
            if (mUpdateDestinationFrame) {
                t.setDestinationFrame(mSurfaceControl, Rect(newSize));
                applyTransaction = true;
            }
        }
    }
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t.setApplyToken(mApplyToken).apply(false, true);
    }
}

static std::optional<SurfaceControlStats> findMatchingStat(
        const std::vector<SurfaceControlStats>& stats, const sp<SurfaceControl>& sc) {
    for (auto stat : stats) {
        if (SurfaceControl::isSameSurface(sc, stat.surfaceControl)) {
            return stat;
        }
    }
    return std::nullopt;
}

static void transactionCommittedCallbackThunk(void* context, nsecs_t latchTime,
                                              const sp<Fence>& presentFence,
                                              const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCommittedCallback(latchTime, presentFence, stats);
}

void BLASTBufferQueue::transactionCommittedCallback(nsecs_t /*latchTime*/,
                                                    const sp<Fence>& /*presentFence*/,
                                                    const std::vector<SurfaceControlStats>& stats) {
    {
        std::unique_lock _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCommittedCallback");
        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            std::optional<SurfaceControlStats> stat = findMatchingStat(stats, pendingSC);
            if (stat) {
                uint64_t currFrameNumber = stat->frameEventStats.frameNumber;

                // We need to check if we were waiting for a transaction callback in order to
                // process any pending buffers and unblock. It's possible to get transaction
                // callbacks for previous requests so we need to ensure that there are no pending
                // frame numbers that were in a sync. We remove the frame from mSyncedFrameNumbers
                // set and then check if it's empty. If there are no more pending syncs, we can
                // proceed with flushing the shadow queue.
                // We also want to check if mSyncTransaction is null because it's possible another
                // sync request came in while waiting, but it hasn't started processing yet. In that
                // case, we don't actually want to flush the frames in between since they will get
                // processed and merged with the sync transaction and released earlier than if they
                // were sent to SF
                mSyncedFrameNumbers.erase(currFrameNumber);
                if (mSyncedFrameNumbers.empty() && mSyncTransaction == nullptr) {
                    flushShadowQueue();
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCommittedCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }
        decStrong((void*)transactionCommittedCallbackThunk);
    }
}

static void transactionCallbackThunk(void* context, nsecs_t latchTime,
                                     const sp<Fence>& presentFence,
                                     const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCallback(latchTime, presentFence, stats);
}

void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                           const std::vector<SurfaceControlStats>& stats) {
    {
        std::unique_lock _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCallback");

        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            mSurfaceControlsWithPendingCallback.pop();
            std::optional<SurfaceControlStats> statsOptional = findMatchingStat(stats, pendingSC);
            if (statsOptional) {
                SurfaceControlStats stat = *statsOptional;
                if (stat.transformHint) {
                    mTransformHint = *stat.transformHint;
                    mBufferItemConsumer->setTransformHint(mTransformHint);
                    BQA_LOGV("updated mTransformHint=%d", mTransformHint);
                }
                // Update frametime stamps if the frame was latched and presented, indicated by a
                // valid latch time.
                if (stat.latchTime > 0) {
                    mBufferItemConsumer
                            ->updateFrameTimestamps(stat.frameEventStats.frameNumber,
                                                    stat.frameEventStats.refreshStartTime,
                                                    stat.frameEventStats.gpuCompositionDoneFence,
                                                    stat.presentFence, stat.previousReleaseFence,
                                                    stat.frameEventStats.compositorTiming,
                                                    stat.latchTime,
                                                    stat.frameEventStats.dequeueReadyTime);
                }
                auto currFrameNumber = stat.frameEventStats.frameNumber;
                std::vector<ReleaseCallbackId> staleReleases;
                for (const auto& [key, value]: mSubmitted) {
                    if (currFrameNumber > key.framenumber) {
                        staleReleases.push_back(key);
                    }
                }
                for (const auto& staleRelease : staleReleases) {
                    releaseBufferCallbackLocked(staleRelease,
                                                stat.previousReleaseFence
                                                        ? stat.previousReleaseFence
                                                        : Fence::NO_FENCE,
                                                stat.currentMaxAcquiredBufferCount,
                                                true /* fakeRelease */);
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }

        decStrong((void*)transactionCallbackThunk);
    }
}

// Unlike transactionCallbackThunk the release buffer callback does not extend the life of the
// BBQ. This is because if the BBQ is destroyed, then the buffers will be released by the client.
// So we pass in a weak pointer to the BBQ and if it is still alive, then we release the buffer.
// Otherwise, this is a no-op.
static void releaseBufferCallbackThunk(wp<BLASTBufferQueue> context, const ReleaseCallbackId& id,
                                       const sp<Fence>& releaseFence,
                                       std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    sp<BLASTBufferQueue> blastBufferQueue = context.promote();
    if (blastBufferQueue) {
        blastBufferQueue->releaseBufferCallback(id, releaseFence, currentMaxAcquiredBufferCount);
    } else {
        ALOGV("releaseBufferCallbackThunk %s blastBufferQueue is dead", id.to_string().c_str());
    }
}

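// Note: the "shadow queue" is the set of buffers the producer has queued but this adapter has
// not yet acquired (counted by mNumFrameAvailable). flushShadowQueue() drains it by acquiring
// and submitting each pending buffer; it is called with mMutex held since it invokes
// acquireNextBufferLocked().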
void BLASTBufferQueue::flushShadowQueue() {
    BQA_LOGV("flushShadowQueue");
    int numFramesToFlush = mNumFrameAvailable;
    while (numFramesToFlush > 0) {
        acquireNextBufferLocked(std::nullopt);
        numFramesToFlush--;
    }
}

void BLASTBufferQueue::releaseBufferCallback(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    BBQ_TRACE();

    std::unique_lock _lock{mMutex};
    releaseBufferCallbackLocked(id, releaseFence, currentMaxAcquiredBufferCount,
                                false /* fakeRelease */);
}

void BLASTBufferQueue::releaseBufferCallbackLocked(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount, bool fakeRelease) {
    ATRACE_CALL();
    BQA_LOGV("releaseBufferCallback %s", id.to_string().c_str());

    // Calculate how many buffers we need to hold before we release them back
    // to the buffer queue. This will prevent higher latency when we are running
    // on a lower refresh rate than the max supported. We only do that for EGL
    // clients as others don't care about latency
    const bool isEGL = [&] {
        const auto it = mSubmitted.find(id);
        return it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
    }();

    if (currentMaxAcquiredBufferCount) {
        mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
    }

    const auto numPendingBuffersToHold =
            isEGL ? std::max(0u, mMaxAcquiredBuffers - mCurrentMaxAcquiredBufferCount) : 0;

    auto rb = ReleasedBuffer{id, releaseFence};
    if (std::find(mPendingRelease.begin(), mPendingRelease.end(), rb) == mPendingRelease.end()) {
        mPendingRelease.emplace_back(rb);
        if (fakeRelease) {
            BQA_LOGE("Faking releaseBufferCallback from transactionCompleteCallback %" PRIu64,
                     id.framenumber);
            BBQ_TRACE("FakeReleaseCallback");
        }
    }

    // Release all buffers that are beyond the ones that we need to hold
    while (mPendingRelease.size() > numPendingBuffersToHold) {
        const auto releasedBuffer = mPendingRelease.front();
        mPendingRelease.pop_front();
        releaseBuffer(releasedBuffer.callbackId, releasedBuffer.releaseFence);
        // Don't process the transactions here if mSyncedFrameNumbers is not empty. That means
        // there are still transactions that have sync buffers in them that have not been applied
        // or dropped. Instead, let onFrameAvailable handle processing them since it will merge
        // with the syncTransaction.
        if (mSyncedFrameNumbers.empty()) {
            acquireNextBufferLocked(std::nullopt);
        }
    }

    ATRACE_INT("PendingRelease", mPendingRelease.size());
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());
    mCallbackCV.notify_all();
}

void BLASTBufferQueue::releaseBuffer(const ReleaseCallbackId& callbackId,
                                     const sp<Fence>& releaseFence) {
    auto it = mSubmitted.find(callbackId);
    if (it == mSubmitted.end()) {
        BQA_LOGE("ERROR: releaseBufferCallback without corresponding submitted buffer %s",
                 callbackId.to_string().c_str());
        return;
    }
    mNumAcquired--;
    BBQ_TRACE("frame=%" PRIu64, callbackId.framenumber);
    BQA_LOGV("released %s", callbackId.to_string().c_str());
    mBufferItemConsumer->releaseBuffer(it->second, releaseFence);
    mSubmitted.erase(it);
    // Remove the frame number from mSyncedFrameNumbers since we can get a release callback
    // without getting a transaction committed if the buffer was dropped.
    mSyncedFrameNumbers.erase(callbackId.framenumber);
}

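// Acquires the next available buffer from the BufferQueue and stages it on mSurfaceControl.
// When the caller passes a transaction (the sync path), the buffer state is written into that
// transaction and the caller is responsible for applying it; otherwise a local transaction is
// built here and applied one-way on mApplyToken.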
void BLASTBufferQueue::acquireNextBufferLocked(
        const std::optional<SurfaceComposerClient::Transaction*> transaction) {
    // If the next transaction is set, we want to guarantee that our acquire will not fail, so
    // don't include the extra buffer when checking if we can acquire the next buffer.
    const bool includeExtraAcquire = !transaction;
    const bool maxAcquired = maxBuffersAcquired(includeExtraAcquire);
    if (mNumFrameAvailable == 0 || maxAcquired) {
        BQA_LOGV("Can't process next buffer maxBuffersAcquired=%s", boolToString(maxAcquired));
        return;
    }

    if (mSurfaceControl == nullptr) {
        BQA_LOGE("ERROR : surface control is null");
        return;
    }

    SurfaceComposerClient::Transaction localTransaction;
    bool applyTransaction = true;
    SurfaceComposerClient::Transaction* t = &localTransaction;
    if (transaction) {
        t = *transaction;
        applyTransaction = false;
    }

    BufferItem bufferItem;

    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status == BufferQueue::NO_BUFFER_AVAILABLE) {
        BQA_LOGV("Failed to acquire a buffer, err=NO_BUFFER_AVAILABLE");
        return;
    } else if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer, err=%s", statusToString(status).c_str());
        return;
    }

    auto buffer = bufferItem.mGraphicBuffer;
    mNumFrameAvailable--;
    BBQ_TRACE("frame=%" PRIu64, bufferItem.mFrameNumber);

    if (buffer == nullptr) {
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        BQA_LOGE("Buffer was empty");
        return;
    }

    if (rejectBuffer(bufferItem)) {
        BQA_LOGE("rejecting buffer:active_size=%dx%d, requested_size=%dx%d "
                 "buffer{size=%dx%d transform=%d}",
                 mSize.width, mSize.height, mRequestedSize.width, mRequestedSize.height,
                 buffer->getWidth(), buffer->getHeight(), bufferItem.mTransform);
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        acquireNextBufferLocked(transaction);
        return;
    }

    mNumAcquired++;
    mLastAcquiredFrameNumber = bufferItem.mFrameNumber;
    ReleaseCallbackId releaseCallbackId(buffer->getId(), mLastAcquiredFrameNumber);
    mSubmitted[releaseCallbackId] = bufferItem;

    bool needsDisconnect = false;
    mBufferItemConsumer->getConnectionEvents(bufferItem.mFrameNumber, &needsDisconnect);

    // if producer disconnected before, notify SurfaceFlinger
    if (needsDisconnect) {
        t->notifyProducerDisconnect(mSurfaceControl);
    }

    // Ensure BLASTBufferQueue stays alive until we receive the transaction complete callback.
    incStrong((void*)transactionCallbackThunk);

    mSize = mRequestedSize;
    Rect crop = computeCrop(bufferItem);
    mLastBufferInfo.update(true /* hasBuffer */, bufferItem.mGraphicBuffer->getWidth(),
                           bufferItem.mGraphicBuffer->getHeight(), bufferItem.mTransform,
                           bufferItem.mScalingMode, crop);

    auto releaseBufferCallback =
            std::bind(releaseBufferCallbackThunk, wp<BLASTBufferQueue>(this) /* callbackContext */,
                      std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
    sp<Fence> fence = bufferItem.mFence ? new Fence(bufferItem.mFence->dup()) : Fence::NO_FENCE;
    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, releaseBufferCallback);
    t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
    t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
    t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
    t->addTransactionCompletedCallback(transactionCallbackThunk, static_cast<void*>(this));

    mSurfaceControlsWithPendingCallback.push(mSurfaceControl);

    if (mUpdateDestinationFrame) {
        t->setDestinationFrame(mSurfaceControl, Rect(mSize));
    } else {
        const bool ignoreDestinationFrame =
                bufferItem.mScalingMode == NATIVE_WINDOW_SCALING_MODE_FREEZE;
        t->setFlags(mSurfaceControl,
                    ignoreDestinationFrame ? layer_state_t::eIgnoreDestinationFrame : 0,
                    layer_state_t::eIgnoreDestinationFrame);
    }
    t->setBufferCrop(mSurfaceControl, crop);
    t->setTransform(mSurfaceControl, bufferItem.mTransform);
    t->setTransformToDisplayInverse(mSurfaceControl, bufferItem.mTransformToDisplayInverse);
    t->setAutoRefresh(mSurfaceControl, bufferItem.mAutoRefresh);
    if (!bufferItem.mIsAutoTimestamp) {
        t->setDesiredPresentTime(bufferItem.mTimestamp);
    }

    if (!mNextFrameTimelineInfoQueue.empty()) {
        t->setFrameTimelineInfo(mNextFrameTimelineInfoQueue.front());
        mNextFrameTimelineInfoQueue.pop();
    }

    {
        std::unique_lock _lock{mTimestampMutex};
        auto dequeueTime = mDequeueTimestamps.find(buffer->getId());
        if (dequeueTime != mDequeueTimestamps.end()) {
            Parcel p;
            p.writeInt64(dequeueTime->second);
            t->setMetadata(mSurfaceControl, gui::METADATA_DEQUEUE_TIME, p);
            mDequeueTimestamps.erase(dequeueTime);
        }
    }

    mergePendingTransactions(t, bufferItem.mFrameNumber);
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t->setApplyToken(mApplyToken).apply(false, true);
        mAppliedLastTransaction = true;
        mLastAppliedFrameNumber = bufferItem.mFrameNumber;
    } else {
        t->setBufferHasBarrier(mSurfaceControl, mLastAppliedFrameNumber);
        mAppliedLastTransaction = false;
    }

    BQA_LOGV("acquireNextBufferLocked size=%dx%d mFrameNumber=%" PRIu64
             " applyTransaction=%s mTimestamp=%" PRId64 "%s mPendingTransactions.size=%d"
             " graphicBufferId=%" PRIu64 "%s transform=%d",
             mSize.width, mSize.height, bufferItem.mFrameNumber, boolToString(applyTransaction),
             bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp ? "(auto)" : "",
             static_cast<uint32_t>(mPendingTransactions.size()), bufferItem.mGraphicBuffer->getId(),
             bufferItem.mAutoRefresh ? " mAutoRefresh" : "", bufferItem.mTransform);
}

Rect BLASTBufferQueue::computeCrop(const BufferItem& item) {
    if (item.mScalingMode == NATIVE_WINDOW_SCALING_MODE_SCALE_CROP) {
        return GLConsumer::scaleDownCrop(item.mCrop, mSize.width, mSize.height);
    }
    return item.mCrop;
}

void BLASTBufferQueue::acquireAndReleaseBuffer() {
    BBQ_TRACE();
    BufferItem bufferItem;
    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer in acquireAndReleaseBuffer, err=%s",
                 statusToString(status).c_str());
        return;
    }
    mNumFrameAvailable--;
    mBufferItemConsumer->releaseBuffer(bufferItem, bufferItem.mFence);
}

void BLASTBufferQueue::flushAndWaitForFreeBuffer(std::unique_lock<std::mutex>& lock) {
    BBQ_TRACE();
    if (!mSyncedFrameNumbers.empty() && mNumFrameAvailable > 0) {
        // We are waiting on a previous sync's transaction callback so allow another sync
        // transaction to proceed.
        //
        // We need to first flush out the transactions that were in between the two syncs.
        // We do this by merging them into mSyncTransaction so any buffer merging will get
        // a release callback invoked. The release callback will be async so we need to wait
        // on max acquired to make sure we have the capacity to acquire another buffer.
        if (maxBuffersAcquired(false /* includeExtraAcquire */)) {
            BQA_LOGD("waiting to flush shadow queue...");
            mCallbackCV.wait(lock);
        }
        while (mNumFrameAvailable > 0) {
            // flush out the shadow queue
            acquireAndReleaseBuffer();
        }
    }

    while (maxBuffersAcquired(false /* includeExtraAcquire */)) {
        BQA_LOGD("waiting for free buffer.");
        mCallbackCV.wait(lock);
    }
}

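// Consumer-side callback invoked when the producer queues a new buffer. The frame is added to
// the shadow queue and then either acquired into mSyncTransaction (when a sync is pending),
// acquired into a locally applied transaction, or left queued until an earlier sync completes.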
void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;
    bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();

    {
        std::unique_lock _lock{mMutex};
        BBQ_TRACE();
        const bool syncTransactionSet = mTransactionReadyCallback != nullptr;
        BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            bool mayNeedToWaitForBuffer = true;
            // If we are going to re-use the same mSyncTransaction, release the buffer that may
            // already be set in the Transaction. This is to allow us a free slot early to continue
            // processing a new buffer.
            if (!mAcquireSingleBuffer) {
                auto bufferData = mSyncTransaction->getAndClearBuffer(mSurfaceControl);
                if (bufferData) {
                    BQA_LOGD("Releasing previous buffer when syncing: framenumber=%" PRIu64,
                             bufferData->frameNumber);
                    releaseBuffer(bufferData->generateReleaseCallbackId(),
                                  bufferData->acquireFence);
                    // Because we just released a buffer, we know there's no need to wait for a free
                    // buffer.
                    mayNeedToWaitForBuffer = false;
                }
            }

            if (mayNeedToWaitForBuffer) {
                flushAndWaitForFreeBuffer(_lock);
            }
        }

        // add to shadow queue
        mNumFrameAvailable++;
        if (waitForTransactionCallback && mNumFrameAvailable >= 2) {
            acquireAndReleaseBuffer();
        }
        ATRACE_INT(mQueuedBufferTrace.c_str(),
                   mNumFrameAvailable + mNumAcquired - mPendingRelease.size());

        BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " syncTransactionSet=%s",
                 item.mFrameNumber, boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            acquireNextBufferLocked(mSyncTransaction);

            // Only need a commit callback when syncing to ensure the buffer that's synced has been
            // sent to SF
            incStrong((void*)transactionCommittedCallbackThunk);
            mSyncTransaction->addTransactionCommittedCallback(transactionCommittedCallbackThunk,
                                                              static_cast<void*>(this));
            mSyncedFrameNumbers.emplace(item.mFrameNumber);
            if (mAcquireSingleBuffer) {
                prevCallback = mTransactionReadyCallback;
                prevTransaction = mSyncTransaction;
                mTransactionReadyCallback = nullptr;
                mSyncTransaction = nullptr;
            }
        } else if (!waitForTransactionCallback) {
            acquireNextBufferLocked(std::nullopt);
        }
    }
    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::onFrameReplaced(const BufferItem& item) {
    BQA_LOGV("onFrameReplaced framenumber=%" PRIu64, item.mFrameNumber);
    // Do nothing since we are not storing unacquired buffer items locally.
}

void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
    std::unique_lock _lock{mTimestampMutex};
    mDequeueTimestamps[bufferId] = systemTime();
};

void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
    std::unique_lock _lock{mTimestampMutex};
    mDequeueTimestamps.erase(bufferId);
};

void BLASTBufferQueue::syncNextTransaction(
        std::function<void(SurfaceComposerClient::Transaction*)> callback,
        bool acquireSingleBuffer) {
    BBQ_TRACE();

    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;

    {
        std::lock_guard _lock{mMutex};
        // We're about to overwrite the previous call so we should invoke that callback
        // immediately.
        if (mTransactionReadyCallback) {
            prevCallback = mTransactionReadyCallback;
            prevTransaction = mSyncTransaction;
        }

        mTransactionReadyCallback = callback;
        if (callback) {
            mSyncTransaction = new SurfaceComposerClient::Transaction();
        } else {
            mSyncTransaction = nullptr;
        }
        mAcquireSingleBuffer = mTransactionReadyCallback ? acquireSingleBuffer : true;
    }

    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}
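// Illustrative sketch (caller-side, names hypothetical): a client that wants the next queued
// buffer delivered inside a transaction it controls can do:
//
//     bbq->syncNextTransaction([](SurfaceComposerClient::Transaction* t) {
//         // `t` now carries the next buffer; merge it into a larger transaction or apply it.
//     }, true /* acquireSingleBuffer */);
//
// If a callback was already pending, it is invoked immediately with the old sync transaction
// before being replaced. With acquireSingleBuffer == false the same sync transaction keeps
// collecting buffers until stopContinuousSyncTransaction() is called.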

void BLASTBufferQueue::stopContinuousSyncTransaction() {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;
    {
        std::lock_guard _lock{mMutex};
        bool invokeCallback = mTransactionReadyCallback && !mAcquireSingleBuffer;
        if (invokeCallback) {
            prevCallback = mTransactionReadyCallback;
            prevTransaction = mSyncTransaction;
        }
        mTransactionReadyCallback = nullptr;
        mSyncTransaction = nullptr;
        mAcquireSingleBuffer = true;
    }
    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
    if (item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        // Only reject buffers if scaling mode is freeze.
        return false;
    }

    uint32_t bufWidth = item.mGraphicBuffer->getWidth();
    uint32_t bufHeight = item.mGraphicBuffer->getHeight();

    // Take the buffer's orientation into account
    if (item.mTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }
    ui::Size bufferSize(bufWidth, bufHeight);
    if (mRequestedSize != mSize && mRequestedSize == bufferSize) {
        return false;
    }

    // reject buffers if the buffer size doesn't match.
    return mSize != bufferSize;
}

// Check if we have acquired the maximum number of buffers.
// Consumer can acquire an additional buffer if that buffer is not droppable. Set
// includeExtraAcquire to true to include this buffer in the count. Since this depends on the
// state of the buffer, the next acquire may return with NO_BUFFER_AVAILABLE.
bool BLASTBufferQueue::maxBuffersAcquired(bool includeExtraAcquire) const {
    int maxAcquiredBuffers = mMaxAcquiredBuffers + (includeExtraAcquire ? 2 : 1);
    return mNumAcquired >= maxAcquiredBuffers;
}

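// Surface subclass handed out by getSurface(). It forwards frame rate and frame timeline
// requests to the owning BLASTBufferQueue, rejects them with DEAD_OBJECT once destroy() has
// run, and performs buffer allocation on a detached thread so the caller is not blocked.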
class BBQSurface : public Surface {
private:
    std::mutex mMutex;
    sp<BLASTBufferQueue> mBbq;
    bool mDestroyed = false;

public:
    BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
               const sp<IBinder>& scHandle, const sp<BLASTBufferQueue>& bbq)
          : Surface(igbp, controlledByApp, scHandle), mBbq(bbq) {}

    void allocateBuffers() override {
        uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
        uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
        auto gbp = getIGraphicBufferProducer();
        std::thread ([reqWidth, reqHeight, gbp=getIGraphicBufferProducer(),
                      reqFormat=mReqFormat, reqUsage=mReqUsage] () {
            gbp->allocateBuffers(reqWidth, reqHeight,
                                 reqFormat, reqUsage);

        }).detach();
    }

    status_t setFrameRate(float frameRate, int8_t compatibility,
                          int8_t changeFrameRateStrategy) override {
        std::unique_lock _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
                               "BBQSurface::setFrameRate")) {
            return BAD_VALUE;
        }
        return mBbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
    }

    status_t setFrameTimelineInfo(const FrameTimelineInfo& frameTimelineInfo) override {
        std::unique_lock _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        return mBbq->setFrameTimelineInfo(frameTimelineInfo);
    }

    void destroy() override {
        Surface::destroy();

        std::unique_lock _lock{mMutex};
        mDestroyed = true;
        mBbq = nullptr;
    }
};

// TODO: Can we coalesce this with frame updates? Need to confirm
// no timing issues.
status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
                                        bool shouldBeSeamless) {
    std::unique_lock _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
}

status_t BLASTBufferQueue::setFrameTimelineInfo(const FrameTimelineInfo& frameTimelineInfo) {
    std::unique_lock _lock{mMutex};
    mNextFrameTimelineInfoQueue.push(frameTimelineInfo);
    return OK;
}

void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
    std::unique_lock _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    t.setSidebandStream(mSurfaceControl, stream).apply();
}

sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
    std::unique_lock _lock{mMutex};
    sp<IBinder> scHandle = nullptr;
    if (includeSurfaceControlHandle && mSurfaceControl) {
        scHandle = mSurfaceControl->getHandle();
    }
    return new BBQSurface(mProducer, true, scHandle, this);
}

void BLASTBufferQueue::mergeWithNextTransaction(SurfaceComposerClient::Transaction* t,
                                                uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};
    if (mLastAcquiredFrameNumber >= frameNumber) {
        // Apply the transaction since we have already acquired the desired frame.
        t->apply();
    } else {
        mPendingTransactions.emplace_back(frameNumber, *t);
        // Clear the transaction so it can't be applied elsewhere.
        t->clear();
    }
}
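// Illustrative sketch (names hypothetical): callers use mergeWithNextTransaction() to tie
// arbitrary transaction state to a specific frame produced by this queue:
//
//     SurfaceComposerClient::Transaction t;
//     t.setAlpha(someOtherLayer, 0.5f);
//     bbq->mergeWithNextTransaction(&t, targetFrameNumber);
//
// If that frame has already been acquired the transaction is applied immediately; otherwise it
// is queued in mPendingTransactions and merged into the transaction that carries the frame
// (see mergePendingTransactions()).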

void BLASTBufferQueue::applyPendingTransactions(uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};

    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, frameNumber);
    // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
    t.setApplyToken(mApplyToken).apply(false, true);
}

void BLASTBufferQueue::mergePendingTransactions(SurfaceComposerClient::Transaction* t,
                                                uint64_t frameNumber) {
    auto mergeTransaction =
            [&t, currentFrameNumber = frameNumber](
                    std::tuple<uint64_t, SurfaceComposerClient::Transaction> pendingTransaction) {
                auto& [targetFrameNumber, transaction] = pendingTransaction;
                if (currentFrameNumber < targetFrameNumber) {
                    return false;
                }
                t->merge(std::move(transaction));
                return true;
            };

    mPendingTransactions.erase(std::remove_if(mPendingTransactions.begin(),
                                              mPendingTransactions.end(), mergeTransaction),
                               mPendingTransactions.end());
}

SurfaceComposerClient::Transaction* BLASTBufferQueue::gatherPendingTransactions(
        uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction* t = new SurfaceComposerClient::Transaction();
    mergePendingTransactions(t, frameNumber);
    return t;
}

// Maintains a single worker thread per process that services a list of runnables.
class AsyncWorker : public Singleton<AsyncWorker> {
private:
    std::thread mThread;
    bool mDone = false;
    std::deque<std::function<void()>> mRunnables;
    std::mutex mMutex;
    std::condition_variable mCv;
    void run() {
        std::unique_lock<std::mutex> lock(mMutex);
        while (!mDone) {
            while (!mRunnables.empty()) {
                std::deque<std::function<void()>> runnables = std::move(mRunnables);
                mRunnables.clear();
                lock.unlock();
                // Run outside the lock since the runnable might trigger another
                // post to the async worker.
                execute(runnables);
                lock.lock();
            }
            mCv.wait(lock);
        }
    }

    void execute(std::deque<std::function<void()>>& runnables) {
        while (!runnables.empty()) {
            std::function<void()> runnable = runnables.front();
            runnables.pop_front();
            runnable();
        }
    }

public:
    AsyncWorker() : Singleton<AsyncWorker>() { mThread = std::thread(&AsyncWorker::run, this); }

    ~AsyncWorker() {
        mDone = true;
        mCv.notify_all();
        if (mThread.joinable()) {
            mThread.join();
        }
    }

    void post(std::function<void()> runnable) {
        std::unique_lock<std::mutex> lock(mMutex);
        mRunnables.emplace_back(std::move(runnable));
        mCv.notify_one();
    }
};
ANDROID_SINGLETON_STATIC_INSTANCE(AsyncWorker);

// Asynchronously calls ProducerListener functions so we can emulate one way binder calls.
class AsyncProducerListener : public BnProducerListener {
private:
    const sp<IProducerListener> mListener;

public:
    AsyncProducerListener(const sp<IProducerListener>& listener) : mListener(listener) {}

    void onBufferReleased() override {
        AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferReleased(); });
    }

    void onBuffersDiscarded(const std::vector<int32_t>& slots) override {
        AsyncWorker::getInstance().post(
                [listener = mListener, slots = slots]() { listener->onBuffersDiscarded(slots); });
    }
};

// Extends the BufferQueueProducer to create a wrapper around the listener so the listener calls
// can be non-blocking when the producer is in the client process.
class BBQBufferQueueProducer : public BufferQueueProducer {
public:
    BBQBufferQueueProducer(const sp<BufferQueueCore>& core)
          : BufferQueueProducer(core, false /* consumerIsSurfaceFlinger*/) {}

    status_t connect(const sp<IProducerListener>& listener, int api, bool producerControlledByApp,
                     QueueBufferOutput* output) override {
        if (!listener) {
            return BufferQueueProducer::connect(listener, api, producerControlledByApp, output);
        }

        return BufferQueueProducer::connect(new AsyncProducerListener(listener), api,
                                            producerControlledByApp, output);
    }

    int query(int what, int* value) override {
        if (what == NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER) {
            *value = 1;
            return NO_ERROR;
        }
        return BufferQueueProducer::query(what, value);
    }
};

// Similar to BufferQueue::createBufferQueue but creates an adapter specific bufferqueue producer.
// This BQP allows invoking client specified ProducerListeners asynchronously, emulating one way
// binder call behavior. Without this, if the listener calls back into the queue, we can deadlock.
void BLASTBufferQueue::createBufferQueue(sp<IGraphicBufferProducer>* outProducer,
                                         sp<IGraphicBufferConsumer>* outConsumer) {
    LOG_ALWAYS_FATAL_IF(outProducer == nullptr, "BLASTBufferQueue: outProducer must not be NULL");
    LOG_ALWAYS_FATAL_IF(outConsumer == nullptr, "BLASTBufferQueue: outConsumer must not be NULL");

    sp<BufferQueueCore> core(new BufferQueueCore());
    LOG_ALWAYS_FATAL_IF(core == nullptr, "BLASTBufferQueue: failed to create BufferQueueCore");

    sp<IGraphicBufferProducer> producer(new BBQBufferQueueProducer(core));
    LOG_ALWAYS_FATAL_IF(producer == nullptr,
                        "BLASTBufferQueue: failed to create BBQBufferQueueProducer");

    sp<BufferQueueConsumer> consumer(new BufferQueueConsumer(core));
    consumer->setAllowExtraAcquire(true);
    LOG_ALWAYS_FATAL_IF(consumer == nullptr,
                        "BLASTBufferQueue: failed to create BufferQueueConsumer");

    *outProducer = producer;
    *outConsumer = consumer;
}

PixelFormat BLASTBufferQueue::convertBufferFormat(PixelFormat& format) {
    PixelFormat convertedFormat = format;
    switch (format) {
        case PIXEL_FORMAT_TRANSPARENT:
        case PIXEL_FORMAT_TRANSLUCENT:
            convertedFormat = PIXEL_FORMAT_RGBA_8888;
            break;
        case PIXEL_FORMAT_OPAQUE:
            convertedFormat = PIXEL_FORMAT_RGBX_8888;
            break;
    }
    return convertedFormat;
}

uint32_t BLASTBufferQueue::getLastTransformHint() const {
    if (mSurfaceControl != nullptr) {
        return mSurfaceControl->getTransformHint();
    } else {
        return 0;
    }
}

uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
    std::unique_lock _lock{mMutex};
    return mLastAcquiredFrameNumber;
}

bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
    std::unique_lock _lock{mMutex};
    return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
}

void BLASTBufferQueue::setTransactionHangCallback(
        std::function<void(const std::string&)> callback) {
    std::unique_lock _lock{mMutex};
    mTransactionHangCallback = callback;
}

} // namespace android