/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "BLASTBufferQueue"

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#include <gui/BLASTBufferQueue.h>
#include <gui/BufferItemConsumer.h>
#include <gui/BufferQueueConsumer.h>
#include <gui/BufferQueueCore.h>
#include <gui/BufferQueueProducer.h>
#include <gui/GLConsumer.h>
#include <gui/IProducerListener.h>
#include <gui/Surface.h>
#include <gui/TraceUtils.h>
#include <utils/Singleton.h>
#include <utils/Trace.h>

#include <private/gui/ComposerService.h>

#include <chrono>

using namespace std::chrono_literals;

namespace {
inline const char* boolToString(bool b) {
    return b ? "true" : "false";
}
} // namespace

namespace android {

// Macros to include adapter info in log messages
#define BQA_LOGD(x, ...) \
    ALOGD("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGV(x, ...) \
    ALOGV("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
// enable logs for a single layer
//#define BQA_LOGV(x, ...) \
//      ALOGV_IF((strstr(mName.c_str(), "SurfaceView") != nullptr), "[%s](f:%u,a:%u) " x, \
//              mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGE(x, ...) \
    ALOGE("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)

#define BBQ_TRACE(x, ...)                                                                  \
    ATRACE_FORMAT("%s - %s(f:%u,a:%u)" x, __FUNCTION__, mName.c_str(), mNumFrameAvailable, \
                  mNumAcquired, ##__VA_ARGS__)
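// In the messages and trace entries above, "f:" is mNumFrameAvailable (buffers queued by the
// producer that this adapter has not yet acquired) and "a:" is mNumAcquired (buffers acquired
// and not yet released back to the BufferQueue).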

void BLASTBufferItemConsumer::onDisconnect() {
    Mutex::Autolock lock(mMutex);
    mPreviouslyConnected = mCurrentlyConnected;
    mCurrentlyConnected = false;
    if (mPreviouslyConnected) {
        mDisconnectEvents.push(mCurrentFrameNumber);
    }
    mFrameEventHistory.onDisconnect();
}

void BLASTBufferItemConsumer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                                       FrameEventHistoryDelta* outDelta) {
    Mutex::Autolock lock(mMutex);
    if (newTimestamps) {
        // BufferQueueProducer only adds a new timestamp on
        // queueBuffer
        mCurrentFrameNumber = newTimestamps->frameNumber;
        mFrameEventHistory.addQueue(*newTimestamps);
    }
    if (outDelta) {
        // frame event histories will be processed
        // only after the producer connects and requests
        // deltas for the first time. Forward this intent
        // to SF-side to turn event processing back on
        mPreviouslyConnected = mCurrentlyConnected;
        mCurrentlyConnected = true;
        mFrameEventHistory.getAndResetDelta(outDelta);
    }
}

void BLASTBufferItemConsumer::updateFrameTimestamps(uint64_t frameNumber, nsecs_t refreshStartTime,
                                                    const sp<Fence>& glDoneFence,
                                                    const sp<Fence>& presentFence,
                                                    const sp<Fence>& prevReleaseFence,
                                                    CompositorTiming compositorTiming,
                                                    nsecs_t latchTime, nsecs_t dequeueReadyTime) {
    Mutex::Autolock lock(mMutex);

    // if the producer is not connected, don't bother updating,
    // the next producer that connects won't access this frame event
    if (!mCurrentlyConnected) return;
    std::shared_ptr<FenceTime> glDoneFenceTime = std::make_shared<FenceTime>(glDoneFence);
    std::shared_ptr<FenceTime> presentFenceTime = std::make_shared<FenceTime>(presentFence);
    std::shared_ptr<FenceTime> releaseFenceTime = std::make_shared<FenceTime>(prevReleaseFence);

    mFrameEventHistory.addLatch(frameNumber, latchTime);
    mFrameEventHistory.addRelease(frameNumber, dequeueReadyTime, std::move(releaseFenceTime));
    mFrameEventHistory.addPreComposition(frameNumber, refreshStartTime);
    mFrameEventHistory.addPostComposition(frameNumber, glDoneFenceTime, presentFenceTime,
                                          compositorTiming);
}

void BLASTBufferItemConsumer::getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) {
    bool disconnect = false;
    Mutex::Autolock lock(mMutex);
    while (!mDisconnectEvents.empty() && mDisconnectEvents.front() <= frameNumber) {
        disconnect = true;
        mDisconnectEvents.pop();
    }
    if (needsDisconnect != nullptr) *needsDisconnect = disconnect;
}

void BLASTBufferItemConsumer::onSidebandStreamChanged() {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        sp<NativeHandle> stream = getSidebandStream();
        bbq->setSidebandStream(stream);
    }
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, bool updateDestinationFrame)
      : mSurfaceControl(nullptr),
        mSize(1, 1),
        mRequestedSize(mSize),
        mFormat(PIXEL_FORMAT_RGBA_8888),
        mTransactionReadyCallback(nullptr),
        mSyncTransaction(nullptr),
        mUpdateDestinationFrame(updateDestinationFrame) {
    createBufferQueue(&mProducer, &mConsumer);
    // since the adapter is in the client process, set dequeue timeout
    // explicitly so that dequeueBuffer will block
    mProducer->setDequeueTimeout(std::numeric_limits<int64_t>::max());

    // safe default, most producers are expected to override this
    mProducer->setMaxDequeuedBufferCount(2);
    mBufferItemConsumer = new BLASTBufferItemConsumer(mConsumer,
                                                      GraphicBuffer::USAGE_HW_COMPOSER |
                                                              GraphicBuffer::USAGE_HW_TEXTURE,
                                                      1, false, this);
    static int32_t id = 0;
    mName = name + "#" + std::to_string(id);
    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(id);
    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(id);
    id++;
    mBufferItemConsumer->setName(String8(consumerName.c_str()));
    mBufferItemConsumer->setFrameAvailableListener(this);
    mBufferItemConsumer->setBufferFreedListener(this);

    ComposerService::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
    mBufferItemConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBuffers);
    mCurrentMaxAcquiredBufferCount = mMaxAcquiredBuffers;
    mNumAcquired = 0;
    mNumFrameAvailable = 0;
    BQA_LOGV("BLASTBufferQueue created");
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, const sp<SurfaceControl>& surface,
                                   int width, int height, int32_t format)
      : BLASTBufferQueue(name) {
    update(surface, width, height, format);
}

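// Illustrative client-side sketch (not part of this file; `sc`, `width`, and `height` are
// placeholders): a typical client wraps an existing SurfaceControl with the adapter and then
// renders through the Surface it vends.
//
//     sp<BLASTBufferQueue> bbq =
//             new BLASTBufferQueue("MyBBQ", sc, width, height, PIXEL_FORMAT_RGBA_8888);
//     sp<Surface> surface = bbq->getSurface(false /* includeSurfaceControlHandle */);
//     // Buffers queued to `surface` are forwarded to SurfaceFlinger as transactions on `sc`.
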
BLASTBufferQueue::~BLASTBufferQueue() {
    if (mPendingTransactions.empty()) {
        return;
    }
    BQA_LOGE("Applying pending transactions on dtor %d",
             static_cast<uint32_t>(mPendingTransactions.size()));
    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
    t.setApplyToken(mApplyToken).apply();
}

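// Called when the client rebinds the adapter to a (possibly new) SurfaceControl or changes the
// requested size or format. The SurfaceControl is always replaced, even if it refers to the same
// layer, so the latest transform hint from the window manager is picked up.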
void BLASTBufferQueue::update(const sp<SurfaceControl>& surface, uint32_t width, uint32_t height,
                              int32_t format) {
    LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");

    std::unique_lock _lock{mMutex};
    if (mFormat != format) {
        mFormat = format;
        mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
    }

    const bool surfaceControlChanged = !SurfaceControl::isSameSurface(mSurfaceControl, surface);
    if (surfaceControlChanged && mSurfaceControl != nullptr) {
        BQA_LOGD("Updating SurfaceControl without recreating BBQ");
    }
    bool applyTransaction = false;

    // Always update the native object, even though the old and new SurfaceControls might share
    // the same layer handle, so we get the updated transform hint from WM.
    mSurfaceControl = surface;
    SurfaceComposerClient::Transaction t;
    if (surfaceControlChanged) {
        t.setFlags(mSurfaceControl, layer_state_t::eEnableBackpressure,
                   layer_state_t::eEnableBackpressure);
        applyTransaction = true;
    }
    mTransformHint = mSurfaceControl->getTransformHint();
    mBufferItemConsumer->setTransformHint(mTransformHint);
    BQA_LOGV("update width=%d height=%d format=%d mTransformHint=%d", width, height, format,
             mTransformHint);

    ui::Size newSize(width, height);
    if (mRequestedSize != newSize) {
        mRequestedSize.set(newSize);
        mBufferItemConsumer->setDefaultBufferSize(mRequestedSize.width, mRequestedSize.height);
        if (mLastBufferInfo.scalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
            // If the buffer supports scaling, update the frame immediately since the client may
            // want to scale the existing buffer to the new size.
            mSize = mRequestedSize;
            if (mUpdateDestinationFrame) {
                t.setDestinationFrame(mSurfaceControl, Rect(newSize));
                applyTransaction = true;
            }
        }
    }
    if (applyTransaction) {
        t.setApplyToken(mApplyToken).apply();
    }
}

static std::optional<SurfaceControlStats> findMatchingStat(
        const std::vector<SurfaceControlStats>& stats, const sp<SurfaceControl>& sc) {
    for (auto stat : stats) {
        if (SurfaceControl::isSameSurface(sc, stat.surfaceControl)) {
            return stat;
        }
    }
    return std::nullopt;
}

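// The committed/completed callbacks below are registered with a raw pointer to this adapter, so
// a strong reference is taken (incStrong) when a callback is installed and released (decStrong)
// once it has run, keeping the adapter alive in between.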
static void transactionCommittedCallbackThunk(void* context, nsecs_t latchTime,
                                              const sp<Fence>& presentFence,
                                              const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCommittedCallback(latchTime, presentFence, stats);
}

void BLASTBufferQueue::transactionCommittedCallback(nsecs_t /*latchTime*/,
                                                    const sp<Fence>& /*presentFence*/,
                                                    const std::vector<SurfaceControlStats>& stats) {
    {
        std::unique_lock _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCommittedCallback");
        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            std::optional<SurfaceControlStats> stat = findMatchingStat(stats, pendingSC);
            if (stat) {
                uint64_t currFrameNumber = stat->frameEventStats.frameNumber;

                // We need to check if we were waiting for a transaction callback in order to
                // process any pending buffers and unblock. It's possible to get transaction
                // callbacks for previous requests, so we need to ensure the frame from this
                // transaction callback matches the last acquired buffer. Since acquireNextBuffer
                // will stop processing buffers when mWaitForTransactionCallback is set, we know
                // that mLastAcquiredFrameNumber is the frame we're waiting on.
                // We also want to check if mSyncTransaction is null because it's possible another
                // sync request came in while waiting, but it hasn't started processing yet. In
                // that case, we don't actually want to flush the frames in between since they
                // will get processed and merged with the sync transaction and released earlier
                // than if they were sent to SF.
                if (mWaitForTransactionCallback && mSyncTransaction == nullptr &&
                    currFrameNumber >= mLastAcquiredFrameNumber) {
                    mWaitForTransactionCallback = false;
                    flushShadowQueue();
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCommittedCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }

        decStrong((void*)transactionCommittedCallbackThunk);
    }
}

static void transactionCallbackThunk(void* context, nsecs_t latchTime,
                                     const sp<Fence>& presentFence,
                                     const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCallback(latchTime, presentFence, stats);
}

void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                           const std::vector<SurfaceControlStats>& stats) {
    {
        std::unique_lock _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCallback");

        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            mSurfaceControlsWithPendingCallback.pop();
            std::optional<SurfaceControlStats> statsOptional = findMatchingStat(stats, pendingSC);
            if (statsOptional) {
                SurfaceControlStats stat = *statsOptional;
                mTransformHint = stat.transformHint;
                mBufferItemConsumer->setTransformHint(mTransformHint);
                BQA_LOGV("updated mTransformHint=%d", mTransformHint);
                // Update frame timestamps if the frame was latched and presented, indicated by a
                // valid latch time.
                if (stat.latchTime > 0) {
                    mBufferItemConsumer
                            ->updateFrameTimestamps(stat.frameEventStats.frameNumber,
                                                    stat.frameEventStats.refreshStartTime,
                                                    stat.frameEventStats.gpuCompositionDoneFence,
                                                    stat.presentFence, stat.previousReleaseFence,
                                                    stat.frameEventStats.compositorTiming,
                                                    stat.latchTime,
                                                    stat.frameEventStats.dequeueReadyTime);
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }

        decStrong((void*)transactionCallbackThunk);
    }
}

// Unlike transactionCallbackThunk, the release buffer callback does not extend the life of the
// BBQ. This is because if the BBQ is destroyed, the buffers will be released by the client.
// So we pass in a weak pointer to the BBQ and, if it is still alive, we release the buffer.
// Otherwise, this is a no-op.
static void releaseBufferCallbackThunk(wp<BLASTBufferQueue> context, const ReleaseCallbackId& id,
                                       const sp<Fence>& releaseFence,
                                       std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    sp<BLASTBufferQueue> blastBufferQueue = context.promote();
    if (blastBufferQueue) {
        blastBufferQueue->releaseBufferCallback(id, releaseFence, currentMaxAcquiredBufferCount);
    } else {
        ALOGV("releaseBufferCallbackThunk %s blastBufferQueue is dead", id.to_string().c_str());
    }
}

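// Drains every frame currently sitting in the shadow queue by acquiring it and submitting it in
// its own transaction. Callers already hold mMutex, which acquireNextBufferLocked expects.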
void BLASTBufferQueue::flushShadowQueue() {
    BQA_LOGV("flushShadowQueue");
    int numFramesToFlush = mNumFrameAvailable;
    while (numFramesToFlush > 0) {
        acquireNextBufferLocked(std::nullopt);
        numFramesToFlush--;
    }
}

void BLASTBufferQueue::releaseBufferCallback(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    BBQ_TRACE();
    std::unique_lock _lock{mMutex};
    BQA_LOGV("releaseBufferCallback %s", id.to_string().c_str());

    // Calculate how many buffers we need to hold before we release them back
    // to the buffer queue. This will prevent higher latency when we are running
    // on a lower refresh rate than the max supported. We only do that for EGL
    // clients as others don't care about latency
    const bool isEGL = [&] {
        const auto it = mSubmitted.find(id);
        return it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
    }();

    if (currentMaxAcquiredBufferCount) {
        mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
    }

    const auto numPendingBuffersToHold =
            isEGL ? std::max(0u, mMaxAcquiredBuffers - mCurrentMaxAcquiredBufferCount) : 0;
    mPendingRelease.emplace_back(ReleasedBuffer{id, releaseFence});

    // Release all buffers that are beyond the ones that we need to hold
    while (mPendingRelease.size() > numPendingBuffersToHold) {
        const auto releasedBuffer = mPendingRelease.front();
        mPendingRelease.pop_front();
        releaseBuffer(releasedBuffer.callbackId, releasedBuffer.releaseFence);
        // Don't process the transactions here if mWaitForTransactionCallback is set. Instead, let
        // onFrameAvailable handle processing them since it will merge with the syncTransaction.
        if (!mWaitForTransactionCallback) {
            acquireNextBufferLocked(std::nullopt);
        }
    }

    ATRACE_INT("PendingRelease", mPendingRelease.size());
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());
    mCallbackCV.notify_all();
}

void BLASTBufferQueue::releaseBuffer(const ReleaseCallbackId& callbackId,
                                     const sp<Fence>& releaseFence) {
    auto it = mSubmitted.find(callbackId);
    if (it == mSubmitted.end()) {
        BQA_LOGE("ERROR: releaseBufferCallback without corresponding submitted buffer %s",
                 callbackId.to_string().c_str());
        return;
    }
    mNumAcquired--;
    BBQ_TRACE("frame=%" PRIu64, callbackId.framenumber);
    BQA_LOGV("released %s", callbackId.to_string().c_str());
    mBufferItemConsumer->releaseBuffer(it->second, releaseFence);
    mSubmitted.erase(it);
}

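// Acquires the next queued buffer (if any) and stages it, along with its crop, transform,
// dataspace, and frame timeline info, on a transaction targeting mSurfaceControl. When a sync
// transaction is passed in, the buffer is staged there and the caller applies it; otherwise a
// local transaction is applied immediately on mApplyToken.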
void BLASTBufferQueue::acquireNextBufferLocked(
        const std::optional<SurfaceComposerClient::Transaction*> transaction) {
    // If the next transaction is set, we want to guarantee that our acquire will not fail, so
    // don't include the extra buffer when checking if we can acquire the next buffer.
    const bool includeExtraAcquire = !transaction;
    const bool maxAcquired = maxBuffersAcquired(includeExtraAcquire);
    if (mNumFrameAvailable == 0 || maxAcquired) {
        BQA_LOGV("Can't process next buffer maxBuffersAcquired=%s", boolToString(maxAcquired));
        return;
    }

    if (mSurfaceControl == nullptr) {
        BQA_LOGE("ERROR : surface control is null");
        return;
    }

    SurfaceComposerClient::Transaction localTransaction;
    bool applyTransaction = true;
    SurfaceComposerClient::Transaction* t = &localTransaction;
    if (transaction) {
        t = *transaction;
        applyTransaction = false;
    }

    BufferItem bufferItem;

    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status == BufferQueue::NO_BUFFER_AVAILABLE) {
        BQA_LOGV("Failed to acquire a buffer, err=NO_BUFFER_AVAILABLE");
        return;
    } else if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer, err=%s", statusToString(status).c_str());
        return;
    }

    auto buffer = bufferItem.mGraphicBuffer;
    mNumFrameAvailable--;
    BBQ_TRACE("frame=%" PRIu64, bufferItem.mFrameNumber);

    if (buffer == nullptr) {
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        BQA_LOGE("Buffer was empty");
        return;
    }

    if (rejectBuffer(bufferItem)) {
        BQA_LOGE("rejecting buffer:active_size=%dx%d, requested_size=%dx%d "
                 "buffer{size=%dx%d transform=%d}",
                 mSize.width, mSize.height, mRequestedSize.width, mRequestedSize.height,
                 buffer->getWidth(), buffer->getHeight(), bufferItem.mTransform);
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        acquireNextBufferLocked(transaction);
        return;
    }

    mNumAcquired++;
    mLastAcquiredFrameNumber = bufferItem.mFrameNumber;
    ReleaseCallbackId releaseCallbackId(buffer->getId(), mLastAcquiredFrameNumber);
    mSubmitted[releaseCallbackId] = bufferItem;

    bool needsDisconnect = false;
    mBufferItemConsumer->getConnectionEvents(bufferItem.mFrameNumber, &needsDisconnect);

    // if producer disconnected before, notify SurfaceFlinger
    if (needsDisconnect) {
        t->notifyProducerDisconnect(mSurfaceControl);
    }

    // Ensure BLASTBufferQueue stays alive until we receive the transaction complete callback.
    incStrong((void*)transactionCallbackThunk);

    mSize = mRequestedSize;
    Rect crop = computeCrop(bufferItem);
    mLastBufferInfo.update(true /* hasBuffer */, bufferItem.mGraphicBuffer->getWidth(),
                           bufferItem.mGraphicBuffer->getHeight(), bufferItem.mTransform,
                           bufferItem.mScalingMode, crop);

    auto releaseBufferCallback =
            std::bind(releaseBufferCallbackThunk, wp<BLASTBufferQueue>(this) /* callbackContext */,
                      std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
    sp<Fence> fence = bufferItem.mFence ? new Fence(bufferItem.mFence->dup()) : Fence::NO_FENCE;
    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, releaseBufferCallback);
    t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
    t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
    t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
    t->addTransactionCompletedCallback(transactionCallbackThunk, static_cast<void*>(this));

    mSurfaceControlsWithPendingCallback.push(mSurfaceControl);

    if (mUpdateDestinationFrame) {
        t->setDestinationFrame(mSurfaceControl, Rect(mSize));
    } else {
        const bool ignoreDestinationFrame =
                bufferItem.mScalingMode == NATIVE_WINDOW_SCALING_MODE_FREEZE;
        t->setFlags(mSurfaceControl,
                    ignoreDestinationFrame ? layer_state_t::eIgnoreDestinationFrame : 0,
                    layer_state_t::eIgnoreDestinationFrame);
    }
    t->setBufferCrop(mSurfaceControl, crop);
    t->setTransform(mSurfaceControl, bufferItem.mTransform);
    t->setTransformToDisplayInverse(mSurfaceControl, bufferItem.mTransformToDisplayInverse);
    t->setAutoRefresh(mSurfaceControl, bufferItem.mAutoRefresh);
    if (!bufferItem.mIsAutoTimestamp) {
        t->setDesiredPresentTime(bufferItem.mTimestamp);
    }

    if (!mNextFrameTimelineInfoQueue.empty()) {
        t->setFrameTimelineInfo(mNextFrameTimelineInfoQueue.front());
        mNextFrameTimelineInfoQueue.pop();
    }

    {
        std::unique_lock _lock{mTimestampMutex};
        auto dequeueTime = mDequeueTimestamps.find(buffer->getId());
        if (dequeueTime != mDequeueTimestamps.end()) {
            Parcel p;
            p.writeInt64(dequeueTime->second);
            t->setMetadata(mSurfaceControl, METADATA_DEQUEUE_TIME, p);
            mDequeueTimestamps.erase(dequeueTime);
        }
    }

    mergePendingTransactions(t, bufferItem.mFrameNumber);
    if (applyTransaction) {
        t->setApplyToken(mApplyToken).apply();
    }

    BQA_LOGV("acquireNextBufferLocked size=%dx%d mFrameNumber=%" PRIu64
             " applyTransaction=%s mTimestamp=%" PRId64 "%s mPendingTransactions.size=%d"
             " graphicBufferId=%" PRIu64 "%s transform=%d",
             mSize.width, mSize.height, bufferItem.mFrameNumber, boolToString(applyTransaction),
             bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp ? "(auto)" : "",
             static_cast<uint32_t>(mPendingTransactions.size()), bufferItem.mGraphicBuffer->getId(),
             bufferItem.mAutoRefresh ? " mAutoRefresh" : "", bufferItem.mTransform);
}

Rect BLASTBufferQueue::computeCrop(const BufferItem& item) {
    if (item.mScalingMode == NATIVE_WINDOW_SCALING_MODE_SCALE_CROP) {
        return GLConsumer::scaleDownCrop(item.mCrop, mSize.width, mSize.height);
    }
    return item.mCrop;
}

void BLASTBufferQueue::acquireAndReleaseBuffer() {
    BufferItem bufferItem;
    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer in acquireAndReleaseBuffer, err=%s",
                 statusToString(status).c_str());
        return;
    }
    mNumFrameAvailable--;
    mBufferItemConsumer->releaseBuffer(bufferItem, bufferItem.mFence);
}

void BLASTBufferQueue::flushAndWaitForFreeBuffer(std::unique_lock<std::mutex>& lock) {
    if (mWaitForTransactionCallback && mNumFrameAvailable > 0) {
        // We are waiting on a previous sync's transaction callback so allow another sync
        // transaction to proceed.
        //
        // We need to first flush out the transactions that were in between the two syncs.
        // We do this by merging them into mSyncTransaction so any buffer merging will get
        // a release callback invoked. The release callback will be async so we need to wait
        // on max acquired to make sure we have the capacity to acquire another buffer.
        if (maxBuffersAcquired(false /* includeExtraAcquire */)) {
            BQA_LOGD("waiting to flush shadow queue...");
            mCallbackCV.wait(lock);
        }
        while (mNumFrameAvailable > 0) {
            // flush out the shadow queue
            acquireAndReleaseBuffer();
        }
    }

    while (maxBuffersAcquired(false /* includeExtraAcquire */)) {
        BQA_LOGD("waiting for free buffer.");
        mCallbackCV.wait(lock);
    }
}

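// Consumer-side entry point invoked for every buffer the producer queues. In the normal path the
// buffer is acquired and sent to SurfaceFlinger right away; when a sync was requested via
// syncNextTransaction, the buffer is instead staged on mSyncTransaction and handed back to the
// caller through mTransactionReadyCallback.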
void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;
    {
        BBQ_TRACE();
        std::unique_lock _lock{mMutex};
        const bool syncTransactionSet = mTransactionReadyCallback != nullptr;
        BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            bool mayNeedToWaitForBuffer = true;
            // If we are going to re-use the same mSyncTransaction, release the buffer that may
            // already be set in the Transaction. This is to allow us a free slot early to continue
            // processing a new buffer.
            if (!mAcquireSingleBuffer) {
                auto bufferData = mSyncTransaction->getAndClearBuffer(mSurfaceControl);
                if (bufferData) {
                    BQA_LOGD("Releasing previous buffer when syncing: framenumber=%" PRIu64,
                             bufferData->frameNumber);
                    releaseBuffer(bufferData->generateReleaseCallbackId(),
                                  bufferData->acquireFence);
                    // Because we just released a buffer, we know there's no need to wait for a free
                    // buffer.
                    mayNeedToWaitForBuffer = false;
                }
            }

            if (mayNeedToWaitForBuffer) {
                flushAndWaitForFreeBuffer(_lock);
            }
        }

        // add to shadow queue
        mNumFrameAvailable++;
        if (mWaitForTransactionCallback && mNumFrameAvailable >= 2) {
            acquireAndReleaseBuffer();
        }
        ATRACE_INT(mQueuedBufferTrace.c_str(),
                   mNumFrameAvailable + mNumAcquired - mPendingRelease.size());

        BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " syncTransactionSet=%s",
                 item.mFrameNumber, boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            acquireNextBufferLocked(mSyncTransaction);

            // Only need a commit callback when syncing to ensure the buffer that's synced has been
            // sent to SF
            incStrong((void*)transactionCommittedCallbackThunk);
            mSyncTransaction->addTransactionCommittedCallback(transactionCommittedCallbackThunk,
                                                              static_cast<void*>(this));
            mWaitForTransactionCallback = true;
            if (mAcquireSingleBuffer) {
                prevCallback = mTransactionReadyCallback;
                prevTransaction = mSyncTransaction;
                mTransactionReadyCallback = nullptr;
                mSyncTransaction = nullptr;
            }
        } else if (!mWaitForTransactionCallback) {
            acquireNextBufferLocked(std::nullopt);
        }
    }
    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::onFrameReplaced(const BufferItem& item) {
    BQA_LOGV("onFrameReplaced framenumber=%" PRIu64, item.mFrameNumber);
    // Do nothing since we are not storing unacquired buffer items locally.
}

void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
    std::unique_lock _lock{mTimestampMutex};
    mDequeueTimestamps[bufferId] = systemTime();
};

void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
    std::unique_lock _lock{mTimestampMutex};
    mDequeueTimestamps.erase(bufferId);
};

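// Illustrative caller-side sketch (placeholder lambda, not part of this file): the callback
// receives the transaction that the next queued buffer was staged on, and the caller decides
// when to apply it, typically after merging it into its own transaction.
//
//     bbq->syncNextTransaction([](SurfaceComposerClient::Transaction* t) {
//         // merge `t` into the caller's transaction and apply when ready
//     });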
void BLASTBufferQueue::syncNextTransaction(
        std::function<void(SurfaceComposerClient::Transaction*)> callback,
        bool acquireSingleBuffer) {
    BBQ_TRACE();
    std::lock_guard _lock{mMutex};
    mTransactionReadyCallback = callback;
    if (callback) {
        mSyncTransaction = new SurfaceComposerClient::Transaction();
    } else {
        mSyncTransaction = nullptr;
    }
    mAcquireSingleBuffer = mTransactionReadyCallback ? acquireSingleBuffer : true;
}

void BLASTBufferQueue::stopContinuousSyncTransaction() {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;
    {
        std::lock_guard _lock{mMutex};
        bool invokeCallback = mTransactionReadyCallback && !mAcquireSingleBuffer;
        if (invokeCallback) {
            prevCallback = mTransactionReadyCallback;
            prevTransaction = mSyncTransaction;
        }
        mTransactionReadyCallback = nullptr;
        mSyncTransaction = nullptr;
        mAcquireSingleBuffer = true;
    }
    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

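// A buffer is only rejected when the scaling mode is FREEZE and its (orientation-adjusted) size
// matches neither the active size nor the newly requested size, since a frozen buffer cannot be
// scaled to fit.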
bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
    if (item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        // Only reject buffers if scaling mode is freeze.
        return false;
    }

    uint32_t bufWidth = item.mGraphicBuffer->getWidth();
    uint32_t bufHeight = item.mGraphicBuffer->getHeight();

    // Take the buffer's orientation into account
    if (item.mTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }
    ui::Size bufferSize(bufWidth, bufHeight);
    if (mRequestedSize != mSize && mRequestedSize == bufferSize) {
        return false;
    }

    // reject buffers if the buffer size doesn't match.
    return mSize != bufferSize;
}

// Check if we have acquired the maximum number of buffers.
// The consumer can acquire an additional buffer if that buffer is not droppable. Set
// includeExtraAcquire to true to include this buffer in the count. Since this depends on the
// state of the buffer, the next acquire may still return with NO_BUFFER_AVAILABLE.
bool BLASTBufferQueue::maxBuffersAcquired(bool includeExtraAcquire) const {
    int maxAcquiredBuffers = mMaxAcquiredBuffers + (includeExtraAcquire ? 2 : 1);
    return mNumAcquired >= maxAcquiredBuffers;
}

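// Surface subclass handed out by getSurface(). It forwards frame rate and frame timeline requests
// to the owning BLASTBufferQueue, runs buffer pre-allocation off the calling thread, and rejects
// those requests with DEAD_OBJECT once destroy() has run.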
class BBQSurface : public Surface {
private:
    std::mutex mMutex;
    sp<BLASTBufferQueue> mBbq;
    bool mDestroyed = false;

public:
    BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
               const sp<IBinder>& scHandle, const sp<BLASTBufferQueue>& bbq)
          : Surface(igbp, controlledByApp, scHandle), mBbq(bbq) {}

    void allocateBuffers() override {
        uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
        uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
        auto gbp = getIGraphicBufferProducer();
        std::thread([reqWidth, reqHeight, gbp = getIGraphicBufferProducer(),
                     reqFormat = mReqFormat, reqUsage = mReqUsage]() {
            gbp->allocateBuffers(reqWidth, reqHeight, reqFormat, reqUsage);
        }).detach();
    }

    status_t setFrameRate(float frameRate, int8_t compatibility,
                          int8_t changeFrameRateStrategy) override {
        std::unique_lock _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
                               "BBQSurface::setFrameRate")) {
            return BAD_VALUE;
        }
        return mBbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
    }

    status_t setFrameTimelineInfo(const FrameTimelineInfo& frameTimelineInfo) override {
        std::unique_lock _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        return mBbq->setFrameTimelineInfo(frameTimelineInfo);
    }

    void destroy() override {
        Surface::destroy();

        std::unique_lock _lock{mMutex};
        mDestroyed = true;
        mBbq = nullptr;
    }
};

// TODO: Can we coalesce this with frame updates? Need to confirm
// no timing issues.
status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
                                        bool shouldBeSeamless) {
    std::unique_lock _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
}

status_t BLASTBufferQueue::setFrameTimelineInfo(const FrameTimelineInfo& frameTimelineInfo) {
    std::unique_lock _lock{mMutex};
    mNextFrameTimelineInfoQueue.push(frameTimelineInfo);
    return OK;
}

void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
    std::unique_lock _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    t.setSidebandStream(mSurfaceControl, stream).apply();
}

sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
    std::unique_lock _lock{mMutex};
    sp<IBinder> scHandle = nullptr;
    if (includeSurfaceControlHandle && mSurfaceControl) {
        scHandle = mSurfaceControl->getHandle();
    }
    return new BBQSurface(mProducer, true, scHandle, this);
}

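// Illustrative sketch (placeholder names, not part of this file): callers can hand the adapter a
// transaction that should apply no earlier than a given acquired frame; it is applied immediately
// if that frame has already been acquired, and otherwise deferred via mPendingTransactions.
//
//     SurfaceComposerClient::Transaction t;
//     t.setAlpha(otherSc, 0.5f);
//     bbq->mergeWithNextTransaction(&t, targetFrameNumber);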
void BLASTBufferQueue::mergeWithNextTransaction(SurfaceComposerClient::Transaction* t,
                                                uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};
    if (mLastAcquiredFrameNumber >= frameNumber) {
        // Apply the transaction since we have already acquired the desired frame.
        t->apply();
    } else {
        mPendingTransactions.emplace_back(frameNumber, *t);
        // Clear the transaction so it can't be applied elsewhere.
        t->clear();
    }
}

void BLASTBufferQueue::applyPendingTransactions(uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};

    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, frameNumber);
    t.setApplyToken(mApplyToken).apply();
}

void BLASTBufferQueue::mergePendingTransactions(SurfaceComposerClient::Transaction* t,
                                                uint64_t frameNumber) {
    auto mergeTransaction =
            [&t, currentFrameNumber = frameNumber](
                    std::tuple<uint64_t, SurfaceComposerClient::Transaction> pendingTransaction) {
                auto& [targetFrameNumber, transaction] = pendingTransaction;
                if (currentFrameNumber < targetFrameNumber) {
                    return false;
                }
                t->merge(std::move(transaction));
                return true;
            };

    mPendingTransactions.erase(std::remove_if(mPendingTransactions.begin(),
                                              mPendingTransactions.end(), mergeTransaction),
                               mPendingTransactions.end());
}

SurfaceComposerClient::Transaction* BLASTBufferQueue::gatherPendingTransactions(
        uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction* t = new SurfaceComposerClient::Transaction();
    mergePendingTransactions(t, frameNumber);
    return t;
}

// Maintains a single worker thread per process that services a list of runnables.
class AsyncWorker : public Singleton<AsyncWorker> {
private:
    std::thread mThread;
    bool mDone = false;
    std::deque<std::function<void()>> mRunnables;
    std::mutex mMutex;
    std::condition_variable mCv;
    void run() {
        std::unique_lock<std::mutex> lock(mMutex);
        while (!mDone) {
            while (!mRunnables.empty()) {
                std::deque<std::function<void()>> runnables = std::move(mRunnables);
                mRunnables.clear();
                lock.unlock();
                // Run outside the lock since the runnable might trigger another
                // post to the async worker.
                execute(runnables);
                lock.lock();
            }
            mCv.wait(lock);
        }
    }

    void execute(std::deque<std::function<void()>>& runnables) {
        while (!runnables.empty()) {
            std::function<void()> runnable = runnables.front();
            runnables.pop_front();
            runnable();
        }
    }

public:
    AsyncWorker() : Singleton<AsyncWorker>() { mThread = std::thread(&AsyncWorker::run, this); }

    ~AsyncWorker() {
        mDone = true;
        mCv.notify_all();
        if (mThread.joinable()) {
            mThread.join();
        }
    }

    void post(std::function<void()> runnable) {
        std::unique_lock<std::mutex> lock(mMutex);
        mRunnables.emplace_back(std::move(runnable));
        mCv.notify_one();
    }
};
ANDROID_SINGLETON_STATIC_INSTANCE(AsyncWorker);

// Asynchronously calls ProducerListener functions so we can emulate one way binder calls.
class AsyncProducerListener : public BnProducerListener {
private:
    const sp<IProducerListener> mListener;

public:
    AsyncProducerListener(const sp<IProducerListener>& listener) : mListener(listener) {}

    void onBufferReleased() override {
        AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferReleased(); });
    }

    void onBuffersDiscarded(const std::vector<int32_t>& slots) override {
        AsyncWorker::getInstance().post(
                [listener = mListener, slots = slots]() { listener->onBuffersDiscarded(slots); });
    }
};

// Extends the BufferQueueProducer to create a wrapper around the listener so the listener calls
// can be non-blocking when the producer is in the client process.
class BBQBufferQueueProducer : public BufferQueueProducer {
public:
    BBQBufferQueueProducer(const sp<BufferQueueCore>& core)
          : BufferQueueProducer(core, false /* consumerIsSurfaceFlinger*/) {}

    status_t connect(const sp<IProducerListener>& listener, int api, bool producerControlledByApp,
                     QueueBufferOutput* output) override {
        if (!listener) {
            return BufferQueueProducer::connect(listener, api, producerControlledByApp, output);
        }

        return BufferQueueProducer::connect(new AsyncProducerListener(listener), api,
                                            producerControlledByApp, output);
    }

    int query(int what, int* value) override {
        if (what == NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER) {
            *value = 1;
            return NO_ERROR;
        }
        return BufferQueueProducer::query(what, value);
    }
};

// Similar to BufferQueue::createBufferQueue, but creates an adapter-specific BufferQueue producer.
// This BQP allows client-specified ProducerListeners to be invoked asynchronously, emulating
// one-way binder call behavior. Without this, if the listener calls back into the queue,
// we can deadlock.
void BLASTBufferQueue::createBufferQueue(sp<IGraphicBufferProducer>* outProducer,
                                         sp<IGraphicBufferConsumer>* outConsumer) {
    LOG_ALWAYS_FATAL_IF(outProducer == nullptr, "BLASTBufferQueue: outProducer must not be NULL");
    LOG_ALWAYS_FATAL_IF(outConsumer == nullptr, "BLASTBufferQueue: outConsumer must not be NULL");

    sp<BufferQueueCore> core(new BufferQueueCore());
    LOG_ALWAYS_FATAL_IF(core == nullptr, "BLASTBufferQueue: failed to create BufferQueueCore");

    sp<IGraphicBufferProducer> producer(new BBQBufferQueueProducer(core));
    LOG_ALWAYS_FATAL_IF(producer == nullptr,
                        "BLASTBufferQueue: failed to create BBQBufferQueueProducer");

    sp<BufferQueueConsumer> consumer(new BufferQueueConsumer(core));
    consumer->setAllowExtraAcquire(true);
    LOG_ALWAYS_FATAL_IF(consumer == nullptr,
                        "BLASTBufferQueue: failed to create BufferQueueConsumer");

    *outProducer = producer;
    *outConsumer = consumer;
}

PixelFormat BLASTBufferQueue::convertBufferFormat(PixelFormat& format) {
    PixelFormat convertedFormat = format;
    switch (format) {
        case PIXEL_FORMAT_TRANSPARENT:
        case PIXEL_FORMAT_TRANSLUCENT:
            convertedFormat = PIXEL_FORMAT_RGBA_8888;
            break;
        case PIXEL_FORMAT_OPAQUE:
            convertedFormat = PIXEL_FORMAT_RGBX_8888;
            break;
    }
    return convertedFormat;
}

uint32_t BLASTBufferQueue::getLastTransformHint() const {
    if (mSurfaceControl != nullptr) {
        return mSurfaceControl->getTransformHint();
    } else {
        return 0;
    }
}

uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
    std::unique_lock _lock{mMutex};
    return mLastAcquiredFrameNumber;
}

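// Tears the adapter down: drains and releases anything left in the shadow queue, applies any
// transactions still pending on a frame number, clears sync state, and abandons the underlying
// BufferQueue so the producer and consumer can be dropped.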
void BLASTBufferQueue::abandon() {
    std::unique_lock _lock{mMutex};
    // flush out the shadow queue
    while (mNumFrameAvailable > 0) {
        acquireAndReleaseBuffer();
    }

    // Clear submitted buffer states
    mNumAcquired = 0;
    mSubmitted.clear();
    mPendingRelease.clear();

    if (!mPendingTransactions.empty()) {
        BQA_LOGD("Applying pending transactions on abandon %d",
                 static_cast<uint32_t>(mPendingTransactions.size()));
        SurfaceComposerClient::Transaction t;
        mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
        t.setApplyToken(mApplyToken).apply();
    }

    // Clear sync states
    if (mWaitForTransactionCallback) {
        BQA_LOGD("mWaitForTransactionCallback cleared");
        mWaitForTransactionCallback = false;
    }

    if (mSyncTransaction != nullptr) {
        BQA_LOGD("mSyncTransaction cleared mAcquireSingleBuffer=%s",
                 mAcquireSingleBuffer ? "true" : "false");
        mSyncTransaction = nullptr;
        mAcquireSingleBuffer = false;
    }

    // abandon buffer queue
    if (mBufferItemConsumer != nullptr) {
        mBufferItemConsumer->abandon();
        mBufferItemConsumer->setFrameAvailableListener(nullptr);
        mBufferItemConsumer->setBufferFreedListener(nullptr);
    }
    mBufferItemConsumer = nullptr;
    mConsumer = nullptr;
    mProducer = nullptr;
}

bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
    std::unique_lock _lock{mMutex};
    return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
}

} // namespace android