/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "BLASTBufferQueue"

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#include <gui/BLASTBufferQueue.h>
#include <gui/BufferItemConsumer.h>
#include <gui/BufferQueueConsumer.h>
#include <gui/BufferQueueCore.h>
#include <gui/BufferQueueProducer.h>
#include <gui/GLConsumer.h>
#include <gui/IProducerListener.h>
#include <gui/Surface.h>
#include <utils/Singleton.h>
#include <utils/Trace.h>

#include <private/gui/ComposerService.h>

#include <chrono>

using namespace std::chrono_literals;

namespace {
inline const char* boolToString(bool b) {
    return b ? "true" : "false";
}
} // namespace

namespace android {

// Macros to include adapter info in log messages
#define BQA_LOGD(x, ...) \
    ALOGD("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGV(x, ...) \
    ALOGV("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
// enable logs for a single layer
//#define BQA_LOGV(x, ...) \
//    ALOGV_IF((strstr(mName.c_str(), "SurfaceView") != nullptr), "[%s](f:%u,a:%u) " x, \
//             mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGE(x, ...) \
    ALOGE("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)

void BLASTBufferItemConsumer::onDisconnect() {
    Mutex::Autolock lock(mMutex);
    mPreviouslyConnected = mCurrentlyConnected;
    mCurrentlyConnected = false;
    if (mPreviouslyConnected) {
        mDisconnectEvents.push(mCurrentFrameNumber);
    }
    mFrameEventHistory.onDisconnect();
}

void BLASTBufferItemConsumer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                                       FrameEventHistoryDelta* outDelta) {
    Mutex::Autolock lock(mMutex);
    if (newTimestamps) {
        // BufferQueueProducer only adds a new timestamp on
        // queueBuffer
        mCurrentFrameNumber = newTimestamps->frameNumber;
        mFrameEventHistory.addQueue(*newTimestamps);
    }
    if (outDelta) {
        // frame event histories will be processed
        // only after the producer connects and requests
        // deltas for the first time. Forward this intent
        // to SF-side to turn event processing back on
        mPreviouslyConnected = mCurrentlyConnected;
        mCurrentlyConnected = true;
        mFrameEventHistory.getAndResetDelta(outDelta);
    }
}

void BLASTBufferItemConsumer::updateFrameTimestamps(uint64_t frameNumber, nsecs_t refreshStartTime,
                                                    const sp<Fence>& glDoneFence,
                                                    const sp<Fence>& presentFence,
                                                    const sp<Fence>& prevReleaseFence,
                                                    CompositorTiming compositorTiming,
                                                    nsecs_t latchTime, nsecs_t dequeueReadyTime) {
    Mutex::Autolock lock(mMutex);

    // if the producer is not connected, don't bother updating,
    // the next producer that connects won't access this frame event
    if (!mCurrentlyConnected) return;
    std::shared_ptr<FenceTime> glDoneFenceTime = std::make_shared<FenceTime>(glDoneFence);
    std::shared_ptr<FenceTime> presentFenceTime = std::make_shared<FenceTime>(presentFence);
    std::shared_ptr<FenceTime> releaseFenceTime = std::make_shared<FenceTime>(prevReleaseFence);

    mFrameEventHistory.addLatch(frameNumber, latchTime);
    mFrameEventHistory.addRelease(frameNumber, dequeueReadyTime, std::move(releaseFenceTime));
    mFrameEventHistory.addPreComposition(frameNumber, refreshStartTime);
    mFrameEventHistory.addPostComposition(frameNumber, glDoneFenceTime, presentFenceTime,
                                          compositorTiming);
}

void BLASTBufferItemConsumer::getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) {
    bool disconnect = false;
    Mutex::Autolock lock(mMutex);
    while (!mDisconnectEvents.empty() && mDisconnectEvents.front() <= frameNumber) {
        disconnect = true;
        mDisconnectEvents.pop();
    }
    if (needsDisconnect != nullptr) *needsDisconnect = disconnect;
}

void BLASTBufferItemConsumer::onSidebandStreamChanged() {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        sp<NativeHandle> stream = getSidebandStream();
        bbq->setSidebandStream(stream);
    }
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name)
      : mSurfaceControl(nullptr),
        mSize(1, 1),
        mRequestedSize(mSize),
        mFormat(PIXEL_FORMAT_RGBA_8888),
        mSyncTransaction(nullptr) {
    createBufferQueue(&mProducer, &mConsumer);
    // since the adapter is in the client process, set dequeue timeout
    // explicitly so that dequeueBuffer will block
    mProducer->setDequeueTimeout(std::numeric_limits<int64_t>::max());

    // safe default, most producers are expected to override this
    mProducer->setMaxDequeuedBufferCount(2);
    mBufferItemConsumer = new BLASTBufferItemConsumer(mConsumer,
                                                      GraphicBuffer::USAGE_HW_COMPOSER |
                                                              GraphicBuffer::USAGE_HW_TEXTURE,
                                                      1, false, this);
    static int32_t id = 0;
    mName = name + "#" + std::to_string(id);
    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(id);
    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(id);
    id++;
    mBufferItemConsumer->setName(String8(consumerName.c_str()));
    mBufferItemConsumer->setFrameAvailableListener(this);
    mBufferItemConsumer->setBufferFreedListener(this);

    ComposerService::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
    mBufferItemConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBuffers);
    mCurrentMaxAcquiredBufferCount = mMaxAcquiredBuffers;
    mNumAcquired = 0;
    mNumFrameAvailable = 0;
    BQA_LOGV("BLASTBufferQueue created");
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, const sp<SurfaceControl>& surface,
                                   int width, int height, int32_t format)
      : BLASTBufferQueue(name) {
    update(surface, width, height, format);
}
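
// Illustrative client-side usage of BLASTBufferQueue (a sketch only; the rendering calls on the
// returned Surface are assumptions about the caller, not something this class performs itself):
//
//     sp<SurfaceControl> sc = ...;  // layer created through SurfaceComposerClient
//     sp<BLASTBufferQueue> bbq =
//             new BLASTBufferQueue("MyLayer", sc, width, height, PIXEL_FORMAT_RGBA_8888);
//     sp<Surface> surface = bbq->getSurface(false /* includeSurfaceControlHandle */);
//     // The caller dequeues and queues buffers through this Surface (e.g. via ANativeWindow or
//     // EGL). Each queued buffer is acquired below in onFrameAvailable() and forwarded to
//     // SurfaceFlinger as a transaction on the SurfaceControl.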

BLASTBufferQueue::~BLASTBufferQueue() {
    if (mPendingTransactions.empty()) {
        return;
    }
    BQA_LOGE("Applying pending transactions on dtor %d",
             static_cast<uint32_t>(mPendingTransactions.size()));
    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
    t.setApplyToken(mApplyToken).apply();
}

void BLASTBufferQueue::update(const sp<SurfaceControl>& surface, uint32_t width, uint32_t height,
                              int32_t format, SurfaceComposerClient::Transaction* outTransaction) {
    LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");

    std::unique_lock _lock{mMutex};
    if (mFormat != format) {
        mFormat = format;
        mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
    }

    SurfaceComposerClient::Transaction t;
    const bool surfaceControlChanged = !SurfaceControl::isSameSurface(mSurfaceControl, surface);
    if (surfaceControlChanged && mSurfaceControl != nullptr) {
        BQA_LOGD("Updating SurfaceControl without recreating BBQ");
    }
    bool applyTransaction = false;

    // Always update the native object, even if the old and new SurfaceControls share the same
    // layer handle, so we can get the updated transform hint from WM.
    mSurfaceControl = surface;
    if (surfaceControlChanged) {
        t.setFlags(mSurfaceControl, layer_state_t::eEnableBackpressure,
                   layer_state_t::eEnableBackpressure);
        applyTransaction = true;
    }
    mTransformHint = mSurfaceControl->getTransformHint();
    mBufferItemConsumer->setTransformHint(mTransformHint);
    BQA_LOGV("update width=%d height=%d format=%d mTransformHint=%d", width, height, format,
             mTransformHint);

    ui::Size newSize(width, height);
    if (mRequestedSize != newSize) {
        mRequestedSize.set(newSize);
        mBufferItemConsumer->setDefaultBufferSize(mRequestedSize.width, mRequestedSize.height);
        if (mLastBufferInfo.scalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
            // If the buffer supports scaling, update the frame immediately since the client may
            // want to scale the existing buffer to the new size.
            mSize = mRequestedSize;
            SurfaceComposerClient::Transaction* destFrameTransaction =
                    (outTransaction) ? outTransaction : &t;
            destFrameTransaction->setDestinationFrame(mSurfaceControl,
                                                      Rect(0, 0, newSize.getWidth(),
                                                           newSize.getHeight()));
            applyTransaction = true;
        }
    }
    if (applyTransaction) {
        t.setApplyToken(mApplyToken).apply();
    }
}

static std::optional<SurfaceControlStats> findMatchingStat(
        const std::vector<SurfaceControlStats>& stats, const sp<SurfaceControl>& sc) {
    for (auto stat : stats) {
        if (SurfaceControl::isSameSurface(sc, stat.surfaceControl)) {
            return stat;
        }
    }
    return std::nullopt;
}

static void transactionCommittedCallbackThunk(void* context, nsecs_t latchTime,
                                              const sp<Fence>& presentFence,
                                              const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCommittedCallback(latchTime, presentFence, stats);
}

void BLASTBufferQueue::transactionCommittedCallback(nsecs_t /*latchTime*/,
                                                    const sp<Fence>& /*presentFence*/,
                                                    const std::vector<SurfaceControlStats>& stats) {
    {
        std::unique_lock _lock{mMutex};
        ATRACE_CALL();
        BQA_LOGV("transactionCommittedCallback");
        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            std::optional<SurfaceControlStats> stat = findMatchingStat(stats, pendingSC);
            if (stat) {
                uint64_t currFrameNumber = stat->frameEventStats.frameNumber;

                // We need to check if we were waiting for a transaction callback in order to
                // process any pending buffers and unblock. It's possible to get transaction
                // callbacks for previous requests so we need to ensure the frame from this
                // transaction callback matches the last acquired buffer. Since acquireNextBuffer
                // will stop processing buffers when mWaitForTransactionCallback is set, we know
                // that mLastAcquiredFrameNumber is the frame we're waiting on.
                // We also want to check if mSyncTransaction is null because it's possible another
                // sync request came in while waiting, but it hasn't started processing yet. In that
                // case, we don't actually want to flush the frames in between since they will get
                // processed and merged with the sync transaction and released earlier than if they
                // were sent to SF
                if (mWaitForTransactionCallback && mSyncTransaction == nullptr &&
                    currFrameNumber >= mLastAcquiredFrameNumber) {
                    mWaitForTransactionCallback = false;
                    flushShadowQueue();
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCommittedCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }

        decStrong((void*)transactionCommittedCallbackThunk);
    }
}

static void transactionCallbackThunk(void* context, nsecs_t latchTime,
                                     const sp<Fence>& presentFence,
                                     const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCallback(latchTime, presentFence, stats);
}

void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                           const std::vector<SurfaceControlStats>& stats) {
    {
        std::unique_lock _lock{mMutex};
        ATRACE_CALL();
        BQA_LOGV("transactionCallback");

        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            mSurfaceControlsWithPendingCallback.pop();
            std::optional<SurfaceControlStats> statsOptional = findMatchingStat(stats, pendingSC);
            if (statsOptional) {
                SurfaceControlStats stat = *statsOptional;
                mTransformHint = stat.transformHint;
                mBufferItemConsumer->setTransformHint(mTransformHint);
                BQA_LOGV("updated mTransformHint=%d", mTransformHint);
                // Update frametime stamps if the frame was latched and presented, indicated by a
                // valid latch time.
                if (stat.latchTime > 0) {
                    mBufferItemConsumer
                            ->updateFrameTimestamps(stat.frameEventStats.frameNumber,
                                                    stat.frameEventStats.refreshStartTime,
                                                    stat.frameEventStats.gpuCompositionDoneFence,
                                                    stat.presentFence, stat.previousReleaseFence,
                                                    stat.frameEventStats.compositorTiming,
                                                    stat.latchTime,
                                                    stat.frameEventStats.dequeueReadyTime);
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }

        decStrong((void*)transactionCallbackThunk);
    }
}

// Unlike transactionCallbackThunk the release buffer callback does not extend the life of the
// BBQ. This is because if the BBQ is destroyed, then the buffers will be released by the client.
// So we pass in a weak pointer to the BBQ and if it is still alive, then we release the buffer.
// Otherwise, this is a no-op.
static void releaseBufferCallbackThunk(wp<BLASTBufferQueue> context, const ReleaseCallbackId& id,
                                       const sp<Fence>& releaseFence,
                                       std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    sp<BLASTBufferQueue> blastBufferQueue = context.promote();
    if (blastBufferQueue) {
        blastBufferQueue->releaseBufferCallback(id, releaseFence, currentMaxAcquiredBufferCount);
    } else {
        ALOGV("releaseBufferCallbackThunk %s blastBufferQueue is dead", id.to_string().c_str());
    }
}

void BLASTBufferQueue::flushShadowQueue() {
    BQA_LOGV("flushShadowQueue");
    int numFramesToFlush = mNumFrameAvailable;
    while (numFramesToFlush > 0) {
        acquireNextBufferLocked(std::nullopt);
        numFramesToFlush--;
    }
}

void BLASTBufferQueue::releaseBufferCallback(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    ATRACE_CALL();
    std::unique_lock _lock{mMutex};
    BQA_LOGV("releaseBufferCallback %s", id.to_string().c_str());

    // Calculate how many buffers we need to hold before we release them back
    // to the buffer queue. This will prevent higher latency when we are running
    // on a lower refresh rate than the max supported. We only do that for EGL
    // clients as others don't care about latency
    const bool isEGL = [&] {
        const auto it = mSubmitted.find(id);
        return it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
    }();

    if (currentMaxAcquiredBufferCount) {
        mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
    }

    const auto numPendingBuffersToHold =
            isEGL ? std::max(0u, mMaxAcquiredBuffers - mCurrentMaxAcquiredBufferCount) : 0;
    mPendingRelease.emplace_back(ReleasedBuffer{id, releaseFence});

    // Release all buffers that are beyond the ones that we need to hold
    while (mPendingRelease.size() > numPendingBuffersToHold) {
        const auto releasedBuffer = mPendingRelease.front();
        mPendingRelease.pop_front();
        releaseBuffer(releasedBuffer.callbackId, releasedBuffer.releaseFence);
        // Don't process the transactions here if mWaitForTransactionCallback is set. Instead, let
        // onFrameAvailable handle processing them since it will merge with the syncTransaction.
        if (!mWaitForTransactionCallback) {
            acquireNextBufferLocked(std::nullopt);
        }
    }

    ATRACE_INT("PendingRelease", mPendingRelease.size());
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());
    mCallbackCV.notify_all();
}

void BLASTBufferQueue::releaseBuffer(const ReleaseCallbackId& callbackId,
                                     const sp<Fence>& releaseFence) {
    auto it = mSubmitted.find(callbackId);
    if (it == mSubmitted.end()) {
        BQA_LOGE("ERROR: releaseBufferCallback without corresponding submitted buffer %s",
                 callbackId.to_string().c_str());
        return;
    }
    mNumAcquired--;
    BQA_LOGV("released %s", callbackId.to_string().c_str());
    mBufferItemConsumer->releaseBuffer(it->second, releaseFence);
    mSubmitted.erase(it);
}

void BLASTBufferQueue::acquireNextBufferLocked(
        const std::optional<SurfaceComposerClient::Transaction*> transaction) {
    ATRACE_CALL();
    // If the next transaction is set, we want to guarantee our acquire will not fail, so don't
    // include the extra buffer when checking if we can acquire the next buffer.
    const bool includeExtraAcquire = !transaction;
    const bool maxAcquired = maxBuffersAcquired(includeExtraAcquire);
    if (mNumFrameAvailable == 0 || maxAcquired) {
        BQA_LOGV("Can't process next buffer maxBuffersAcquired=%s", boolToString(maxAcquired));
        return;
    }

    if (mSurfaceControl == nullptr) {
        BQA_LOGE("ERROR : surface control is null");
        return;
    }

    SurfaceComposerClient::Transaction localTransaction;
    bool applyTransaction = true;
    SurfaceComposerClient::Transaction* t = &localTransaction;
    if (transaction) {
        t = *transaction;
        applyTransaction = false;
    }

    BufferItem bufferItem;

    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status == BufferQueue::NO_BUFFER_AVAILABLE) {
        BQA_LOGV("Failed to acquire a buffer, err=NO_BUFFER_AVAILABLE");
        return;
    } else if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer, err=%s", statusToString(status).c_str());
        return;
    }
    auto buffer = bufferItem.mGraphicBuffer;
    mNumFrameAvailable--;

    if (buffer == nullptr) {
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        BQA_LOGE("Buffer was empty");
        return;
    }

    if (rejectBuffer(bufferItem)) {
        BQA_LOGE("rejecting buffer:active_size=%dx%d, requested_size=%dx%d "
                 "buffer{size=%dx%d transform=%d}",
                 mSize.width, mSize.height, mRequestedSize.width, mRequestedSize.height,
                 buffer->getWidth(), buffer->getHeight(), bufferItem.mTransform);
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        acquireNextBufferLocked(transaction);
        return;
    }

    mNumAcquired++;
    mLastAcquiredFrameNumber = bufferItem.mFrameNumber;
    ReleaseCallbackId releaseCallbackId(buffer->getId(), mLastAcquiredFrameNumber);
    mSubmitted[releaseCallbackId] = bufferItem;

    bool needsDisconnect = false;
    mBufferItemConsumer->getConnectionEvents(bufferItem.mFrameNumber, &needsDisconnect);

    // if producer disconnected before, notify SurfaceFlinger
    if (needsDisconnect) {
        t->notifyProducerDisconnect(mSurfaceControl);
    }

    // Ensure BLASTBufferQueue stays alive until we receive the transaction complete callback.
    incStrong((void*)transactionCallbackThunk);

    const bool updateDestinationFrame = mRequestedSize != mSize;
    mSize = mRequestedSize;
    Rect crop = computeCrop(bufferItem);
    mLastBufferInfo.update(true /* hasBuffer */, bufferItem.mGraphicBuffer->getWidth(),
                           bufferItem.mGraphicBuffer->getHeight(), bufferItem.mTransform,
                           bufferItem.mScalingMode, crop);

    auto releaseBufferCallback =
            std::bind(releaseBufferCallbackThunk, wp<BLASTBufferQueue>(this) /* callbackContext */,
                      std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
    sp<Fence> fence = bufferItem.mFence ? new Fence(bufferItem.mFence->dup()) : Fence::NO_FENCE;
    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, releaseBufferCallback);
    t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
    t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
    t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
    t->addTransactionCompletedCallback(transactionCallbackThunk, static_cast<void*>(this));

    mSurfaceControlsWithPendingCallback.push(mSurfaceControl);

    if (updateDestinationFrame) {
        t->setDestinationFrame(mSurfaceControl, Rect(0, 0, mSize.getWidth(), mSize.getHeight()));
    }
    t->setBufferCrop(mSurfaceControl, crop);
    t->setTransform(mSurfaceControl, bufferItem.mTransform);
    t->setTransformToDisplayInverse(mSurfaceControl, bufferItem.mTransformToDisplayInverse);
    if (!bufferItem.mIsAutoTimestamp) {
        t->setDesiredPresentTime(bufferItem.mTimestamp);
    }

    if (!mNextFrameTimelineInfoQueue.empty()) {
        t->setFrameTimelineInfo(mNextFrameTimelineInfoQueue.front());
        mNextFrameTimelineInfoQueue.pop();
    }

    if (mAutoRefresh != bufferItem.mAutoRefresh) {
        t->setAutoRefresh(mSurfaceControl, bufferItem.mAutoRefresh);
        mAutoRefresh = bufferItem.mAutoRefresh;
    }
    {
        std::unique_lock _lock{mTimestampMutex};
        auto dequeueTime = mDequeueTimestamps.find(buffer->getId());
        if (dequeueTime != mDequeueTimestamps.end()) {
            Parcel p;
            p.writeInt64(dequeueTime->second);
            t->setMetadata(mSurfaceControl, METADATA_DEQUEUE_TIME, p);
            mDequeueTimestamps.erase(dequeueTime);
        }
    }

    mergePendingTransactions(t, bufferItem.mFrameNumber);
    if (applyTransaction) {
        t->setApplyToken(mApplyToken).apply();
    }

    BQA_LOGV("acquireNextBufferLocked size=%dx%d mFrameNumber=%" PRIu64
             " applyTransaction=%s mTimestamp=%" PRId64 "%s mPendingTransactions.size=%d"
             " graphicBufferId=%" PRIu64 "%s transform=%d",
             mSize.width, mSize.height, bufferItem.mFrameNumber, boolToString(applyTransaction),
             bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp ? "(auto)" : "",
             static_cast<uint32_t>(mPendingTransactions.size()), bufferItem.mGraphicBuffer->getId(),
             bufferItem.mAutoRefresh ? " mAutoRefresh" : "", bufferItem.mTransform);
}

Rect BLASTBufferQueue::computeCrop(const BufferItem& item) {
    if (item.mScalingMode == NATIVE_WINDOW_SCALING_MODE_SCALE_CROP) {
        return GLConsumer::scaleDownCrop(item.mCrop, mSize.width, mSize.height);
    }
    return item.mCrop;
}

void BLASTBufferQueue::acquireAndReleaseBuffer() {
    BufferItem bufferItem;
    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer in acquireAndReleaseBuffer, err=%s",
                 statusToString(status).c_str());
        return;
    }
    mNumFrameAvailable--;
    mBufferItemConsumer->releaseBuffer(bufferItem, bufferItem.mFence);
}

void BLASTBufferQueue::flushAndWaitForFreeBuffer(std::unique_lock<std::mutex>& lock) {
    if (mWaitForTransactionCallback && mNumFrameAvailable > 0) {
        // We are waiting on a previous sync's transaction callback so allow another sync
        // transaction to proceed.
        //
        // We need to first flush out the transactions that were in between the two syncs.
        // We do this by merging them into mSyncTransaction so any buffer merging will get
        // a release callback invoked. The release callback will be async so we need to wait
        // on max acquired to make sure we have the capacity to acquire another buffer.
        if (maxBuffersAcquired(false /* includeExtraAcquire */)) {
            BQA_LOGD("waiting to flush shadow queue...");
            mCallbackCV.wait(lock);
        }
        while (mNumFrameAvailable > 0) {
            // flush out the shadow queue
            acquireAndReleaseBuffer();
        }
    }

    while (maxBuffersAcquired(false /* includeExtraAcquire */)) {
        BQA_LOGD("waiting for free buffer.");
        mCallbackCV.wait(lock);
    }
}

void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
    ATRACE_CALL();
    std::unique_lock _lock{mMutex};

    const bool syncTransactionSet = mSyncTransaction != nullptr;
    BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));

    if (syncTransactionSet) {
        bool mayNeedToWaitForBuffer = true;
        // If we are going to re-use the same mSyncTransaction, release the buffer that may already
        // be set in the Transaction. This is to allow us a free slot early to continue processing
        // a new buffer.
        if (!mAcquireSingleBuffer) {
            auto bufferData = mSyncTransaction->getAndClearBuffer(mSurfaceControl);
            if (bufferData) {
                BQA_LOGD("Releasing previous buffer when syncing: framenumber=%" PRIu64,
                         bufferData->frameNumber);
                releaseBuffer(bufferData->generateReleaseCallbackId(), bufferData->acquireFence);
                // Because we just released a buffer, we know there's no need to wait for a free
                // buffer.
                mayNeedToWaitForBuffer = false;
            }
        }

        if (mayNeedToWaitForBuffer) {
            flushAndWaitForFreeBuffer(_lock);
        }
    }

    // add to shadow queue
    mNumFrameAvailable++;
    if (mWaitForTransactionCallback && mNumFrameAvailable >= 2) {
        acquireAndReleaseBuffer();
    }
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());

    BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " syncTransactionSet=%s", item.mFrameNumber,
             boolToString(syncTransactionSet));

    if (syncTransactionSet) {
        acquireNextBufferLocked(mSyncTransaction);

        // Only need a commit callback when syncing to ensure the buffer that's synced has been sent
        // to SF
        incStrong((void*)transactionCommittedCallbackThunk);
        mSyncTransaction->addTransactionCommittedCallback(transactionCommittedCallbackThunk,
                                                          static_cast<void*>(this));

        if (mAcquireSingleBuffer) {
            mSyncTransaction = nullptr;
        }
        mWaitForTransactionCallback = true;
    } else if (!mWaitForTransactionCallback) {
        acquireNextBufferLocked(std::nullopt);
    }
}

void BLASTBufferQueue::onFrameReplaced(const BufferItem& item) {
    BQA_LOGV("onFrameReplaced framenumber=%" PRIu64, item.mFrameNumber);
    // Do nothing since we are not storing unacquired buffer items locally.
}

void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
    std::unique_lock _lock{mTimestampMutex};
    mDequeueTimestamps[bufferId] = systemTime();
};

void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
    std::unique_lock _lock{mTimestampMutex};
    mDequeueTimestamps.erase(bufferId);
};

void BLASTBufferQueue::setSyncTransaction(SurfaceComposerClient::Transaction* t,
                                          bool acquireSingleBuffer) {
    std::lock_guard _lock{mMutex};
    mSyncTransaction = t;
    mAcquireSingleBuffer = mSyncTransaction ? acquireSingleBuffer : true;
}

bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
    if (item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        // Only reject buffers if scaling mode is freeze.
        return false;
    }

    uint32_t bufWidth = item.mGraphicBuffer->getWidth();
    uint32_t bufHeight = item.mGraphicBuffer->getHeight();

    // Take the buffer's orientation into account
    if (item.mTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }
    ui::Size bufferSize(bufWidth, bufHeight);
    if (mRequestedSize != mSize && mRequestedSize == bufferSize) {
        return false;
    }

    // reject buffers if the buffer size doesn't match.
    return mSize != bufferSize;
}
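
// Worked example for rejectBuffer() (illustrative numbers, assuming freeze scaling mode and
// mRequestedSize == mSize): with mSize = 1080x1920, a 1920x1080 buffer queued with
// ui::Transform::ROT_90 is compared as 1080x1920 after the swap above and therefore accepted,
// while the same buffer queued without the rotation transform is rejected since 1920x1080 != mSize.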

// Check if we have acquired the maximum number of buffers.
// Consumer can acquire an additional buffer if that buffer is not droppable. Set
// includeExtraAcquire to true to include this buffer in the count. Since this depends on the state
// of the buffer, the next acquire may return with NO_BUFFER_AVAILABLE.
bool BLASTBufferQueue::maxBuffersAcquired(bool includeExtraAcquire) const {
    int maxAcquiredBuffers = mMaxAcquiredBuffers + (includeExtraAcquire ? 2 : 1);
    return mNumAcquired >= maxAcquiredBuffers;
}
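
// For example (illustrative numbers): if SurfaceFlinger reports mMaxAcquiredBuffers = 2, the
// adapter treats 3 acquired buffers (2 + 1 for the non-droppable buffer) as the normal ceiling,
// or 4 when includeExtraAcquire is true and the extra acquire is counted as well.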

class BBQSurface : public Surface {
private:
    std::mutex mMutex;
    sp<BLASTBufferQueue> mBbq;
    bool mDestroyed = false;

public:
    BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
               const sp<IBinder>& scHandle, const sp<BLASTBufferQueue>& bbq)
          : Surface(igbp, controlledByApp, scHandle), mBbq(bbq) {}

    void allocateBuffers() override {
        uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
        uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
        auto gbp = getIGraphicBufferProducer();
        std::thread ([reqWidth, reqHeight, gbp=getIGraphicBufferProducer(),
                      reqFormat=mReqFormat, reqUsage=mReqUsage] () {
            gbp->allocateBuffers(reqWidth, reqHeight,
                                 reqFormat, reqUsage);

        }).detach();
    }

    status_t setFrameRate(float frameRate, int8_t compatibility,
                          int8_t changeFrameRateStrategy) override {
        std::unique_lock _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
                               "BBQSurface::setFrameRate")) {
            return BAD_VALUE;
        }
        return mBbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
    }

    status_t setFrameTimelineInfo(const FrameTimelineInfo& frameTimelineInfo) override {
        std::unique_lock _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        return mBbq->setFrameTimelineInfo(frameTimelineInfo);
    }

    void destroy() override {
        Surface::destroy();

        std::unique_lock _lock{mMutex};
        mDestroyed = true;
        mBbq = nullptr;
    }
};

// TODO: Can we coalesce this with frame updates? Need to confirm
// no timing issues.
status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
                                        bool shouldBeSeamless) {
    std::unique_lock _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
}

status_t BLASTBufferQueue::setFrameTimelineInfo(const FrameTimelineInfo& frameTimelineInfo) {
    std::unique_lock _lock{mMutex};
    mNextFrameTimelineInfoQueue.push(frameTimelineInfo);
    return OK;
}

void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
    std::unique_lock _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    t.setSidebandStream(mSurfaceControl, stream).apply();
}

sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
    std::unique_lock _lock{mMutex};
    sp<IBinder> scHandle = nullptr;
    if (includeSurfaceControlHandle && mSurfaceControl) {
        scHandle = mSurfaceControl->getHandle();
    }
    return new BBQSurface(mProducer, true, scHandle, this);
}

void BLASTBufferQueue::mergeWithNextTransaction(SurfaceComposerClient::Transaction* t,
                                                uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};
    if (mLastAcquiredFrameNumber >= frameNumber) {
        // Apply the transaction since we have already acquired the desired frame.
        t->apply();
    } else {
        mPendingTransactions.emplace_back(frameNumber, *t);
        // Clear the transaction so it can't be applied elsewhere.
        t->clear();
    }
}
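
// Illustrative caller-side sketch for mergeWithNextTransaction() (the layer and geometry values
// are assumptions, not part of this class): a caller that wants state to land together with a
// particular frame can do
//
//     SurfaceComposerClient::Transaction t;
//     t.setPosition(someOtherLayer, x, y);
//     bbq->mergeWithNextTransaction(&t, targetFrameNumber);
//
// If targetFrameNumber has already been acquired the transaction is applied immediately;
// otherwise it is stored in mPendingTransactions and merged into the buffer transaction for that
// frame via mergePendingTransactions().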

void BLASTBufferQueue::applyPendingTransactions(uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};

    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, frameNumber);
    t.setApplyToken(mApplyToken).apply();
}

void BLASTBufferQueue::mergePendingTransactions(SurfaceComposerClient::Transaction* t,
                                                uint64_t frameNumber) {
    auto mergeTransaction =
            [&t, currentFrameNumber = frameNumber](
                    std::tuple<uint64_t, SurfaceComposerClient::Transaction> pendingTransaction) {
                auto& [targetFrameNumber, transaction] = pendingTransaction;
                if (currentFrameNumber < targetFrameNumber) {
                    return false;
                }
                t->merge(std::move(transaction));
                return true;
            };

    mPendingTransactions.erase(std::remove_if(mPendingTransactions.begin(),
                                              mPendingTransactions.end(), mergeTransaction),
                               mPendingTransactions.end());
}

SurfaceComposerClient::Transaction* BLASTBufferQueue::gatherPendingTransactions(
        uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction* t = new SurfaceComposerClient::Transaction();
    mergePendingTransactions(t, frameNumber);
    return t;
}

// Maintains a single worker thread per process that services a list of runnables.
class AsyncWorker : public Singleton<AsyncWorker> {
private:
    std::thread mThread;
    bool mDone = false;
    std::deque<std::function<void()>> mRunnables;
    std::mutex mMutex;
    std::condition_variable mCv;
    void run() {
        std::unique_lock<std::mutex> lock(mMutex);
        while (!mDone) {
            while (!mRunnables.empty()) {
                std::deque<std::function<void()>> runnables = std::move(mRunnables);
                mRunnables.clear();
                lock.unlock();
                // Run outside the lock since the runnable might trigger another
                // post to the async worker.
                execute(runnables);
                lock.lock();
            }
            mCv.wait(lock);
        }
    }

    void execute(std::deque<std::function<void()>>& runnables) {
        while (!runnables.empty()) {
            std::function<void()> runnable = runnables.front();
            runnables.pop_front();
            runnable();
        }
    }

public:
    AsyncWorker() : Singleton<AsyncWorker>() { mThread = std::thread(&AsyncWorker::run, this); }

    ~AsyncWorker() {
        mDone = true;
        mCv.notify_all();
        if (mThread.joinable()) {
            mThread.join();
        }
    }

    void post(std::function<void()> runnable) {
        std::unique_lock<std::mutex> lock(mMutex);
        mRunnables.emplace_back(std::move(runnable));
        mCv.notify_one();
    }
};
ANDROID_SINGLETON_STATIC_INSTANCE(AsyncWorker);

// Asynchronously calls ProducerListener functions so we can emulate one way binder calls.
class AsyncProducerListener : public BnProducerListener {
private:
    const sp<IProducerListener> mListener;

public:
    AsyncProducerListener(const sp<IProducerListener>& listener) : mListener(listener) {}

    void onBufferReleased() override {
        AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferReleased(); });
    }

    void onBuffersDiscarded(const std::vector<int32_t>& slots) override {
        AsyncWorker::getInstance().post(
                [listener = mListener, slots = slots]() { listener->onBuffersDiscarded(slots); });
    }
};

// Extends the BufferQueueProducer to create a wrapper around the listener so the listener calls
// can be non-blocking when the producer is in the client process.
class BBQBufferQueueProducer : public BufferQueueProducer {
public:
    BBQBufferQueueProducer(const sp<BufferQueueCore>& core)
          : BufferQueueProducer(core, false /* consumerIsSurfaceFlinger*/) {}

    status_t connect(const sp<IProducerListener>& listener, int api, bool producerControlledByApp,
                     QueueBufferOutput* output) override {
        if (!listener) {
            return BufferQueueProducer::connect(listener, api, producerControlledByApp, output);
        }

        return BufferQueueProducer::connect(new AsyncProducerListener(listener), api,
                                            producerControlledByApp, output);
    }

    int query(int what, int* value) override {
        if (what == NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER) {
            *value = 1;
            return NO_ERROR;
        }
        return BufferQueueProducer::query(what, value);
    }
};

// Similar to BufferQueue::createBufferQueue but creates an adapter specific bufferqueue producer.
// This BQP wraps client-specified ProducerListeners so they can be invoked asynchronously,
// emulating one-way binder call behavior. Without this, if the listener calls back into the queue,
// we can deadlock.
void BLASTBufferQueue::createBufferQueue(sp<IGraphicBufferProducer>* outProducer,
                                         sp<IGraphicBufferConsumer>* outConsumer) {
    LOG_ALWAYS_FATAL_IF(outProducer == nullptr, "BLASTBufferQueue: outProducer must not be NULL");
    LOG_ALWAYS_FATAL_IF(outConsumer == nullptr, "BLASTBufferQueue: outConsumer must not be NULL");

    sp<BufferQueueCore> core(new BufferQueueCore());
    LOG_ALWAYS_FATAL_IF(core == nullptr, "BLASTBufferQueue: failed to create BufferQueueCore");

    sp<IGraphicBufferProducer> producer(new BBQBufferQueueProducer(core));
    LOG_ALWAYS_FATAL_IF(producer == nullptr,
                        "BLASTBufferQueue: failed to create BBQBufferQueueProducer");

    sp<BufferQueueConsumer> consumer(new BufferQueueConsumer(core));
    consumer->setAllowExtraAcquire(true);
    LOG_ALWAYS_FATAL_IF(consumer == nullptr,
                        "BLASTBufferQueue: failed to create BufferQueueConsumer");

    *outProducer = producer;
    *outConsumer = consumer;
}

PixelFormat BLASTBufferQueue::convertBufferFormat(PixelFormat& format) {
    PixelFormat convertedFormat = format;
    switch (format) {
        case PIXEL_FORMAT_TRANSPARENT:
        case PIXEL_FORMAT_TRANSLUCENT:
            convertedFormat = PIXEL_FORMAT_RGBA_8888;
            break;
        case PIXEL_FORMAT_OPAQUE:
            convertedFormat = PIXEL_FORMAT_RGBX_8888;
            break;
    }
    return convertedFormat;
}

uint32_t BLASTBufferQueue::getLastTransformHint() const {
    if (mSurfaceControl != nullptr) {
        return mSurfaceControl->getTransformHint();
    } else {
        return 0;
    }
}

uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
    std::unique_lock _lock{mMutex};
    return mLastAcquiredFrameNumber;
}

void BLASTBufferQueue::abandon() {
    std::unique_lock _lock{mMutex};
    // flush out the shadow queue
    while (mNumFrameAvailable > 0) {
        acquireAndReleaseBuffer();
    }

    // Clear submitted buffer states
    mNumAcquired = 0;
    mSubmitted.clear();
    mPendingRelease.clear();

    if (!mPendingTransactions.empty()) {
        BQA_LOGD("Applying pending transactions on abandon %d",
                 static_cast<uint32_t>(mPendingTransactions.size()));
        SurfaceComposerClient::Transaction t;
        mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
        t.setApplyToken(mApplyToken).apply();
    }

    // Clear sync states
    if (mWaitForTransactionCallback) {
        BQA_LOGD("mWaitForTransactionCallback cleared");
        mWaitForTransactionCallback = false;
    }

    if (mSyncTransaction != nullptr) {
        BQA_LOGD("mSyncTransaction cleared mAcquireSingleBuffer=%s",
                 mAcquireSingleBuffer ? "true" : "false");
        mSyncTransaction = nullptr;
        mAcquireSingleBuffer = false;
    }

    // abandon buffer queue
    if (mBufferItemConsumer != nullptr) {
        mBufferItemConsumer->abandon();
        mBufferItemConsumer->setFrameAvailableListener(nullptr);
        mBufferItemConsumer->setBufferFreedListener(nullptr);
    }
    mBufferItemConsumer = nullptr;
    mConsumer = nullptr;
    mProducer = nullptr;
}

bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
    std::unique_lock _lock{mMutex};
    return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
}

} // namespace android