blob: 9cb7c8895694999eb66cc5e74d17445adf6e1c74 [file] [log] [blame]
Robert Carr78c25dd2019-08-15 14:10:33 -07001/*
2 * Copyright (C) 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Valerie Haud3b90d22019-11-06 09:37:31 -080017#undef LOG_TAG
18#define LOG_TAG "BLASTBufferQueue"
19
Valerie Haua32c5522019-12-09 10:11:08 -080020#define ATRACE_TAG ATRACE_TAG_GRAPHICS
Vishnu Naire1a42322020-10-02 17:42:04 -070021//#define LOG_NDEBUG 0
Valerie Haua32c5522019-12-09 10:11:08 -080022
Robert Carr78c25dd2019-08-15 14:10:33 -070023#include <gui/BLASTBufferQueue.h>
24#include <gui/BufferItemConsumer.h>
Vishnu Nair89496122020-12-14 17:14:53 -080025#include <gui/BufferQueueConsumer.h>
26#include <gui/BufferQueueCore.h>
27#include <gui/BufferQueueProducer.h>
Valerie Hau45e4b3b2019-12-03 10:49:17 -080028#include <gui/GLConsumer.h>
Vishnu Nair89496122020-12-14 17:14:53 -080029#include <gui/IProducerListener.h>
Robert Carr05086b22020-10-13 18:22:51 -070030#include <gui/Surface.h>
Vishnu Nair89496122020-12-14 17:14:53 -080031#include <utils/Singleton.h>
Valerie Haua32c5522019-12-09 10:11:08 -080032#include <utils/Trace.h>
33
Ady Abraham0bde6b52021-05-18 13:57:02 -070034#include <private/gui/ComposerService.h>
35
Robert Carr78c25dd2019-08-15 14:10:33 -070036#include <chrono>
37
38using namespace std::chrono_literals;
39
Vishnu Nairdab94092020-09-29 16:09:04 -070040namespace {
chaviw3277faf2021-05-19 16:45:23 -050041inline const char* boolToString(bool b) {
Vishnu Nairdab94092020-09-29 16:09:04 -070042 return b ? "true" : "false";
43}
44} // namespace
45
Robert Carr78c25dd2019-08-15 14:10:33 -070046namespace android {
47
Vishnu Nairdab94092020-09-29 16:09:04 -070048// Macros to include adapter info in log messages
chaviwd7deef72021-10-06 11:53:40 -050049#define BQA_LOGD(x, ...) \
50 ALOGD("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
Vishnu Nairdab94092020-09-29 16:09:04 -070051#define BQA_LOGV(x, ...) \
52 ALOGV("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
Vishnu Nairc6f89ee2020-12-11 14:27:32 -080053// enable logs for a single layer
54//#define BQA_LOGV(x, ...) \
55// ALOGV_IF((strstr(mName.c_str(), "SurfaceView") != nullptr), "[%s](f:%u,a:%u) " x, \
56// mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
Vishnu Nairdab94092020-09-29 16:09:04 -070057#define BQA_LOGE(x, ...) \
58 ALOGE("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
59
// Called when the producer side of the BufferQueue disconnects. Records the
// disconnect (so getConnectionEvents() can later report it for the current
// frame) and forwards the event to the owning BLASTBufferQueue.
// Note: mMutex and mBufferQueueMutex are intentionally taken in two separate
// scopes, never nested, to avoid lock-ordering issues with other callbacks.
void BLASTBufferItemConsumer::onDisconnect() {
    {
        Mutex::Autolock lock(mMutex);
        mPreviouslyConnected = mCurrentlyConnected;
        mCurrentlyConnected = false;
        // Only queue a disconnect event if there actually was a live connection.
        if (mPreviouslyConnected) {
            mDisconnectEvents.push(mCurrentFrameNumber);
        }
        mFrameEventHistory.onDisconnect();
    }

    {
        std::scoped_lock lock(mBufferQueueMutex);
        // mBLASTBufferQueue may already be cleared via setBlastBufferQueue(nullptr).
        if (mBLASTBufferQueue != nullptr) {
            mBLASTBufferQueue->onProducerDisconnect();
        }
    }
}
78
// Records newly queued frame timestamps (if provided) and, when the producer
// requests a delta, returns the accumulated frame-event history since the last
// request. Called on the producer's queueBuffer / query path.
void BLASTBufferItemConsumer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                                       FrameEventHistoryDelta* outDelta) {
    Mutex::Autolock lock(mMutex);
    if (newTimestamps) {
        // BufferQueueProducer only adds a new timestamp on
        // queueBuffer
        mCurrentFrameNumber = newTimestamps->frameNumber;
        mFrameEventHistory.addQueue(*newTimestamps);
    }
    if (outDelta) {
        // frame event histories will be processed
        // only after the producer connects and requests
        // deltas for the first time. Forward this intent
        // to SF-side to turn event processing back on
        mPreviouslyConnected = mCurrentlyConnected;
        mCurrentlyConnected = true;
        mFrameEventHistory.getAndResetDelta(outDelta);
    }
}
98
// Feeds SurfaceFlinger-side timing information (latch/present/release fences,
// compositor timing) for |frameNumber| into the frame-event history that the
// producer can later query via addAndGetFrameTimestamps().
void BLASTBufferItemConsumer::updateFrameTimestamps(uint64_t frameNumber, nsecs_t refreshStartTime,
                                                    const sp<Fence>& glDoneFence,
                                                    const sp<Fence>& presentFence,
                                                    const sp<Fence>& prevReleaseFence,
                                                    CompositorTiming compositorTiming,
                                                    nsecs_t latchTime, nsecs_t dequeueReadyTime) {
    Mutex::Autolock lock(mMutex);

    // if the producer is not connected, don't bother updating,
    // the next producer that connects won't access this frame event
    if (!mCurrentlyConnected) return;
    // Wrap raw fences in FenceTime so signal times can be queried lazily.
    std::shared_ptr<FenceTime> glDoneFenceTime = std::make_shared<FenceTime>(glDoneFence);
    std::shared_ptr<FenceTime> presentFenceTime = std::make_shared<FenceTime>(presentFence);
    std::shared_ptr<FenceTime> releaseFenceTime = std::make_shared<FenceTime>(prevReleaseFence);

    mFrameEventHistory.addLatch(frameNumber, latchTime);
    mFrameEventHistory.addRelease(frameNumber, dequeueReadyTime, std::move(releaseFenceTime));
    mFrameEventHistory.addPreComposition(frameNumber, refreshStartTime);
    mFrameEventHistory.addPostComposition(frameNumber, glDoneFenceTime, presentFenceTime,
                                          compositorTiming);
}
120
121void BLASTBufferItemConsumer::getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) {
122 bool disconnect = false;
Hongguang Chen621ec582021-02-16 15:42:35 -0800123 Mutex::Autolock lock(mMutex);
Valerie Hau871d6352020-01-29 08:44:02 -0800124 while (!mDisconnectEvents.empty() && mDisconnectEvents.front() <= frameNumber) {
125 disconnect = true;
126 mDisconnectEvents.pop();
127 }
128 if (needsDisconnect != nullptr) *needsDisconnect = disconnect;
129}
130
Hongguang Chen621ec582021-02-16 15:42:35 -0800131void BLASTBufferItemConsumer::setBlastBufferQueue(BLASTBufferQueue* blastbufferqueue) {
Alec Mouri5c8b18c2021-08-19 16:52:34 -0700132 std::scoped_lock lock(mBufferQueueMutex);
Hongguang Chen621ec582021-02-16 15:42:35 -0800133 mBLASTBufferQueue = blastbufferqueue;
134}
135
136void BLASTBufferItemConsumer::onSidebandStreamChanged() {
Alec Mouri5c8b18c2021-08-19 16:52:34 -0700137 std::scoped_lock lock(mBufferQueueMutex);
Hongguang Chen621ec582021-02-16 15:42:35 -0800138 if (mBLASTBufferQueue != nullptr) {
139 sp<NativeHandle> stream = getSidebandStream();
140 mBLASTBufferQueue->setSidebandStream(stream);
141 }
142}
143
// Constructs the client-side BufferQueue adapter for |surface|: creates the
// producer/consumer pair, configures the consumer, registers this adapter as
// its listener, and enables backpressure on the layer so SF throttles the
// producer instead of dropping frames.
BLASTBufferQueue::BLASTBufferQueue(const std::string& name, const sp<SurfaceControl>& surface,
                                   int width, int height, int32_t format)
      : mSurfaceControl(surface),
        mSize(width, height),
        mRequestedSize(mSize),
        mFormat(format),
        mNextTransaction(nullptr) {
    createBufferQueue(&mProducer, &mConsumer);
    // since the adapter is in the client process, set dequeue timeout
    // explicitly so that dequeueBuffer will block
    mProducer->setDequeueTimeout(std::numeric_limits<int64_t>::max());

    // safe default, most producers are expected to override this
    mProducer->setMaxDequeuedBufferCount(2);
    mBufferItemConsumer = new BLASTBufferItemConsumer(mConsumer,
                                                      GraphicBuffer::USAGE_HW_COMPOSER |
                                                              GraphicBuffer::USAGE_HW_TEXTURE,
                                                      1, false);
    // Process-wide counter so each adapter instance gets a unique name suffix
    // for logs and systrace.
    static int32_t id = 0;
    mName = name + "#" + std::to_string(id);
    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(id);
    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(id);
    id++;
    mBufferItemConsumer->setName(String8(consumerName.c_str()));
    mBufferItemConsumer->setFrameAvailableListener(this);
    mBufferItemConsumer->setBufferFreedListener(this);
    mBufferItemConsumer->setDefaultBufferSize(mSize.width, mSize.height);
    mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
    mBufferItemConsumer->setBlastBufferQueue(this);

    // Mirror SurfaceFlinger's acquired-buffer budget so the consumer and the
    // release hold-back logic agree on how many buffers SF may hold.
    ComposerService::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
    mBufferItemConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBuffers);
    mCurrentMaxAcquiredBufferCount = mMaxAcquiredBuffers;

    mTransformHint = mSurfaceControl->getTransformHint();
    mBufferItemConsumer->setTransformHint(mTransformHint);
    // Enable backpressure so queued-but-unconsumed frames block the producer
    // rather than being dropped by SF.
    SurfaceComposerClient::Transaction()
            .setFlags(surface, layer_state_t::eEnableBackpressure,
                      layer_state_t::eEnableBackpressure)
            .setApplyToken(mApplyToken)
            .apply();
    mNumAcquired = 0;
    mNumFrameAvailable = 0;
    BQA_LOGV("BLASTBufferQueue created width=%d height=%d format=%d mTransformHint=%d", width,
             height, format, mTransformHint);
}
190
// Detaches this adapter from its consumer (so no further callbacks reach us)
// and flushes any still-pending per-frame transactions, merged into a single
// apply, so client state queued for frames that never arrived is not lost.
BLASTBufferQueue::~BLASTBufferQueue() {
    mBufferItemConsumer->setBlastBufferQueue(nullptr);
    if (mPendingTransactions.empty()) {
        return;
    }
    BQA_LOGE("Applying pending transactions on dtor %d",
             static_cast<uint32_t>(mPendingTransactions.size()));
    SurfaceComposerClient::Transaction t;
    // Merge in frame-number order (mPendingTransactions is keyed by frame).
    for (auto& [targetFrameNumber, transaction] : mPendingTransactions) {
        t.merge(std::move(transaction));
    }
    t.apply();
}
204
// Re-targets the adapter at a (possibly new) SurfaceControl and/or a new
// requested size and pixel format. If |outTransaction| is supplied, the
// destination-frame update is placed there for the caller to apply; any
// backpressure re-enable is still applied locally.
void BLASTBufferQueue::update(const sp<SurfaceControl>& surface, uint32_t width, uint32_t height,
                              int32_t format, SurfaceComposerClient::Transaction* outTransaction) {
    std::unique_lock _lock{mMutex};
    if (mFormat != format) {
        mFormat = format;
        mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
    }

    SurfaceComposerClient::Transaction t;
    bool setBackpressureFlag = false;
    // A different underlying surface needs the backpressure flag set again,
    // since layer flags do not carry over to the new layer.
    if (!SurfaceControl::isSameSurface(mSurfaceControl, surface)) {
        mSurfaceControlSwapCount++;
        setBackpressureFlag = true;
    }
    bool applyTransaction = false;

    // Always update the native object even though they might have the same layer handle, so we can
    // get the updated transform hint from WM.
    mSurfaceControl = surface;
    if (mSurfaceControl != nullptr) {
        if (setBackpressureFlag) {
            t.setFlags(mSurfaceControl, layer_state_t::eEnableBackpressure,
                       layer_state_t::eEnableBackpressure);
            applyTransaction = true;
        }
        mTransformHint = mSurfaceControl->getTransformHint();
        mBufferItemConsumer->setTransformHint(mTransformHint);
    }
    BQA_LOGV("update width=%d height=%d format=%d mTransformHint=%d", width, height, format,
             mTransformHint);

    ui::Size newSize(width, height);
    if (mRequestedSize != newSize) {
        mRequestedSize.set(newSize);
        mBufferItemConsumer->setDefaultBufferSize(mRequestedSize.width, mRequestedSize.height);
        if (mLastBufferInfo.scalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
            // If the buffer supports scaling, update the frame immediately since the client may
            // want to scale the existing buffer to the new size.
            mSize = mRequestedSize;
            // We only need to update the scale if we've received at least one buffer. The reason
            // for this is the scale is calculated based on the requested size and buffer size.
            // If there's no buffer, the scale will always be 1.
            SurfaceComposerClient::Transaction* destFrameTransaction =
                    (outTransaction) ? outTransaction : &t;
            if (mSurfaceControl != nullptr && mLastBufferInfo.hasBuffer) {
                destFrameTransaction->setDestinationFrame(mSurfaceControl,
                                                          Rect(0, 0, newSize.getWidth(),
                                                               newSize.getHeight()));
            }
            applyTransaction = true;
        }
    }
    if (applyTransaction) {
        t.setApplyToken(mApplyToken).apply();
    }
}
261
chaviwd7deef72021-10-06 11:53:40 -0500262static std::optional<SurfaceControlStats> findMatchingStat(
263 const std::vector<SurfaceControlStats>& stats, const sp<SurfaceControl>& sc) {
264 for (auto stat : stats) {
265 if (SurfaceControl::isSameSurface(sc, stat.surfaceControl)) {
266 return stat;
267 }
268 }
269 return std::nullopt;
270}
271
272static void transactionCommittedCallbackThunk(void* context, nsecs_t latchTime,
273 const sp<Fence>& presentFence,
274 const std::vector<SurfaceControlStats>& stats) {
275 if (context == nullptr) {
276 return;
277 }
278 sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
279 bq->transactionCommittedCallback(latchTime, presentFence, stats);
280}
281
// Invoked once SurfaceFlinger has latched (committed) the transaction carrying
// our buffer. If we were blocked on a sync transaction, this is the point
// where the shadow queue of frames accumulated during the wait is flushed.
void BLASTBufferQueue::transactionCommittedCallback(nsecs_t /*latchTime*/,
                                                    const sp<Fence>& /*presentFence*/,
                                                    const std::vector<SurfaceControlStats>& stats) {
    {
        std::unique_lock _lock{mMutex};
        ATRACE_CALL();
        BQA_LOGV("transactionCommittedCallback");
        if (!mSurfaceControlsWithPendingCallback.empty()) {
            // Callbacks arrive in submission order, so the front of the queue
            // is the surface this callback belongs to.
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            std::optional<SurfaceControlStats> stat = findMatchingStat(stats, pendingSC);
            if (stat) {
                uint64_t currFrameNumber = stat->frameEventStats.frameNumber;

                // We need to check if we were waiting for a transaction callback in order to
                // process any pending buffers and unblock. It's possible to get transaction
                // callbacks for previous requests so we need to ensure the frame from this
                // transaction callback matches the last acquired buffer. Since acquireNextBuffer
                // will stop processing buffers when mWaitForTransactionCallback is set, we know
                // that mLastAcquiredFrameNumber is the frame we're waiting on.
                // We also want to check if mNextTransaction is null because it's possible another
                // sync request came in while waiting, but it hasn't started processing yet. In that
                // case, we don't actually want to flush the frames in between since they will get
                // processed and merged with the sync transaction and released earlier than if they
                // were sent to SF
                if (mWaitForTransactionCallback && mNextTransaction == nullptr &&
                    currFrameNumber >= mLastAcquiredFrameNumber) {
                    mWaitForTransactionCallback = false;
                    flushShadowQueue();
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCommittedCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }

        // Balances the incStrong taken when the callback was registered.
        // NOTE(review): this runs while mMutex is still held; if it drops the
        // last strong ref, the lock would be released on a destroyed object —
        // confirm callers always hold another strong reference here.
        decStrong((void*)transactionCommittedCallbackThunk);
    }
}
322
Robert Carr78c25dd2019-08-15 14:10:33 -0700323static void transactionCallbackThunk(void* context, nsecs_t latchTime,
324 const sp<Fence>& presentFence,
325 const std::vector<SurfaceControlStats>& stats) {
326 if (context == nullptr) {
327 return;
328 }
Robert Carrfbcbb4c2020-11-02 14:14:34 -0800329 sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
Robert Carr78c25dd2019-08-15 14:10:33 -0700330 bq->transactionCallback(latchTime, presentFence, stats);
331}
332
// Invoked when SurfaceFlinger has completed (presented) the transaction
// carrying our buffer. Pops the oldest pending surface, refreshes the
// transform hint, and forwards SF's timing stats into the frame-event history.
void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                           const std::vector<SurfaceControlStats>& stats) {
    {
        std::unique_lock _lock{mMutex};
        ATRACE_CALL();
        BQA_LOGV("transactionCallback");

        if (!mSurfaceControlsWithPendingCallback.empty()) {
            // Completed callbacks arrive in submission order; this one belongs
            // to the surface at the front of the queue.
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            mSurfaceControlsWithPendingCallback.pop();
            std::optional<SurfaceControlStats> statsOptional = findMatchingStat(stats, pendingSC);
            if (statsOptional) {
                SurfaceControlStats stat = *statsOptional;
                mTransformHint = stat.transformHint;
                mBufferItemConsumer->setTransformHint(mTransformHint);
                BQA_LOGV("updated mTransformHint=%d", mTransformHint);
                // Update frametime stamps if the frame was latched and presented, indicated by a
                // valid latch time.
                if (stat.latchTime > 0) {
                    mBufferItemConsumer
                            ->updateFrameTimestamps(stat.frameEventStats.frameNumber,
                                                    stat.frameEventStats.refreshStartTime,
                                                    stat.frameEventStats.gpuCompositionDoneFence,
                                                    stat.presentFence, stat.previousReleaseFence,
                                                    stat.frameEventStats.compositorTiming,
                                                    stat.latchTime,
                                                    stat.frameEventStats.dequeueReadyTime);
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }

        // Balances the incStrong taken when the callback was registered.
        decStrong((void*)transactionCallbackThunk);
    }
}
372
Vishnu Nair1506b182021-02-22 14:35:15 -0800373// Unlike transactionCallbackThunk the release buffer callback does not extend the life of the
374// BBQ. This is because if the BBQ is destroyed, then the buffers will be released by the client.
375// So we pass in a weak pointer to the BBQ and if it still alive, then we release the buffer.
376// Otherwise, this is a no-op.
Vishnu Nair4ba0c2e2021-06-24 11:27:17 -0700377static void releaseBufferCallbackThunk(wp<BLASTBufferQueue> context, const ReleaseCallbackId& id,
chaviw69058fb2021-09-27 09:37:30 -0500378 const sp<Fence>& releaseFence,
379 std::optional<uint32_t> currentMaxAcquiredBufferCount) {
Vishnu Nair1506b182021-02-22 14:35:15 -0800380 sp<BLASTBufferQueue> blastBufferQueue = context.promote();
Vishnu Nair1506b182021-02-22 14:35:15 -0800381 if (blastBufferQueue) {
chaviw69058fb2021-09-27 09:37:30 -0500382 blastBufferQueue->releaseBufferCallback(id, releaseFence, currentMaxAcquiredBufferCount);
Vishnu Nair4ba0c2e2021-06-24 11:27:17 -0700383 } else {
384 ALOGV("releaseBufferCallbackThunk %s blastBufferQueue is dead", id.to_string().c_str());
Vishnu Nair1506b182021-02-22 14:35:15 -0800385 }
386}
387
chaviwd7deef72021-10-06 11:53:40 -0500388void BLASTBufferQueue::flushShadowQueue() {
389 BQA_LOGV("flushShadowQueue");
390 int numFramesToFlush = mNumFrameAvailable;
391 while (numFramesToFlush > 0) {
392 acquireNextBufferLocked(std::nullopt);
393 numFramesToFlush--;
394 }
395}
396
// Called by SurfaceFlinger when it is done with a previously submitted buffer.
// Ignores buffers we already freed locally, optionally holds a few releases
// back for EGL clients (to keep latency low at reduced refresh rates), then
// returns buffers to the BufferQueue and pulls in the next available frame.
void BLASTBufferQueue::releaseBufferCallback(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    ATRACE_CALL();
    std::unique_lock _lock{mMutex};
    BQA_LOGV("releaseBufferCallback %s", id.to_string().c_str());

    // If we freed this buffer locally before SF released it, just drop the
    // bookkeeping entry and ignore the callback.
    const auto it = mFreedBuffers.find(id);
    if (it != mFreedBuffers.end()) {
        mFreedBuffers.erase(it);
        BQA_LOGV("releaseBufferCallback ignoring freed buffer %s", id.to_string().c_str());
        return;
    }

    // Log (once) if releases are arriving out of order relative to frees.
    if (mFreedBuffers.size() != 0 && mLogMissingReleaseCallback) {
        BQA_LOGD("Unexpected out of order buffer release. mFreedBuffer count=%d",
                 static_cast<uint32_t>(mFreedBuffers.size()));
        mLogMissingReleaseCallback = false;
    }

    // Calculate how many buffers we need to hold before we release them back
    // to the buffer queue. This will prevent higher latency when we are running
    // on a lower refresh rate than the max supported. We only do that for EGL
    // clients as others don't care about latency
    const bool isEGL = [&] {
        const auto it = mSubmitted.find(id);
        return it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;
    }();

    if (currentMaxAcquiredBufferCount) {
        mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
    }

    // NOTE(review): this subtraction is computed in unsigned arithmetic; if
    // mCurrentMaxAcquiredBufferCount ever exceeded mMaxAcquiredBuffers the
    // result would wrap and std::max(0u, ...) would not clamp it — confirm SF
    // never reports a count above the initial maximum.
    const auto numPendingBuffersToHold =
            isEGL ? std::max(0u, mMaxAcquiredBuffers - mCurrentMaxAcquiredBufferCount) : 0;
    mPendingRelease.emplace_back(ReleasedBuffer{id, releaseFence});

    // Release all buffers that are beyond the ones that we need to hold
    while (mPendingRelease.size() > numPendingBuffersToHold) {
        const auto releaseBuffer = mPendingRelease.front();
        mPendingRelease.pop_front();
        auto it = mSubmitted.find(releaseBuffer.callbackId);
        if (it == mSubmitted.end()) {
            BQA_LOGE("ERROR: releaseBufferCallback without corresponding submitted buffer %s",
                     releaseBuffer.callbackId.to_string().c_str());
            return;
        }
        mNumAcquired--;
        BQA_LOGV("released %s", releaseBuffer.callbackId.to_string().c_str());
        mBufferItemConsumer->releaseBuffer(it->second, releaseBuffer.releaseFence);
        mSubmitted.erase(it);
        // Don't process the transactions here if mWaitForTransactionCallback is set. Instead, let
        // onFrameAvailable handle processing them since it will merge with the nextTransaction.
        if (!mWaitForTransactionCallback) {
            acquireNextBufferLocked(std::nullopt);
        }
    }

    ATRACE_INT("PendingRelease", mPendingRelease.size());
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());
    // Wake any thread blocked waiting for a buffer release (e.g. a blocked
    // dequeue path waiting on mCallbackCV).
    mCallbackCV.notify_all();
}
460
chaviwd7deef72021-10-06 11:53:40 -0500461void BLASTBufferQueue::acquireNextBufferLocked(
462 const std::optional<SurfaceComposerClient::Transaction*> transaction) {
Valerie Haua32c5522019-12-09 10:11:08 -0800463 ATRACE_CALL();
Vishnu Nair8b30dd12021-01-25 14:16:54 -0800464 // If the next transaction is set, we want to guarantee the our acquire will not fail, so don't
465 // include the extra buffer when checking if we can acquire the next buffer.
chaviwd7deef72021-10-06 11:53:40 -0500466 const bool includeExtraAcquire = !transaction;
467 const bool maxAcquired = maxBuffersAcquired(includeExtraAcquire);
468 if (mNumFrameAvailable == 0 || maxAcquired) {
469 BQA_LOGV("Can't process next buffer maxBuffersAcquired=%s", boolToString(maxAcquired));
Valerie Haud3b90d22019-11-06 09:37:31 -0800470 return;
471 }
472
Valerie Haua32c5522019-12-09 10:11:08 -0800473 if (mSurfaceControl == nullptr) {
Vishnu Nair670b3f72020-09-29 17:52:18 -0700474 BQA_LOGE("ERROR : surface control is null");
Valerie Haud3b90d22019-11-06 09:37:31 -0800475 return;
476 }
477
Robert Carr78c25dd2019-08-15 14:10:33 -0700478 SurfaceComposerClient::Transaction localTransaction;
479 bool applyTransaction = true;
480 SurfaceComposerClient::Transaction* t = &localTransaction;
chaviwd7deef72021-10-06 11:53:40 -0500481 if (transaction) {
482 t = *transaction;
Robert Carr78c25dd2019-08-15 14:10:33 -0700483 applyTransaction = false;
484 }
485
Valerie Haua32c5522019-12-09 10:11:08 -0800486 BufferItem bufferItem;
Valerie Haud3b90d22019-11-06 09:37:31 -0800487
Vishnu Nairc6f89ee2020-12-11 14:27:32 -0800488 status_t status =
489 mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
Vishnu Nair8b30dd12021-01-25 14:16:54 -0800490 if (status == BufferQueue::NO_BUFFER_AVAILABLE) {
491 BQA_LOGV("Failed to acquire a buffer, err=NO_BUFFER_AVAILABLE");
492 return;
493 } else if (status != OK) {
Vishnu Nairbf255772020-10-16 10:54:41 -0700494 BQA_LOGE("Failed to acquire a buffer, err=%s", statusToString(status).c_str());
Robert Carr78c25dd2019-08-15 14:10:33 -0700495 return;
496 }
Valerie Haua32c5522019-12-09 10:11:08 -0800497 auto buffer = bufferItem.mGraphicBuffer;
498 mNumFrameAvailable--;
499
500 if (buffer == nullptr) {
501 mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
Vishnu Nairbf255772020-10-16 10:54:41 -0700502 BQA_LOGE("Buffer was empty");
Valerie Haua32c5522019-12-09 10:11:08 -0800503 return;
504 }
505
Vishnu Nair670b3f72020-09-29 17:52:18 -0700506 if (rejectBuffer(bufferItem)) {
Vishnu Naira4fbca52021-07-07 16:52:34 -0700507 BQA_LOGE("rejecting buffer:active_size=%dx%d, requested_size=%dx%d "
Vishnu Nairea0de002020-11-17 17:42:37 -0800508 "buffer{size=%dx%d transform=%d}",
509 mSize.width, mSize.height, mRequestedSize.width, mRequestedSize.height,
510 buffer->getWidth(), buffer->getHeight(), bufferItem.mTransform);
511 mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
chaviwd7deef72021-10-06 11:53:40 -0500512 acquireNextBufferLocked(transaction);
Vishnu Nairea0de002020-11-17 17:42:37 -0800513 return;
Vishnu Nair670b3f72020-09-29 17:52:18 -0700514 }
515
Valerie Haua32c5522019-12-09 10:11:08 -0800516 mNumAcquired++;
Vishnu Nair4ba0c2e2021-06-24 11:27:17 -0700517 mLastAcquiredFrameNumber = bufferItem.mFrameNumber;
518 ReleaseCallbackId releaseCallbackId(buffer->getId(), mLastAcquiredFrameNumber);
519 mSubmitted[releaseCallbackId] = bufferItem;
Robert Carr78c25dd2019-08-15 14:10:33 -0700520
Valerie Hau871d6352020-01-29 08:44:02 -0800521 bool needsDisconnect = false;
522 mBufferItemConsumer->getConnectionEvents(bufferItem.mFrameNumber, &needsDisconnect);
523
524 // if producer disconnected before, notify SurfaceFlinger
525 if (needsDisconnect) {
526 t->notifyProducerDisconnect(mSurfaceControl);
527 }
528
Robert Carr78c25dd2019-08-15 14:10:33 -0700529 // Ensure BLASTBufferQueue stays alive until we receive the transaction complete callback.
530 incStrong((void*)transactionCallbackThunk);
chaviwd7deef72021-10-06 11:53:40 -0500531 incStrong((void*)transactionCommittedCallbackThunk);
Robert Carr78c25dd2019-08-15 14:10:33 -0700532
Vishnu Nair932f6ae2021-09-29 17:33:10 -0700533 const bool sizeHasChanged = mRequestedSize != mSize;
534 mSize = mRequestedSize;
535 const bool updateDestinationFrame = sizeHasChanged || !mLastBufferInfo.hasBuffer;
Vishnu Nair5cc9ac02021-04-19 13:23:38 -0700536 Rect crop = computeCrop(bufferItem);
Chavi Weingartena5aedbd2021-04-09 13:37:33 +0000537 mLastBufferInfo.update(true /* hasBuffer */, bufferItem.mGraphicBuffer->getWidth(),
538 bufferItem.mGraphicBuffer->getHeight(), bufferItem.mTransform,
Vishnu Nair5cc9ac02021-04-19 13:23:38 -0700539 bufferItem.mScalingMode, crop);
Vishnu Nair53c936c2020-12-03 11:46:37 -0800540
Vishnu Nair1506b182021-02-22 14:35:15 -0800541 auto releaseBufferCallback =
542 std::bind(releaseBufferCallbackThunk, wp<BLASTBufferQueue>(this) /* callbackContext */,
chaviw69058fb2021-09-27 09:37:30 -0500543 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
chaviwba4320c2021-09-15 15:20:53 -0500544 sp<Fence> fence = bufferItem.mFence ? new Fence(bufferItem.mFence->dup()) : Fence::NO_FENCE;
545 t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, releaseCallbackId,
546 releaseBufferCallback);
John Reck137069e2020-12-10 22:07:37 -0500547 t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
548 t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
549 t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
Robert Carr78c25dd2019-08-15 14:10:33 -0700550 t->addTransactionCompletedCallback(transactionCallbackThunk, static_cast<void*>(this));
chaviwd7deef72021-10-06 11:53:40 -0500551 t->addTransactionCommittedCallback(transactionCommittedCallbackThunk, static_cast<void*>(this));
chaviw42026162021-04-16 15:46:12 -0500552 mSurfaceControlsWithPendingCallback.push(mSurfaceControl);
Robert Carr78c25dd2019-08-15 14:10:33 -0700553
Vishnu Nair084514a2021-07-30 16:07:42 -0700554 if (updateDestinationFrame) {
555 t->setDestinationFrame(mSurfaceControl, Rect(0, 0, mSize.getWidth(), mSize.getHeight()));
556 }
Vishnu Nair6bdec7d2021-05-10 15:01:13 -0700557 t->setBufferCrop(mSurfaceControl, crop);
Valerie Haua32c5522019-12-09 10:11:08 -0800558 t->setTransform(mSurfaceControl, bufferItem.mTransform);
Valerie Hau2882e982020-01-23 13:33:10 -0800559 t->setTransformToDisplayInverse(mSurfaceControl, bufferItem.mTransformToDisplayInverse);
Ady Abrahamf0c56492020-12-17 18:04:15 -0800560 if (!bufferItem.mIsAutoTimestamp) {
561 t->setDesiredPresentTime(bufferItem.mTimestamp);
562 }
Robert Carr78c25dd2019-08-15 14:10:33 -0700563
Siarhei Vishniakoufc434ac2021-01-13 10:28:00 -1000564 if (!mNextFrameTimelineInfoQueue.empty()) {
Ady Abraham8db10102021-03-15 17:19:23 -0700565 t->setFrameTimelineInfo(mNextFrameTimelineInfoQueue.front());
Siarhei Vishniakoufc434ac2021-01-13 10:28:00 -1000566 mNextFrameTimelineInfoQueue.pop();
Jorim Jaggia3fe67b2020-12-01 00:24:33 +0100567 }
568
Vishnu Naircf26a0a2020-11-13 12:56:20 -0800569 if (mAutoRefresh != bufferItem.mAutoRefresh) {
570 t->setAutoRefresh(mSurfaceControl, bufferItem.mAutoRefresh);
571 mAutoRefresh = bufferItem.mAutoRefresh;
572 }
Vishnu Nairadf632b2021-01-07 14:05:08 -0800573 {
574 std::unique_lock _lock{mTimestampMutex};
575 auto dequeueTime = mDequeueTimestamps.find(buffer->getId());
576 if (dequeueTime != mDequeueTimestamps.end()) {
577 Parcel p;
578 p.writeInt64(dequeueTime->second);
579 t->setMetadata(mSurfaceControl, METADATA_DEQUEUE_TIME, p);
580 mDequeueTimestamps.erase(dequeueTime);
581 }
582 }
Vishnu Naircf26a0a2020-11-13 12:56:20 -0800583
chaviw6a195272021-09-03 16:14:25 -0500584 mergePendingTransactions(t, bufferItem.mFrameNumber);
Robert Carr78c25dd2019-08-15 14:10:33 -0700585 if (applyTransaction) {
Vishnu Nair277142c2021-01-05 18:35:29 -0800586 t->setApplyToken(mApplyToken).apply();
Robert Carr78c25dd2019-08-15 14:10:33 -0700587 }
Vishnu Nairdab94092020-09-29 16:09:04 -0700588
chaviwd7deef72021-10-06 11:53:40 -0500589 BQA_LOGV("acquireNextBufferLocked size=%dx%d mFrameNumber=%" PRIu64
Vishnu Nair1506b182021-02-22 14:35:15 -0800590 " applyTransaction=%s mTimestamp=%" PRId64 "%s mPendingTransactions.size=%d"
Vishnu Naira4fbca52021-07-07 16:52:34 -0700591 " graphicBufferId=%" PRIu64 "%s transform=%d",
chaviw3277faf2021-05-19 16:45:23 -0500592 mSize.width, mSize.height, bufferItem.mFrameNumber, boolToString(applyTransaction),
Vishnu Nair1506b182021-02-22 14:35:15 -0800593 bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp ? "(auto)" : "",
Vishnu Nair4ba0c2e2021-06-24 11:27:17 -0700594 static_cast<uint32_t>(mPendingTransactions.size()), bufferItem.mGraphicBuffer->getId(),
Vishnu Naira4fbca52021-07-07 16:52:34 -0700595 bufferItem.mAutoRefresh ? " mAutoRefresh" : "", bufferItem.mTransform);
Robert Carr78c25dd2019-08-15 14:10:33 -0700596}
597
Valerie Hau45e4b3b2019-12-03 10:49:17 -0800598Rect BLASTBufferQueue::computeCrop(const BufferItem& item) {
599 if (item.mScalingMode == NATIVE_WINDOW_SCALING_MODE_SCALE_CROP) {
Vishnu Nairea0de002020-11-17 17:42:37 -0800600 return GLConsumer::scaleDownCrop(item.mCrop, mSize.width, mSize.height);
Valerie Hau45e4b3b2019-12-03 10:49:17 -0800601 }
602 return item.mCrop;
603}
604
chaviwd7deef72021-10-06 11:53:40 -0500605void BLASTBufferQueue::acquireAndReleaseBuffer() {
606 BufferItem bufferItem;
chaviw6ebdf5f2021-10-14 11:57:22 -0500607 status_t status =
608 mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
609 if (status != OK) {
610 BQA_LOGE("Failed to acquire a buffer in acquireAndReleaseBuffer, err=%s",
611 statusToString(status).c_str());
612 return;
613 }
chaviwd7deef72021-10-06 11:53:40 -0500614 mNumFrameAvailable--;
chaviw6ebdf5f2021-10-14 11:57:22 -0500615 mBufferItemConsumer->releaseBuffer(bufferItem, bufferItem.mFence);
chaviwd7deef72021-10-06 11:53:40 -0500616}
617
// BufferQueue consumer callback: a new buffer has been queued by the producer.
// Updates the shadow-queue bookkeeping and, depending on whether a "next
// transaction" (sync transaction) is set, either acquires the buffer into that
// transaction or acquires it immediately. May block on mCallbackCV waiting for
// acquire capacity when a sync transaction is in flight.
void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
    ATRACE_CALL();
    std::unique_lock _lock{mMutex};

    // Diagnostic: a surface-control swap without a matching producer disconnect
    // is unexpected; log it once (mLogScSwap gates repeat logging).
    if ((mSurfaceControlSwapCount > mProducerDisconnectCount) && mLogScSwap) {
        BQA_LOGD("Expected producer disconnect sc swap count=%d bq disconnect count=%d",
                 mSurfaceControlSwapCount, mProducerDisconnectCount);
        mLogScSwap = false;
    }

    const bool nextTransactionSet = mNextTransaction != nullptr;
    BQA_LOGV("onFrameAvailable-start nextTransactionSet=%s", boolToString(nextTransactionSet));
    if (nextTransactionSet) {
        if (mWaitForTransactionCallback) {
            // We are waiting on a previous sync's transaction callback so allow another sync
            // transaction to proceed.
            //
            // We need to first flush out the transactions that were in between the two syncs.
            // We do this by merging them into mNextTransaction so any buffer merging will get
            // a release callback invoked. The release callback will be async so we need to wait
            // on max acquired to make sure we have the capacity to acquire another buffer.
            if (maxBuffersAcquired(false /* includeExtraAcquire */)) {
                BQA_LOGD("waiting to flush shadow queue...");
                // Woken by the transaction-complete / release-buffer callbacks.
                mCallbackCV.wait(_lock);
            }
            while (mNumFrameAvailable > 0) {
                // flush out the shadow queue
                acquireAndReleaseBuffer();
            }
        }

        // Block until there is acquire capacity for the sync buffer.
        while (maxBuffersAcquired(false /* includeExtraAcquire */)) {
            BQA_LOGD("waiting for free buffer.");
            mCallbackCV.wait(_lock);
        }
    }

    // add to shadow queue
    mNumFrameAvailable++;
    // Trace the number of buffers currently owned by this adapter (queued plus
    // acquired, minus those already pending release).
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());

    BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " nextTransactionSet=%s", item.mFrameNumber,
             boolToString(nextTransactionSet));

    if (nextTransactionSet) {
        // Consume the pending sync transaction for this acquisition; subsequent
        // frames stall (see mWaitForTransactionCallback) until its callback fires.
        acquireNextBufferLocked(std::move(mNextTransaction));
        mNextTransaction = nullptr;
        mWaitForTransactionCallback = true;
    } else if (!mWaitForTransactionCallback) {
        acquireNextBufferLocked(std::nullopt);
    }
}
671
// BufferQueue consumer callback for a queued buffer item being replaced before
// acquisition. Intentionally a no-op beyond logging: this adapter keeps no
// local copy of unacquired buffer items, so there is nothing to update.
void BLASTBufferQueue::onFrameReplaced(const BufferItem& item) {
    BQA_LOGV("onFrameReplaced framenumber=%" PRIu64, item.mFrameNumber);
    // Do nothing since we are not storing unacquired buffer items locally.
}
676
Vishnu Nairadf632b2021-01-07 14:05:08 -0800677void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
678 std::unique_lock _lock{mTimestampMutex};
679 mDequeueTimestamps[bufferId] = systemTime();
680};
681
682void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
683 std::unique_lock _lock{mTimestampMutex};
684 mDequeueTimestamps.erase(bufferId);
685};
686
// Sets the transaction that the next acquired buffer will be placed into
// (a "sync transaction"); pass nullptr to clear it. The pointer is borrowed —
// presumably the caller must keep the transaction alive until the next frame
// consumes it in onFrameAvailable; TODO confirm the lifetime contract.
void BLASTBufferQueue::setNextTransaction(SurfaceComposerClient::Transaction* t) {
    std::lock_guard _lock{mMutex};
    mNextTransaction = t;
}
691
Vishnu Nairea0de002020-11-17 17:42:37 -0800692bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
Vishnu Nair670b3f72020-09-29 17:52:18 -0700693 if (item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
694 // Only reject buffers if scaling mode is freeze.
695 return false;
696 }
697
Vishnu Naire1a42322020-10-02 17:42:04 -0700698 uint32_t bufWidth = item.mGraphicBuffer->getWidth();
699 uint32_t bufHeight = item.mGraphicBuffer->getHeight();
700
701 // Take the buffer's orientation into account
702 if (item.mTransform & ui::Transform::ROT_90) {
703 std::swap(bufWidth, bufHeight);
704 }
Vishnu Nairea0de002020-11-17 17:42:37 -0800705 ui::Size bufferSize(bufWidth, bufHeight);
706 if (mRequestedSize != mSize && mRequestedSize == bufferSize) {
Vishnu Nairea0de002020-11-17 17:42:37 -0800707 return false;
708 }
Vishnu Naire1a42322020-10-02 17:42:04 -0700709
Vishnu Nair670b3f72020-09-29 17:52:18 -0700710 // reject buffers if the buffer size doesn't match.
Vishnu Nairea0de002020-11-17 17:42:37 -0800711 return mSize != bufferSize;
Vishnu Nair670b3f72020-09-29 17:52:18 -0700712}
Vishnu Nairbf255772020-10-16 10:54:41 -0700713
714// Check if we have acquired the maximum number of buffers.
Vishnu Nair8b30dd12021-01-25 14:16:54 -0800715// Consumer can acquire an additional buffer if that buffer is not droppable. Set
716// includeExtraAcquire is true to include this buffer to the count. Since this depends on the state
717// of the buffer, the next acquire may return with NO_BUFFER_AVAILABLE.
718bool BLASTBufferQueue::maxBuffersAcquired(bool includeExtraAcquire) const {
Ady Abraham0bde6b52021-05-18 13:57:02 -0700719 int maxAcquiredBuffers = mMaxAcquiredBuffers + (includeExtraAcquire ? 2 : 1);
Vishnu Nair1506b182021-02-22 14:35:15 -0800720 return mNumAcquired == maxAcquiredBuffers;
Vishnu Nairbf255772020-10-16 10:54:41 -0700721}
722
Robert Carr05086b22020-10-13 18:22:51 -0700723class BBQSurface : public Surface {
Robert Carr9c006e02020-10-14 13:41:57 -0700724private:
Vishnu Nair95b6d512021-08-30 15:31:08 -0700725 std::mutex mMutex;
Robert Carr9c006e02020-10-14 13:41:57 -0700726 sp<BLASTBufferQueue> mBbq;
Vishnu Nair95b6d512021-08-30 15:31:08 -0700727 bool mDestroyed = false;
728
Robert Carr05086b22020-10-13 18:22:51 -0700729public:
Vishnu Nair992496b2020-10-22 17:27:21 -0700730 BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
731 const sp<IBinder>& scHandle, const sp<BLASTBufferQueue>& bbq)
732 : Surface(igbp, controlledByApp, scHandle), mBbq(bbq) {}
Robert Carr9c006e02020-10-14 13:41:57 -0700733
Robert Carr05086b22020-10-13 18:22:51 -0700734 void allocateBuffers() override {
735 uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
736 uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
737 auto gbp = getIGraphicBufferProducer();
738 std::thread ([reqWidth, reqHeight, gbp=getIGraphicBufferProducer(),
739 reqFormat=mReqFormat, reqUsage=mReqUsage] () {
740 gbp->allocateBuffers(reqWidth, reqHeight,
741 reqFormat, reqUsage);
742
743 }).detach();
744 }
Robert Carr9c006e02020-10-14 13:41:57 -0700745
Marin Shalamanovc5986772021-03-16 16:09:49 +0100746 status_t setFrameRate(float frameRate, int8_t compatibility,
747 int8_t changeFrameRateStrategy) override {
Vishnu Nair95b6d512021-08-30 15:31:08 -0700748 std::unique_lock _lock{mMutex};
749 if (mDestroyed) {
750 return DEAD_OBJECT;
751 }
Marin Shalamanovc5986772021-03-16 16:09:49 +0100752 if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
753 "BBQSurface::setFrameRate")) {
Robert Carr9c006e02020-10-14 13:41:57 -0700754 return BAD_VALUE;
755 }
Marin Shalamanovc5986772021-03-16 16:09:49 +0100756 return mBbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
Robert Carr9c006e02020-10-14 13:41:57 -0700757 }
Robert Carr9b611b72020-10-19 12:00:23 -0700758
Siarhei Vishniakoufc434ac2021-01-13 10:28:00 -1000759 status_t setFrameTimelineInfo(const FrameTimelineInfo& frameTimelineInfo) override {
Vishnu Nair95b6d512021-08-30 15:31:08 -0700760 std::unique_lock _lock{mMutex};
761 if (mDestroyed) {
762 return DEAD_OBJECT;
763 }
Siarhei Vishniakoufc434ac2021-01-13 10:28:00 -1000764 return mBbq->setFrameTimelineInfo(frameTimelineInfo);
Robert Carr9b611b72020-10-19 12:00:23 -0700765 }
Vishnu Nair95b6d512021-08-30 15:31:08 -0700766
767 void destroy() override {
768 Surface::destroy();
769
770 std::unique_lock _lock{mMutex};
771 mDestroyed = true;
772 mBbq = nullptr;
773 }
Robert Carr05086b22020-10-13 18:22:51 -0700774};
775
// TODO: Can we coalesce this with frame updates? Need to confirm
// no timing issues.
// Applies the requested frame rate to the surface control via a standalone
// transaction. NOTE(review): BBQSurface calls this with an int8_t
// changeFrameRateStrategy that is implicitly narrowed to this bool
// shouldBeSeamless parameter — confirm the signatures are meant to differ.
status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
                                        bool shouldBeSeamless) {
    std::unique_lock _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
}
785
// Queues frame timeline info to be attached to a future buffer transaction;
// acquireNextBufferLocked pops one entry per acquired buffer. Always returns OK.
status_t BLASTBufferQueue::setFrameTimelineInfo(const FrameTimelineInfo& frameTimelineInfo) {
    std::unique_lock _lock{mMutex};
    mNextFrameTimelineInfoQueue.push(frameTimelineInfo);
    return OK;
}
791
Hongguang Chen621ec582021-02-16 15:42:35 -0800792void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
793 std::unique_lock _lock{mMutex};
794 SurfaceComposerClient::Transaction t;
795
796 t.setSidebandStream(mSurfaceControl, stream).apply();
797}
798
Vishnu Nair992496b2020-10-22 17:27:21 -0700799sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
800 std::unique_lock _lock{mMutex};
801 sp<IBinder> scHandle = nullptr;
802 if (includeSurfaceControlHandle && mSurfaceControl) {
803 scHandle = mSurfaceControl->getHandle();
804 }
805 return new BBQSurface(mProducer, true, scHandle, this);
Robert Carr05086b22020-10-13 18:22:51 -0700806}
807
Vishnu Nairc4a40c12020-12-23 09:14:32 -0800808void BLASTBufferQueue::mergeWithNextTransaction(SurfaceComposerClient::Transaction* t,
809 uint64_t frameNumber) {
810 std::lock_guard _lock{mMutex};
811 if (mLastAcquiredFrameNumber >= frameNumber) {
812 // Apply the transaction since we have already acquired the desired frame.
813 t->apply();
814 } else {
chaviwaad6cf52021-03-23 17:27:20 -0500815 mPendingTransactions.emplace_back(frameNumber, *t);
816 // Clear the transaction so it can't be applied elsewhere.
817 t->clear();
Vishnu Nairc4a40c12020-12-23 09:14:32 -0800818 }
819}
820
// Collects all pending transactions targeting frames up to frameNumber into a
// fresh transaction and applies it on this queue's apply token.
void BLASTBufferQueue::applyPendingTransactions(uint64_t frameNumber) {
    std::lock_guard _lock{mMutex};

    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, frameNumber);
    t.setApplyToken(mApplyToken).apply();
}
828
829void BLASTBufferQueue::mergePendingTransactions(SurfaceComposerClient::Transaction* t,
830 uint64_t frameNumber) {
831 auto mergeTransaction =
832 [&t, currentFrameNumber = frameNumber](
833 std::tuple<uint64_t, SurfaceComposerClient::Transaction> pendingTransaction) {
834 auto& [targetFrameNumber, transaction] = pendingTransaction;
835 if (currentFrameNumber < targetFrameNumber) {
836 return false;
837 }
838 t->merge(std::move(transaction));
839 return true;
840 };
841
842 mPendingTransactions.erase(std::remove_if(mPendingTransactions.begin(),
843 mPendingTransactions.end(), mergeTransaction),
844 mPendingTransactions.end());
845}
846
Vishnu Nair89496122020-12-14 17:14:53 -0800847// Maintains a single worker thread per process that services a list of runnables.
848class AsyncWorker : public Singleton<AsyncWorker> {
849private:
850 std::thread mThread;
851 bool mDone = false;
852 std::deque<std::function<void()>> mRunnables;
853 std::mutex mMutex;
854 std::condition_variable mCv;
855 void run() {
856 std::unique_lock<std::mutex> lock(mMutex);
857 while (!mDone) {
Vishnu Nair89496122020-12-14 17:14:53 -0800858 while (!mRunnables.empty()) {
Vishnu Nair51e4dc82021-10-01 15:32:33 -0700859 std::deque<std::function<void()>> runnables = std::move(mRunnables);
860 mRunnables.clear();
861 lock.unlock();
862 // Run outside the lock since the runnable might trigger another
863 // post to the async worker.
864 execute(runnables);
865 lock.lock();
Vishnu Nair89496122020-12-14 17:14:53 -0800866 }
Wonsik Kim567533e2021-05-04 19:31:29 -0700867 mCv.wait(lock);
Vishnu Nair89496122020-12-14 17:14:53 -0800868 }
869 }
870
Vishnu Nair51e4dc82021-10-01 15:32:33 -0700871 void execute(std::deque<std::function<void()>>& runnables) {
872 while (!runnables.empty()) {
873 std::function<void()> runnable = runnables.front();
874 runnables.pop_front();
875 runnable();
876 }
877 }
878
Vishnu Nair89496122020-12-14 17:14:53 -0800879public:
880 AsyncWorker() : Singleton<AsyncWorker>() { mThread = std::thread(&AsyncWorker::run, this); }
881
882 ~AsyncWorker() {
883 mDone = true;
884 mCv.notify_all();
885 if (mThread.joinable()) {
886 mThread.join();
887 }
888 }
889
890 void post(std::function<void()> runnable) {
891 std::unique_lock<std::mutex> lock(mMutex);
892 mRunnables.emplace_back(std::move(runnable));
893 mCv.notify_one();
894 }
895};
896ANDROID_SINGLETON_STATIC_INSTANCE(AsyncWorker);
897
898// Asynchronously calls ProducerListener functions so we can emulate one way binder calls.
899class AsyncProducerListener : public BnProducerListener {
900private:
901 const sp<IProducerListener> mListener;
902
903public:
904 AsyncProducerListener(const sp<IProducerListener>& listener) : mListener(listener) {}
905
906 void onBufferReleased() override {
907 AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferReleased(); });
908 }
909
910 void onBuffersDiscarded(const std::vector<int32_t>& slots) override {
911 AsyncWorker::getInstance().post(
912 [listener = mListener, slots = slots]() { listener->onBuffersDiscarded(slots); });
913 }
914};
915
916// Extends the BufferQueueProducer to create a wrapper around the listener so the listener calls
917// can be non-blocking when the producer is in the client process.
918class BBQBufferQueueProducer : public BufferQueueProducer {
919public:
920 BBQBufferQueueProducer(const sp<BufferQueueCore>& core)
921 : BufferQueueProducer(core, false /* consumerIsSurfaceFlinger*/) {}
922
923 status_t connect(const sp<IProducerListener>& listener, int api, bool producerControlledByApp,
924 QueueBufferOutput* output) override {
925 if (!listener) {
926 return BufferQueueProducer::connect(listener, api, producerControlledByApp, output);
927 }
928
929 return BufferQueueProducer::connect(new AsyncProducerListener(listener), api,
930 producerControlledByApp, output);
931 }
Vishnu Nair17dde612020-12-28 11:39:59 -0800932
933 int query(int what, int* value) override {
934 if (what == NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER) {
935 *value = 1;
936 return NO_ERROR;
937 }
938 return BufferQueueProducer::query(what, value);
939 }
Vishnu Nair89496122020-12-14 17:14:53 -0800940};
941
942// Similar to BufferQueue::createBufferQueue but creates an adapter specific bufferqueue producer.
943// This BQP allows invoking client specified ProducerListeners and invoke them asynchronously,
944// emulating one way binder call behavior. Without this, if the listener calls back into the queue,
945// we can deadlock.
946void BLASTBufferQueue::createBufferQueue(sp<IGraphicBufferProducer>* outProducer,
947 sp<IGraphicBufferConsumer>* outConsumer) {
948 LOG_ALWAYS_FATAL_IF(outProducer == nullptr, "BLASTBufferQueue: outProducer must not be NULL");
949 LOG_ALWAYS_FATAL_IF(outConsumer == nullptr, "BLASTBufferQueue: outConsumer must not be NULL");
950
951 sp<BufferQueueCore> core(new BufferQueueCore());
952 LOG_ALWAYS_FATAL_IF(core == nullptr, "BLASTBufferQueue: failed to create BufferQueueCore");
953
954 sp<IGraphicBufferProducer> producer(new BBQBufferQueueProducer(core));
955 LOG_ALWAYS_FATAL_IF(producer == nullptr,
956 "BLASTBufferQueue: failed to create BBQBufferQueueProducer");
957
Vishnu Nair8b30dd12021-01-25 14:16:54 -0800958 sp<BufferQueueConsumer> consumer(new BufferQueueConsumer(core));
959 consumer->setAllowExtraAcquire(true);
Vishnu Nair89496122020-12-14 17:14:53 -0800960 LOG_ALWAYS_FATAL_IF(consumer == nullptr,
961 "BLASTBufferQueue: failed to create BufferQueueConsumer");
962
963 *outProducer = producer;
964 *outConsumer = consumer;
965}
966
chaviw497e81c2021-02-04 17:09:47 -0800967PixelFormat BLASTBufferQueue::convertBufferFormat(PixelFormat& format) {
968 PixelFormat convertedFormat = format;
969 switch (format) {
970 case PIXEL_FORMAT_TRANSPARENT:
971 case PIXEL_FORMAT_TRANSLUCENT:
972 convertedFormat = PIXEL_FORMAT_RGBA_8888;
973 break;
974 case PIXEL_FORMAT_OPAQUE:
975 convertedFormat = PIXEL_FORMAT_RGBX_8888;
976 break;
977 }
978 return convertedFormat;
979}
980
Robert Carr82d07c92021-05-10 11:36:43 -0700981uint32_t BLASTBufferQueue::getLastTransformHint() const {
982 if (mSurfaceControl != nullptr) {
983 return mSurfaceControl->getTransformHint();
984 } else {
985 return 0;
986 }
987}
988
// Returns the frame number of the most recently acquired buffer. Takes mMutex.
uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
    std::unique_lock _lock{mMutex};
    return mLastAcquiredFrameNumber;
}
993
// When the producer disconnects, all buffers in the queue will be freed. So clean up the bbq
// acquire state and handle any pending release callbacks. If we do get a release callback for a
// pending buffer for a disconnected queue, we cannot release the buffer back to the queue. So track
// these separately and drop the release callbacks as they come.

// Transaction callbacks are still expected to come in the order they were submitted regardless of
// buffer queue state. So we can continue to handle the pending transactions and transaction
// complete callbacks. When the queue is reconnected, the queue will increment the framenumbers
// starting from the last queued framenumber.
void BLASTBufferQueue::onProducerDisconnect() {
    BQA_LOGV("onProducerDisconnect");
    std::scoped_lock _lock{mMutex};
    // reset counts since the queue has been disconnected and all buffers have been freed.
    mNumFrameAvailable = 0;
    mNumAcquired = 0;

    // Track submitted buffers in a different container so we can handle any pending release buffer
    // callbacks without affecting the BBQ acquire state.
    mFreedBuffers.insert(mSubmitted.begin(), mSubmitted.end());
    mSubmitted.clear();
    mPendingRelease.clear();
    mProducerDisconnectCount++;
    // Wake any thread blocked in onFrameAvailable waiting for acquire capacity.
    mCallbackCV.notify_all();
    // Re-arm the one-shot diagnostic logs for the next disconnect cycle.
    mLogMissingReleaseCallback = true;
    mLogScSwap = true;
}
1020
Robert Carr78c25dd2019-08-15 14:10:33 -07001021} // namespace android