/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "BLASTBufferQueue"

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#include <com_android_graphics_libgui_flags.h>
#include <cutils/atomic.h>
#include <ftl/fake_guard.h>
#include <gui/BLASTBufferQueue.h>
#include <gui/BufferItemConsumer.h>
#include <gui/BufferQueueConsumer.h>
#include <gui/BufferQueueCore.h>
#include <gui/BufferQueueProducer.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

#include <gui/FrameRateUtils.h>
#include <gui/GLConsumer.h>
#include <gui/IProducerListener.h>
#include <gui/Surface.h>
#include <gui/TraceUtils.h>
#include <utils/Singleton.h>
#include <utils/Trace.h>

#include <private/gui/ComposerService.h>
#include <private/gui/ComposerServiceAIDL.h>

#include <android-base/thread_annotations.h>

#include <com_android_graphics_libgui_flags.h>

using namespace com::android::graphics::libgui;
using namespace std::chrono_literals;

namespace {

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
template <class Mutex>
class UnlockGuard {
public:
    explicit UnlockGuard(Mutex& lock) : mLock{lock} { mLock.unlock(); }

    ~UnlockGuard() { mLock.lock(); }

    UnlockGuard(const UnlockGuard&) = delete;
    UnlockGuard& operator=(const UnlockGuard&) = delete;

private:
    Mutex& mLock;
};
#endif

inline const char* boolToString(bool b) {
    return b ? "true" : "false";
}

} // namespace

namespace android {

// Macros to include adapter info in log messages
#define BQA_LOGD(x, ...) \
    ALOGD("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGV(x, ...) \
    ALOGV("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
// enable logs for a single layer
//#define BQA_LOGV(x, ...) \
//    ALOGV_IF((strstr(mName.c_str(), "SurfaceView") != nullptr), "[%s](f:%u,a:%u) " x, \
//             mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGE(x, ...) \
    ALOGE("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)

#define BBQ_TRACE(x, ...) \
    ATRACE_FORMAT("%s - %s(f:%u,a:%u)" x, __FUNCTION__, mName.c_str(), mNumFrameAvailable, \
                  mNumAcquired, ##__VA_ARGS__)

#define UNIQUE_LOCK_WITH_ASSERTION(mutex) \
    std::unique_lock _lock{mutex}; \
    base::ScopedLockAssertion assumeLocked(mutex);

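// BLASTBufferItemConsumer is the consumer-side listener for the adapter's BufferQueue. It tracks
// producer connect/disconnect events and keeps the FrameEventHistory that is reported back to the
// producer via addAndGetFrameTimestamps/updateFrameTimestamps below.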
void BLASTBufferItemConsumer::onDisconnect() {
    Mutex::Autolock lock(mMutex);
    mPreviouslyConnected = mCurrentlyConnected;
    mCurrentlyConnected = false;
    if (mPreviouslyConnected) {
        mDisconnectEvents.push(mCurrentFrameNumber);
    }
    mFrameEventHistory.onDisconnect();
}

void BLASTBufferItemConsumer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                                       FrameEventHistoryDelta* outDelta) {
    Mutex::Autolock lock(mMutex);
    if (newTimestamps) {
        // BufferQueueProducer only adds a new timestamp on
        // queueBuffer
        mCurrentFrameNumber = newTimestamps->frameNumber;
        mFrameEventHistory.addQueue(*newTimestamps);
    }
    if (outDelta) {
        // frame event histories will be processed
        // only after the producer connects and requests
        // deltas for the first time. Forward this intent
        // to SF-side to turn event processing back on
        mPreviouslyConnected = mCurrentlyConnected;
        mCurrentlyConnected = true;
        mFrameEventHistory.getAndResetDelta(outDelta);
    }
}

void BLASTBufferItemConsumer::updateFrameTimestamps(
        uint64_t frameNumber, uint64_t previousFrameNumber, nsecs_t refreshStartTime,
        const sp<Fence>& glDoneFence, const sp<Fence>& presentFence,
        const sp<Fence>& prevReleaseFence, CompositorTiming compositorTiming, nsecs_t latchTime,
        nsecs_t dequeueReadyTime) {
    Mutex::Autolock lock(mMutex);

    // if the producer is not connected, don't bother updating,
    // the next producer that connects won't access this frame event
    if (!mCurrentlyConnected) return;
    std::shared_ptr<FenceTime> glDoneFenceTime = std::make_shared<FenceTime>(glDoneFence);
    std::shared_ptr<FenceTime> presentFenceTime = std::make_shared<FenceTime>(presentFence);
    std::shared_ptr<FenceTime> releaseFenceTime = std::make_shared<FenceTime>(prevReleaseFence);

    mFrameEventHistory.addLatch(frameNumber, latchTime);
    if (flags::frametimestamps_previousrelease()) {
        if (previousFrameNumber > 0) {
            mFrameEventHistory.addRelease(previousFrameNumber, dequeueReadyTime,
                                          std::move(releaseFenceTime));
        }
    } else {
        mFrameEventHistory.addRelease(frameNumber, dequeueReadyTime, std::move(releaseFenceTime));
    }

    mFrameEventHistory.addPreComposition(frameNumber, refreshStartTime);
    mFrameEventHistory.addPostComposition(frameNumber, glDoneFenceTime, presentFenceTime,
                                          compositorTiming);
}

void BLASTBufferItemConsumer::getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) {
    bool disconnect = false;
    Mutex::Autolock lock(mMutex);
    while (!mDisconnectEvents.empty() && mDisconnectEvents.front() <= frameNumber) {
        disconnect = true;
        mDisconnectEvents.pop();
    }
    if (needsDisconnect != nullptr) *needsDisconnect = disconnect;
}

void BLASTBufferItemConsumer::onSidebandStreamChanged() {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        sp<NativeHandle> stream = getSidebandStream();
        bbq->setSidebandStream(stream);
    }
}

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_SETFRAMERATE)
void BLASTBufferItemConsumer::onSetFrameRate(float frameRate, int8_t compatibility,
                                             int8_t changeFrameRateStrategy) {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        bbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
    }
}
#endif

void BLASTBufferItemConsumer::resizeFrameEventHistory(size_t newSize) {
    Mutex::Autolock lock(mMutex);
    mFrameEventHistory.resize(newSize);
}

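// The BLASTBufferQueue adapter owns a producer/consumer BufferQueue pair in the client process and
// forwards each acquired buffer to SurfaceFlinger as a one-way transaction on mApplyToken.
//
// A minimal usage sketch (client side; the SurfaceControl, sizes, and sp<>::make construction are
// assumptions for illustration, not part of this file):
//
//   sp<BLASTBufferQueue> bbq =
//           sp<BLASTBufferQueue>::make("MyLayer", true /* updateDestinationFrame */);
//   bbq->update(surfaceControl, 1080, 1920, PIXEL_FORMAT_RGBA_8888);
//   sp<Surface> surface = bbq->getSurface(false /* includeSurfaceControlHandle */);
//   // Buffers queued onto `surface` are acquired here and sent to SurfaceFlinger in transactions
//   // targeting the SurfaceControl passed to update().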
BLASTBufferQueue::BLASTBufferQueue(const std::string& name, bool updateDestinationFrame)
      : mSurfaceControl(nullptr),
        mSize(1, 1),
        mRequestedSize(mSize),
        mFormat(PIXEL_FORMAT_RGBA_8888),
        mTransactionReadyCallback(nullptr),
        mSyncTransaction(nullptr),
        mUpdateDestinationFrame(updateDestinationFrame) {
    createBufferQueue(&mProducer, &mConsumer);
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
    mBufferItemConsumer = new BLASTBufferItemConsumer(mProducer, mConsumer,
                                                      GraphicBuffer::USAGE_HW_COMPOSER |
                                                              GraphicBuffer::USAGE_HW_TEXTURE,
                                                      1, false, this);
#else
    mBufferItemConsumer = new BLASTBufferItemConsumer(mConsumer,
                                                      GraphicBuffer::USAGE_HW_COMPOSER |
                                                              GraphicBuffer::USAGE_HW_TEXTURE,
                                                      1, false, this);
#endif // COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(WB_CONSUMER_BASE_OWNS_BQ)
    // since the adapter is in the client process, set dequeue timeout
    // explicitly so that dequeueBuffer will block
    mProducer->setDequeueTimeout(std::numeric_limits<int64_t>::max());

    static std::atomic<uint32_t> nextId = 0;
    mProducerId = nextId++;
    mName = name + "#" + std::to_string(mProducerId);
    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(mProducerId);
    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(mProducerId);
    mBufferItemConsumer->setName(String8(consumerName.c_str()));
    mBufferItemConsumer->setFrameAvailableListener(this);

    ComposerServiceAIDL::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
    mBufferItemConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBuffers);
    mCurrentMaxAcquiredBufferCount = mMaxAcquiredBuffers;
    mNumAcquired = 0;
    mNumFrameAvailable = 0;

    TransactionCompletedListener::getInstance()->addQueueStallListener(
            [&](const std::string& reason) {
                std::function<void(const std::string&)> callbackCopy;
                {
                    std::unique_lock _lock{mMutex};
                    callbackCopy = mTransactionHangCallback;
                }
                if (callbackCopy) callbackCopy(reason);
            },
            this);

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
    gui::BufferReleaseChannel::open(mName, mBufferReleaseConsumer, mBufferReleaseProducer);
    mBufferReleaseReader.emplace(*this);
#endif

    BQA_LOGV("BLASTBufferQueue created");
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, const sp<SurfaceControl>& surface,
                                   int width, int height, int32_t format)
      : BLASTBufferQueue(name) {
    update(surface, width, height, format);
}

BLASTBufferQueue::~BLASTBufferQueue() {
    TransactionCompletedListener::getInstance()->removeQueueStallListener(this);
    if (mPendingTransactions.empty()) {
        return;
    }
    BQA_LOGE("Applying pending transactions on dtor %d",
             static_cast<uint32_t>(mPendingTransactions.size()));
    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
    // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
    t.setApplyToken(mApplyToken).apply(false, true);

    if (mTransactionReadyCallback) {
        mTransactionReadyCallback(mSyncTransaction);
    }
}

void BLASTBufferQueue::onFirstRef() {
    // safe default, most producers are expected to override this
    mProducer->setMaxDequeuedBufferCount(2);
}

void BLASTBufferQueue::update(const sp<SurfaceControl>& surface, uint32_t width, uint32_t height,
                              int32_t format) {
    LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");

    std::lock_guard _lock{mMutex};
    if (mFormat != format) {
        mFormat = format;
        mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
    }

    const bool surfaceControlChanged = !SurfaceControl::isSameSurface(mSurfaceControl, surface);
    if (surfaceControlChanged && mSurfaceControl != nullptr) {
        BQA_LOGD("Updating SurfaceControl without recreating BBQ");
    }
    bool applyTransaction = false;

    // Always update the native object even though they might have the same layer handle, so we can
    // get the updated transform hint from WM.
    mSurfaceControl = surface;
    SurfaceComposerClient::Transaction t;
    if (surfaceControlChanged) {
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        // SELinux policy may prevent this process from sending the BufferReleaseChannel's file
        // descriptor to SurfaceFlinger, causing the entire transaction to be dropped. This
        // transaction is applied separately to ensure we don't lose the other updates.
        t.setApplyToken(mApplyToken)
                .setBufferReleaseChannel(mSurfaceControl, mBufferReleaseProducer)
                .apply(false /* synchronous */, true /* oneWay */);
#endif
        t.setFlags(mSurfaceControl, layer_state_t::eEnableBackpressure,
                   layer_state_t::eEnableBackpressure);
        applyTransaction = true;
    }
    mTransformHint = mSurfaceControl->getTransformHint();
    mBufferItemConsumer->setTransformHint(mTransformHint);
    BQA_LOGV("update width=%d height=%d format=%d mTransformHint=%d", width, height, format,
             mTransformHint);

    ui::Size newSize(width, height);
    if (mRequestedSize != newSize) {
        mRequestedSize.set(newSize);
        mBufferItemConsumer->setDefaultBufferSize(mRequestedSize.width, mRequestedSize.height);
        if (mLastBufferInfo.scalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
            // If the buffer supports scaling, update the frame immediately since the client may
            // want to scale the existing buffer to the new size.
            mSize = mRequestedSize;
            if (mUpdateDestinationFrame) {
                t.setDestinationFrame(mSurfaceControl, Rect(newSize));
                applyTransaction = true;
            }
        }
    }
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t.setApplyToken(mApplyToken).apply(false /* synchronous */, true /* oneWay */);
    }
}

static std::optional<SurfaceControlStats> findMatchingStat(
        const std::vector<SurfaceControlStats>& stats, const sp<SurfaceControl>& sc) {
    for (auto stat : stats) {
        if (SurfaceControl::isSameSurface(sc, stat.surfaceControl)) {
            return stat;
        }
    }
    return std::nullopt;
}

TransactionCompletedCallbackTakesContext BLASTBufferQueue::makeTransactionCommittedCallbackThunk() {
    return [bbq = sp<BLASTBufferQueue>::fromExisting(
                    this)](void* /*context*/, nsecs_t latchTime, const sp<Fence>& presentFence,
                           const std::vector<SurfaceControlStats>& stats) {
        bbq->transactionCommittedCallback(latchTime, presentFence, stats);
    };
}

void BLASTBufferQueue::transactionCommittedCallback(nsecs_t /*latchTime*/,
                                                    const sp<Fence>& /*presentFence*/,
                                                    const std::vector<SurfaceControlStats>& stats) {
    {
        std::lock_guard _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCommittedCallback");
        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            std::optional<SurfaceControlStats> stat = findMatchingStat(stats, pendingSC);
            if (stat) {
                uint64_t currFrameNumber = stat->frameEventStats.frameNumber;

                // We need to check if we were waiting for a transaction callback in order to
                // process any pending buffers and unblock. It's possible to get transaction
                // callbacks for previous requests so we need to ensure that there are no pending
                // frame numbers that were in a sync. We remove the frame from mSyncedFrameNumbers
                // set and then check if it's empty. If there are no more pending syncs, we can
                // proceed with flushing the shadow queue.
                mSyncedFrameNumbers.erase(currFrameNumber);
                if (mSyncedFrameNumbers.empty()) {
                    flushShadowQueue();
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCommittedCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }
    }
}

TransactionCompletedCallbackTakesContext BLASTBufferQueue::makeTransactionCallbackThunk() {
    return [bbq = sp<BLASTBufferQueue>::fromExisting(
                    this)](void* /*context*/, nsecs_t latchTime, const sp<Fence>& presentFence,
                           const std::vector<SurfaceControlStats>& stats) {
        bbq->transactionCallback(latchTime, presentFence, stats);
    };
}

void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                           const std::vector<SurfaceControlStats>& stats) {
    {
        std::lock_guard _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCallback");

        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            mSurfaceControlsWithPendingCallback.pop();
            std::optional<SurfaceControlStats> statsOptional = findMatchingStat(stats, pendingSC);
            if (statsOptional) {
                SurfaceControlStats stat = *statsOptional;
                if (stat.transformHint) {
                    mTransformHint = *stat.transformHint;
                    mBufferItemConsumer->setTransformHint(mTransformHint);
                    BQA_LOGV("updated mTransformHint=%d", mTransformHint);
                }
                // Update frametime stamps if the frame was latched and presented, indicated by a
                // valid latch time.
                if (stat.latchTime > 0) {
                    mBufferItemConsumer
                            ->updateFrameTimestamps(stat.frameEventStats.frameNumber,
                                                    stat.frameEventStats.previousFrameNumber,
                                                    stat.frameEventStats.refreshStartTime,
                                                    stat.frameEventStats.gpuCompositionDoneFence,
                                                    stat.presentFence, stat.previousReleaseFence,
                                                    stat.frameEventStats.compositorTiming,
                                                    stat.latchTime,
                                                    stat.frameEventStats.dequeueReadyTime);
                }
                auto currFrameNumber = stat.frameEventStats.frameNumber;
                std::vector<ReleaseCallbackId> staleReleases;
                for (const auto& [key, value]: mSubmitted) {
                    if (currFrameNumber > key.framenumber) {
                        staleReleases.push_back(key);
                    }
                }
                for (const auto& staleRelease : staleReleases) {
                    releaseBufferCallbackLocked(staleRelease,
                                                stat.previousReleaseFence
                                                        ? stat.previousReleaseFence
                                                        : Fence::NO_FENCE,
                                                stat.currentMaxAcquiredBufferCount,
                                                true /* fakeRelease */);
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }
    }
}

void BLASTBufferQueue::flushShadowQueue() {
    BQA_LOGV("flushShadowQueue");
    int numFramesToFlush = mNumFrameAvailable;
    while (numFramesToFlush > 0) {
        acquireNextBufferLocked(std::nullopt);
        numFramesToFlush--;
    }
}

// Unlike transactionCallbackThunk the release buffer callback does not extend the life of the
// BBQ. This is because if the BBQ is destroyed, then the buffers will be released by the client.
// So we pass in a weak pointer to the BBQ and if it still alive, then we release the buffer.
// Otherwise, this is a no-op.
ReleaseBufferCallback BLASTBufferQueue::makeReleaseBufferCallbackThunk() {
    return [weakBbq = wp<BLASTBufferQueue>::fromExisting(
                    this)](const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
                           std::optional<uint32_t> currentMaxAcquiredBufferCount) {
        sp<BLASTBufferQueue> bbq = weakBbq.promote();
        if (!bbq) {
            ALOGV("releaseBufferCallbackThunk %s blastBufferQueue is dead", id.to_string().c_str());
            return;
        }
        bbq->releaseBufferCallback(id, releaseFence, currentMaxAcquiredBufferCount);
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        bbq->drainBufferReleaseConsumer();
#endif
    };
}

void BLASTBufferQueue::releaseBufferCallback(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    std::lock_guard _lock{mMutex};
    BBQ_TRACE();
    releaseBufferCallbackLocked(id, releaseFence, currentMaxAcquiredBufferCount,
                                false /* fakeRelease */);
}

void BLASTBufferQueue::releaseBufferCallbackLocked(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount, bool fakeRelease) {
    ATRACE_CALL();
    BQA_LOGV("releaseBufferCallback %s", id.to_string().c_str());

    // Calculate how many buffers we need to hold before we release them back
    // to the buffer queue. This will prevent higher latency when we are running
    // on a lower refresh rate than the max supported. We only do that for EGL
    // clients as others don't care about latency
    const auto it = mSubmitted.find(id);
    const bool isEGL = it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;

    if (currentMaxAcquiredBufferCount) {
        mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
    }

    const uint32_t numPendingBuffersToHold =
            isEGL ? std::max(0, mMaxAcquiredBuffers - (int32_t)mCurrentMaxAcquiredBufferCount) : 0;

    auto rb = ReleasedBuffer{id, releaseFence};
    if (std::find(mPendingRelease.begin(), mPendingRelease.end(), rb) == mPendingRelease.end()) {
        mPendingRelease.emplace_back(rb);
        if (fakeRelease) {
            BQA_LOGE("Faking releaseBufferCallback from transactionCompleteCallback %" PRIu64,
                     id.framenumber);
            BBQ_TRACE("FakeReleaseCallback");
        }
    }

    // Release all buffers that are beyond the ones that we need to hold
    while (mPendingRelease.size() > numPendingBuffersToHold) {
        const auto releasedBuffer = mPendingRelease.front();
        mPendingRelease.pop_front();
        releaseBuffer(releasedBuffer.callbackId, releasedBuffer.releaseFence);
        // Don't process the transactions here if mSyncedFrameNumbers is not empty. That means
        // there are still transactions that have sync buffers in them that have not been applied
        // or dropped. Instead, let onFrameAvailable handle processing them since it will merge
        // with the syncTransaction.
        if (mSyncedFrameNumbers.empty()) {
            acquireNextBufferLocked(std::nullopt);
        }
    }

    ATRACE_INT("PendingRelease", mPendingRelease.size());
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());
    mCallbackCV.notify_all();
}

void BLASTBufferQueue::releaseBuffer(const ReleaseCallbackId& callbackId,
                                     const sp<Fence>& releaseFence) {
    auto it = mSubmitted.find(callbackId);
    if (it == mSubmitted.end()) {
        return;
    }
    mNumAcquired--;
    BBQ_TRACE("frame=%" PRIu64, callbackId.framenumber);
    BQA_LOGV("released %s", callbackId.to_string().c_str());
    mBufferItemConsumer->releaseBuffer(it->second, releaseFence);
    mSubmitted.erase(it);
    // Remove the frame number from mSyncedFrameNumbers since we can get a release callback
    // without getting a transaction committed if the buffer was dropped.
    mSyncedFrameNumbers.erase(callbackId.framenumber);
}

static ui::Size getBufferSize(const BufferItem& item) {
    uint32_t bufWidth = item.mGraphicBuffer->getWidth();
    uint32_t bufHeight = item.mGraphicBuffer->getHeight();

    // Take the buffer's orientation into account
    if (item.mTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }
    return ui::Size(bufWidth, bufHeight);
}

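// Acquires the next buffer from the BufferQueue (when one is available and the acquire limit has
// not been reached) and stages it, together with its crop, transform, frame timeline info, and
// completion/release callbacks, onto either the caller-provided sync transaction or a local
// transaction that is applied one-way on mApplyToken.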
status_t BLASTBufferQueue::acquireNextBufferLocked(
        const std::optional<SurfaceComposerClient::Transaction*> transaction) {
    // Check if we have frames available and we have not acquired the maximum number of buffers.
    // Even with this check, the consumer can fail to acquire an additional buffer if the consumer
    // has already acquired (mMaxAcquiredBuffers + 1) and the new buffer is not droppable. In this
    // case mBufferItemConsumer->acquireBuffer will return with NO_BUFFER_AVAILABLE.
    if (mNumFrameAvailable == 0) {
        BQA_LOGV("Can't acquire next buffer. No available frames");
        return BufferQueue::NO_BUFFER_AVAILABLE;
    }

    if (mNumAcquired >= (mMaxAcquiredBuffers + 2)) {
        BQA_LOGV("Can't acquire next buffer. Already acquired max frames %d max:%d + 2",
                 mNumAcquired, mMaxAcquiredBuffers);
        return BufferQueue::NO_BUFFER_AVAILABLE;
    }

    if (mSurfaceControl == nullptr) {
        BQA_LOGE("ERROR : surface control is null");
        return NAME_NOT_FOUND;
    }

    SurfaceComposerClient::Transaction localTransaction;
    bool applyTransaction = true;
    SurfaceComposerClient::Transaction* t = &localTransaction;
    if (transaction) {
        t = *transaction;
        applyTransaction = false;
    }

    BufferItem bufferItem;

    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status == BufferQueue::NO_BUFFER_AVAILABLE) {
        BQA_LOGV("Failed to acquire a buffer, err=NO_BUFFER_AVAILABLE");
        return status;
    } else if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer, err=%s", statusToString(status).c_str());
        return status;
    }

    auto buffer = bufferItem.mGraphicBuffer;
    mNumFrameAvailable--;
    BBQ_TRACE("frame=%" PRIu64, bufferItem.mFrameNumber);

    if (buffer == nullptr) {
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        BQA_LOGE("Buffer was empty");
        return BAD_VALUE;
    }

    if (rejectBuffer(bufferItem)) {
        BQA_LOGE("rejecting buffer:active_size=%dx%d, requested_size=%dx%d "
                 "buffer{size=%dx%d transform=%d}",
                 mSize.width, mSize.height, mRequestedSize.width, mRequestedSize.height,
                 buffer->getWidth(), buffer->getHeight(), bufferItem.mTransform);
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        return acquireNextBufferLocked(transaction);
    }

    mNumAcquired++;
    mLastAcquiredFrameNumber = bufferItem.mFrameNumber;
    ReleaseCallbackId releaseCallbackId(buffer->getId(), mLastAcquiredFrameNumber);
    mSubmitted[releaseCallbackId] = bufferItem;

    bool needsDisconnect = false;
    mBufferItemConsumer->getConnectionEvents(bufferItem.mFrameNumber, &needsDisconnect);

    // if producer disconnected before, notify SurfaceFlinger
    if (needsDisconnect) {
        t->notifyProducerDisconnect(mSurfaceControl);
    }

    // Only update mSize for destination bounds if the incoming buffer matches the requested size.
    // Otherwise, it could cause stretching since the destination bounds will update before the
    // buffer with the new size is acquired.
    if (mRequestedSize == getBufferSize(bufferItem) ||
        bufferItem.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        mSize = mRequestedSize;
    }
    Rect crop = computeCrop(bufferItem);
    mLastBufferInfo.update(true /* hasBuffer */, bufferItem.mGraphicBuffer->getWidth(),
                           bufferItem.mGraphicBuffer->getHeight(), bufferItem.mTransform,
                           bufferItem.mScalingMode, crop);

    auto releaseBufferCallback = makeReleaseBufferCallbackThunk();
    sp<Fence> fence = bufferItem.mFence ? new Fence(bufferItem.mFence->dup()) : Fence::NO_FENCE;

    nsecs_t dequeueTime = -1;
    {
        std::lock_guard _lock{mTimestampMutex};
        auto dequeueTimeIt = mDequeueTimestamps.find(buffer->getId());
        if (dequeueTimeIt != mDequeueTimestamps.end()) {
            dequeueTime = dequeueTimeIt->second;
            mDequeueTimestamps.erase(dequeueTimeIt);
        }
    }

    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, mProducerId,
                 releaseBufferCallback, dequeueTime);
    t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
    t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
    t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
    t->addTransactionCompletedCallback(makeTransactionCallbackThunk(), nullptr);

    mSurfaceControlsWithPendingCallback.push(mSurfaceControl);

    if (mUpdateDestinationFrame) {
        t->setDestinationFrame(mSurfaceControl, Rect(mSize));
    } else {
        const bool ignoreDestinationFrame =
                bufferItem.mScalingMode == NATIVE_WINDOW_SCALING_MODE_FREEZE;
        t->setFlags(mSurfaceControl,
                    ignoreDestinationFrame ? layer_state_t::eIgnoreDestinationFrame : 0,
                    layer_state_t::eIgnoreDestinationFrame);
    }
    t->setBufferCrop(mSurfaceControl, crop);
    t->setTransform(mSurfaceControl, bufferItem.mTransform);
    t->setTransformToDisplayInverse(mSurfaceControl, bufferItem.mTransformToDisplayInverse);
    t->setAutoRefresh(mSurfaceControl, bufferItem.mAutoRefresh);
    if (!bufferItem.mIsAutoTimestamp) {
        t->setDesiredPresentTime(bufferItem.mTimestamp);
    }

    // Drop stale frame timeline infos
    while (!mPendingFrameTimelines.empty() &&
           mPendingFrameTimelines.front().first < bufferItem.mFrameNumber) {
        ATRACE_FORMAT_INSTANT("dropping stale frameNumber: %" PRIu64 " vsyncId: %" PRId64,
                              mPendingFrameTimelines.front().first,
                              mPendingFrameTimelines.front().second.vsyncId);
        mPendingFrameTimelines.pop();
    }

    if (!mPendingFrameTimelines.empty() &&
        mPendingFrameTimelines.front().first == bufferItem.mFrameNumber) {
        ATRACE_FORMAT_INSTANT("Transaction::setFrameTimelineInfo frameNumber: %" PRIu64
                              " vsyncId: %" PRId64,
                              bufferItem.mFrameNumber,
                              mPendingFrameTimelines.front().second.vsyncId);
        t->setFrameTimelineInfo(mPendingFrameTimelines.front().second);
        mPendingFrameTimelines.pop();
    }

    mergePendingTransactions(t, bufferItem.mFrameNumber);
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t->setApplyToken(mApplyToken).apply(false, true);
        mAppliedLastTransaction = true;
        mLastAppliedFrameNumber = bufferItem.mFrameNumber;
    } else {
        t->setBufferHasBarrier(mSurfaceControl, mLastAppliedFrameNumber);
        mAppliedLastTransaction = false;
    }

    BQA_LOGV("acquireNextBufferLocked size=%dx%d mFrameNumber=%" PRIu64
             " applyTransaction=%s mTimestamp=%" PRId64 "%s mPendingTransactions.size=%d"
             " graphicBufferId=%" PRIu64 "%s transform=%d",
             mSize.width, mSize.height, bufferItem.mFrameNumber, boolToString(applyTransaction),
             bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp ? "(auto)" : "",
             static_cast<uint32_t>(mPendingTransactions.size()), bufferItem.mGraphicBuffer->getId(),
             bufferItem.mAutoRefresh ? " mAutoRefresh" : "", bufferItem.mTransform);
    return OK;
}

Rect BLASTBufferQueue::computeCrop(const BufferItem& item) {
    if (item.mScalingMode == NATIVE_WINDOW_SCALING_MODE_SCALE_CROP) {
        return GLConsumer::scaleDownCrop(item.mCrop, mSize.width, mSize.height);
    }
    return item.mCrop;
}

void BLASTBufferQueue::acquireAndReleaseBuffer() {
    BBQ_TRACE();
    BufferItem bufferItem;
    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer in acquireAndReleaseBuffer, err=%s",
                 statusToString(status).c_str());
        return;
    }
    mNumFrameAvailable--;
    mBufferItemConsumer->releaseBuffer(bufferItem, bufferItem.mFence);
}

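// Called by the BufferQueue when the producer queues a new buffer. The frame is added to the
// shadow queue (mNumFrameAvailable) and, unless a sync transaction is pending, acquired right
// away. When a sync has been requested via syncNextTransaction, the buffer is instead placed into
// mSyncTransaction and the registered callback is invoked outside the lock.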
void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;

    {
        UNIQUE_LOCK_WITH_ASSERTION(mMutex);
        BBQ_TRACE();
        bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();

        const bool syncTransactionSet = mTransactionReadyCallback != nullptr;
        BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            // If we are going to re-use the same mSyncTransaction, release the buffer that may
            // already be set in the Transaction. This is to allow us a free slot early to continue
            // processing a new buffer.
            if (!mAcquireSingleBuffer) {
                auto bufferData = mSyncTransaction->getAndClearBuffer(mSurfaceControl);
                if (bufferData) {
                    BQA_LOGD("Releasing previous buffer when syncing: framenumber=%" PRIu64,
                             bufferData->frameNumber);
                    releaseBuffer(bufferData->generateReleaseCallbackId(),
                                  bufferData->acquireFence);
                }
            }

            if (waitForTransactionCallback) {
                // We are waiting on a previous sync's transaction callback so allow another sync
                // transaction to proceed.
                //
                // We need to first flush out the transactions that were in between the two syncs.
                // We do this by merging them into mSyncTransaction so any buffer merging will get
                // a release callback invoked.
                while (mNumFrameAvailable > 0) {
                    // flush out the shadow queue
                    acquireAndReleaseBuffer();
                }
            } else {
                // Make sure the frame available count is 0 before proceeding with a sync to ensure
                // the correct frame is used for the sync. The only way mNumFrameAvailable would be
                // greater than 0 is if we already ran out of buffers previously. This means we
                // need to flush the buffers before proceeding with the sync.
                while (mNumFrameAvailable > 0) {
                    BQA_LOGD("waiting until no queued buffers");
                    mCallbackCV.wait(_lock);
                }
            }
        }

        // add to shadow queue
        mNumFrameAvailable++;
        if (waitForTransactionCallback && mNumFrameAvailable >= 2) {
            acquireAndReleaseBuffer();
        }
        ATRACE_INT(mQueuedBufferTrace.c_str(),
                   mNumFrameAvailable + mNumAcquired - mPendingRelease.size());

        BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " syncTransactionSet=%s",
                 item.mFrameNumber, boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            // Add to mSyncedFrameNumbers before waiting in case any buffers are released
            // while waiting for a free buffer. The release and commit callback will try to
            // acquire buffers if there are any available, but we don't want it to acquire
            // in the case where a sync transaction wants the buffer.
            mSyncedFrameNumbers.emplace(item.mFrameNumber);
            // If there's no available buffer and we're in a sync transaction, we need to wait
            // instead of returning since we guarantee a buffer will be acquired for the sync.
            while (acquireNextBufferLocked(mSyncTransaction) == BufferQueue::NO_BUFFER_AVAILABLE) {
                BQA_LOGD("waiting for available buffer");
                mCallbackCV.wait(_lock);
            }

            // Only need a commit callback when syncing to ensure the buffer that's synced has been
            // sent to SF
            mSyncTransaction
                    ->addTransactionCommittedCallback(makeTransactionCommittedCallbackThunk(),
                                                      nullptr);
            if (mAcquireSingleBuffer) {
                prevCallback = mTransactionReadyCallback;
                prevTransaction = mSyncTransaction;
                mTransactionReadyCallback = nullptr;
                mSyncTransaction = nullptr;
            }
        } else if (!waitForTransactionCallback) {
            acquireNextBufferLocked(std::nullopt);
        }
    }
    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::onFrameReplaced(const BufferItem& item) {
    BQA_LOGV("onFrameReplaced framenumber=%" PRIu64, item.mFrameNumber);
    // Do nothing since we are not storing unacquired buffer items locally.
}

void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
    std::lock_guard _lock{mTimestampMutex};
    mDequeueTimestamps[bufferId] = systemTime();
}

void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
    std::lock_guard _lock{mTimestampMutex};
    mDequeueTimestamps.erase(bufferId);
}

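// Registers a callback that will receive the transaction carrying the next queued buffer instead
// of having the adapter apply it. Returns false if another sync callback is already pending.
//
// A minimal sketch of the expected calling pattern (the callback body is an assumption for
// illustration):
//
//   bool accepted = bbq->syncNextTransaction(
//           [](SurfaceComposerClient::Transaction* t) {
//               // *t now carries the synced buffer; the caller decides when, and together with
//               // what other state, it gets applied.
//           },
//           true /* acquireSingleBuffer */);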
bool BLASTBufferQueue::syncNextTransaction(
        std::function<void(SurfaceComposerClient::Transaction*)> callback,
        bool acquireSingleBuffer) {
    LOG_ALWAYS_FATAL_IF(!callback,
                        "BLASTBufferQueue: callback passed in to syncNextTransaction must not be "
                        "NULL");

    std::lock_guard _lock{mMutex};
    BBQ_TRACE();
    if (mTransactionReadyCallback) {
        ALOGW("Attempting to overwrite transaction callback in syncNextTransaction");
        return false;
    }

    mTransactionReadyCallback = callback;
    mSyncTransaction = new SurfaceComposerClient::Transaction();
    mAcquireSingleBuffer = acquireSingleBuffer;
    return true;
}

void BLASTBufferQueue::stopContinuousSyncTransaction() {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;
    {
        std::lock_guard _lock{mMutex};
        if (mAcquireSingleBuffer || !mTransactionReadyCallback) {
            ALOGW("Attempting to stop continuous sync when none are active");
            return;
        }

        prevCallback = mTransactionReadyCallback;
        prevTransaction = mSyncTransaction;

        mTransactionReadyCallback = nullptr;
        mSyncTransaction = nullptr;
        mAcquireSingleBuffer = true;
    }

    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::clearSyncTransaction() {
    std::lock_guard _lock{mMutex};
    if (!mAcquireSingleBuffer) {
        ALOGW("Attempting to clear sync transaction when none are active");
        return;
    }

    mTransactionReadyCallback = nullptr;
    mSyncTransaction = nullptr;
}

bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
    if (item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        // Only reject buffers if scaling mode is freeze.
        return false;
    }

    ui::Size bufferSize = getBufferSize(item);
    if (mRequestedSize != mSize && mRequestedSize == bufferSize) {
        return false;
    }

    // reject buffers if the buffer size doesn't match.
    return mSize != bufferSize;
}

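// Surface subclass returned by getSurface(). It keeps the owning BLASTBufferQueue alive, routes
// setFrameRate/setFrameTimelineInfo through it until destroy() is called, and performs buffer
// allocation off the calling thread.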
class BBQSurface : public Surface {
private:
    std::mutex mMutex;
    sp<BLASTBufferQueue> mBbq GUARDED_BY(mMutex);
    bool mDestroyed GUARDED_BY(mMutex) = false;

public:
    BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
               const sp<IBinder>& scHandle, const sp<BLASTBufferQueue>& bbq)
          : Surface(igbp, controlledByApp, scHandle), mBbq(bbq) {}

    void allocateBuffers() override {
        uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
        uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
        auto gbp = getIGraphicBufferProducer();
        std::thread ([reqWidth, reqHeight, gbp=getIGraphicBufferProducer(),
                      reqFormat=mReqFormat, reqUsage=mReqUsage] () {
            gbp->allocateBuffers(reqWidth, reqHeight,
                                 reqFormat, reqUsage);

        }).detach();
    }

    status_t setFrameRate(float frameRate, int8_t compatibility,
                          int8_t changeFrameRateStrategy) override {
        if (flags::bq_setframerate()) {
            return Surface::setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
        }

        std::lock_guard _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
                               "BBQSurface::setFrameRate")) {
            return BAD_VALUE;
        }
        return mBbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
    }

    status_t setFrameTimelineInfo(uint64_t frameNumber,
                                  const FrameTimelineInfo& frameTimelineInfo) override {
        std::lock_guard _lock{mMutex};
        if (mDestroyed) {
            return DEAD_OBJECT;
        }
        return mBbq->setFrameTimelineInfo(frameNumber, frameTimelineInfo);
    }

    void destroy() override {
        Surface::destroy();

        std::lock_guard _lock{mMutex};
        mDestroyed = true;
        mBbq = nullptr;
    }
};

// TODO: Can we coalesce this with frame updates? Need to confirm
// no timing issues.
status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
                                        bool shouldBeSeamless) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
}

status_t BLASTBufferQueue::setFrameTimelineInfo(uint64_t frameNumber,
                                                const FrameTimelineInfo& frameTimelineInfo) {
    ATRACE_FORMAT("%s(%s) frameNumber: %" PRIu64 " vsyncId: %" PRId64, __func__, mName.c_str(),
                  frameNumber, frameTimelineInfo.vsyncId);
    std::lock_guard _lock{mMutex};
    mPendingFrameTimelines.push({frameNumber, frameTimelineInfo});
    return OK;
}

void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
    std::lock_guard _lock{mMutex};
    SurfaceComposerClient::Transaction t;

    t.setSidebandStream(mSurfaceControl, stream).apply();
}

sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
    std::lock_guard _lock{mMutex};
    sp<IBinder> scHandle = nullptr;
    if (includeSurfaceControlHandle && mSurfaceControl) {
        scHandle = mSurfaceControl->getHandle();
    }
    return new BBQSurface(mProducer, true, scHandle, this);
}

Vishnu Nairc4a40c12020-12-23 09:14:32 -08001019void BLASTBufferQueue::mergeWithNextTransaction(SurfaceComposerClient::Transaction* t,
1020 uint64_t frameNumber) {
1021 std::lock_guard _lock{mMutex};
1022 if (mLastAcquiredFrameNumber >= frameNumber) {
1023 // Apply the transaction since we have already acquired the desired frame.
1024 t->apply();
1025 } else {
chaviwaad6cf52021-03-23 17:27:20 -05001026 mPendingTransactions.emplace_back(frameNumber, *t);
1027 // Clear the transaction so it can't be applied elsewhere.
1028 t->clear();
Vishnu Nairc4a40c12020-12-23 09:14:32 -08001029 }
1030}
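// Illustrative client-side use of mergeWithNextTransaction() (a sketch; bbq, sc, x, y and
// frameNumber are assumed to exist in the caller and are not defined in this file):
//
//   SurfaceComposerClient::Transaction t;
//   t.setPosition(sc, x, y);
//   // Applied immediately if frameNumber was already acquired, otherwise queued and merged
//   // with the transaction that presents that frame.
//   bbq->mergeWithNextTransaction(&t, frameNumber);
//   // Either way the contents of t have been consumed by the call.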
1031
chaviw6a195272021-09-03 16:14:25 -05001032void BLASTBufferQueue::applyPendingTransactions(uint64_t frameNumber) {
1033 std::lock_guard _lock{mMutex};
1034
1035 SurfaceComposerClient::Transaction t;
1036 mergePendingTransactions(&t, frameNumber);
Robert Carr79dc06a2022-02-22 15:28:59 -08001037 // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
1038 t.setApplyToken(mApplyToken).apply(false, true);
chaviw6a195272021-09-03 16:14:25 -05001039}
1040
1041void BLASTBufferQueue::mergePendingTransactions(SurfaceComposerClient::Transaction* t,
1042 uint64_t frameNumber) {
1043 auto mergeTransaction =
1044 [&t, currentFrameNumber = frameNumber](
1045 std::tuple<uint64_t, SurfaceComposerClient::Transaction> pendingTransaction) {
1046 auto& [targetFrameNumber, transaction] = pendingTransaction;
1047 if (currentFrameNumber < targetFrameNumber) {
1048 return false;
1049 }
1050 t->merge(std::move(transaction));
1051 return true;
1052 };
1053
1054 mPendingTransactions.erase(std::remove_if(mPendingTransactions.begin(),
1055 mPendingTransactions.end(), mergeTransaction),
1056 mPendingTransactions.end());
1057}
1058
chaviwd84085a2022-02-08 11:07:04 -06001059SurfaceComposerClient::Transaction* BLASTBufferQueue::gatherPendingTransactions(
1060 uint64_t frameNumber) {
1061 std::lock_guard _lock{mMutex};
1062 SurfaceComposerClient::Transaction* t = new SurfaceComposerClient::Transaction();
1063 mergePendingTransactions(t, frameNumber);
1064 return t;
1065}
1066
Vishnu Nair89496122020-12-14 17:14:53 -08001067// Maintains a single worker thread per process that services a list of runnables.
1068class AsyncWorker : public Singleton<AsyncWorker> {
1069private:
1070 std::thread mThread;
1071 bool mDone = false;
1072 std::deque<std::function<void()>> mRunnables;
1073 std::mutex mMutex;
1074 std::condition_variable mCv;
1075 void run() {
1076 std::unique_lock<std::mutex> lock(mMutex);
1077 while (!mDone) {
Vishnu Nair89496122020-12-14 17:14:53 -08001078 while (!mRunnables.empty()) {
Vishnu Nair51e4dc82021-10-01 15:32:33 -07001079 std::deque<std::function<void()>> runnables = std::move(mRunnables);
1080 mRunnables.clear();
1081 lock.unlock();
1082 // Run outside the lock since the runnable might trigger another
1083 // post to the async worker.
1084 execute(runnables);
1085 lock.lock();
Vishnu Nair89496122020-12-14 17:14:53 -08001086 }
Wonsik Kim567533e2021-05-04 19:31:29 -07001087 mCv.wait(lock);
Vishnu Nair89496122020-12-14 17:14:53 -08001088 }
1089 }
1090
Vishnu Nair51e4dc82021-10-01 15:32:33 -07001091 void execute(std::deque<std::function<void()>>& runnables) {
1092 while (!runnables.empty()) {
1093 std::function<void()> runnable = runnables.front();
1094 runnables.pop_front();
1095 runnable();
1096 }
1097 }
1098
Vishnu Nair89496122020-12-14 17:14:53 -08001099public:
1100 AsyncWorker() : Singleton<AsyncWorker>() { mThread = std::thread(&AsyncWorker::run, this); }
1101
1102 ~AsyncWorker() {
 {
 // Set mDone under the lock so run() cannot check it between this write and the
 // notify below, which could make it miss the wakeup and leave join() blocked.
 std::unique_lock<std::mutex> lock(mMutex);
 mDone = true;
 }
 mCv.notify_all();
1105 if (mThread.joinable()) {
1106 mThread.join();
1107 }
1108 }
1109
1110 void post(std::function<void()> runnable) {
1111 std::unique_lock<std::mutex> lock(mMutex);
1112 mRunnables.emplace_back(std::move(runnable));
1113 mCv.notify_one();
1114 }
1115};
1116ANDROID_SINGLETON_STATIC_INSTANCE(AsyncWorker);
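// Minimal usage sketch (illustrative only): callers hand the singleton a std::function and
// return immediately; the shared worker thread runs it later, outside any caller-held locks.
// This is how AsyncProducerListener below defers its callbacks.
//
//   AsyncWorker::getInstance().post([]() {
//       ALOGV("deferred work running on the AsyncWorker thread");
//   });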
1117
1118// Asynchronously calls ProducerListener functions so we can emulate one way binder calls.
1119class AsyncProducerListener : public BnProducerListener {
1120private:
1121 const sp<IProducerListener> mListener;
1122
1123public:
1124 AsyncProducerListener(const sp<IProducerListener>& listener) : mListener(listener) {}
1125
1126 void onBufferReleased() override {
1127 AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferReleased(); });
1128 }
1129
1130 void onBuffersDiscarded(const std::vector<int32_t>& slots) override {
1131 AsyncWorker::getInstance().post(
1132 [listener = mListener, slots = slots]() { listener->onBuffersDiscarded(slots); });
1133 }
Sungtak Lee7c935092024-09-16 16:55:04 +00001134
1135 void onBufferDetached(int slot) override {
1136 AsyncWorker::getInstance().post(
1137 [listener = mListener, slot = slot]() { listener->onBufferDetached(slot); });
1138 }
1139
1140#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_CONSUMER_ATTACH_CALLBACK)
1141 void onBufferAttached() override {
1142 AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferAttached(); });
1143 }
1144#endif
Vishnu Nair89496122020-12-14 17:14:53 -08001145};
1146
Patrick Williams078d7362024-08-27 10:20:39 -05001147#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
1148class BBQBufferQueueCore : public BufferQueueCore {
1149public:
1150 explicit BBQBufferQueueCore(const wp<BLASTBufferQueue>& bbq) : mBLASTBufferQueue{bbq} {}
1151
1152 void notifyBufferReleased() const override {
1153 sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
1154 if (!bbq) {
1155 return;
1156 }
1157 bbq->mBufferReleaseReader->interruptBlockingRead();
1158 }
1159
1160private:
1161 wp<BLASTBufferQueue> mBLASTBufferQueue;
1162};
1163#endif
1164
Vishnu Nair89496122020-12-14 17:14:53 -08001165// Extends the BufferQueueProducer to create a wrapper around the listener so the listener calls
1166// can be non-blocking when the producer is in the client process.
1167class BBQBufferQueueProducer : public BufferQueueProducer {
1168public:
Patrick Williamsca81c052024-08-15 12:38:34 -05001169 BBQBufferQueueProducer(const sp<BufferQueueCore>& core, const wp<BLASTBufferQueue>& bbq)
Brian Lindahlc794b692023-01-31 15:42:47 -07001170 : BufferQueueProducer(core, false /* consumerIsSurfaceFlinger*/),
Patrick Williamsca81c052024-08-15 12:38:34 -05001171 mBLASTBufferQueue(bbq) {}
Vishnu Nair89496122020-12-14 17:14:53 -08001172
1173 status_t connect(const sp<IProducerListener>& listener, int api, bool producerControlledByApp,
1174 QueueBufferOutput* output) override {
1175 if (!listener) {
1176 return BufferQueueProducer::connect(listener, api, producerControlledByApp, output);
1177 }
1178
1179 return BufferQueueProducer::connect(new AsyncProducerListener(listener), api,
1180 producerControlledByApp, output);
1181 }
Vishnu Nair17dde612020-12-28 11:39:59 -08001182
Brian Lindahlc794b692023-01-31 15:42:47 -07001183 // We want to resize the frame history when changing the size of the buffer queue
1184 status_t setMaxDequeuedBufferCount(int maxDequeuedBufferCount) override {
1185 int maxBufferCount;
Patrick Williamsf5b42de2024-08-01 16:08:51 -05001186 if (status_t status = BufferQueueProducer::setMaxDequeuedBufferCount(maxDequeuedBufferCount,
1187 &maxBufferCount);
1188 status != OK) {
1189 return status;
Brian Lindahlc794b692023-01-31 15:42:47 -07001190 }
Patrick Williamsf5b42de2024-08-01 16:08:51 -05001191
1192 sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
1193 if (!bbq) {
1194 return OK;
1195 }
1196
1197 // maxBufferCount is now known; size the frame event history to cover it.
1198 size_t newFrameHistorySize = maxBufferCount + 2; // +2 because triple buffer rendering
1199 // optimize away resizing the frame history unless it will grow
1200 if (newFrameHistorySize > FrameEventHistory::INITIAL_MAX_FRAME_HISTORY) {
1201 ALOGV("increasing frame history size to %zu", newFrameHistorySize);
1202 bbq->resizeFrameEventHistory(newFrameHistorySize);
1203 }
1204
Patrick Williamsf5b42de2024-08-01 16:08:51 -05001205 return OK;
Brian Lindahlc794b692023-01-31 15:42:47 -07001206 }
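// Worked example of the sizing above (numbers are illustrative): if setMaxDequeuedBufferCount
// resolves maxBufferCount to 7, newFrameHistorySize becomes 9 and the history grows only if
// that exceeds FrameEventHistory::INITIAL_MAX_FRAME_HISTORY; smaller queues keep the default
// history size.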
1207
Vishnu Nair17dde612020-12-28 11:39:59 -08001208 int query(int what, int* value) override {
1209 if (what == NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER) {
1210 *value = 1;
Patrick Williamsf5b42de2024-08-01 16:08:51 -05001211 return OK;
Vishnu Nair17dde612020-12-28 11:39:59 -08001212 }
1213 return BufferQueueProducer::query(what, value);
1214 }
Brian Lindahlc794b692023-01-31 15:42:47 -07001215
Patrick Williams078d7362024-08-27 10:20:39 -05001216#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
1217 status_t waitForBufferRelease(std::unique_lock<std::mutex>& bufferQueueLock,
1218 nsecs_t timeout) const override {
1219 sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
1220 if (!bbq) {
1221 return OK;
1222 }
1223
1224 // BufferQueue has already checked if we have a free buffer. If there's an unread interrupt,
1225 // we want to ignore it. This must be done before unlocking the BufferQueue lock to ensure
1226 // we don't miss an interrupt.
1227 bbq->mBufferReleaseReader->clearInterrupts();
Patrick Williamsc16a4a52024-10-26 01:48:01 -05001228 UnlockGuard unlockGuard{bufferQueueLock};
Patrick Williams078d7362024-08-27 10:20:39 -05001229
1230 ATRACE_FORMAT("waiting for free buffer");
1231 ReleaseCallbackId id;
1232 sp<Fence> fence;
1233 uint32_t maxAcquiredBufferCount;
1234 status_t status =
1235 bbq->mBufferReleaseReader->readBlocking(id, fence, maxAcquiredBufferCount, timeout);
1236 if (status == TIMED_OUT) {
1237 return TIMED_OUT;
1238 } else if (status != OK) {
1239 // Waiting was interrupted or an error occurred. BufferQueueProducer will check if we
1240 // have a free buffer and call this method again if not.
1241 return OK;
1242 }
1243
1244 bbq->releaseBufferCallback(id, fence, maxAcquiredBufferCount);
1245 return OK;
1246 }
1247#endif
1248
Brian Lindahlc794b692023-01-31 15:42:47 -07001249private:
1250 const wp<BLASTBufferQueue> mBLASTBufferQueue;
Vishnu Nair89496122020-12-14 17:14:53 -08001251};
1252
1253// Similar to BufferQueue::createBufferQueue but creates an adapter specific bufferqueue producer.
1254// This BQP allows invoking client specified ProducerListeners and invoke them asynchronously,
1255// emulating one way binder call behavior. Without this, if the listener calls back into the queue,
1256// we can deadlock.
1257void BLASTBufferQueue::createBufferQueue(sp<IGraphicBufferProducer>* outProducer,
1258 sp<IGraphicBufferConsumer>* outConsumer) {
1259 LOG_ALWAYS_FATAL_IF(outProducer == nullptr, "BLASTBufferQueue: outProducer must not be NULL");
1260 LOG_ALWAYS_FATAL_IF(outConsumer == nullptr, "BLASTBufferQueue: outConsumer must not be NULL");
1261
Patrick Williams078d7362024-08-27 10:20:39 -05001262#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
1263 auto core = sp<BBQBufferQueueCore>::make(this);
1264#else
1265 auto core = sp<BufferQueueCore>::make();
1266#endif
Vishnu Nair89496122020-12-14 17:14:53 -08001267 LOG_ALWAYS_FATAL_IF(core == nullptr, "BLASTBufferQueue: failed to create BufferQueueCore");
1268
Patrick Williams078d7362024-08-27 10:20:39 -05001269 auto producer = sp<BBQBufferQueueProducer>::make(core, this);
Vishnu Nair89496122020-12-14 17:14:53 -08001270 LOG_ALWAYS_FATAL_IF(producer == nullptr,
1271 "BLASTBufferQueue: failed to create BBQBufferQueueProducer");
1272
Patrick Williams078d7362024-08-27 10:20:39 -05001273 auto consumer = sp<BufferQueueConsumer>::make(core);
Vishnu Nair8b30dd12021-01-25 14:16:54 -08001274 consumer->setAllowExtraAcquire(true);
Vishnu Nair89496122020-12-14 17:14:53 -08001275 LOG_ALWAYS_FATAL_IF(consumer == nullptr,
1276 "BLASTBufferQueue: failed to create BufferQueueConsumer");
1277
1278 *outProducer = producer;
1279 *outConsumer = consumer;
1280}
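// Sketch of how the adapter is expected to wire this up (variable names are illustrative, not
// taken from this file):
//
//   sp<IGraphicBufferProducer> producer;
//   sp<IGraphicBufferConsumer> consumer;
//   createBufferQueue(&producer, &consumer);
//   // producer is a BBQBufferQueueProducer, so a listener passed to connect() is wrapped in
//   // AsyncProducerListener and its callbacks run on the AsyncWorker thread, preventing a
//   // re-entrant listener from deadlocking on the BufferQueue lock.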
1281
Brian Lindahlc794b692023-01-31 15:42:47 -07001282void BLASTBufferQueue::resizeFrameEventHistory(size_t newSize) {
1283 // This can be null during creation of the buffer queue, but resizing won't do anything at that
1284 // point in time, so just ignore. This can go away once the class relationships and lifetimes of
1285 // objects are cleaned up with a major refactor of BufferQueue as a whole.
1286 if (mBufferItemConsumer != nullptr) {
1287 std::unique_lock _lock{mMutex};
1288 mBufferItemConsumer->resizeFrameEventHistory(newSize);
1289 }
1290}
1291
chaviw497e81c2021-02-04 17:09:47 -08001292PixelFormat BLASTBufferQueue::convertBufferFormat(PixelFormat& format) {
1293 PixelFormat convertedFormat = format;
1294 switch (format) {
1295 case PIXEL_FORMAT_TRANSPARENT:
1296 case PIXEL_FORMAT_TRANSLUCENT:
1297 convertedFormat = PIXEL_FORMAT_RGBA_8888;
1298 break;
1299 case PIXEL_FORMAT_OPAQUE:
1300 convertedFormat = PIXEL_FORMAT_RGBX_8888;
1301 break;
1302 }
1303 return convertedFormat;
1304}
1305
Robert Carr82d07c92021-05-10 11:36:43 -07001306uint32_t BLASTBufferQueue::getLastTransformHint() const {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001307 std::lock_guard _lock{mMutex};
Robert Carr82d07c92021-05-10 11:36:43 -07001308 if (mSurfaceControl != nullptr) {
1309 return mSurfaceControl->getTransformHint();
1310 } else {
1311 return 0;
1312 }
1313}
1314
chaviw0b020f82021-08-20 12:00:47 -05001315uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001316 std::lock_guard _lock{mMutex};
chaviw0b020f82021-08-20 12:00:47 -05001317 return mLastAcquiredFrameNumber;
1318}
1319
Vishnu Nair1e8bf102021-12-28 14:36:59 -08001320bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001321 std::lock_guard _lock{mMutex};
Vishnu Nair1e8bf102021-12-28 14:36:59 -08001322 return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
1323}
1324
Patrick Williamsf1e5df12022-10-17 21:37:42 +00001325void BLASTBufferQueue::setTransactionHangCallback(
1326 std::function<void(const std::string&)> callback) {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001327 std::lock_guard _lock{mMutex};
Patrick Williams7c9fa272024-08-30 12:38:43 +00001328 mTransactionHangCallback = std::move(callback);
Robert Carr4c1b6462021-12-21 10:30:50 -08001329}
1330
Vishnu Nairaf15fab2024-07-30 08:59:26 -07001331void BLASTBufferQueue::setApplyToken(sp<IBinder> applyToken) {
1332 std::lock_guard _lock{mMutex};
1333 mApplyToken = std::move(applyToken);
1334}
1335
Patrick Williams7c9fa272024-08-30 12:38:43 +00001336#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
1337
Patrick Williamsc16a4a52024-10-26 01:48:01 -05001338void BLASTBufferQueue::drainBufferReleaseConsumer() {
1339 ATRACE_CALL();
1340 while (true) {
1341 ReleaseCallbackId id;
1342 sp<Fence> fence;
1343 uint32_t maxAcquiredBufferCount;
1344 status_t status =
1345 mBufferReleaseConsumer->readReleaseFence(id, fence, maxAcquiredBufferCount);
1346 if (status != OK) {
1347 return;
1348 }
1349 releaseBufferCallback(id, fence, maxAcquiredBufferCount);
1350 }
1351}
1352
Patrick Williams078d7362024-08-27 10:20:39 -05001353BLASTBufferQueue::BufferReleaseReader::BufferReleaseReader(BLASTBufferQueue& bbq) : mBbq{bbq} {
1354 mEpollFd = android::base::unique_fd{epoll_create1(EPOLL_CLOEXEC)};
Patrick Williams7c9fa272024-08-30 12:38:43 +00001355 LOG_ALWAYS_FATAL_IF(!mEpollFd.ok(),
1356 "Failed to create buffer release epoll file descriptor. errno=%d "
1357 "message='%s'",
1358 errno, strerror(errno));
1359
1360 epoll_event registerEndpointFd{};
1361 registerEndpointFd.events = EPOLLIN;
Patrick Williams078d7362024-08-27 10:20:39 -05001362 registerEndpointFd.data.fd = mBbq.mBufferReleaseConsumer->getFd();
1363 status_t status = epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, mBbq.mBufferReleaseConsumer->getFd(),
1364 &registerEndpointFd);
Patrick Williams7c9fa272024-08-30 12:38:43 +00001365 LOG_ALWAYS_FATAL_IF(status == -1,
1366 "Failed to register buffer release consumer file descriptor with epoll. "
1367 "errno=%d message='%s'",
1368 errno, strerror(errno));
1369
1370 mEventFd = android::base::unique_fd(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
1371 LOG_ALWAYS_FATAL_IF(!mEventFd.ok(),
1372 "Failed to create buffer release event file descriptor. errno=%d "
1373 "message='%s'",
1374 errno, strerror(errno));
1375
1376 epoll_event registerEventFd{};
1377 registerEventFd.events = EPOLLIN;
1378 registerEventFd.data.fd = mEventFd.get();
1379 status = epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, mEventFd.get(), &registerEventFd);
1380 LOG_ALWAYS_FATAL_IF(status == -1,
1381 "Failed to register buffer release event file descriptor with epoll. "
1382 "errno=%d message='%s'",
1383 errno, strerror(errno));
1384}
1385
Patrick Williams7c9fa272024-08-30 12:38:43 +00001386status_t BLASTBufferQueue::BufferReleaseReader::readBlocking(ReleaseCallbackId& outId,
1387 sp<Fence>& outFence,
Patrick Williams078d7362024-08-27 10:20:39 -05001388 uint32_t& outMaxAcquiredBufferCount,
1389 nsecs_t timeout) {
1390 // TODO(b/363290953) epoll_wait only has millisecond timeout precision. If timeout is less than
1391 // 1ms, then we round timeout up to 1ms. Otherwise, we round timeout to the nearest
1392 // millisecond. Once epoll_pwait2 can be used in libgui, we can specify timeout with nanosecond
1393 // precision.
1394 int timeoutMs = -1;
1395 if (timeout == 0) {
1396 timeoutMs = 0;
1397 } else if (timeout > 0) {
1398 const int nsPerMs = 1000000;
1399 if (timeout < nsPerMs) {
1400 timeoutMs = 1;
1401 } else {
1402 timeoutMs = static_cast<int>(
1403 std::chrono::round<std::chrono::milliseconds>(std::chrono::nanoseconds{timeout})
1404 .count());
1405 }
1406 }
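// Examples of the rounding above: timeout = 400us yields timeoutMs = 1 (rounded up so a
// positive timeout never becomes a non-blocking poll); timeout = 16.7ms rounds to 17;
// a negative timeout leaves timeoutMs = -1, which makes epoll_wait block indefinitely.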
1407
Patrick Williams7c9fa272024-08-30 12:38:43 +00001408 epoll_event event{};
Patrick Williams078d7362024-08-27 10:20:39 -05001409 int eventCount;
1410 do {
1411 eventCount = epoll_wait(mEpollFd.get(), &event, 1 /*maxevents*/, timeoutMs);
1412 } while (eventCount == -1 && errno == EINTR); // retry only if interrupted by a signal
1413
1414 if (eventCount == -1) {
1415 ALOGE("epoll_wait error while waiting for buffer release. errno=%d message='%s'", errno,
1416 strerror(errno));
1417 return UNKNOWN_ERROR;
1418 }
1419
1420 if (eventCount == 0) {
1421 return TIMED_OUT;
Patrick Williams7c9fa272024-08-30 12:38:43 +00001422 }
1423
1424 if (event.data.fd == mEventFd.get()) {
Patrick Williams078d7362024-08-27 10:20:39 -05001425 clearInterrupts();
Patrick Williams7c9fa272024-08-30 12:38:43 +00001426 return WOULD_BLOCK;
1427 }
1428
Patrick Williams078d7362024-08-27 10:20:39 -05001429 return mBbq.mBufferReleaseConsumer->readReleaseFence(outId, outFence,
1430 outMaxAcquiredBufferCount);
Patrick Williams7c9fa272024-08-30 12:38:43 +00001431}
1432
1433void BLASTBufferQueue::BufferReleaseReader::interruptBlockingRead() {
Patrick Williams078d7362024-08-27 10:20:39 -05001434 if (eventfd_write(mEventFd.get(), 1) == -1) {
Patrick Williams7c9fa272024-08-30 12:38:43 +00001435 ALOGE("failed to notify dequeue event. errno=%d message='%s'", errno, strerror(errno));
1436 }
1437}
1438
Patrick Williams078d7362024-08-27 10:20:39 -05001439void BLASTBufferQueue::BufferReleaseReader::clearInterrupts() {
1440 eventfd_t value;
1441 if (eventfd_read(mEventFd.get(), &value) == -1 && errno != EWOULDBLOCK) {
1442 ALOGE("error while reading from eventfd. errno=%d message='%s'", errno, strerror(errno));
1443 }
1444}
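// Summary of the interrupt protocol implemented above (no additional behavior implied):
//   1. BBQBufferQueueCore::notifyBufferReleased() calls interruptBlockingRead(), which writes
//      to the eventfd.
//   2. A producer blocked in readBlocking() wakes on the eventfd, drains it via
//      clearInterrupts(), and returns WOULD_BLOCK.
//   3. BBQBufferQueueProducer::waitForBufferRelease() maps that to OK so BufferQueueProducer
//      re-checks for a free slot under its own lock.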
1445
Patrick Williams7c9fa272024-08-30 12:38:43 +00001446#endif
1447
Robert Carr78c25dd2019-08-15 14:10:33 -07001448} // namespace android