/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#undef LOG_TAG
#define LOG_TAG "BLASTBufferQueue"

#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#include <cutils/atomic.h>
#include <gui/BLASTBufferQueue.h>
#include <gui/BufferItemConsumer.h>
#include <gui/BufferQueueConsumer.h>
#include <gui/BufferQueueCore.h>
#include <gui/BufferQueueProducer.h>

#include <gui/FrameRateUtils.h>
#include <gui/GLConsumer.h>
#include <gui/IProducerListener.h>
#include <gui/Surface.h>
#include <gui/TraceUtils.h>
#include <utils/Singleton.h>
#include <utils/Trace.h>

#include <private/gui/ComposerService.h>
#include <private/gui/ComposerServiceAIDL.h>

#include <android-base/stringprintf.h>
#include <android-base/thread_annotations.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <chrono>

#include <com_android_graphics_libgui_flags.h>

using namespace com::android::graphics::libgui;
using namespace std::chrono_literals;
using android::base::unique_fd;

namespace {
inline const char* boolToString(bool b) {
    return b ? "true" : "false";
}
} // namespace

namespace android {

// Macros to include adapter info in log messages
#define BQA_LOGD(x, ...) \
    ALOGD("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGV(x, ...) \
    ALOGV("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
// enable logs for a single layer
//#define BQA_LOGV(x, ...) \
//    ALOGV_IF((strstr(mName.c_str(), "SurfaceView") != nullptr), "[%s](f:%u,a:%u) " x, \
//             mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)
#define BQA_LOGE(x, ...) \
    ALOGE("[%s](f:%u,a:%u) " x, mName.c_str(), mNumFrameAvailable, mNumAcquired, ##__VA_ARGS__)

#define BBQ_TRACE(x, ...) \
    ATRACE_FORMAT("%s - %s(f:%u,a:%u)" x, __FUNCTION__, mName.c_str(), mNumFrameAvailable, \
                  mNumAcquired, ##__VA_ARGS__)

#define UNIQUE_LOCK_WITH_ASSERTION(mutex) \
    std::unique_lock _lock{mutex};        \
    base::ScopedLockAssertion assumeLocked(mutex);

void BLASTBufferItemConsumer::onDisconnect() {
    Mutex::Autolock lock(mMutex);
    mPreviouslyConnected = mCurrentlyConnected;
    mCurrentlyConnected = false;
    if (mPreviouslyConnected) {
        mDisconnectEvents.push(mCurrentFrameNumber);
    }
    mFrameEventHistory.onDisconnect();
}

void BLASTBufferItemConsumer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                                       FrameEventHistoryDelta* outDelta) {
    Mutex::Autolock lock(mMutex);
    if (newTimestamps) {
        // BufferQueueProducer only adds a new timestamp on
        // queueBuffer
        mCurrentFrameNumber = newTimestamps->frameNumber;
        mFrameEventHistory.addQueue(*newTimestamps);
    }
    if (outDelta) {
        // frame event histories will be processed
        // only after the producer connects and requests
        // deltas for the first time. Forward this intent
        // to SF-side to turn event processing back on
        mPreviouslyConnected = mCurrentlyConnected;
        mCurrentlyConnected = true;
        mFrameEventHistory.getAndResetDelta(outDelta);
    }
}

void BLASTBufferItemConsumer::updateFrameTimestamps(
        uint64_t frameNumber, uint64_t previousFrameNumber, nsecs_t refreshStartTime,
        const sp<Fence>& glDoneFence, const sp<Fence>& presentFence,
        const sp<Fence>& prevReleaseFence, CompositorTiming compositorTiming, nsecs_t latchTime,
        nsecs_t dequeueReadyTime) {
    Mutex::Autolock lock(mMutex);

    // if the producer is not connected, don't bother updating,
    // the next producer that connects won't access this frame event
    if (!mCurrentlyConnected) return;
    std::shared_ptr<FenceTime> glDoneFenceTime = std::make_shared<FenceTime>(glDoneFence);
    std::shared_ptr<FenceTime> presentFenceTime = std::make_shared<FenceTime>(presentFence);
    std::shared_ptr<FenceTime> releaseFenceTime = std::make_shared<FenceTime>(prevReleaseFence);

    mFrameEventHistory.addLatch(frameNumber, latchTime);
    if (flags::frametimestamps_previousrelease()) {
        if (previousFrameNumber > 0) {
            mFrameEventHistory.addRelease(previousFrameNumber, dequeueReadyTime,
                                          std::move(releaseFenceTime));
        }
    } else {
        mFrameEventHistory.addRelease(frameNumber, dequeueReadyTime, std::move(releaseFenceTime));
    }

    mFrameEventHistory.addPreComposition(frameNumber, refreshStartTime);
    mFrameEventHistory.addPostComposition(frameNumber, glDoneFenceTime, presentFenceTime,
                                          compositorTiming);
}

void BLASTBufferItemConsumer::getConnectionEvents(uint64_t frameNumber, bool* needsDisconnect) {
    bool disconnect = false;
    Mutex::Autolock lock(mMutex);
    while (!mDisconnectEvents.empty() && mDisconnectEvents.front() <= frameNumber) {
        disconnect = true;
        mDisconnectEvents.pop();
    }
    if (needsDisconnect != nullptr) *needsDisconnect = disconnect;
}

void BLASTBufferItemConsumer::onSidebandStreamChanged() {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        sp<NativeHandle> stream = getSidebandStream();
        bbq->setSidebandStream(stream);
    }
}

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_SETFRAMERATE)
void BLASTBufferItemConsumer::onSetFrameRate(float frameRate, int8_t compatibility,
                                             int8_t changeFrameRateStrategy) {
    sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
    if (bbq != nullptr) {
        bbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
    }
}
#endif

void BLASTBufferItemConsumer::resizeFrameEventHistory(size_t newSize) {
    Mutex::Autolock lock(mMutex);
    mFrameEventHistory.resize(newSize);
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, bool updateDestinationFrame)
      : mSurfaceControl(nullptr),
        mSize(1, 1),
        mRequestedSize(mSize),
        mFormat(PIXEL_FORMAT_RGBA_8888),
        mTransactionReadyCallback(nullptr),
        mSyncTransaction(nullptr),
        mUpdateDestinationFrame(updateDestinationFrame) {
    createBufferQueue(&mProducer, &mConsumer);
    // since the adapter is in the client process, set dequeue timeout
    // explicitly so that dequeueBuffer will block
    mProducer->setDequeueTimeout(std::numeric_limits<int64_t>::max());

    mBufferItemConsumer = new BLASTBufferItemConsumer(mConsumer,
                                                      GraphicBuffer::USAGE_HW_COMPOSER |
                                                              GraphicBuffer::USAGE_HW_TEXTURE,
                                                      1, false, this);
    static std::atomic<uint32_t> nextId = 0;
    mProducerId = nextId++;
    mName = name + "#" + std::to_string(mProducerId);
    auto consumerName = mName + "(BLAST Consumer)" + std::to_string(mProducerId);
    mQueuedBufferTrace = "QueuedBuffer - " + mName + "BLAST#" + std::to_string(mProducerId);
    mBufferItemConsumer->setName(String8(consumerName.c_str()));
    mBufferItemConsumer->setFrameAvailableListener(this);

    ComposerServiceAIDL::getComposerService()->getMaxAcquiredBufferCount(&mMaxAcquiredBuffers);
    mBufferItemConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBuffers);
    mCurrentMaxAcquiredBufferCount = mMaxAcquiredBuffers;
    mNumAcquired = 0;
    mNumFrameAvailable = 0;

    TransactionCompletedListener::getInstance()->addQueueStallListener(
            [&](const std::string& reason) {
                std::function<void(const std::string&)> callbackCopy;
                {
                    std::unique_lock _lock{mMutex};
                    callbackCopy = mTransactionHangCallback;
                }
                if (callbackCopy) callbackCopy(reason);
            },
            this);

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
    std::unique_ptr<gui::BufferReleaseChannel::ConsumerEndpoint> bufferReleaseConsumer;
    gui::BufferReleaseChannel::open(mName, bufferReleaseConsumer, mBufferReleaseProducer);
    mBufferReleaseReader.emplace(std::move(bufferReleaseConsumer));
#endif

    BQA_LOGV("BLASTBufferQueue created");
}

BLASTBufferQueue::BLASTBufferQueue(const std::string& name, const sp<SurfaceControl>& surface,
                                   int width, int height, int32_t format)
      : BLASTBufferQueue(name) {
    update(surface, width, height, format);
}
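
// Typical client usage, shown here only as a hedged sketch (the variable names and the concrete
// width/height/format values are illustrative, not taken from this file):
//
//   sp<BLASTBufferQueue> bbq =
//           new BLASTBufferQueue("MyLayer", surfaceControl, width, height, PIXEL_FORMAT_RGBA_8888);
//   sp<Surface> surface = bbq->getSurface(true /* includeSurfaceControlHandle */);
//   // Buffers queued through `surface` are wrapped in SurfaceComposerClient transactions by the
//   // adapter and applied on its apply token.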

BLASTBufferQueue::~BLASTBufferQueue() {
    TransactionCompletedListener::getInstance()->removeQueueStallListener(this);
    if (mPendingTransactions.empty()) {
        return;
    }
    BQA_LOGE("Applying pending transactions on dtor %d",
             static_cast<uint32_t>(mPendingTransactions.size()));
    SurfaceComposerClient::Transaction t;
    mergePendingTransactions(&t, std::numeric_limits<uint64_t>::max() /* frameNumber */);
    // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
    t.setApplyToken(mApplyToken).apply(false, true);

    if (mTransactionReadyCallback) {
        mTransactionReadyCallback(mSyncTransaction);
    }
}

void BLASTBufferQueue::update(const sp<SurfaceControl>& surface, uint32_t width, uint32_t height,
                              int32_t format) {
    LOG_ALWAYS_FATAL_IF(surface == nullptr, "BLASTBufferQueue: mSurfaceControl must not be NULL");

    std::lock_guard _lock{mMutex};
    if (mFormat != format) {
        mFormat = format;
        mBufferItemConsumer->setDefaultBufferFormat(convertBufferFormat(format));
    }

    const bool surfaceControlChanged = !SurfaceControl::isSameSurface(mSurfaceControl, surface);
    if (surfaceControlChanged && mSurfaceControl != nullptr) {
        BQA_LOGD("Updating SurfaceControl without recreating BBQ");
    }
    bool applyTransaction = false;

    // Always update the native object even though they might have the same layer handle, so we can
    // get the updated transform hint from WM.
    mSurfaceControl = surface;
    SurfaceComposerClient::Transaction t;
    if (surfaceControlChanged) {
        t.setFlags(mSurfaceControl, layer_state_t::eEnableBackpressure,
                   layer_state_t::eEnableBackpressure);
        if (mBufferReleaseProducer) {
            t.setBufferReleaseChannel(mSurfaceControl, mBufferReleaseProducer);
        }
        applyTransaction = true;
    }
    mTransformHint = mSurfaceControl->getTransformHint();
    mBufferItemConsumer->setTransformHint(mTransformHint);
    BQA_LOGV("update width=%d height=%d format=%d mTransformHint=%d", width, height, format,
             mTransformHint);

    ui::Size newSize(width, height);
    if (mRequestedSize != newSize) {
        mRequestedSize.set(newSize);
        mBufferItemConsumer->setDefaultBufferSize(mRequestedSize.width, mRequestedSize.height);
        if (mLastBufferInfo.scalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
            // If the buffer supports scaling, update the frame immediately since the client may
            // want to scale the existing buffer to the new size.
            mSize = mRequestedSize;
            if (mUpdateDestinationFrame) {
                t.setDestinationFrame(mSurfaceControl, Rect(newSize));
                applyTransaction = true;
            }
        }
    }
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t.setApplyToken(mApplyToken).apply(false, true);
    }
}

static std::optional<SurfaceControlStats> findMatchingStat(
        const std::vector<SurfaceControlStats>& stats, const sp<SurfaceControl>& sc) {
    for (auto stat : stats) {
        if (SurfaceControl::isSameSurface(sc, stat.surfaceControl)) {
            return stat;
        }
    }
    return std::nullopt;
}

static void transactionCommittedCallbackThunk(void* context, nsecs_t latchTime,
                                              const sp<Fence>& presentFence,
                                              const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCommittedCallback(latchTime, presentFence, stats);
}

void BLASTBufferQueue::transactionCommittedCallback(nsecs_t /*latchTime*/,
                                                    const sp<Fence>& /*presentFence*/,
                                                    const std::vector<SurfaceControlStats>& stats) {
    {
        std::lock_guard _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCommittedCallback");
        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            std::optional<SurfaceControlStats> stat = findMatchingStat(stats, pendingSC);
            if (stat) {
                uint64_t currFrameNumber = stat->frameEventStats.frameNumber;

                // We need to check if we were waiting for a transaction callback in order to
                // process any pending buffers and unblock. It's possible to get transaction
                // callbacks for previous requests so we need to ensure that there are no pending
                // frame numbers that were in a sync. We remove the frame from mSyncedFrameNumbers
                // set and then check if it's empty. If there are no more pending syncs, we can
                // proceed with flushing the shadow queue.
                mSyncedFrameNumbers.erase(currFrameNumber);
                if (mSyncedFrameNumbers.empty()) {
                    flushShadowQueue();
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCommittedCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }
        decStrong((void*)transactionCommittedCallbackThunk);
    }
}

static void transactionCallbackThunk(void* context, nsecs_t latchTime,
                                     const sp<Fence>& presentFence,
                                     const std::vector<SurfaceControlStats>& stats) {
    if (context == nullptr) {
        return;
    }
    sp<BLASTBufferQueue> bq = static_cast<BLASTBufferQueue*>(context);
    bq->transactionCallback(latchTime, presentFence, stats);
}

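// Handles the transaction-complete callback for a previously queued buffer: updates the cached
// transform hint, forwards frame timestamps to the consumer when the frame was actually latched,
// and fakes release callbacks for older submitted buffers whose releases may otherwise never
// arrive.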
void BLASTBufferQueue::transactionCallback(nsecs_t /*latchTime*/, const sp<Fence>& /*presentFence*/,
                                           const std::vector<SurfaceControlStats>& stats) {
    {
        std::lock_guard _lock{mMutex};
        BBQ_TRACE();
        BQA_LOGV("transactionCallback");

        if (!mSurfaceControlsWithPendingCallback.empty()) {
            sp<SurfaceControl> pendingSC = mSurfaceControlsWithPendingCallback.front();
            mSurfaceControlsWithPendingCallback.pop();
            std::optional<SurfaceControlStats> statsOptional = findMatchingStat(stats, pendingSC);
            if (statsOptional) {
                SurfaceControlStats stat = *statsOptional;
                if (stat.transformHint) {
                    mTransformHint = *stat.transformHint;
                    mBufferItemConsumer->setTransformHint(mTransformHint);
                    BQA_LOGV("updated mTransformHint=%d", mTransformHint);
                }
                // Update frametime stamps if the frame was latched and presented, indicated by a
                // valid latch time.
                if (stat.latchTime > 0) {
                    mBufferItemConsumer
                            ->updateFrameTimestamps(stat.frameEventStats.frameNumber,
                                                    stat.frameEventStats.previousFrameNumber,
                                                    stat.frameEventStats.refreshStartTime,
                                                    stat.frameEventStats.gpuCompositionDoneFence,
                                                    stat.presentFence, stat.previousReleaseFence,
                                                    stat.frameEventStats.compositorTiming,
                                                    stat.latchTime,
                                                    stat.frameEventStats.dequeueReadyTime);
                }
                auto currFrameNumber = stat.frameEventStats.frameNumber;
                std::vector<ReleaseCallbackId> staleReleases;
                for (const auto& [key, value] : mSubmitted) {
                    if (currFrameNumber > key.framenumber) {
                        staleReleases.push_back(key);
                    }
                }
                for (const auto& staleRelease : staleReleases) {
                    releaseBufferCallbackLocked(staleRelease,
                                                stat.previousReleaseFence
                                                        ? stat.previousReleaseFence
                                                        : Fence::NO_FENCE,
                                                stat.currentMaxAcquiredBufferCount,
                                                true /* fakeRelease */);
                }
            } else {
                BQA_LOGE("Failed to find matching SurfaceControl in transactionCallback");
            }
        } else {
            BQA_LOGE("No matching SurfaceControls found: mSurfaceControlsWithPendingCallback was "
                     "empty.");
        }

        decStrong((void*)transactionCallbackThunk);
    }
}

// Unlike transactionCallbackThunk the release buffer callback does not extend the life of the
// BBQ. This is because if the BBQ is destroyed, then the buffers will be released by the client.
// So we pass in a weak pointer to the BBQ and if it is still alive, then we release the buffer.
// Otherwise, this is a no-op.
static void releaseBufferCallbackThunk(wp<BLASTBufferQueue> context, const ReleaseCallbackId& id,
                                       const sp<Fence>& releaseFence,
                                       std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    sp<BLASTBufferQueue> blastBufferQueue = context.promote();
    if (blastBufferQueue) {
        blastBufferQueue->releaseBufferCallback(id, releaseFence, currentMaxAcquiredBufferCount);
    } else {
        ALOGV("releaseBufferCallbackThunk %s blastBufferQueue is dead", id.to_string().c_str());
    }
}

void BLASTBufferQueue::flushShadowQueue() {
    BQA_LOGV("flushShadowQueue");
    int numFramesToFlush = mNumFrameAvailable;
    while (numFramesToFlush > 0) {
        acquireNextBufferLocked(std::nullopt);
        numFramesToFlush--;
    }
}

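// Entry point for buffer releases delivered over binder. When the buffer release channel is
// enabled, releases that arrived over the channel socket are drained here as well so both paths
// are processed under the same lock.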
void BLASTBufferQueue::releaseBufferCallback(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount) {
    std::lock_guard _lock{mMutex};
    BBQ_TRACE();
    releaseBufferCallbackLocked(id, releaseFence, currentMaxAcquiredBufferCount,
                                false /* fakeRelease */);
    if (!mBufferReleaseReader) {
        return;
    }
    // Drain the buffer release channel socket
    while (true) {
        ReleaseCallbackId releaseCallbackId;
        sp<Fence> releaseFence;
        if (status_t status =
                    mBufferReleaseReader->readNonBlocking(releaseCallbackId, releaseFence);
            status != OK) {
            break;
        }
        releaseBufferCallbackLocked(releaseCallbackId, releaseFence, std::nullopt,
                                    false /* fakeRelease */);
    }
}

void BLASTBufferQueue::releaseBufferCallbackLocked(
        const ReleaseCallbackId& id, const sp<Fence>& releaseFence,
        std::optional<uint32_t> currentMaxAcquiredBufferCount, bool fakeRelease) {
    ATRACE_CALL();
    BQA_LOGV("releaseBufferCallback %s", id.to_string().c_str());

    // Calculate how many buffers we need to hold before we release them back
    // to the buffer queue. This will prevent higher latency when we are running
    // on a lower refresh rate than the max supported. We only do that for EGL
    // clients as others don't care about latency
    const auto it = mSubmitted.find(id);
    const bool isEGL = it != mSubmitted.end() && it->second.mApi == NATIVE_WINDOW_API_EGL;

    if (currentMaxAcquiredBufferCount) {
        mCurrentMaxAcquiredBufferCount = *currentMaxAcquiredBufferCount;
    }

    const uint32_t numPendingBuffersToHold =
            isEGL ? std::max(0, mMaxAcquiredBuffers - (int32_t)mCurrentMaxAcquiredBufferCount) : 0;

    auto rb = ReleasedBuffer{id, releaseFence};
    if (std::find(mPendingRelease.begin(), mPendingRelease.end(), rb) == mPendingRelease.end()) {
        mPendingRelease.emplace_back(rb);
        if (fakeRelease) {
            BQA_LOGE("Faking releaseBufferCallback from transactionCompleteCallback %" PRIu64,
                     id.framenumber);
            BBQ_TRACE("FakeReleaseCallback");
        }
    }

    // Release all buffers that are beyond the ones that we need to hold
    while (mPendingRelease.size() > numPendingBuffersToHold) {
        const auto releasedBuffer = mPendingRelease.front();
        mPendingRelease.pop_front();
        releaseBuffer(releasedBuffer.callbackId, releasedBuffer.releaseFence);
        // Don't process the transactions here if mSyncedFrameNumbers is not empty. That means
        // there are still transactions that have sync buffers in them that have not been applied
        // or dropped. Instead, let onFrameAvailable handle processing them since it will merge
        // with the syncTransaction.
        if (mSyncedFrameNumbers.empty()) {
            acquireNextBufferLocked(std::nullopt);
        }
    }

    ATRACE_INT("PendingRelease", mPendingRelease.size());
    ATRACE_INT(mQueuedBufferTrace.c_str(),
               mNumFrameAvailable + mNumAcquired - mPendingRelease.size());
    mCallbackCV.notify_all();
}

void BLASTBufferQueue::releaseBuffer(const ReleaseCallbackId& callbackId,
                                     const sp<Fence>& releaseFence) {
    auto it = mSubmitted.find(callbackId);
    if (it == mSubmitted.end()) {
        return;
    }
    mNumAcquired--;
    updateDequeueShouldBlockLocked();
    if (mBufferReleaseReader) {
        mBufferReleaseReader->interruptBlockingRead();
    }
    BBQ_TRACE("frame=%" PRIu64, callbackId.framenumber);
    BQA_LOGV("released %s", callbackId.to_string().c_str());
    mBufferItemConsumer->releaseBuffer(it->second, releaseFence);
    mSubmitted.erase(it);
    // Remove the frame number from mSyncedFrameNumbers since we can get a release callback
    // without getting a transaction committed if the buffer was dropped.
    mSyncedFrameNumbers.erase(callbackId.framenumber);
}

static ui::Size getBufferSize(const BufferItem& item) {
    uint32_t bufWidth = item.mGraphicBuffer->getWidth();
    uint32_t bufHeight = item.mGraphicBuffer->getHeight();

    // Take the buffer's orientation into account
    if (item.mTransform & ui::Transform::ROT_90) {
        std::swap(bufWidth, bufHeight);
    }
    return ui::Size(bufWidth, bufHeight);
}

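// Acquires the next buffer from the BufferQueue and stages it into a SurfaceComposerClient
// transaction (setBuffer plus dataspace, crop, transform and frame timeline state). If the caller
// supplied a transaction (the sync case) the buffer is staged there without applying; otherwise
// the local transaction is applied on the apply token as a one-way call.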
status_t BLASTBufferQueue::acquireNextBufferLocked(
        const std::optional<SurfaceComposerClient::Transaction*> transaction) {
    // Check if we have frames available and we have not acquired the maximum number of buffers.
    // Even with this check, the consumer can fail to acquire an additional buffer if the consumer
    // has already acquired (mMaxAcquiredBuffers + 1) and the new buffer is not droppable. In this
    // case mBufferItemConsumer->acquireBuffer will return with NO_BUFFER_AVAILABLE.
    if (mNumFrameAvailable == 0) {
        BQA_LOGV("Can't acquire next buffer. No available frames");
        return BufferQueue::NO_BUFFER_AVAILABLE;
    }

    if (mNumAcquired >= (mMaxAcquiredBuffers + 2)) {
        BQA_LOGV("Can't acquire next buffer. Already acquired max frames %d max:%d + 2",
                 mNumAcquired, mMaxAcquiredBuffers);
        return BufferQueue::NO_BUFFER_AVAILABLE;
    }

    if (mSurfaceControl == nullptr) {
        BQA_LOGE("ERROR : surface control is null");
        return NAME_NOT_FOUND;
    }

    SurfaceComposerClient::Transaction localTransaction;
    bool applyTransaction = true;
    SurfaceComposerClient::Transaction* t = &localTransaction;
    if (transaction) {
        t = *transaction;
        applyTransaction = false;
    }

    BufferItem bufferItem;

    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status == BufferQueue::NO_BUFFER_AVAILABLE) {
        BQA_LOGV("Failed to acquire a buffer, err=NO_BUFFER_AVAILABLE");
        return status;
    } else if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer, err=%s", statusToString(status).c_str());
        return status;
    }

    auto buffer = bufferItem.mGraphicBuffer;
    mNumFrameAvailable--;
    updateDequeueShouldBlockLocked();
    BBQ_TRACE("frame=%" PRIu64, bufferItem.mFrameNumber);

    if (buffer == nullptr) {
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        BQA_LOGE("Buffer was empty");
        return BAD_VALUE;
    }

    if (rejectBuffer(bufferItem)) {
        BQA_LOGE("rejecting buffer:active_size=%dx%d, requested_size=%dx%d "
                 "buffer{size=%dx%d transform=%d}",
                 mSize.width, mSize.height, mRequestedSize.width, mRequestedSize.height,
                 buffer->getWidth(), buffer->getHeight(), bufferItem.mTransform);
        mBufferItemConsumer->releaseBuffer(bufferItem, Fence::NO_FENCE);
        return acquireNextBufferLocked(transaction);
    }

    mNumAcquired++;
    updateDequeueShouldBlockLocked();
    mLastAcquiredFrameNumber = bufferItem.mFrameNumber;
    ReleaseCallbackId releaseCallbackId(buffer->getId(), mLastAcquiredFrameNumber);
    mSubmitted[releaseCallbackId] = bufferItem;

    bool needsDisconnect = false;
    mBufferItemConsumer->getConnectionEvents(bufferItem.mFrameNumber, &needsDisconnect);

    // if producer disconnected before, notify SurfaceFlinger
    if (needsDisconnect) {
        t->notifyProducerDisconnect(mSurfaceControl);
    }

    // Ensure BLASTBufferQueue stays alive until we receive the transaction complete callback.
    incStrong((void*)transactionCallbackThunk);

    // Only update mSize for destination bounds if the incoming buffer matches the requested size.
    // Otherwise, it could cause stretching since the destination bounds will update before the
    // buffer with the new size is acquired.
    if (mRequestedSize == getBufferSize(bufferItem) ||
        bufferItem.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        mSize = mRequestedSize;
    }
    Rect crop = computeCrop(bufferItem);
    mLastBufferInfo.update(true /* hasBuffer */, bufferItem.mGraphicBuffer->getWidth(),
                           bufferItem.mGraphicBuffer->getHeight(), bufferItem.mTransform,
                           bufferItem.mScalingMode, crop);

    auto releaseBufferCallback =
            std::bind(releaseBufferCallbackThunk, wp<BLASTBufferQueue>(this) /* callbackContext */,
                      std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
    sp<Fence> fence = bufferItem.mFence ? new Fence(bufferItem.mFence->dup()) : Fence::NO_FENCE;

    nsecs_t dequeueTime = -1;
    {
        std::lock_guard _lock{mTimestampMutex};
        auto dequeueTimeIt = mDequeueTimestamps.find(buffer->getId());
        if (dequeueTimeIt != mDequeueTimestamps.end()) {
            dequeueTime = dequeueTimeIt->second;
            mDequeueTimestamps.erase(dequeueTimeIt);
        }
    }

    t->setBuffer(mSurfaceControl, buffer, fence, bufferItem.mFrameNumber, mProducerId,
                 releaseBufferCallback, dequeueTime);
    t->setDataspace(mSurfaceControl, static_cast<ui::Dataspace>(bufferItem.mDataSpace));
    t->setHdrMetadata(mSurfaceControl, bufferItem.mHdrMetadata);
    t->setSurfaceDamageRegion(mSurfaceControl, bufferItem.mSurfaceDamage);
    t->addTransactionCompletedCallback(transactionCallbackThunk, static_cast<void*>(this));

    mSurfaceControlsWithPendingCallback.push(mSurfaceControl);

    if (mUpdateDestinationFrame) {
        t->setDestinationFrame(mSurfaceControl, Rect(mSize));
    } else {
        const bool ignoreDestinationFrame =
                bufferItem.mScalingMode == NATIVE_WINDOW_SCALING_MODE_FREEZE;
        t->setFlags(mSurfaceControl,
                    ignoreDestinationFrame ? layer_state_t::eIgnoreDestinationFrame : 0,
                    layer_state_t::eIgnoreDestinationFrame);
    }
    t->setBufferCrop(mSurfaceControl, crop);
    t->setTransform(mSurfaceControl, bufferItem.mTransform);
    t->setTransformToDisplayInverse(mSurfaceControl, bufferItem.mTransformToDisplayInverse);
    t->setAutoRefresh(mSurfaceControl, bufferItem.mAutoRefresh);
    if (!bufferItem.mIsAutoTimestamp) {
        t->setDesiredPresentTime(bufferItem.mTimestamp);
    }

    // Drop stale frame timeline infos
    while (!mPendingFrameTimelines.empty() &&
           mPendingFrameTimelines.front().first < bufferItem.mFrameNumber) {
        ATRACE_FORMAT_INSTANT("dropping stale frameNumber: %" PRIu64 " vsyncId: %" PRId64,
                              mPendingFrameTimelines.front().first,
                              mPendingFrameTimelines.front().second.vsyncId);
        mPendingFrameTimelines.pop();
    }

    if (!mPendingFrameTimelines.empty() &&
        mPendingFrameTimelines.front().first == bufferItem.mFrameNumber) {
        ATRACE_FORMAT_INSTANT("Transaction::setFrameTimelineInfo frameNumber: %" PRIu64
                              " vsyncId: %" PRId64,
                              bufferItem.mFrameNumber,
                              mPendingFrameTimelines.front().second.vsyncId);
        t->setFrameTimelineInfo(mPendingFrameTimelines.front().second);
        mPendingFrameTimelines.pop();
    }

    mergePendingTransactions(t, bufferItem.mFrameNumber);
    if (applyTransaction) {
        // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
        t->setApplyToken(mApplyToken).apply(false, true);
        mAppliedLastTransaction = true;
        mLastAppliedFrameNumber = bufferItem.mFrameNumber;
    } else {
        t->setBufferHasBarrier(mSurfaceControl, mLastAppliedFrameNumber);
        mAppliedLastTransaction = false;
    }

    BQA_LOGV("acquireNextBufferLocked size=%dx%d mFrameNumber=%" PRIu64
             " applyTransaction=%s mTimestamp=%" PRId64 "%s mPendingTransactions.size=%d"
             " graphicBufferId=%" PRIu64 "%s transform=%d",
             mSize.width, mSize.height, bufferItem.mFrameNumber, boolToString(applyTransaction),
             bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp ? "(auto)" : "",
             static_cast<uint32_t>(mPendingTransactions.size()), bufferItem.mGraphicBuffer->getId(),
             bufferItem.mAutoRefresh ? " mAutoRefresh" : "", bufferItem.mTransform);
    return OK;
}

Rect BLASTBufferQueue::computeCrop(const BufferItem& item) {
    if (item.mScalingMode == NATIVE_WINDOW_SCALING_MODE_SCALE_CROP) {
        return GLConsumer::scaleDownCrop(item.mCrop, mSize.width, mSize.height);
    }
    return item.mCrop;
}

void BLASTBufferQueue::acquireAndReleaseBuffer() {
    BBQ_TRACE();
    BufferItem bufferItem;
    status_t status =
            mBufferItemConsumer->acquireBuffer(&bufferItem, 0 /* expectedPresent */, false);
    if (status != OK) {
        BQA_LOGE("Failed to acquire a buffer in acquireAndReleaseBuffer, err=%s",
                 statusToString(status).c_str());
        return;
    }
    mNumFrameAvailable--;
    updateDequeueShouldBlockLocked();
    mBufferItemConsumer->releaseBuffer(bufferItem, bufferItem.mFence);
}

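// Called by the BufferQueue when a producer queues a buffer. In the normal (non-sync) case the
// buffer is acquired and sent to SurfaceFlinger immediately. When a sync transaction was requested
// via syncNextTransaction, the queued buffer is staged into mSyncTransaction (flushing or waiting
// out any earlier frames first) so the caller's callback receives the correct frame.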
void BLASTBufferQueue::onFrameAvailable(const BufferItem& item) {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;

    {
        UNIQUE_LOCK_WITH_ASSERTION(mMutex);
        BBQ_TRACE();
        bool waitForTransactionCallback = !mSyncedFrameNumbers.empty();

        const bool syncTransactionSet = mTransactionReadyCallback != nullptr;
        BQA_LOGV("onFrameAvailable-start syncTransactionSet=%s", boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            // If we are going to re-use the same mSyncTransaction, release the buffer that may
            // already be set in the Transaction. This is to allow us a free slot early to continue
            // processing a new buffer.
            if (!mAcquireSingleBuffer) {
                auto bufferData = mSyncTransaction->getAndClearBuffer(mSurfaceControl);
                if (bufferData) {
                    BQA_LOGD("Releasing previous buffer when syncing: framenumber=%" PRIu64,
                             bufferData->frameNumber);
                    releaseBuffer(bufferData->generateReleaseCallbackId(),
                                  bufferData->acquireFence);
                }
            }

            if (waitForTransactionCallback) {
                // We are waiting on a previous sync's transaction callback so allow another sync
                // transaction to proceed.
                //
                // We need to first flush out the transactions that were in between the two syncs.
                // We do this by merging them into mSyncTransaction so any buffer merging will get
                // a release callback invoked.
                while (mNumFrameAvailable > 0) {
                    // flush out the shadow queue
                    acquireAndReleaseBuffer();
                }
            } else {
                // Make sure the frame available count is 0 before proceeding with a sync to ensure
                // the correct frame is used for the sync. The only way mNumFrameAvailable would be
                // greater than 0 is if we already ran out of buffers previously. This means we
                // need to flush the buffers before proceeding with the sync.
                while (mNumFrameAvailable > 0) {
                    BQA_LOGD("waiting until no queued buffers");
                    mCallbackCV.wait(_lock);
                }
            }
        }

        // add to shadow queue
        mNumDequeued--;
        mNumFrameAvailable++;
        updateDequeueShouldBlockLocked();
        if (waitForTransactionCallback && mNumFrameAvailable >= 2) {
            acquireAndReleaseBuffer();
        }
        ATRACE_INT(mQueuedBufferTrace.c_str(),
                   mNumFrameAvailable + mNumAcquired - mPendingRelease.size());

        BQA_LOGV("onFrameAvailable framenumber=%" PRIu64 " syncTransactionSet=%s",
                 item.mFrameNumber, boolToString(syncTransactionSet));

        if (syncTransactionSet) {
            // Add to mSyncedFrameNumbers before waiting in case any buffers are released
            // while waiting for a free buffer. The release and commit callback will try to
            // acquire buffers if there are any available, but we don't want it to acquire
            // in the case where a sync transaction wants the buffer.
            mSyncedFrameNumbers.emplace(item.mFrameNumber);
            // If there's no available buffer and we're in a sync transaction, we need to wait
            // instead of returning since we guarantee a buffer will be acquired for the sync.
            while (acquireNextBufferLocked(mSyncTransaction) == BufferQueue::NO_BUFFER_AVAILABLE) {
                BQA_LOGD("waiting for available buffer");
                mCallbackCV.wait(_lock);
            }

            // Only need a commit callback when syncing to ensure the buffer that's synced has been
            // sent to SF
            incStrong((void*)transactionCommittedCallbackThunk);
            mSyncTransaction->addTransactionCommittedCallback(transactionCommittedCallbackThunk,
                                                              static_cast<void*>(this));
            if (mAcquireSingleBuffer) {
                prevCallback = mTransactionReadyCallback;
                prevTransaction = mSyncTransaction;
                mTransactionReadyCallback = nullptr;
                mSyncTransaction = nullptr;
            }
        } else if (!waitForTransactionCallback) {
            acquireNextBufferLocked(std::nullopt);
        }
    }
    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::onFrameReplaced(const BufferItem& item) {
    BQA_LOGV("onFrameReplaced framenumber=%" PRIu64, item.mFrameNumber);
    // Do nothing since we are not storing unacquired buffer items locally.
}

void BLASTBufferQueue::onFrameDequeued(const uint64_t bufferId) {
    std::lock_guard _lock{mTimestampMutex};
    mDequeueTimestamps[bufferId] = systemTime();
    mNumDequeued++;
}

void BLASTBufferQueue::onFrameCancelled(const uint64_t bufferId) {
    {
        std::lock_guard _lock{mTimestampMutex};
        mDequeueTimestamps.erase(bufferId);
    }

    {
        std::lock_guard lock{mMutex};
        mNumDequeued--;
        updateDequeueShouldBlockLocked();
    }

    if (mBufferReleaseReader) {
        mBufferReleaseReader->interruptBlockingRead();
    }
};

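// syncNextTransaction, stopContinuousSyncTransaction and clearSyncTransaction manage the optional
// sync mode: the caller registers a callback that is handed a transaction containing the next
// queued buffer instead of having BLASTBufferQueue apply that buffer itself.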
bool BLASTBufferQueue::syncNextTransaction(
        std::function<void(SurfaceComposerClient::Transaction*)> callback,
        bool acquireSingleBuffer) {
    LOG_ALWAYS_FATAL_IF(!callback,
                        "BLASTBufferQueue: callback passed in to syncNextTransaction must not be "
                        "NULL");

    std::lock_guard _lock{mMutex};
    BBQ_TRACE();
    if (mTransactionReadyCallback) {
        ALOGW("Attempting to overwrite transaction callback in syncNextTransaction");
        return false;
    }

    mTransactionReadyCallback = callback;
    mSyncTransaction = new SurfaceComposerClient::Transaction();
    mAcquireSingleBuffer = acquireSingleBuffer;
    return true;
}

void BLASTBufferQueue::stopContinuousSyncTransaction() {
    std::function<void(SurfaceComposerClient::Transaction*)> prevCallback = nullptr;
    SurfaceComposerClient::Transaction* prevTransaction = nullptr;
    {
        std::lock_guard _lock{mMutex};
        if (mAcquireSingleBuffer || !mTransactionReadyCallback) {
            ALOGW("Attempting to stop continuous sync when none are active");
            return;
        }

        prevCallback = mTransactionReadyCallback;
        prevTransaction = mSyncTransaction;

        mTransactionReadyCallback = nullptr;
        mSyncTransaction = nullptr;
        mAcquireSingleBuffer = true;
    }

    if (prevCallback) {
        prevCallback(prevTransaction);
    }
}

void BLASTBufferQueue::clearSyncTransaction() {
    std::lock_guard _lock{mMutex};
    if (!mAcquireSingleBuffer) {
        ALOGW("Attempting to clear sync transaction when none are active");
        return;
    }

    mTransactionReadyCallback = nullptr;
    mSyncTransaction = nullptr;
}

bool BLASTBufferQueue::rejectBuffer(const BufferItem& item) {
    if (item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE) {
        // Only reject buffers if scaling mode is freeze.
        return false;
    }

    ui::Size bufferSize = getBufferSize(item);
    if (mRequestedSize != mSize && mRequestedSize == bufferSize) {
        return false;
    }

    // reject buffers if the buffer size doesn't match.
    return mSize != bufferSize;
}

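// Recomputed whenever the dequeued/available/acquired counts change so the producer side can
// decide whether dequeueBuffer needs to block.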
void BLASTBufferQueue::updateDequeueShouldBlockLocked() {
    int32_t buffersInUse = mNumDequeued + mNumFrameAvailable + mNumAcquired;
    int32_t maxBufferCount = std::min(mMaxAcquiredBuffers + mMaxDequeuedBuffers, kMaxBufferCount);
    bool bufferAvailable = buffersInUse < maxBufferCount;
    // BLASTBufferQueueProducer should block until a buffer is released if
    // (1) There are no free buffers available.
    // (2) We're not in async mode. In async mode, BufferQueueProducer::dequeueBuffer returns
    //     WOULD_BLOCK instead of blocking when there are no free buffers.
    // (3) We're not in shared buffer mode. In shared buffer mode, both the producer and consumer
    //     can access the same buffer simultaneously. BufferQueueProducer::dequeueBuffer returns
    //     the shared buffer immediately instead of blocking.
    mDequeueShouldBlock = !(bufferAvailable || mAsyncMode || mSharedBufferMode);
    ATRACE_INT("Dequeued", mNumDequeued);
    ATRACE_INT("DequeueShouldBlock", mDequeueShouldBlock);
}

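// BBQSurface is the Surface implementation handed out by getSurface(). It forwards setFrameRate
// and setFrameTimelineInfo to the owning BLASTBufferQueue until destroy() is called, and runs
// allocateBuffers on a detached thread.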
Robert Carr05086b22020-10-13 18:22:51 -0700953class BBQSurface : public Surface {
Robert Carr9c006e02020-10-14 13:41:57 -0700954private:
Vishnu Nair95b6d512021-08-30 15:31:08 -0700955 std::mutex mMutex;
Chavi Weingartene0237bb2023-02-06 21:48:32 +0000956 sp<BLASTBufferQueue> mBbq GUARDED_BY(mMutex);
957 bool mDestroyed GUARDED_BY(mMutex) = false;
Vishnu Nair95b6d512021-08-30 15:31:08 -0700958
Robert Carr05086b22020-10-13 18:22:51 -0700959public:
Vishnu Nair992496b2020-10-22 17:27:21 -0700960 BBQSurface(const sp<IGraphicBufferProducer>& igbp, bool controlledByApp,
961 const sp<IBinder>& scHandle, const sp<BLASTBufferQueue>& bbq)
962 : Surface(igbp, controlledByApp, scHandle), mBbq(bbq) {}
Robert Carr9c006e02020-10-14 13:41:57 -0700963
Robert Carr05086b22020-10-13 18:22:51 -0700964 void allocateBuffers() override {
965 uint32_t reqWidth = mReqWidth ? mReqWidth : mUserWidth;
966 uint32_t reqHeight = mReqHeight ? mReqHeight : mUserHeight;
967 auto gbp = getIGraphicBufferProducer();
968 std::thread ([reqWidth, reqHeight, gbp=getIGraphicBufferProducer(),
969 reqFormat=mReqFormat, reqUsage=mReqUsage] () {
970 gbp->allocateBuffers(reqWidth, reqHeight,
971 reqFormat, reqUsage);
972
973 }).detach();
974 }
Robert Carr9c006e02020-10-14 13:41:57 -0700975
Marin Shalamanovc5986772021-03-16 16:09:49 +0100976 status_t setFrameRate(float frameRate, int8_t compatibility,
977 int8_t changeFrameRateStrategy) override {
Ady Abraham6cdd3fd2023-09-07 18:45:58 -0700978 if (flags::bq_setframerate()) {
979 return Surface::setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
980 }
981
Chavi Weingartene0237bb2023-02-06 21:48:32 +0000982 std::lock_guard _lock{mMutex};
Vishnu Nair95b6d512021-08-30 15:31:08 -0700983 if (mDestroyed) {
984 return DEAD_OBJECT;
985 }
Marin Shalamanovc5986772021-03-16 16:09:49 +0100986 if (!ValidateFrameRate(frameRate, compatibility, changeFrameRateStrategy,
987 "BBQSurface::setFrameRate")) {
Robert Carr9c006e02020-10-14 13:41:57 -0700988 return BAD_VALUE;
989 }
Marin Shalamanovc5986772021-03-16 16:09:49 +0100990 return mBbq->setFrameRate(frameRate, compatibility, changeFrameRateStrategy);
Robert Carr9c006e02020-10-14 13:41:57 -0700991 }
Robert Carr9b611b72020-10-19 12:00:23 -0700992
Ady Abrahamd6e409e2023-01-19 16:07:31 -0800993 status_t setFrameTimelineInfo(uint64_t frameNumber,
994 const FrameTimelineInfo& frameTimelineInfo) override {
Chavi Weingartene0237bb2023-02-06 21:48:32 +0000995 std::lock_guard _lock{mMutex};
Vishnu Nair95b6d512021-08-30 15:31:08 -0700996 if (mDestroyed) {
997 return DEAD_OBJECT;
998 }
Ady Abrahamd6e409e2023-01-19 16:07:31 -0800999 return mBbq->setFrameTimelineInfo(frameNumber, frameTimelineInfo);
Robert Carr9b611b72020-10-19 12:00:23 -07001000 }
Vishnu Nair95b6d512021-08-30 15:31:08 -07001001
1002 void destroy() override {
1003 Surface::destroy();
1004
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001005 std::lock_guard _lock{mMutex};
Vishnu Nair95b6d512021-08-30 15:31:08 -07001006 mDestroyed = true;
1007 mBbq = nullptr;
1008 }
Robert Carr05086b22020-10-13 18:22:51 -07001009};
1010
Robert Carr9c006e02020-10-14 13:41:57 -07001011// TODO: Can we coalesce this with frame updates? Need to confirm
1012// no timing issues.
Marin Shalamanov46084422020-10-13 12:33:42 +02001013status_t BLASTBufferQueue::setFrameRate(float frameRate, int8_t compatibility,
1014 bool shouldBeSeamless) {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001015 std::lock_guard _lock{mMutex};
Robert Carr9c006e02020-10-14 13:41:57 -07001016 SurfaceComposerClient::Transaction t;
1017
Marin Shalamanov46084422020-10-13 12:33:42 +02001018 return t.setFrameRate(mSurfaceControl, frameRate, compatibility, shouldBeSeamless).apply();
Robert Carr9c006e02020-10-14 13:41:57 -07001019}
1020
Ady Abrahamd6e409e2023-01-19 16:07:31 -08001021status_t BLASTBufferQueue::setFrameTimelineInfo(uint64_t frameNumber,
1022 const FrameTimelineInfo& frameTimelineInfo) {
1023 ATRACE_FORMAT("%s(%s) frameNumber: %" PRIu64 " vsyncId: %" PRId64, __func__, mName.c_str(),
1024 frameNumber, frameTimelineInfo.vsyncId);
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001025 std::lock_guard _lock{mMutex};
Ady Abrahamd6e409e2023-01-19 16:07:31 -08001026 mPendingFrameTimelines.push({frameNumber, frameTimelineInfo});
Jorim Jaggia3fe67b2020-12-01 00:24:33 +01001027 return OK;
Robert Carr9b611b72020-10-19 12:00:23 -07001028}
1029
Hongguang Chen621ec582021-02-16 15:42:35 -08001030void BLASTBufferQueue::setSidebandStream(const sp<NativeHandle>& stream) {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001031 std::lock_guard _lock{mMutex};
Hongguang Chen621ec582021-02-16 15:42:35 -08001032 SurfaceComposerClient::Transaction t;
1033
1034 t.setSidebandStream(mSurfaceControl, stream).apply();
1035}
1036
Vishnu Nair992496b2020-10-22 17:27:21 -07001037sp<Surface> BLASTBufferQueue::getSurface(bool includeSurfaceControlHandle) {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001038 std::lock_guard _lock{mMutex};
Vishnu Nair992496b2020-10-22 17:27:21 -07001039 sp<IBinder> scHandle = nullptr;
1040 if (includeSurfaceControlHandle && mSurfaceControl) {
1041 scHandle = mSurfaceControl->getHandle();
1042 }
1043 return new BBQSurface(mProducer, true, scHandle, this);
Robert Carr05086b22020-10-13 18:22:51 -07001044}
1045
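// Illustrative use of mergeWithNextTransaction (the layer and frame number below are hypothetical):
//
//   SurfaceComposerClient::Transaction t;
//   t.setAlpha(otherLayer, 0.5f);
//   bbq->mergeWithNextTransaction(&t, targetFrameNumber);
//
// If targetFrameNumber has already been acquired, t is applied immediately; otherwise it is queued
// and merged into the transaction that eventually acquires that frame. Either way the caller should
// treat t as consumed afterwards.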
Vishnu Nairc4a40c12020-12-23 09:14:32 -08001046void BLASTBufferQueue::mergeWithNextTransaction(SurfaceComposerClient::Transaction* t,
1047 uint64_t frameNumber) {
1048 std::lock_guard _lock{mMutex};
1049 if (mLastAcquiredFrameNumber >= frameNumber) {
1050 // Apply the transaction since we have already acquired the desired frame.
1051 t->apply();
1052 } else {
chaviwaad6cf52021-03-23 17:27:20 -05001053 mPendingTransactions.emplace_back(frameNumber, *t);
1054 // Clear the transaction so it can't be applied elsewhere.
1055 t->clear();
Vishnu Nairc4a40c12020-12-23 09:14:32 -08001056 }
1057}
1058
chaviw6a195272021-09-03 16:14:25 -05001059void BLASTBufferQueue::applyPendingTransactions(uint64_t frameNumber) {
1060 std::lock_guard _lock{mMutex};
1061
1062 SurfaceComposerClient::Transaction t;
1063 mergePendingTransactions(&t, frameNumber);
Robert Carr79dc06a2022-02-22 15:28:59 -08001064 // All transactions on our apply token are one-way. See comment on mAppliedLastTransaction
1065 t.setApplyToken(mApplyToken).apply(false, true);
chaviw6a195272021-09-03 16:14:25 -05001066}
1067
1068void BLASTBufferQueue::mergePendingTransactions(SurfaceComposerClient::Transaction* t,
1069 uint64_t frameNumber) {
1070 auto mergeTransaction =
1071 [&t, currentFrameNumber = frameNumber](
1072 std::tuple<uint64_t, SurfaceComposerClient::Transaction> pendingTransaction) {
1073 auto& [targetFrameNumber, transaction] = pendingTransaction;
1074 if (currentFrameNumber < targetFrameNumber) {
1075 return false;
1076 }
1077 t->merge(std::move(transaction));
1078 return true;
1079 };
1080
1081 mPendingTransactions.erase(std::remove_if(mPendingTransactions.begin(),
1082 mPendingTransactions.end(), mergeTransaction),
1083 mPendingTransactions.end());
1084}
1085
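// Note: the returned Transaction is heap-allocated and ownership passes to the caller, who is
// expected to apply or merge it and then delete it.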
chaviwd84085a2022-02-08 11:07:04 -06001086SurfaceComposerClient::Transaction* BLASTBufferQueue::gatherPendingTransactions(
1087 uint64_t frameNumber) {
1088 std::lock_guard _lock{mMutex};
1089 SurfaceComposerClient::Transaction* t = new SurfaceComposerClient::Transaction();
1090 mergePendingTransactions(t, frameNumber);
1091 return t;
1092}
1093
Vishnu Nair89496122020-12-14 17:14:53 -08001094// Maintains a single worker thread per process that services a list of runnables.
1095class AsyncWorker : public Singleton<AsyncWorker> {
1096private:
1097 std::thread mThread;
1098 bool mDone = false;
1099 std::deque<std::function<void()>> mRunnables;
1100 std::mutex mMutex;
1101 std::condition_variable mCv;
1102 void run() {
1103 std::unique_lock<std::mutex> lock(mMutex);
1104 while (!mDone) {
Vishnu Nair89496122020-12-14 17:14:53 -08001105 while (!mRunnables.empty()) {
Vishnu Nair51e4dc82021-10-01 15:32:33 -07001106 std::deque<std::function<void()>> runnables = std::move(mRunnables);
1107 mRunnables.clear();
1108 lock.unlock();
1109 // Run outside the lock since the runnable might trigger another
1110 // post to the async worker.
1111 execute(runnables);
1112 lock.lock();
Vishnu Nair89496122020-12-14 17:14:53 -08001113 }
Wonsik Kim567533e2021-05-04 19:31:29 -07001114 mCv.wait(lock);
Vishnu Nair89496122020-12-14 17:14:53 -08001115 }
1116 }
1117
Vishnu Nair51e4dc82021-10-01 15:32:33 -07001118 void execute(std::deque<std::function<void()>>& runnables) {
1119 while (!runnables.empty()) {
1120 std::function<void()> runnable = runnables.front();
1121 runnables.pop_front();
1122 runnable();
1123 }
1124 }
1125
Vishnu Nair89496122020-12-14 17:14:53 -08001126public:
1127 AsyncWorker() : Singleton<AsyncWorker>() { mThread = std::thread(&AsyncWorker::run, this); }
1128
1129 ~AsyncWorker() {
        {
            // Hold the lock while setting mDone so the worker thread cannot miss this wakeup
            // between checking mDone and calling wait().
            std::unique_lock<std::mutex> lock(mMutex);
            mDone = true;
        }
        mCv.notify_all();
1132 if (mThread.joinable()) {
1133 mThread.join();
1134 }
1135 }
1136
1137 void post(std::function<void()> runnable) {
1138 std::unique_lock<std::mutex> lock(mMutex);
1139 mRunnables.emplace_back(std::move(runnable));
1140 mCv.notify_one();
1141 }
1142};
1143ANDROID_SINGLETON_STATIC_INSTANCE(AsyncWorker);
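// Illustrative use of the worker, mirroring AsyncProducerListener below: posting moves the call
// onto the shared worker thread so the poster never blocks on whatever the runnable does.
//
//   AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferReleased(); });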
1144
1145// Asynchronously calls ProducerListener functions so we can emulate one way binder calls.
1146class AsyncProducerListener : public BnProducerListener {
1147private:
1148 const sp<IProducerListener> mListener;
1149
1150public:
1151 AsyncProducerListener(const sp<IProducerListener>& listener) : mListener(listener) {}
1152
1153 void onBufferReleased() override {
1154 AsyncWorker::getInstance().post([listener = mListener]() { listener->onBufferReleased(); });
1155 }
1156
1157 void onBuffersDiscarded(const std::vector<int32_t>& slots) override {
1158 AsyncWorker::getInstance().post(
1159 [listener = mListener, slots = slots]() { listener->onBuffersDiscarded(slots); });
1160 }
1161};
1162
1163// Extends the BufferQueueProducer to create a wrapper around the listener so the listener calls
1164// can be non-blocking when the producer is in the client process.
1165class BBQBufferQueueProducer : public BufferQueueProducer {
1166public:
Brian Lindahlc794b692023-01-31 15:42:47 -07001167 BBQBufferQueueProducer(const sp<BufferQueueCore>& core, wp<BLASTBufferQueue> bbq)
          : BufferQueueProducer(core, false /* consumerIsSurfaceFlinger */),
1169 mBLASTBufferQueue(std::move(bbq)) {}
Vishnu Nair89496122020-12-14 17:14:53 -08001170
1171 status_t connect(const sp<IProducerListener>& listener, int api, bool producerControlledByApp,
1172 QueueBufferOutput* output) override {
1173 if (!listener) {
1174 return BufferQueueProducer::connect(listener, api, producerControlledByApp, output);
1175 }
1176
1177 return BufferQueueProducer::connect(new AsyncProducerListener(listener), api,
1178 producerControlledByApp, output);
1179 }
Vishnu Nair17dde612020-12-28 11:39:59 -08001180
Patrick Williamsac70bc52024-07-09 17:11:28 -05001181 status_t disconnect(int api, DisconnectMode mode) override {
1182 if (status_t status = BufferQueueProducer::disconnect(api, mode); status != OK) {
1183 return status;
1184 }
1185
1186 sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
1187 if (!bbq) {
1188 return OK;
1189 }
1190
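        // Disconnecting frees the queue's buffers, so reset the adapter's bookkeeping and wake any
        // dequeueBuffer call that is blocked waiting for a buffer release.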
1191 {
1192 std::lock_guard lock{bbq->mMutex};
1193 bbq->mNumDequeued = 0;
1194 bbq->mNumFrameAvailable = 0;
1195 bbq->mNumAcquired = 0;
1196 bbq->mSubmitted.clear();
1197 bbq->updateDequeueShouldBlockLocked();
1198 }
Patrick Williams8f715012024-07-24 15:31:03 -05001199
1200 if (bbq->mBufferReleaseReader) {
1201 bbq->mBufferReleaseReader->interruptBlockingRead();
1202 }
Patrick Williamsac70bc52024-07-09 17:11:28 -05001203
1204 return OK;
1205 }
1206
Brian Lindahlc794b692023-01-31 15:42:47 -07001207 // We want to resize the frame history when changing the size of the buffer queue
1208 status_t setMaxDequeuedBufferCount(int maxDequeuedBufferCount) override {
1209 int maxBufferCount;
1210 status_t status = BufferQueueProducer::setMaxDequeuedBufferCount(maxDequeuedBufferCount,
1211 &maxBufferCount);
Patrick Williamsac70bc52024-07-09 17:11:28 -05001212 if (status != OK) {
1213 return status;
Brian Lindahlc794b692023-01-31 15:42:47 -07001214 }
Patrick Williamsac70bc52024-07-09 17:11:28 -05001215
1216 sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
1217 if (!bbq) {
1218 return OK;
1219 }
1220
1221 {
1222 std::lock_guard lock{bbq->mMutex};
1223 bbq->mMaxDequeuedBuffers = maxDequeuedBufferCount;
1224 bbq->updateDequeueShouldBlockLocked();
1225 }
Patrick Williams8f715012024-07-24 15:31:03 -05001226
1227 if (bbq->mBufferReleaseReader) {
1228 bbq->mBufferReleaseReader->interruptBlockingRead();
1229 }
Patrick Williamsac70bc52024-07-09 17:11:28 -05001230
        size_t newFrameHistorySize = maxBufferCount + 2; // +2 to allow for triple-buffered rendering
        // Only resize the frame history when it needs to grow.
1233 if (newFrameHistorySize > FrameEventHistory::INITIAL_MAX_FRAME_HISTORY) {
1234 ALOGV("increasing frame history size to %zu", newFrameHistorySize);
1235 bbq->resizeFrameEventHistory(newFrameHistorySize);
1236 }
1237
1238 return OK;
Brian Lindahlc794b692023-01-31 15:42:47 -07001239 }
1240
Vishnu Nair17dde612020-12-28 11:39:59 -08001241 int query(int what, int* value) override {
1242 if (what == NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER) {
1243 *value = 1;
1244 return NO_ERROR;
1245 }
1246 return BufferQueueProducer::query(what, value);
1247 }
Brian Lindahlc794b692023-01-31 15:42:47 -07001248
Patrick Williamsac70bc52024-07-09 17:11:28 -05001249 status_t setAsyncMode(bool asyncMode) override {
1250 if (status_t status = BufferQueueProducer::setAsyncMode(asyncMode); status != NO_ERROR) {
1251 return status;
1252 }
1253
1254 sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
1255 if (!bbq) {
1256 return NO_ERROR;
1257 }
1258
1259 {
1260 std::lock_guard lock{bbq->mMutex};
1261 bbq->mAsyncMode = asyncMode;
1262 bbq->updateDequeueShouldBlockLocked();
1263 }
1264
Patrick Williams8f715012024-07-24 15:31:03 -05001265 if (bbq->mBufferReleaseReader) {
1266 bbq->mBufferReleaseReader->interruptBlockingRead();
1267 }
Patrick Williamsac70bc52024-07-09 17:11:28 -05001268 return NO_ERROR;
1269 }
1270
1271 status_t setSharedBufferMode(bool sharedBufferMode) override {
1272 if (status_t status = BufferQueueProducer::setSharedBufferMode(sharedBufferMode);
1273 status != NO_ERROR) {
1274 return status;
1275 }
1276
1277 sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
1278 if (!bbq) {
1279 return NO_ERROR;
1280 }
1281
1282 {
1283 std::lock_guard lock{bbq->mMutex};
1284 bbq->mSharedBufferMode = sharedBufferMode;
1285 bbq->updateDequeueShouldBlockLocked();
1286 }
1287
Patrick Williams8f715012024-07-24 15:31:03 -05001288 if (bbq->mBufferReleaseReader) {
1289 bbq->mBufferReleaseReader->interruptBlockingRead();
1290 }
Patrick Williamsac70bc52024-07-09 17:11:28 -05001291 return NO_ERROR;
1292 }
1293
1294 status_t detachBuffer(int slot) override {
1295 if (status_t status = BufferQueueProducer::detachBuffer(slot); status != NO_ERROR) {
1296 return status;
1297 }
1298
1299 sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
1300 if (!bbq) {
1301 return NO_ERROR;
1302 }
1303
1304 {
1305 std::lock_guard lock{bbq->mMutex};
1306 bbq->mNumDequeued--;
1307 bbq->updateDequeueShouldBlockLocked();
1308 }
1309
Patrick Williams8f715012024-07-24 15:31:03 -05001310 if (bbq->mBufferReleaseReader) {
1311 bbq->mBufferReleaseReader->interruptBlockingRead();
1312 }
Patrick Williamsac70bc52024-07-09 17:11:28 -05001313 return NO_ERROR;
1314 }
1315
1316 // Override dequeueBuffer to block if there are no free buffers.
1317 //
1318 // Buffer releases are communicated via the BufferReleaseChannel. When dequeueBuffer determines
1319 // a free buffer is not available, it blocks on an epoll file descriptor. Epoll is configured to
1320 // detect messages on the BufferReleaseChannel's socket and an eventfd. The eventfd is signaled
1321 // whenever an event other than a buffer release occurs that may change the number of free
    // buffers. dequeueBuffer uses epoll much like a condition variable, testing for
1323 // the availability of a free buffer in a loop, breaking the loop once a free buffer is
1324 // available.
1325 //
    // This is an optimization to reduce the thread scheduling delays incurred by the pre-existing
    // binder release callback. That binder callback is still used, and there are no ordering
    // guarantees between buffer releases delivered via binder and via the BufferReleaseChannel. If
    // we attempt to release a buffer here that has already been released via binder, the release
    // is ignored.
1331 status_t dequeueBuffer(int* outSlot, sp<Fence>* outFence, uint32_t width, uint32_t height,
1332 PixelFormat format, uint64_t usage, uint64_t* outBufferAge,
1333 FrameEventHistoryDelta* outTimestamps) {
1334 sp<BLASTBufferQueue> bbq = mBLASTBufferQueue.promote();
1335 if (!bbq || !bbq->mBufferReleaseReader) {
1336 return BufferQueueProducer::dequeueBuffer(outSlot, outFence, width, height, format,
1337 usage, outBufferAge, outTimestamps);
1338 }
1339
1340 if (bbq->mDequeueShouldBlock) {
1341 ATRACE_FORMAT("waiting for free buffer");
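            // Cap the total wait at one second; if no release arrives in time we fall through to
            // the base dequeueBuffer, which performs its own waiting and error handling.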
1342 auto maxWaitTime = std::chrono::steady_clock::now() + 1s;
1343 do {
1344 auto timeout = std::chrono::duration_cast<std::chrono::milliseconds>(
1345 maxWaitTime - std::chrono::steady_clock::now());
1346 if (timeout <= 0ms) {
1347 break;
1348 }
1349
1350 ReleaseCallbackId releaseCallbackId;
1351 sp<Fence> releaseFence;
1352 status_t status = bbq->mBufferReleaseReader->readBlocking(releaseCallbackId,
1353 releaseFence, timeout);
1354 if (status == WOULD_BLOCK) {
1355 // readBlocking was interrupted. The loop will test if we have a free buffer.
1356 continue;
1357 }
1358
1359 if (status != OK) {
1360 // An error occurred or readBlocking timed out.
1361 break;
1362 }
1363
1364 std::lock_guard lock{bbq->mMutex};
1365 bbq->releaseBufferCallbackLocked(releaseCallbackId, releaseFence, std::nullopt,
1366 false);
1367 } while (bbq->mDequeueShouldBlock);
1368 }
1369
1370 return BufferQueueProducer::dequeueBuffer(outSlot, outFence, width, height, format, usage,
1371 outBufferAge, outTimestamps);
1372 }
1373
Brian Lindahlc794b692023-01-31 15:42:47 -07001374private:
1375 const wp<BLASTBufferQueue> mBLASTBufferQueue;
Vishnu Nair89496122020-12-14 17:14:53 -08001376};
1377
1378// Similar to BufferQueue::createBufferQueue but creates an adapter specific bufferqueue producer.
1379// This BQP allows invoking client specified ProducerListeners and invoke them asynchronously,
1380// emulating one way binder call behavior. Without this, if the listener calls back into the queue,
1381// we can deadlock.
1382void BLASTBufferQueue::createBufferQueue(sp<IGraphicBufferProducer>* outProducer,
1383 sp<IGraphicBufferConsumer>* outConsumer) {
1384 LOG_ALWAYS_FATAL_IF(outProducer == nullptr, "BLASTBufferQueue: outProducer must not be NULL");
1385 LOG_ALWAYS_FATAL_IF(outConsumer == nullptr, "BLASTBufferQueue: outConsumer must not be NULL");
1386
1387 sp<BufferQueueCore> core(new BufferQueueCore());
1388 LOG_ALWAYS_FATAL_IF(core == nullptr, "BLASTBufferQueue: failed to create BufferQueueCore");
1389
Brian Lindahlc794b692023-01-31 15:42:47 -07001390 sp<IGraphicBufferProducer> producer(new BBQBufferQueueProducer(core, this));
Vishnu Nair89496122020-12-14 17:14:53 -08001391 LOG_ALWAYS_FATAL_IF(producer == nullptr,
1392 "BLASTBufferQueue: failed to create BBQBufferQueueProducer");
1393
Vishnu Nair8b30dd12021-01-25 14:16:54 -08001394 sp<BufferQueueConsumer> consumer(new BufferQueueConsumer(core));
1395 consumer->setAllowExtraAcquire(true);
Vishnu Nair89496122020-12-14 17:14:53 -08001396 LOG_ALWAYS_FATAL_IF(consumer == nullptr,
1397 "BLASTBufferQueue: failed to create BufferQueueConsumer");
1398
1399 *outProducer = producer;
1400 *outConsumer = consumer;
1401}
1402
Patrick Williamsac70bc52024-07-09 17:11:28 -05001403void BLASTBufferQueue::onFirstRef() {
1404 // safe default, most producers are expected to override this
1405 //
1406 // This is done in onFirstRef instead of BLASTBufferQueue's constructor because
1407 // BBQBufferQueueProducer::setMaxDequeuedBufferCount promotes a weak pointer to BLASTBufferQueue
1408 // to a strong pointer. If this is done in the constructor, then when the strong pointer goes
1409 // out of scope, it's the last reference so BLASTBufferQueue is deleted.
1410 mProducer->setMaxDequeuedBufferCount(2);
1411}
1412
Brian Lindahlc794b692023-01-31 15:42:47 -07001413void BLASTBufferQueue::resizeFrameEventHistory(size_t newSize) {
1414 // This can be null during creation of the buffer queue, but resizing won't do anything at that
1415 // point in time, so just ignore. This can go away once the class relationships and lifetimes of
1416 // objects are cleaned up with a major refactor of BufferQueue as a whole.
1417 if (mBufferItemConsumer != nullptr) {
1418 std::unique_lock _lock{mMutex};
1419 mBufferItemConsumer->resizeFrameEventHistory(newSize);
1420 }
1421}
1422
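// PIXEL_FORMAT_TRANSPARENT, TRANSLUCENT and OPAQUE are placeholders describing alpha behavior
// rather than concrete buffer layouts, so map them to the 8888 formats the allocator understands:
// the alpha-carrying ones to RGBA_8888 and OPAQUE to RGBX_8888.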
chaviw497e81c2021-02-04 17:09:47 -08001423PixelFormat BLASTBufferQueue::convertBufferFormat(PixelFormat& format) {
1424 PixelFormat convertedFormat = format;
1425 switch (format) {
1426 case PIXEL_FORMAT_TRANSPARENT:
1427 case PIXEL_FORMAT_TRANSLUCENT:
1428 convertedFormat = PIXEL_FORMAT_RGBA_8888;
1429 break;
1430 case PIXEL_FORMAT_OPAQUE:
1431 convertedFormat = PIXEL_FORMAT_RGBX_8888;
1432 break;
1433 }
1434 return convertedFormat;
1435}
1436
Robert Carr82d07c92021-05-10 11:36:43 -07001437uint32_t BLASTBufferQueue::getLastTransformHint() const {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001438 std::lock_guard _lock{mMutex};
Robert Carr82d07c92021-05-10 11:36:43 -07001439 if (mSurfaceControl != nullptr) {
1440 return mSurfaceControl->getTransformHint();
1441 } else {
1442 return 0;
1443 }
1444}
1445
chaviw0b020f82021-08-20 12:00:47 -05001446uint64_t BLASTBufferQueue::getLastAcquiredFrameNum() {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001447 std::lock_guard _lock{mMutex};
chaviw0b020f82021-08-20 12:00:47 -05001448 return mLastAcquiredFrameNumber;
1449}
1450
Vishnu Nair1e8bf102021-12-28 14:36:59 -08001451bool BLASTBufferQueue::isSameSurfaceControl(const sp<SurfaceControl>& surfaceControl) const {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001452 std::lock_guard _lock{mMutex};
Vishnu Nair1e8bf102021-12-28 14:36:59 -08001453 return SurfaceControl::isSameSurface(mSurfaceControl, surfaceControl);
1454}
1455
Patrick Williamsf1e5df12022-10-17 21:37:42 +00001456void BLASTBufferQueue::setTransactionHangCallback(
1457 std::function<void(const std::string&)> callback) {
Chavi Weingartene0237bb2023-02-06 21:48:32 +00001458 std::lock_guard _lock{mMutex};
Robert Carr4c1b6462021-12-21 10:30:50 -08001459 mTransactionHangCallback = callback;
1460}
1461
Patrick Williamsac70bc52024-07-09 17:11:28 -05001462BLASTBufferQueue::BufferReleaseReader::BufferReleaseReader(
1463 std::unique_ptr<gui::BufferReleaseChannel::ConsumerEndpoint> endpoint)
1464 : mEndpoint(std::move(endpoint)) {
1465 mEpollFd = android::base::unique_fd(epoll_create1(0));
1466 if (!mEpollFd.ok()) {
1467 ALOGE("Failed to create buffer release epoll file descriptor. errno=%d message='%s'", errno,
1468 strerror(errno));
1469 }
1470
1471 epoll_event event;
1472 event.events = EPOLLIN;
1473 event.data.fd = mEndpoint->getFd();
1474 if (epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, mEndpoint->getFd(), &event) == -1) {
1475 ALOGE("Failed to register buffer release consumer file descriptor with epoll. errno=%d "
1476 "message='%s'",
1477 errno, strerror(errno));
1478 }
1479
1480 mEventFd = android::base::unique_fd(eventfd(0, EFD_NONBLOCK));
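    // Reuse the epoll_event from above: events is still EPOLLIN, only the fd changes for the
    // eventfd registration.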
1481 event.data.fd = mEventFd.get();
1482 if (epoll_ctl(mEpollFd.get(), EPOLL_CTL_ADD, mEventFd.get(), &event) == -1) {
1483 ALOGE("Failed to register buffer release eventfd with epoll. errno=%d message='%s'", errno,
1484 strerror(errno));
1485 }
1486}
1487
1488status_t BLASTBufferQueue::BufferReleaseReader::readNonBlocking(ReleaseCallbackId& outId,
1489 sp<Fence>& outFence) {
1490 std::lock_guard lock{mMutex};
1491 return mEndpoint->readReleaseFence(outId, outFence);
1492}
1493
1494status_t BLASTBufferQueue::BufferReleaseReader::readBlocking(ReleaseCallbackId& outId,
1495 sp<Fence>& outFence,
1496 std::chrono::milliseconds timeout) {
1497 epoll_event event;
1498 int eventCount = epoll_wait(mEpollFd.get(), &event, 1 /* maxevents */, timeout.count());
1499
1500 if (eventCount == -1) {
1501 ALOGE("epoll_wait error while waiting for buffer release. errno=%d message='%s'", errno,
1502 strerror(errno));
1503 return UNKNOWN_ERROR;
1504 }
1505
1506 if (eventCount == 0) {
1507 return TIMED_OUT;
1508 }
1509
1510 if (event.data.fd == mEventFd.get()) {
1511 uint64_t value;
1512 if (read(mEventFd.get(), &value, sizeof(uint64_t)) == -1 && errno != EWOULDBLOCK) {
1513 ALOGE("error while reading from eventfd. errno=%d message='%s'", errno,
1514 strerror(errno));
1515 }
1516 return WOULD_BLOCK;
1517 }
1518
1519 std::lock_guard lock{mMutex};
1520 return mEndpoint->readReleaseFence(outId, outFence);
1521}
1522
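// Wakes any thread blocked in readBlocking() by signaling the eventfd; the interrupted read
// returns WOULD_BLOCK so the caller can re-evaluate its wait condition.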
1523void BLASTBufferQueue::BufferReleaseReader::interruptBlockingRead() {
1524 uint64_t value = 1;
1525 if (write(mEventFd.get(), &value, sizeof(uint64_t)) == -1) {
1526 ALOGE("failed to notify dequeue event. errno=%d message='%s'", errno, strerror(errno));
1527 }
1528}
1529
Robert Carr78c25dd2019-08-15 14:10:33 -07001530} // namespace android