/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "RenderThread.h"

#include "../renderstate/RenderState.h"
#include "../pipeline/skia/SkiaOpenGLReadback.h"
#include "CanvasContext.h"
#include "EglManager.h"
#include "OpenGLReadback.h"
#include "RenderProxy.h"
#include "VulkanManager.h"
#include "utils/FatVector.h"

#include <gui/DisplayEventReceiver.h>
#include <gui/ISurfaceComposer.h>
#include <gui/SurfaceComposerClient.h>
#include <sys/resource.h>
#include <utils/Condition.h>
#include <utils/Log.h>
#include <utils/Mutex.h>

namespace android {
namespace uirenderer {
namespace renderthread {

// Number of events to read at a time from the DisplayEventReceiver pipe.
// The value should be large enough that we can quickly drain the pipe
// using just a few large reads.
static const size_t EVENT_BUFFER_SIZE = 100;

// Slight delay after vsync to give the UI thread time to push us a new frame
// before we dispatch the frame callbacks.
static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);

TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}

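// Pops and returns the task at the head of the queue, or nullptr if the queue
// is empty. This does not check mRunAt; the caller decides when a task is due.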
RenderTask* TaskQueue::next() {
    RenderTask* ret = mHead;
    if (ret) {
        mHead = ret->mNext;
        if (!mHead) {
            mTail = nullptr;
        }
        ret->mNext = nullptr;
    }
    return ret;
}

RenderTask* TaskQueue::peek() {
    return mHead;
}

void TaskQueue::queue(RenderTask* task) {
    // Since the RenderTask itself forms the linked list, the same task
    // cannot be queued twice.
    LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
    if (mTail) {
        // Fast path if we can just append
        if (mTail->mRunAt <= task->mRunAt) {
            mTail->mNext = task;
            mTail = task;
        } else {
            // Need to find the proper insertion point
            RenderTask* previous = nullptr;
            RenderTask* next = mHead;
            while (next && next->mRunAt <= task->mRunAt) {
                previous = next;
                next = next->mNext;
            }
            if (!previous) {
                task->mNext = mHead;
                mHead = task;
            } else {
                previous->mNext = task;
                if (next) {
                    task->mNext = next;
                } else {
                    mTail = task;
                }
            }
        }
    } else {
        mTail = mHead = task;
    }
}

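// Inserts the task at the head of the queue regardless of its mRunAt time, so
// it runs ahead of everything already queued.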
void TaskQueue::queueAtFront(RenderTask* task) {
    LOG_ALWAYS_FATAL_IF(task->mNext || mHead == task, "Task is already in the queue!");
    if (mTail) {
        task->mNext = mHead;
        mHead = task;
    } else {
        mTail = mHead = task;
    }
}

void TaskQueue::remove(RenderTask* task) {
    // TaskQueue is strict here to enforce that users are keeping track of
    // their RenderTasks due to how their memory is managed
    LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
            "Cannot remove a task that isn't in the queue!");

    // If task is the head we can just call next() to pop it off
    // Otherwise we need to scan through to find the task before it
    if (peek() == task) {
        next();
    } else {
        RenderTask* previous = mHead;
        while (previous->mNext != task) {
            previous = previous->mNext;
        }
        previous->mNext = task->mNext;
        if (mTail == task) {
            mTail = previous;
        }
    }
}

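// RenderTask that invokes RenderThread::dispatchFrameCallbacks() when it runs.
// A single instance is owned by the RenderThread and queued, with a small
// delay, after each vsync that has pending frame callbacks.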
class DispatchFrameCallbacks : public RenderTask {
private:
    RenderThread* mRenderThread;
public:
    explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}

    virtual void run() override {
        mRenderThread->dispatchFrameCallbacks();
    }
};

static bool gHasRenderThreadInstance = false;

bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}

RenderThread& RenderThread::getInstance() {
    // This is a heap-allocated pointer so that __cxa_finalize does not try to
    // destroy it at process exit; the RenderThread is intentionally never
    // destroyed, and its destructor aborts.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}

RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");
}

RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}

void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}

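// Runs on the RenderThread at the start of threadLoop(): caches the display
// info, derives the frame interval from the display's refresh rate, and
// creates the thread-local DisplayEventReceiver, EglManager, RenderState,
// JankTracker, and VulkanManager.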
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mJankTracker = new JankTracker(mDisplayInfo);
    mVkManager = new VulkanManager(*this);
}

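// Lazily creates the Readback implementation that matches the active render
// pipeline.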
Readback& RenderThread::readback() {
    if (!mReadback) {
        auto renderType = Properties::getRenderPipelineType();
        switch (renderType) {
            case RenderPipelineType::OpenGL:
                mReadback = new OpenGLReadbackImpl(*this);
                break;
            case RenderPipelineType::SkiaGL:
            case RenderPipelineType::SkiaVulkan:
                // It works to use the OpenGL pipeline for Vulkan but this is not
                // ideal as it causes us to create an OpenGL context in addition
                // to the Vulkan one.
                mReadback = new skiapipeline::SkiaOpenGLReadback(*this);
                break;
            default:
                LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
                break;
        }
    }

    return *mReadback;
}

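// Static Looper callback invoked when the DisplayEventReceiver fd becomes
// readable. Returning 1 keeps the callback registered; returning 0 removes it.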
int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
    if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
        ALOGE("Display event receiver pipe was closed or an error occurred. "
                "events=0x%x", events);
        return 0; // remove the callback
    }

    if (!(events & Looper::EVENT_INPUT)) {
        ALOGW("Received spurious callback for unhandled poll event. "
                "events=0x%x", events);
        return 1; // keep the callback
    }

    reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();

    return 1; // keep the callback
}

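// Drains every pending event from the receiver and returns the timestamp of
// the most recent vsync, or 0 if no vsync event was received.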
static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
    DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
    nsecs_t latest = 0;
    ssize_t n;
    while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
        for (ssize_t i = 0; i < n; i++) {
            const DisplayEventReceiver::Event& ev = buf[i];
            switch (ev.header.type) {
                case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
                    latest = ev.header.timestamp;
                    break;
            }
        }
    }
    if (n < 0) {
        ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
    }
    return latest;
}

void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}

void RenderThread::dispatchFrameCallbacks() {
    ATRACE_CALL();
    mFrameCallbackTaskPending = false;

    std::set<IFrameCallback*> callbacks;
    mFrameCallbacks.swap(callbacks);

    if (callbacks.size()) {
        // Assume one of them will probably animate again so preemptively
        // request the next vsync in case it occurs mid-frame
        requestVsync();
        for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) {
            (*it)->doFrame();
        }
    }
}

void RenderThread::requestVsync() {
    if (!mVsyncRequested) {
        mVsyncRequested = true;
        status_t status = mDisplayEventReceiver->requestNextVsync();
        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
                "requestNextVsync failed with status: %d", status);
    }
}

bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1;
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        {
            FatVector<RenderTask*, 10> workQueue;
            // Process our queue, if we have anything. By first collecting all
            // the pending tasks and then running them, we avoid vsync
            // starvation if more tasks are queued while we are processing them.
            while (RenderTask* task = nextTask(&nextWakeup)) {
                workQueue.push_back(task);
            }
            for (auto task : workQueue) {
                task->run();
                // task may have deleted itself, do not reference it again
            }
        }
        if (nextWakeup == LLONG_MAX) {
            timeoutMillis = -1;
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;
            }
        }

        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}

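// Thread-safe; may be called from any thread. Wakes the looper if the new
// task should run before the currently scheduled wakeup.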
void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}

void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the calling thread so the Condition doesn't
    // signal the wrong waiter. The easiest way to achieve that is to allocate
    // them on the stack, at a slight cost per call.
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    while (!syncTask.hasRun()) {
        condition.wait(mutex);
    }
}

void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}

void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}

void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}

void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}

bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
    size_t erased;
    erased = mFrameCallbacks.erase(callback);
    erased |= mPendingRegistrationFrameCallbacks.erase(callback);
    return erased;
}

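// Defers a callback that is registered for the current frame so that it runs
// on the next frame instead.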
void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
    if (mFrameCallbacks.erase(callback)) {
        mPendingRegistrationFrameCallbacks.insert(callback);
    }
}

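// Pops and returns the next task whose run time has arrived, or nullptr if
// nothing is due yet. *nextWakeup is set to the run time of the earliest
// pending task, or LLONG_MAX if the queue is empty.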
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */