blob: 4f0b3bbbcfb641984dc88ab73bd0303d0709cbf7 [file] [log] [blame]
Dan Stozaec460082018-12-17 15:35:09 -08001/*
2 * Copyright 2019 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define ATRACE_TAG ATRACE_TAG_GRAPHICS
19#undef LOG_TAG
20#define LOG_TAG "RegionSamplingThread"
21
#include "RegionSamplingThread.h"

#include <cutils/properties.h>
#include <gui/IRegionSamplingListener.h>
#include <utils/Trace.h>

#include <cstdint>
#include <cstdlib>
#include <string>

#include "DisplayDevice.h"
#include "Layer.h"
#include "SurfaceFlinger.h"
32
33namespace android {
Kevin DuBois413287f2019-02-25 08:46:47 -080034using namespace std::chrono_literals;
Dan Stozaec460082018-12-17 15:35:09 -080035
// Hash functor so sp<T> strong pointers can key unordered containers;
// hashes the raw pointer value, not the pointed-to object.
template <typename T>
struct SpHash {
    size_t operator()(const sp<T>& p) const { return std::hash<T*>()(p.get()); }
};
40
// Systrace counter used to visualize which stage of the luma-sampling state
// machine is currently active.
constexpr auto lumaSamplingStepTag = "LumaSamplingStep";
enum class samplingStep {
    noWorkNeeded,       // samples delivered; nothing pending (end of captureSample)
    idleTimerWaiting,   // request arrived within the rate-limit period; deferred
    waitForZeroPhase,   // stale luma detected; vsync listener armed at phase zero
    waitForSamplePhase, // phase zero hit; waiting for the sampling phase offset
    sample              // sampling now (doSample passed the rate limiter)
};
49
// Default tuning values; overridable at runtime through the
// debug.sf.region_sampling_* properties (see EnvironmentTimingTunables).
constexpr auto defaultRegionSamplingOffset = std::chrono::milliseconds{-3};
constexpr auto defaultRegionSamplingPeriod = std::chrono::milliseconds{100};
constexpr auto defaultRegionSamplingTimerTimeout = std::chrono::milliseconds{100};
// TODO: (b/127403193) duration to string conversion could probably be constexpr
// Renders a duration as a decimal nanosecond count, e.g. -3ms -> "-3000000".
template <typename Rep, typename Per>
inline std::string toNsString(std::chrono::duration<Rep, Per> t) {
    const auto asNs = std::chrono::duration_cast<std::chrono::nanoseconds>(t);
    return std::to_string(asNs.count());
}
58
Kevin DuBois413287f2019-02-25 08:46:47 -080059RegionSamplingThread::EnvironmentTimingTunables::EnvironmentTimingTunables() {
60 char value[PROPERTY_VALUE_MAX] = {};
61
62 property_get("debug.sf.region_sampling_offset_ns", value,
63 toNsString(defaultRegionSamplingOffset).c_str());
64 int const samplingOffsetNsRaw = atoi(value);
65
66 property_get("debug.sf.region_sampling_period_ns", value,
67 toNsString(defaultRegionSamplingPeriod).c_str());
68 int const samplingPeriodNsRaw = atoi(value);
69
70 property_get("debug.sf.region_sampling_timer_timeout_ns", value,
71 toNsString(defaultRegionSamplingTimerTimeout).c_str());
72 int const samplingTimerTimeoutNsRaw = atoi(value);
73
74 if ((samplingPeriodNsRaw < 0) || (samplingTimerTimeoutNsRaw < 0)) {
75 ALOGW("User-specified sampling tuning options nonsensical. Using defaults");
76 mSamplingOffset = defaultRegionSamplingOffset;
77 mSamplingPeriod = defaultRegionSamplingPeriod;
78 mSamplingTimerTimeout = defaultRegionSamplingTimerTimeout;
79 } else {
80 mSamplingOffset = std::chrono::nanoseconds(samplingOffsetNsRaw);
81 mSamplingPeriod = std::chrono::nanoseconds(samplingPeriodNsRaw);
82 mSamplingTimerTimeout = std::chrono::nanoseconds(samplingTimerTimeoutNsRaw);
83 }
84}
85
/*
 * DispSync callback that produces one vsync-aligned sampling notification per
 * startVsyncListener() request. It registers itself at phase offset zero;
 * when that event fires it re-arms at mTargetSamplingOffset, and when the
 * second event fires it unregisters and calls
 * RegionSamplingThread::notifySamplingOffset().
 */
struct SamplingOffsetCallback : DispSync::Callback {
    SamplingOffsetCallback(RegionSamplingThread& samplingThread, Scheduler& scheduler,
                           std::chrono::nanoseconds targetSamplingOffset)
          : mRegionSamplingThread(samplingThread),
            mScheduler(scheduler),
            mTargetSamplingOffset(targetSamplingOffset) {}

    ~SamplingOffsetCallback() { stopVsyncListener(); }

    // Non-copyable: DispSync holds this object's address as the listener key.
    SamplingOffsetCallback(const SamplingOffsetCallback&) = delete;
    SamplingOffsetCallback& operator=(const SamplingOffsetCallback&) = delete;

    // Registers this callback with the primary DispSync at phase zero.
    // Idempotent: no-op when already listening.
    void startVsyncListener() {
        std::lock_guard lock(mMutex);
        if (mVsyncListening) return;

        mPhaseIntervalSetting = Phase::ZERO;
        mScheduler.withPrimaryDispSync([this](android::DispSync& sync) {
            sync.addEventListener("SamplingThreadDispSyncListener", 0, this);
        });
        mVsyncListening = true;
    }

    // Unregisters from DispSync. Idempotent.
    void stopVsyncListener() {
        std::lock_guard lock(mMutex);
        stopVsyncListenerLocked();
    }

private:
    void stopVsyncListenerLocked() /*REQUIRES(mMutex)*/ {
        if (!mVsyncListening) return;

        mScheduler.withPrimaryDispSync(
                [this](android::DispSync& sync) { sync.removeEventListener(this); });
        mVsyncListening = false;
    }

    void onDispSyncEvent(nsecs_t /* when */) final {
        std::unique_lock<decltype(mMutex)> lock(mMutex);

        if (mPhaseIntervalSetting == Phase::ZERO) {
            // First event (phase zero): re-arm this listener at the target
            // sampling offset and wait for the next event.
            ATRACE_INT(lumaSamplingStepTag, static_cast<int>(samplingStep::waitForSamplePhase));
            mPhaseIntervalSetting = Phase::SAMPLING;
            mScheduler.withPrimaryDispSync([this](android::DispSync& sync) {
                sync.changePhaseOffset(this, mTargetSamplingOffset.count());
            });
            return;
        }

        if (mPhaseIntervalSetting == Phase::SAMPLING) {
            // Second event (sampling offset reached): restore phase zero, stop
            // listening, then notify. The lock is dropped first because
            // notifySamplingOffset() -> doSample() acquires the sampling
            // thread's own mutex.
            mPhaseIntervalSetting = Phase::ZERO;
            mScheduler.withPrimaryDispSync(
                    [this](android::DispSync& sync) { sync.changePhaseOffset(this, 0); });
            stopVsyncListenerLocked();
            lock.unlock();
            mRegionSamplingThread.notifySamplingOffset();
            return;
        }
    }

    RegionSamplingThread& mRegionSamplingThread;
    Scheduler& mScheduler;
    const std::chrono::nanoseconds mTargetSamplingOffset;
    mutable std::mutex mMutex;
    // Which phase offset the listener is currently registered at.
    enum class Phase {
        ZERO,
        SAMPLING
    } mPhaseIntervalSetting /*GUARDED_BY(mMutex) macro doesnt work with unique_lock?*/
            = Phase::ZERO;
    bool mVsyncListening /*GUARDED_BY(mMutex)*/ = false;
};
157
// Main constructor: wires up the idle timer (fires checkForStaleLuma; reset
// on every successful sample in doSample), the vsync phase callback, and
// spawns the worker thread that runs threadMain().
RegionSamplingThread::RegionSamplingThread(SurfaceFlinger& flinger, Scheduler& scheduler,
                                           const TimingTunables& tunables)
      : mFlinger(flinger),
        mScheduler(scheduler),
        mTunables(tunables),
        mIdleTimer(std::chrono::duration_cast<std::chrono::milliseconds>(
                           mTunables.mSamplingTimerTimeout),
                   [] {}, [this] { checkForStaleLuma(); }),
        mPhaseCallback(std::make_unique<SamplingOffsetCallback>(*this, mScheduler,
                                                                tunables.mSamplingOffset)),
        lastSampleTime(0ns) {
    {
        std::lock_guard threadLock(mThreadMutex);
        mThread = std::thread([this]() { threadMain(); });
        pthread_setname_np(mThread.native_handle(), "RegionSamplingThread");
    }
    mIdleTimer.start();
}
176
// Convenience constructor: uses the compiled-in default tuning values.
RegionSamplingThread::RegionSamplingThread(SurfaceFlinger& flinger, Scheduler& scheduler)
      : RegionSamplingThread(flinger, scheduler,
                             TimingTunables{defaultRegionSamplingOffset,
                                            defaultRegionSamplingPeriod,
                                            defaultRegionSamplingTimerTimeout}) {}
182
// Shutdown: stop the idle timer so checkForStaleLuma() can no longer fire,
// then signal threadMain() to exit and join the worker thread.
RegionSamplingThread::~RegionSamplingThread() {
    mIdleTimer.stop();

    {
        std::lock_guard lock(mMutex);
        mRunning = false;
        mCondition.notify_one();
    }

    std::lock_guard threadLock(mThreadMutex);
    if (mThread.joinable()) {
        mThread.join();
    }
}
197
// Registers a listener to receive luma samples over samplingArea. If a stop
// layer handle is supplied, capture halts below that layer (see the traversal
// in captureSample). The listener's binder is linked to death so the entry is
// purged in binderDied(). Entries are keyed by the listener's binder.
void RegionSamplingThread::addListener(const Rect& samplingArea, const sp<IBinder>& stopLayerHandle,
                                       const sp<IRegionSamplingListener>& listener) {
    // NOTE(review): the static_cast is unchecked — assumes a non-null
    // stopLayerHandle really is a Layer::Handle; verify against callers.
    wp<Layer> stopLayer = stopLayerHandle != nullptr
            ? static_cast<Layer::Handle*>(stopLayerHandle.get())->owner
            : nullptr;

    sp<IBinder> asBinder = IInterface::asBinder(listener);
    asBinder->linkToDeath(this);
    std::lock_guard lock(mMutex);
    mDescriptors.emplace(wp<IBinder>(asBinder), Descriptor{samplingArea, stopLayer, listener});
}
209
// Unregisters a listener; the descriptor map is keyed by the listener's binder.
void RegionSamplingThread::removeListener(const sp<IRegionSamplingListener>& listener) {
    std::lock_guard lock(mMutex);
    mDescriptors.erase(wp<IBinder>(IInterface::asBinder(listener)));
}
214
// Idle-timer callback: if any sample requests were discarded by the rate
// limiter (mDiscardedFrames set in doSample), arm the vsync listener so a
// sample is taken at the next vsync sampling offset.
void RegionSamplingThread::checkForStaleLuma() {
    std::lock_guard lock(mMutex);

    if (mDiscardedFrames) {
        ATRACE_INT(lumaSamplingStepTag, static_cast<int>(samplingStep::waitForZeroPhase));
        mDiscardedFrames = false;
        mPhaseCallback->startVsyncListener();
    }
}
224
// Entry point for "layer content changed": request a (rate-limited) sample.
void RegionSamplingThread::notifyNewContent() {
    doSample();
}
228
// Called by SamplingOffsetCallback when the vsync sampling offset is reached.
void RegionSamplingThread::notifySamplingOffset() {
    doSample();
}
232
// Requests a sample capture, rate-limited to one per mSamplingPeriod. A
// request arriving too soon is dropped, but recorded in mDiscardedFrames so
// the idle timer (checkForStaleLuma) can schedule a vsync-aligned retry.
void RegionSamplingThread::doSample() {
    std::lock_guard lock(mMutex);
    auto now = std::chrono::nanoseconds(systemTime(SYSTEM_TIME_MONOTONIC));
    if (lastSampleTime + mTunables.mSamplingPeriod > now) {
        // Too soon since the last sample: defer via the idle-timer path.
        ATRACE_INT(lumaSamplingStepTag, static_cast<int>(samplingStep::idleTimerWaiting));
        mDiscardedFrames = true;
        return;
    }

    ATRACE_INT(lumaSamplingStepTag, static_cast<int>(samplingStep::sample));

    mDiscardedFrames = false;
    lastSampleTime = now;

    // Sampling right now: restart the stale-luma timer and cancel any pending
    // vsync-aligned retry.
    mIdleTimer.reset();
    mPhaseCallback->stopVsyncListener();

    // Wake the worker thread to run captureSample().
    mSampleRequested = true;
    mCondition.notify_one();
}
253
// Death notification for a listener's binder (linked in addListener): drop
// its descriptor so no further samples are delivered to it.
void RegionSamplingThread::binderDied(const wp<IBinder>& who) {
    std::lock_guard lock(mMutex);
    mDescriptors.erase(who);
}
258
259namespace {
// Rec. 709 luma: Y = 0.2126*R + 0.7152*G + 0.0722*B, for r/g/b in [0, 1].
float getLuma(float r, float g, float b) {
    constexpr float kRedWeight = 0.2126f;
    constexpr float kGreenWeight = 0.7152f;
    constexpr float kBlueWeight = 0.0722f;
    return r * kRedWeight + g * kGreenWeight + b * kBlueWeight;
}
267
268float sampleArea(const uint32_t* data, int32_t stride, const Rect& area) {
269 std::array<int32_t, 256> brightnessBuckets = {};
270 const int32_t majoritySampleNum = area.getWidth() * area.getHeight() / 2;
271
272 for (int32_t row = area.top; row < area.bottom; ++row) {
273 const uint32_t* rowBase = data + row * stride;
274 for (int32_t column = area.left; column < area.right; ++column) {
275 uint32_t pixel = rowBase[column];
276 const float r = (pixel & 0xFF) / 255.0f;
277 const float g = ((pixel >> 8) & 0xFF) / 255.0f;
278 const float b = ((pixel >> 16) & 0xFF) / 255.0f;
279 const uint8_t luma = std::round(getLuma(r, g, b) * 255.0f);
280 ++brightnessBuckets[luma];
281 if (brightnessBuckets[luma] > majoritySampleNum) return luma / 255.0f;
282 }
283 }
284
285 int32_t accumulated = 0;
286 size_t bucket = 0;
287 while (bucket++ < brightnessBuckets.size()) {
288 accumulated += brightnessBuckets[bucket];
289 if (accumulated > majoritySampleNum) break;
290 }
291
292 return bucket / 255.0f;
293}
294} // anonymous namespace
295
// Computes one luma value per descriptor by locking `buffer` for CPU read and
// running sampleArea over each descriptor's area, translated into buffer
// coordinates by subtracting `leftTop`. Returns an empty vector if the buffer
// could not be mapped.
std::vector<float> RegionSamplingThread::sampleBuffer(
        const sp<GraphicBuffer>& buffer, const Point& leftTop,
        const std::vector<RegionSamplingThread::Descriptor>& descriptors) {
    void* data_raw = nullptr;
    buffer->lock(GRALLOC_USAGE_SW_READ_OFTEN, &data_raw);
    // The custom deleter unlocks the buffer when `data` goes out of scope, on
    // every return path.
    std::shared_ptr<uint32_t> data(reinterpret_cast<uint32_t*>(data_raw),
                                   [&buffer](auto) { buffer->unlock(); });
    if (!data) return {};

    const int32_t stride = buffer->getStride();
    std::vector<float> lumas(descriptors.size());
    std::transform(descriptors.begin(), descriptors.end(), lumas.begin(),
                   [&](auto const& descriptor) {
                       return sampleArea(data.get(), stride, descriptor.area - leftTop);
                   });
    return lumas;
}
313
// Captures a screenshot covering the union of all registered sampling areas,
// computes per-descriptor luma, and delivers results to listeners. Runs on
// the sampling thread with mMutex held (threadMain holds it across the call);
// the mutex is deliberately dropped around the SurfaceFlinger capture, so
// this works on a snapshot (`descriptors`) rather than mDescriptors itself.
void RegionSamplingThread::captureSample() {
    ATRACE_CALL();

    if (mDescriptors.empty()) {
        return;
    }

    // Snapshot the descriptors and compute the union of all sampling areas.
    std::vector<RegionSamplingThread::Descriptor> descriptors;
    Region sampleRegion;
    for (const auto& [listener, descriptor] : mDescriptors) {
        sampleRegion.orSelf(descriptor.area);
        descriptors.emplace_back(descriptor);
    }

    const Rect sampledArea = sampleRegion.bounds();

    sp<const DisplayDevice> device = mFlinger.getDefaultDisplayDevice();
    DisplayRenderArea renderArea(device, sampledArea, sampledArea.getWidth(),
                                 sampledArea.getHeight(), ui::Dataspace::V0_SRGB,
                                 ui::Transform::ROT_0);

    // Listeners whose sampling area intersected at least one captured layer;
    // only these receive results below.
    std::unordered_set<sp<IRegionSamplingListener>, SpHash<IRegionSamplingListener>> listeners;

    auto traverseLayers = [&](const LayerVector::Visitor& visitor) {
        bool stopLayerFound = false;
        auto filterVisitor = [&](Layer* layer) {
            // We don't want to capture any layers beyond the stop layer
            if (stopLayerFound) return;

            // Likewise if we just found a stop layer, set the flag and abort
            for (const auto& [area, stopLayer, listener] : descriptors) {
                if (layer == stopLayer.promote().get()) {
                    stopLayerFound = true;
                    return;
                }
            }

            // Compute the layer's position on the screen
            const Rect bounds = Rect(layer->getBounds());
            const ui::Transform transform = layer->getTransform();
            constexpr bool roundOutwards = true;
            Rect transformed = transform.transform(bounds, roundOutwards);

            // If this layer doesn't intersect with the larger sampledArea, skip capturing it
            Rect ignore;
            if (!transformed.intersect(sampledArea, &ignore)) return;

            // If the layer doesn't intersect a sampling area, skip capturing it
            bool intersectsAnyArea = false;
            for (const auto& [area, stopLayer, listener] : descriptors) {
                if (transformed.intersect(area, &ignore)) {
                    intersectsAnyArea = true;
                    listeners.insert(listener);
                }
            }
            if (!intersectsAnyArea) return;

            ALOGV("Traversing [%s] [%d, %d, %d, %d]", layer->getName().string(), bounds.left,
                  bounds.top, bounds.right, bounds.bottom);
            visitor(layer);
        };
        mFlinger.traverseLayersInDisplay(device, filterVisitor);
    };

    const uint32_t usage = GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_HW_RENDER;
    sp<GraphicBuffer> buffer =
            new GraphicBuffer(sampledArea.getWidth(), sampledArea.getHeight(),
                              PIXEL_FORMAT_RGBA_8888, 1, usage, "RegionSamplingThread");

    // When calling into SF, we post a message into the SF message queue (so the
    // screen capture runs on the main thread). This message blocks until the
    // screenshot is actually captured, but before the capture occurs, the main
    // thread may perform a normal refresh cycle. At the end of this cycle, it
    // can request another sample (because layers changed), which triggers a
    // call into sampleNow. When sampleNow attempts to grab the mutex, we can
    // deadlock.
    //
    // To avoid this, we drop the mutex while we call into SF.
    mMutex.unlock();
    mFlinger.captureScreenCommon(renderArea, traverseLayers, buffer, false);
    mMutex.lock();

    // Only report to listeners whose area actually intersected captured
    // content during the traversal above.
    std::vector<Descriptor> activeDescriptors;
    for (const auto& descriptor : descriptors) {
        if (listeners.count(descriptor.listener) != 0) {
            activeDescriptors.emplace_back(descriptor);
        }
    }

    ALOGV("Sampling %zu descriptors", activeDescriptors.size());
    std::vector<float> lumas = sampleBuffer(buffer, sampledArea.leftTop(), activeDescriptors);

    if (lumas.size() != activeDescriptors.size()) {
        ALOGW("collected %zu median luma values for %zu descriptors", lumas.size(),
              activeDescriptors.size());
        return;
    }

    for (size_t d = 0; d < activeDescriptors.size(); ++d) {
        activeDescriptors[d].listener->onSampleCollected(lumas[d]);
    }
    ATRACE_INT(lumaSamplingStepTag, static_cast<int>(samplingStep::noWorkNeeded));
}
417
// Worker-thread entry point. Holds mMutex for the life of the loop, capturing
// a sample whenever doSample() sets mSampleRequested and exiting when the
// destructor clears mRunning. The wait releases mMutex while blocked, which
// is what lets doSample() and the destructor make progress.
void RegionSamplingThread::threadMain() {
    std::lock_guard lock(mMutex);
    while (mRunning) {
        if (mSampleRequested) {
            mSampleRequested = false;
            captureSample();
        }
        mCondition.wait(mMutex,
                        [this]() REQUIRES(mMutex) { return mSampleRequested || !mRunning; });
    }
}
429
430} // namespace android