Siarhei Vishniakou | a04181f | 2021-03-26 05:56:49 +0000 | [diff] [blame^] | 1 | /* |
| 2 | * Copyright (C) 2021 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #define LOG_TAG "LatencyAggregator" |
| 18 | #include "LatencyAggregator.h" |
| 19 | |
#include <inttypes.h>

#include <charconv>

#include <android-base/stringprintf.h>
#include <input/Input.h>
#include <log/log.h>
#include <server_configurable_flags/get_flags.h>
| 26 | |
| 27 | using android::base::StringPrintf; |
| 28 | using std::chrono_literals::operator""ms; |
| 29 | |
// Category (=namespace) name for the input settings that are applied at boot time
static constexpr const char* INPUT_NATIVE_BOOT = "input_native_boot";
// Feature flag name for the threshold of end-to-end touch latency that would trigger
// SlowEventReported atom to be pushed
static constexpr const char* SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS =
        "slow_event_min_reporting_latency_millis";
// Feature flag name for the minimum delay before reporting a slow event after having just reported
// a slow event. This helps limit the amount of data sent to the server
static constexpr const char* SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS =
        "slow_event_min_reporting_interval_millis";

// If an event has end-to-end latency > 200 ms, it will get reported as a slow event.
static constexpr std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY{200};
// If we receive two slow events less than 1 min (= 60000 ms) apart, we will only report 1 of them.
static constexpr std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL{60000};
| 45 | |
| 46 | static std::chrono::milliseconds getSlowEventMinReportingLatency() { |
| 47 | std::string millis = server_configurable_flags:: |
| 48 | GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS, |
| 49 | std::to_string( |
| 50 | DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY.count())); |
| 51 | return std::chrono::milliseconds(std::stoi(millis)); |
| 52 | } |
| 53 | |
| 54 | static std::chrono::milliseconds getSlowEventMinReportingInterval() { |
| 55 | std::string millis = server_configurable_flags:: |
| 56 | GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS, |
| 57 | std::to_string( |
| 58 | DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL.count())); |
| 59 | return std::chrono::milliseconds(std::stoi(millis)); |
| 60 | } |
| 61 | |
| 62 | namespace android::inputdispatcher { |
| 63 | |
// Records one latency sample (in nanoseconds) into this sketch.
// Currently a no-op placeholder: samples are dropped until the real sketch lands.
void Sketch::addValue(nsecs_t value) {
    // TODO(b/167947340): replace with real sketch
}
| 67 | |
// Serializes the sketch for inclusion in the INPUT_EVENT_LATENCY_SKETCH atom.
// Placeholder: only the first 4 bytes ("TODO") of the marker string are sent, not real data.
android::util::BytesField Sketch::serialize() {
    return android::util::BytesField("TODO(b/167947340): use real sketch data", 4 /*length*/);
}
| 71 | |
// Clears the accumulated samples; invoked after each statsd pull so the next pull
// reports a fresh window. Currently a no-op placeholder.
void Sketch::reset() {
    // TODO(b/167947340): reset the sketch
}
| 75 | |
// Registers this instance as the statsd puller for the INPUT_EVENT_LATENCY_SKETCH atom.
// `this` is passed as the callback cookie so the static pullAtomCallback can recover the
// instance; the registration is cleared in the destructor so the cookie cannot dangle.
LatencyAggregator::LatencyAggregator() {
    AStatsManager_setPullAtomCallback(android::util::INPUT_EVENT_LATENCY_SKETCH, nullptr,
                                      LatencyAggregator::pullAtomCallback, this);
}
| 80 | |
// Unregisters the pull callback so statsd stops invoking it with a cookie that is about
// to be destroyed.
LatencyAggregator::~LatencyAggregator() {
    AStatsManager_clearPullAtomCallback(android::util::INPUT_EVENT_LATENCY_SKETCH);
}
| 84 | |
| 85 | AStatsManager_PullAtomCallbackReturn LatencyAggregator::pullAtomCallback(int32_t atomTag, |
| 86 | AStatsEventList* data, |
| 87 | void* cookie) { |
| 88 | LatencyAggregator* pAggregator = reinterpret_cast<LatencyAggregator*>(cookie); |
| 89 | if (pAggregator == nullptr) { |
| 90 | LOG_ALWAYS_FATAL("pAggregator is null!"); |
| 91 | } |
| 92 | return pAggregator->pullData(data); |
| 93 | } |
| 94 | |
// Entry point for a finished input event timeline: feeds it into the aggregated sketches
// (reported via the pulled atom) and into the slow-event detector (pushed atom).
void LatencyAggregator::processTimeline(const InputEventTimeline& timeline) {
    processStatistics(timeline);
    processSlowEvent(timeline);
}
| 99 | |
| 100 | void LatencyAggregator::processStatistics(const InputEventTimeline& timeline) { |
| 101 | std::array<Sketch, SketchIndex::SIZE>& sketches = |
| 102 | timeline.isDown ? mDownSketches : mMoveSketches; |
| 103 | |
| 104 | // Process common ones first |
| 105 | const nsecs_t eventToRead = timeline.readTime - timeline.eventTime; |
| 106 | |
| 107 | sketches[SketchIndex::EVENT_TO_READ].addValue(eventToRead); |
| 108 | |
| 109 | // Now process per-connection ones |
| 110 | for (const auto& [connectionToken, connectionTimeline] : timeline.connectionTimelines) { |
| 111 | if (!connectionTimeline.isComplete()) { |
| 112 | continue; |
| 113 | } |
| 114 | const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime; |
| 115 | const nsecs_t deliverToConsume = |
| 116 | connectionTimeline.consumeTime - connectionTimeline.deliveryTime; |
| 117 | const nsecs_t consumeToFinish = |
| 118 | connectionTimeline.finishTime - connectionTimeline.consumeTime; |
| 119 | const nsecs_t gpuCompletedTime = |
| 120 | connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME]; |
| 121 | const nsecs_t presentTime = |
| 122 | connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME]; |
| 123 | const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime; |
| 124 | const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime; |
| 125 | const nsecs_t endToEnd = presentTime - timeline.eventTime; |
| 126 | |
| 127 | sketches[SketchIndex::READ_TO_DELIVER].addValue(readToDeliver); |
| 128 | sketches[SketchIndex::DELIVER_TO_CONSUME].addValue(deliverToConsume); |
| 129 | sketches[SketchIndex::CONSUME_TO_FINISH].addValue(consumeToFinish); |
| 130 | sketches[SketchIndex::CONSUME_TO_GPU_COMPLETE].addValue(consumeToGpuComplete); |
| 131 | sketches[SketchIndex::GPU_COMPLETE_TO_PRESENT].addValue(gpuCompleteToPresent); |
| 132 | sketches[SketchIndex::END_TO_END].addValue(endToEnd); |
| 133 | } |
| 134 | } |
| 135 | |
// Pull handler for the INPUT_EVENT_LATENCY_SKETCH atom. Serializes every sketch in the
// order the atom's fields are declared (all DOWN sketches first, then all MOVE sketches),
// then resets the sketches so the next pull reports a fresh aggregation window.
AStatsManager_PullAtomCallbackReturn LatencyAggregator::pullData(AStatsEventList* data) {
    android::util::addAStatsEvent(data, android::util::INPUT_EVENT_LATENCY_SKETCH,
                                  // DOWN sketches
                                  mDownSketches[SketchIndex::EVENT_TO_READ].serialize(),
                                  mDownSketches[SketchIndex::READ_TO_DELIVER].serialize(),
                                  mDownSketches[SketchIndex::DELIVER_TO_CONSUME].serialize(),
                                  mDownSketches[SketchIndex::CONSUME_TO_FINISH].serialize(),
                                  mDownSketches[SketchIndex::CONSUME_TO_GPU_COMPLETE].serialize(),
                                  mDownSketches[SketchIndex::GPU_COMPLETE_TO_PRESENT].serialize(),
                                  mDownSketches[SketchIndex::END_TO_END].serialize(),
                                  // MOVE sketches
                                  mMoveSketches[SketchIndex::EVENT_TO_READ].serialize(),
                                  mMoveSketches[SketchIndex::READ_TO_DELIVER].serialize(),
                                  mMoveSketches[SketchIndex::DELIVER_TO_CONSUME].serialize(),
                                  mMoveSketches[SketchIndex::CONSUME_TO_FINISH].serialize(),
                                  mMoveSketches[SketchIndex::CONSUME_TO_GPU_COMPLETE].serialize(),
                                  mMoveSketches[SketchIndex::GPU_COMPLETE_TO_PRESENT].serialize(),
                                  mMoveSketches[SketchIndex::END_TO_END].serialize());

    // Drop the samples that were just reported so each pull covers a distinct window.
    for (size_t i = 0; i < SketchIndex::SIZE; i++) {
        mDownSketches[i].reset();
        mMoveSketches[i].reset();
    }
    return AStatsManager_PULL_SUCCESS;
}
| 161 | |
// Detects "slow" events — end-to-end (eventTime -> presentTime) latency at or above the
// configured threshold — and pushes a SLOW_INPUT_EVENT_REPORTED atom for them. Reports are
// rate-limited: if a slow event occurs too soon after the last reported one, it is counted
// in mNumSkippedSlowEvents instead of being reported.
void LatencyAggregator::processSlowEvent(const InputEventTimeline& timeline) {
    // Function-local statics: the flag values are read once on the first invocation and
    // cached for the lifetime of the process; later flag changes are not picked up.
    static const std::chrono::duration sSlowEventThreshold = getSlowEventMinReportingLatency();
    static const std::chrono::duration sSlowEventReportingInterval =
            getSlowEventMinReportingInterval();
    for (const auto& [token, connectionTimeline] : timeline.connectionTimelines) {
        if (!connectionTimeline.isComplete()) {
            continue;
        }
        // Only complete connection timelines count toward the "events since last report" total.
        mNumEventsSinceLastSlowEventReport++;
        const nsecs_t presentTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME];
        const std::chrono::nanoseconds endToEndLatency =
                std::chrono::nanoseconds(presentTime - timeline.eventTime);
        if (endToEndLatency < sSlowEventThreshold) {
            continue;
        }
        // This is a slow event. Before we report it, check if we are reporting too often
        const std::chrono::duration elapsedSinceLastReport =
                std::chrono::nanoseconds(timeline.eventTime - mLastSlowEventTime);
        if (elapsedSinceLastReport < sSlowEventReportingInterval) {
            mNumSkippedSlowEvents++;
            continue;
        }

        // Per-stage latency breakdown; each stage is the delta between adjacent timestamps.
        const nsecs_t eventToRead = timeline.readTime - timeline.eventTime;
        const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime;
        const nsecs_t deliverToConsume =
                connectionTimeline.consumeTime - connectionTimeline.deliveryTime;
        const nsecs_t consumeToFinish =
                connectionTimeline.finishTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompletedTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME];
        const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime;

        // All stage durations are converted from nanoseconds to microseconds for the atom.
        android::util::stats_write(android::util::SLOW_INPUT_EVENT_REPORTED, timeline.isDown,
                                   static_cast<int32_t>(ns2us(eventToRead)),
                                   static_cast<int32_t>(ns2us(readToDeliver)),
                                   static_cast<int32_t>(ns2us(deliverToConsume)),
                                   static_cast<int32_t>(ns2us(consumeToFinish)),
                                   static_cast<int32_t>(ns2us(consumeToGpuComplete)),
                                   static_cast<int32_t>(ns2us(gpuCompleteToPresent)),
                                   static_cast<int32_t>(ns2us(endToEndLatency.count())),
                                   static_cast<int32_t>(mNumEventsSinceLastSlowEventReport),
                                   static_cast<int32_t>(mNumSkippedSlowEvents));
        mNumEventsSinceLastSlowEventReport = 0;
        mNumSkippedSlowEvents = 0;
        // NOTE(review): the rate-limit check above measures elapsed time from
        // timeline.eventTime, but readTime is stored here — slightly asymmetric;
        // confirm the mix of eventTime/readTime is intentional.
        mLastSlowEventTime = timeline.readTime;
    }
}
| 212 | |
| 213 | std::string LatencyAggregator::dump(const char* prefix) { |
| 214 | return StringPrintf("%sLatencyAggregator:", prefix) + |
| 215 | StringPrintf("\n%s mLastSlowEventTime=%" PRId64, prefix, mLastSlowEventTime) + |
| 216 | StringPrintf("\n%s mNumEventsSinceLastSlowEventReport = %zu", prefix, |
| 217 | mNumEventsSinceLastSlowEventReport) + |
| 218 | StringPrintf("\n%s mNumSkippedSlowEvents = %zu", prefix, mNumSkippedSlowEvents); |
| 219 | } |
| 220 | |
| 221 | } // namespace android::inputdispatcher |