/*
 * Copyright 2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "LatencyAggregatorWithHistograms"
#include "../InputDeviceMetricsSource.h"
#include "InputDispatcher.h"

#include <inttypes.h>
#include <log/log_event_list.h>
#include <statslog.h>

#include <android-base/logging.h>
#include <android-base/stringprintf.h>
#include <input/Input.h>
#include <log/log.h>
#include <server_configurable_flags/get_flags.h>

using android::base::StringPrintf;
using std::chrono_literals::operator""ms;

namespace {

// Convert the provided nanoseconds into hundreds of microseconds.
// Use hundreds of microseconds (as opposed to microseconds) to preserve space.
static inline int64_t ns2hus(nsecs_t nanos) {
    return ns2us(nanos) / 100;
}

// Category (=namespace) name for the input settings that are applied at boot time
static const char* INPUT_NATIVE_BOOT = "input_native_boot";
// Feature flag name for the threshold of end-to-end touch latency that will trigger the
// SlowEventReported atom to be pushed
static const char* SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS =
        "slow_event_min_reporting_latency_millis";
// Feature flag name for the minimum delay before reporting a slow event after having just reported
// a slow event. This helps limit the amount of data sent to the server
static const char* SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS =
        "slow_event_min_reporting_interval_millis";

// If an event has end-to-end latency > 200 ms, it will get reported as a slow event.
std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY = 200ms;
// If we receive two slow events less than 1 min apart, we will only report 1 of them.
std::chrono::milliseconds DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL = 60000ms;

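// Returns the end-to-end latency threshold above which an event is reported as slow.
// Reads the server-configurable flag and falls back to the compiled-in default.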
static std::chrono::milliseconds getSlowEventMinReportingLatency() {
    std::string millis = server_configurable_flags::
            GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_LATENCY_MILLIS,
                                      std::to_string(
                                              DEFAULT_SLOW_EVENT_MIN_REPORTING_LATENCY.count()));
    return std::chrono::milliseconds(std::stoi(millis));
}

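// Returns the minimum time that must elapse between two slow-event reports.
// Reads the server-configurable flag and falls back to the compiled-in default.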
static std::chrono::milliseconds getSlowEventMinReportingInterval() {
    std::string millis = server_configurable_flags::
            GetServerConfigurableFlag(INPUT_NATIVE_BOOT, SLOW_EVENT_MIN_REPORTING_INTERVAL_MILLIS,
                                      std::to_string(
                                              DEFAULT_SLOW_EVENT_MIN_REPORTING_INTERVAL.count()));
    return std::chrono::milliseconds(std::stoi(millis));
}

} // namespace

namespace android::inputdispatcher {

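// Maps an internal LatencyStageIndex to the LatencyStage enum value used by the
// InputEventLatencyReported atom.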
int32_t LatencyStageIndexToAtomEnum(LatencyStageIndex latencyStageIndex) {
    switch (latencyStageIndex) {
        case LatencyStageIndex::EVENT_TO_READ:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__EVENT_TO_READ;
        case LatencyStageIndex::READ_TO_DELIVER:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__READ_TO_DELIVER;
        case LatencyStageIndex::DELIVER_TO_CONSUME:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__DELIVER_TO_CONSUME;
        case LatencyStageIndex::CONSUME_TO_FINISH:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__CONSUME_TO_FINISH;
        case LatencyStageIndex::CONSUME_TO_GPU_COMPLETE:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__CONSUME_TO_GPU_COMPLETE;
        case LatencyStageIndex::GPU_COMPLETE_TO_PRESENT:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__GPU_COMPLETE_TO_PRESENT;
        case LatencyStageIndex::END_TO_END:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__END_TO_END;
        default:
            return util::INPUT_EVENT_LATENCY_REPORTED__LATENCY_STAGE__UNKNOWN_LATENCY_STAGE;
    }
}

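// Maps an InputEventActionType to the InputEventType enum value used by the
// InputEventLatencyReported atom.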
int32_t InputEventTypeEnumToAtomEnum(InputEventActionType inputEventActionType) {
    switch (inputEventActionType) {
        case InputEventActionType::UNKNOWN_INPUT_EVENT:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__UNKNOWN_INPUT_EVENT;
        case InputEventActionType::MOTION_ACTION_DOWN:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_DOWN;
        case InputEventActionType::MOTION_ACTION_MOVE:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_MOVE;
        case InputEventActionType::MOTION_ACTION_UP:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_UP;
        case InputEventActionType::MOTION_ACTION_HOVER_MOVE:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_HOVER_MOVE;
        case InputEventActionType::MOTION_ACTION_SCROLL:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__MOTION_ACTION_SCROLL;
        case InputEventActionType::KEY:
            return util::INPUT_EVENT_LATENCY_REPORTED__INPUT_EVENT_TYPE__KEY;
    }
}

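// Feeds a completed input event timeline into both the histogram statistics and the
// slow-event reporting paths.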
void LatencyAggregatorWithHistograms::processTimeline(const InputEventTimeline& timeline) {
    processStatistics(timeline);
    processSlowEvent(timeline);
}

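// Records a latency sample for the given identifier and latency stage. The sample is converted
// to hundreds of microseconds before being added to the corresponding histogram; non-positive
// samples and unknown identifiers are ignored.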
void LatencyAggregatorWithHistograms::addSampleToHistogram(
        const InputEventLatencyIdentifier& identifier, LatencyStageIndex latencyStageIndex,
        nsecs_t latency) {
    // Only record positive values for the statistics
    if (latency > 0) {
        auto it = mHistograms.find(identifier);
        if (it != mHistograms.end()) {
            it->second[static_cast<size_t>(latencyStageIndex)].addSample(ns2hus(latency));
        }
    }
}

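// Accumulates per-stage latency samples for the event into the histograms keyed by
// (vendor, product, sources, action), creating the set of histograms on first use.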
void LatencyAggregatorWithHistograms::processStatistics(const InputEventTimeline& timeline) {
    // Only gather data for Down, Move and Up motion events and Key events
    if (!(timeline.inputEventActionType == InputEventActionType::MOTION_ACTION_DOWN ||
          timeline.inputEventActionType == InputEventActionType::MOTION_ACTION_MOVE ||
          timeline.inputEventActionType == InputEventActionType::MOTION_ACTION_UP ||
          timeline.inputEventActionType == InputEventActionType::KEY))
        return;

    // Don't collect data for unidentified devices. This can occur for the first few input
    // events produced after an input device is connected
    if (timeline.vendorId == 0xFFFF && timeline.productId == 0xFFFF) return;

    InputEventLatencyIdentifier identifier = {timeline.vendorId, timeline.productId,
                                              timeline.sources, timeline.inputEventActionType};
    // Check if there's a value in the mHistograms map associated with the identifier.
    // If not, add an array of 7 empty histograms as the entry
    if (mHistograms.count(identifier) == 0) {
        if (static_cast<int32_t>(timeline.inputEventActionType) - 1 < 0) {
            LOG(FATAL) << "Action index is smaller than 0. Action type: "
                       << ftl::enum_string(timeline.inputEventActionType);
            return;
        }
        size_t actionIndex =
                static_cast<size_t>(static_cast<int32_t>(timeline.inputEventActionType) - 1);
        if (actionIndex >= NUM_INPUT_EVENT_TYPES) {
            LOG(FATAL) << "Action index is greater than or equal to the number of input event "
                          "types. Action Type: "
                       << ftl::enum_string(timeline.inputEventActionType)
                       << "; Action Type Index: " << actionIndex;
            return;
        }

        std::array<Histogram, 7> histograms =
                {Histogram(allBinSizes[binSizesMappings[0][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[1][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[2][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[3][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[4][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[5][actionIndex]]),
                 Histogram(allBinSizes[binSizesMappings[6][actionIndex]])};
        mHistograms.insert({identifier, histograms});
    }

    // Process common ones first
    const nsecs_t eventToRead = timeline.readTime - timeline.eventTime;
    addSampleToHistogram(identifier, LatencyStageIndex::EVENT_TO_READ, eventToRead);

    // Now process per-connection ones
    for (const auto& [connectionToken, connectionTimeline] : timeline.connectionTimelines) {
        if (!connectionTimeline.isComplete()) {
            continue;
        }
        const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime;
        const nsecs_t deliverToConsume =
                connectionTimeline.consumeTime - connectionTimeline.deliveryTime;
        const nsecs_t consumeToFinish =
                connectionTimeline.finishTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompletedTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME];
        const nsecs_t presentTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME];
        const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime;
        const nsecs_t endToEnd = presentTime - timeline.eventTime;

        addSampleToHistogram(identifier, LatencyStageIndex::READ_TO_DELIVER, readToDeliver);
        addSampleToHistogram(identifier, LatencyStageIndex::DELIVER_TO_CONSUME, deliverToConsume);
        addSampleToHistogram(identifier, LatencyStageIndex::CONSUME_TO_FINISH, consumeToFinish);
        addSampleToHistogram(identifier, LatencyStageIndex::CONSUME_TO_GPU_COMPLETE,
                             consumeToGpuComplete);
        addSampleToHistogram(identifier, LatencyStageIndex::GPU_COMPLETE_TO_PRESENT,
                             gpuCompleteToPresent);
        addSampleToHistogram(identifier, LatencyStageIndex::END_TO_END, endToEnd);
    }
}

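// Writes one InputEventLatencyReported atom per (identifier, latency stage) pair accumulated so
// far, then clears all histograms.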
void LatencyAggregatorWithHistograms::pushLatencyStatistics() {
    for (auto& [id, histograms] : mHistograms) {
        auto [vendorId, productId, sources, action] = id;
        for (size_t latencyStageIndex = static_cast<size_t>(LatencyStageIndex::EVENT_TO_READ);
             latencyStageIndex < static_cast<size_t>(LatencyStageIndex::SIZE);
             ++latencyStageIndex) {
            // Convert sources set to vector for atom logging:
            std::vector<int32_t> sourcesVector = {};
            for (auto& elem : sources) {
                sourcesVector.push_back(static_cast<int32_t>(elem));
            }

            // Convert histogram bin counts array to vector for atom logging:
            std::array arr = histograms[latencyStageIndex].getBinCounts();
            std::vector<int32_t> binCountsVector(arr.begin(), arr.end());

            if (static_cast<int32_t>(action) - 1 < 0) {
                ALOGW("Action index is smaller than 0. Action type: %s",
                      ftl::enum_string(action).c_str());
                continue;
            }
            size_t actionIndex = static_cast<size_t>(static_cast<int32_t>(action) - 1);
            if (actionIndex >= NUM_INPUT_EVENT_TYPES) {
                ALOGW("Action index is greater than or equal to the number of input event types. "
                      "Action Type: %s; Action Type Index: %zu",
                      ftl::enum_string(action).c_str(), actionIndex);
                continue;
            }

            stats_write(android::util::INPUT_EVENT_LATENCY_REPORTED, vendorId, productId,
                        sourcesVector, InputEventTypeEnumToAtomEnum(action),
                        LatencyStageIndexToAtomEnum(
                                static_cast<LatencyStageIndex>(latencyStageIndex)),
                        histogramVersions[latencyStageIndex][actionIndex], binCountsVector);
        }
    }
    mHistograms.clear();
}

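// Reports a SlowInputEventReported atom when an event's end-to-end latency exceeds the configured
// threshold, rate-limited by the minimum reporting interval.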
// TODO (b/270049345): For now, we just copied the code from LatencyAggregator to populate the old
// atom, but eventually we should migrate this to use the new SlowEventReported atom
void LatencyAggregatorWithHistograms::processSlowEvent(const InputEventTimeline& timeline) {
    static const std::chrono::duration sSlowEventThreshold = getSlowEventMinReportingLatency();
    static const std::chrono::duration sSlowEventReportingInterval =
            getSlowEventMinReportingInterval();
    for (const auto& [token, connectionTimeline] : timeline.connectionTimelines) {
        if (!connectionTimeline.isComplete()) {
            continue;
        }
        mNumEventsSinceLastSlowEventReport++;
        const nsecs_t presentTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::PRESENT_TIME];
        const std::chrono::nanoseconds endToEndLatency =
                std::chrono::nanoseconds(presentTime - timeline.eventTime);
        if (endToEndLatency < sSlowEventThreshold) {
            continue;
        }
        // This is a slow event. Before we report it, check if we are reporting too often
        const std::chrono::duration elapsedSinceLastReport =
                std::chrono::nanoseconds(timeline.eventTime - mLastSlowEventTime);
        if (elapsedSinceLastReport < sSlowEventReportingInterval) {
            mNumSkippedSlowEvents++;
            continue;
        }

        const nsecs_t eventToRead = timeline.readTime - timeline.eventTime;
        const nsecs_t readToDeliver = connectionTimeline.deliveryTime - timeline.readTime;
        const nsecs_t deliverToConsume =
                connectionTimeline.consumeTime - connectionTimeline.deliveryTime;
        const nsecs_t consumeToFinish =
                connectionTimeline.finishTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompletedTime =
                connectionTimeline.graphicsTimeline[GraphicsTimeline::GPU_COMPLETED_TIME];
        const nsecs_t consumeToGpuComplete = gpuCompletedTime - connectionTimeline.consumeTime;
        const nsecs_t gpuCompleteToPresent = presentTime - gpuCompletedTime;

        android::util::stats_write(android::util::SLOW_INPUT_EVENT_REPORTED,
                                   timeline.inputEventActionType ==
                                           InputEventActionType::MOTION_ACTION_DOWN,
                                   static_cast<int32_t>(ns2us(eventToRead)),
                                   static_cast<int32_t>(ns2us(readToDeliver)),
                                   static_cast<int32_t>(ns2us(deliverToConsume)),
                                   static_cast<int32_t>(ns2us(consumeToFinish)),
                                   static_cast<int32_t>(ns2us(consumeToGpuComplete)),
                                   static_cast<int32_t>(ns2us(gpuCompleteToPresent)),
                                   static_cast<int32_t>(ns2us(endToEndLatency.count())),
                                   static_cast<int32_t>(mNumEventsSinceLastSlowEventReport),
                                   static_cast<int32_t>(mNumSkippedSlowEvents));
        mNumEventsSinceLastSlowEventReport = 0;
        mNumSkippedSlowEvents = 0;
        mLastSlowEventTime = timeline.readTime;
    }
}

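// Produces a human-readable dump of the accumulated histograms and the slow-event reporting state.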
std::string LatencyAggregatorWithHistograms::dump(const char* prefix) const {
    std::string statisticsStr = StringPrintf("%s Histograms:\n", prefix);
    for (const auto& [id, histograms] : mHistograms) {
        auto [vendorId, productId, sources, action] = id;

        std::string identifierStr =
                StringPrintf("%s Identifier: vendor %d, product %d, sources: {", prefix, vendorId,
                             productId);
        bool firstSource = true;
        for (const auto& source : sources) {
            if (!firstSource) {
                identifierStr += ", ";
            }
            identifierStr += StringPrintf("%d", static_cast<int32_t>(source));
            firstSource = false;
        }
        identifierStr += StringPrintf("}, action: %d\n", static_cast<int32_t>(action));

        std::string histogramsStr;
        for (size_t stageIndex = 0; stageIndex < static_cast<size_t>(LatencyStageIndex::SIZE);
             stageIndex++) {
            const auto& histogram = histograms[stageIndex];
            const std::array<int, NUM_BINS>& binCounts = histogram.getBinCounts();

            histogramsStr += StringPrintf("%s %zu: ", prefix, stageIndex);
            histogramsStr += StringPrintf("%d", binCounts[0]);
            for (size_t bin = 1; bin < NUM_BINS; bin++) {
                histogramsStr += StringPrintf(", %d", binCounts[bin]);
            }
            histogramsStr += StringPrintf("\n");
        }
        statisticsStr += identifierStr + histogramsStr;
    }

    return StringPrintf("%sLatencyAggregatorWithHistograms:\n", prefix) + statisticsStr +
            StringPrintf("%s mLastSlowEventTime=%" PRId64 "\n", prefix, mLastSlowEventTime) +
            StringPrintf("%s mNumEventsSinceLastSlowEventReport = %zu\n", prefix,
                         mNumEventsSinceLastSlowEventReport) +
            StringPrintf("%s mNumSkippedSlowEvents = %zu\n", prefix, mNumSkippedSlowEvents);
}

} // namespace android::inputdispatcher