/*
 * Copyright 2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <android-base/thread_annotations.h>
#include <utils/Timers.h>

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <set>
#include <string>
#include <tuple>

#include "InputEventTimeline.h"

namespace android::inputdispatcher {

static constexpr size_t NUM_BINS = 20;
static constexpr size_t NUM_INPUT_EVENT_TYPES = 6;

enum class LatencyStageIndex : size_t {
    EVENT_TO_READ = 0,
    READ_TO_DELIVER = 1,
    DELIVER_TO_CONSUME = 2,
    CONSUME_TO_FINISH = 3,
    CONSUME_TO_GPU_COMPLETE = 4,
    GPU_COMPLETE_TO_PRESENT = 5,
    END_TO_END = 6,
    SIZE = 7, // must be last
};

// Let's create a full timeline here:
//     eventTime
//     readTime
//     <---- after this point, the data becomes per-connection
//     deliveryTime // time at which the event was sent to the receiver
//     consumeTime  // time at which the receiver read the event
//     finishTime   // time at which the dispatcher reads the response from the receiver that
//                     the event was processed
//     GraphicsTimeline::GPU_COMPLETED_TIME
//     GraphicsTimeline::PRESENT_TIME
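// As an illustrative sketch (assuming each latency stage is the difference between the
// corresponding timestamps above), the stages in LatencyStageIndex map to:
//     EVENT_TO_READ            ~ readTime - eventTime
//     READ_TO_DELIVER          ~ deliveryTime - readTime
//     DELIVER_TO_CONSUME       ~ consumeTime - deliveryTime
//     CONSUME_TO_FINISH        ~ finishTime - consumeTime
//     CONSUME_TO_GPU_COMPLETE  ~ GPU_COMPLETED_TIME - consumeTime
//     GPU_COMPLETE_TO_PRESENT  ~ PRESENT_TIME - GPU_COMPLETED_TIME
//     END_TO_END               ~ PRESENT_TIME - eventTime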

/**
 * Keeps per-stage latency histograms for the provided input events.
 */
class LatencyAggregatorWithHistograms final : public InputEventTimelineProcessor {
public:
    /**
     * Record a complete event timeline
     */
    void processTimeline(const InputEventTimeline& timeline) override;

    void pushLatencyStatistics() override;

    std::string dump(const char* prefix) const override;

private:
    // ---------- Slow event handling ----------
    void processSlowEvent(const InputEventTimeline& timeline);
    // Time of the last reported slow event, used to rate-limit slow event reporting
    nsecs_t mLastSlowEventTime = 0;
    // How many slow events have been skipped due to rate limiting
    size_t mNumSkippedSlowEvents = 0;
    // How many events have been received since the last time we reported a slow event
    size_t mNumEventsSinceLastSlowEventReport = 0;

    // ---------- Statistics handling ----------
    /**
     * Data structure to gather time samples into NUM_BINS buckets
     */
    class Histogram {
    public:
        Histogram(const std::array<int, NUM_BINS - 1>& binSizes) : mBinSizes(binSizes) {
            mBinCounts.fill(0);
        }

        // Increments the count of the bin that the new sample falls into
        void addSample(int64_t sample) {
            size_t binIndex = getSampleBinIndex(sample);
            mBinCounts[binIndex]++;
        }

        const std::array<int32_t, NUM_BINS>& getBinCounts() const { return mBinCounts; }

    private:
        // Reference to an array of bin boundaries. Bin i+1 holds samples such that
        // mBinSizes[i] <= sample < mBinSizes[i+1]; bin 0 holds samples below mBinSizes[0],
        // and the last bin holds samples at or above the last boundary.
        const std::array<int, NUM_BINS - 1>& mBinSizes;
        // The number of samples that currently live in each bin
        std::array<int32_t, NUM_BINS> mBinCounts;

        size_t getSampleBinIndex(int64_t sample) const {
            auto it = std::upper_bound(mBinSizes.begin(), mBinSizes.end(), sample);
            return std::distance(mBinSizes.begin(), it);
        }
    };
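    // A minimal usage sketch of Histogram (boundary values here are hypothetical):
    //   std::array<int, NUM_BINS - 1> boundaries = {10, 20, 30, /* ... */};
    //   Histogram h(boundaries);
    //   h.addSample(5);     // 5 < 10, so bin 0 is incremented
    //   h.addSample(25);    // 20 <= 25 < 30, so bin 2 is incremented
    //   h.addSample(5000);  // at or above the last boundary, so bin NUM_BINS - 1 is incremented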

    void processStatistics(const InputEventTimeline& timeline);

    // Identifier for an input event. If two input events have the same identifier, we want to
    // use the same histograms to count their latency samples (e.g. two MOTION_ACTION_DOWN
    // events from the same device with the same usage sources are counted together).
    using InputEventLatencyIdentifier =
            std::tuple<uint16_t /*vendorId*/, uint16_t /*productId*/,
                       const std::set<InputDeviceUsageSource> /*sources*/,
                       InputEventActionType /*inputEventActionType*/>;

    // Maps an input event identifier to an array of 7 histograms, one for each latency stage.
    // It is cleared after an atom push.
    std::map<InputEventLatencyIdentifier, std::array<Histogram, 7>> mHistograms;

    void addSampleToHistogram(const InputEventLatencyIdentifier& identifier,
                              LatencyStageIndex latencyStageIndex, nsecs_t time);

    // Stores all possible arrays of bin sizes. The order in the array does not matter, as long
    // as binSizesMappings points to the right index.
    static constexpr std::array<std::array<int, NUM_BINS - 1>, 6> allBinSizes = {
            {{10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100},
             {1, 2, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32},
             {15, 30, 45, 60, 75, 90, 105, 120, 135, 150, 165, 180, 195, 210, 225, 240, 255, 270,
              285},
             {40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680,
              720, 760},
             {20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240, 260, 280, 300, 320, 340, 360,
              380},
             {200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600,
              1700, 1800, 1900, 2000}}};

    // Stores indexes in allBinSizes to use with each {LatencyStage, InputEventType} pair.
    // Bin sizes for a certain latencyStage and inputEventType are at:
    // allBinSizes[binSizesMappings[latencyStageIndex][inputEventTypeIndex]]
    // inputEventTypeIndex is the int value of the InputEventActionType enum decreased by 1,
    // since we don't want to record latencies for unknown events;
    // e.g. MOTION_ACTION_DOWN maps to index 0, MOTION_ACTION_MOVE to index 1, and so on.
    // See the worked example below.
    static constexpr std::array<std::array<int8_t, NUM_INPUT_EVENT_TYPES>,
                                static_cast<size_t>(LatencyStageIndex::SIZE)>
            binSizesMappings = {{{0, 0, 0, 0, 0, 0},
                                 {1, 1, 1, 1, 1, 1},
                                 {1, 1, 1, 1, 1, 1},
                                 {2, 2, 2, 2, 2, 2},
                                 {3, 3, 3, 3, 3, 3},
                                 {4, 4, 4, 4, 4, 4},
                                 {5, 5, 5, 5, 5, 5}}};
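    // Worked example (values taken from the tables above): for the CONSUME_TO_FINISH stage
    // (latencyStageIndex 3) and MOTION_ACTION_MOVE (inputEventTypeIndex 1),
    // binSizesMappings[3][1] == 2, so the bin boundaries used are allBinSizes[2] =
    // {15, 30, 45, ..., 285}.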

    // Similar to binSizesMappings, but holds the index of the array of bin ranges to use on the
    // server. The index gets pushed with the atom within the histogram_version field.
    static constexpr std::array<std::array<int8_t, NUM_INPUT_EVENT_TYPES>,
                                static_cast<size_t>(LatencyStageIndex::SIZE)>
            histogramVersions = {{{0, 0, 0, 0, 0, 0},
                                  {1, 1, 1, 1, 1, 1},
                                  {1, 1, 1, 1, 1, 1},
                                  {2, 2, 2, 2, 2, 2},
                                  {3, 3, 3, 3, 3, 3},
                                  {4, 4, 4, 4, 4, 4},
                                  {5, 5, 5, 5, 5, 5}}};
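    // For example, samples for the GPU_COMPLETE_TO_PRESENT stage (index 5) are reported with
    // histogram_version 4 for every input event type.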
};

} // namespace android::inputdispatcher