Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2014 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #define LOG_TAG "FastThread" |
| 18 | //#define LOG_NDEBUG 0 |
| 19 | |
| 20 | #define ATRACE_TAG ATRACE_TAG_AUDIO |
| 21 | |
| 22 | #include "Configuration.h" |
Elliott Hughes | e348c5b | 2014-05-21 18:47:50 -0700 | [diff] [blame] | 23 | #include <linux/futex.h> |
| 24 | #include <sys/syscall.h> |
Eric Tan | fefe316 | 2018-09-07 10:09:11 -0700 | [diff] [blame] | 25 | #include <audio_utils/clock.h> |
Mathias Agopian | 05d19b0 | 2017-02-28 16:28:19 -0800 | [diff] [blame] | 26 | #include <cutils/atomic.h> |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 27 | #include <utils/Log.h> |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 28 | #include <utils/Trace.h> |
| 29 | #include "FastThread.h" |
Glenn Kasten | 045ee7e | 2015-02-17 16:22:04 -0800 | [diff] [blame] | 30 | #include "FastThreadDumpState.h" |
Andy Hung | b776e37 | 2023-05-24 11:53:47 -0700 | [diff] [blame] | 31 | #include <afutils/TypedLogger.h> |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 32 | |
| 33 | #define FAST_DEFAULT_NS 999999999L // ~1 sec: default time to sleep |
| 34 | #define FAST_HOT_IDLE_NS 1000000L // 1 ms: time to sleep while hot idling |
Glenn Kasten | d2123e6 | 2015-01-29 10:02:44 -0800 | [diff] [blame] | 35 | #define MIN_WARMUP_CYCLES 2 // minimum number of consecutive in-range loop cycles |
| 36 | // to wait for warmup |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 37 | #define MAX_WARMUP_CYCLES 10 // maximum number of loop cycles to wait for warmup |
| 38 | |
| 39 | namespace android { |
| 40 | |
// Constructor: starts in the INITIAL state with all tuning parameters zeroed.
// Subclass constructors are expected to re-point mPrevious/mCurrent at their
// own initial state object, and mDummyDumpState at their dummy dump state.
// cycleMs/loadUs are systrace counter names; they are copied into fixed-size
// member buffers so the caller's strings need not outlive this object.
FastThread::FastThread(const char *cycleMs, const char *loadUs) : Thread(false /*canCallJava*/),
    // re-initialized to &sInitial by subclass constructor
    mPrevious(nullptr), mCurrent(nullptr),
    /* mOldTs({0, 0}), */
    mOldTsValid(false),
    mSleepNs(-1),          // -1 means busy-wait (no sleep) on the first cycle
    mPeriodNs(0),
    mUnderrunNs(0),
    mOverrunNs(0),
    mForceNs(0),
    mWarmupNsMin(0),
    mWarmupNsMax(LONG_MAX),
    // re-initialized to &mDummySubclassDumpState by subclass constructor
    mDummyDumpState(nullptr),
    mDumpState(nullptr),
    mIgnoreNextOverrun(true),
#ifdef FAST_THREAD_STATISTICS
    // mOldLoad
    mOldLoadValid(false),
    mBounds(0),
    mFull(false),
    // mTcu
#endif
    mColdGen(0),
    mIsWarm(false),
    /* mMeasuredWarmupTs({0, 0}), */
    mWarmupCycles(0),
    mWarmupConsecutiveInRangeCycles(0),
    mTimestampStatus(INVALID_OPERATION),

    mCommand(FastThreadState::INITIAL),
#if 0
    frameCount(0),
#endif
    mAttemptedWrite(false)
    // mCycleMs(cycleMs)
    // mLoadUs(loadUs)
{
    // struct members can't be brace-initialized in the init list above (pre-C++20 style),
    // so zero the timespecs here.
    mOldTs.tv_sec = 0;
    mOldTs.tv_nsec = 0;
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
    // Copy the ATRACE counter names into owned storage (truncating if too long).
    strlcpy(mCycleMs, cycleMs, sizeof(mCycleMs));
    strlcpy(mLoadUs, loadUs, sizeof(mLoadUs));
}
| 86 | |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 87 | bool FastThread::threadLoop() |
| 88 | { |
Glenn Kasten | 388d571 | 2017-04-07 14:38:41 -0700 | [diff] [blame] | 89 | // LOGT now works even if tlNBLogWriter is nullptr, but we're considering changing that, |
Glenn Kasten | eef598c | 2017-04-03 14:41:13 -0700 | [diff] [blame] | 90 | // so this initialization permits a future change to remove the check for nullptr. |
Mikhail Naganov | 01d09d9 | 2018-09-18 12:38:57 -0700 | [diff] [blame] | 91 | tlNBLogWriter = mDummyNBLogWriter.get(); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 92 | for (;;) { |
| 93 | |
| 94 | // either nanosleep, sched_yield, or busy wait |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 95 | if (mSleepNs >= 0) { |
| 96 | if (mSleepNs > 0) { |
| 97 | ALOG_ASSERT(mSleepNs < 1000000000); |
| 98 | const struct timespec req = {0, mSleepNs}; |
Andy Hung | f0859f3 | 2023-05-25 16:28:04 -0700 | [diff] [blame] | 99 | nanosleep(&req, nullptr); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 100 | } else { |
| 101 | sched_yield(); |
| 102 | } |
| 103 | } |
| 104 | // default to long sleep for next cycle |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 105 | mSleepNs = FAST_DEFAULT_NS; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 106 | |
| 107 | // poll for state change |
| 108 | const FastThreadState *next = poll(); |
Andy Hung | f0859f3 | 2023-05-25 16:28:04 -0700 | [diff] [blame] | 109 | if (next == nullptr) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 110 | // continue to use the default initial state until a real state is available |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 111 | // FIXME &sInitial not available, should save address earlier |
| 112 | //ALOG_ASSERT(mCurrent == &sInitial && previous == &sInitial); |
| 113 | next = mCurrent; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 114 | } |
| 115 | |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 116 | mCommand = next->mCommand; |
| 117 | if (next != mCurrent) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 118 | |
| 119 | // As soon as possible of learning of a new dump area, start using it |
Andy Hung | f0859f3 | 2023-05-25 16:28:04 -0700 | [diff] [blame] | 120 | mDumpState = next->mDumpState != nullptr ? next->mDumpState : mDummyDumpState; |
| 121 | tlNBLogWriter = next->mNBLogWriter != nullptr ? |
Mikhail Naganov | 01d09d9 | 2018-09-18 12:38:57 -0700 | [diff] [blame] | 122 | next->mNBLogWriter : mDummyNBLogWriter.get(); |
Mikhail Naganov | 9b6599e | 2019-07-29 15:23:21 -0700 | [diff] [blame] | 123 | setNBLogWriter(tlNBLogWriter); // This is used for debugging only |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 124 | |
| 125 | // We want to always have a valid reference to the previous (non-idle) state. |
| 126 | // However, the state queue only guarantees access to current and previous states. |
| 127 | // So when there is a transition from a non-idle state into an idle state, we make a |
| 128 | // copy of the last known non-idle state so it is still available on return from idle. |
| 129 | // The possible transitions are: |
| 130 | // non-idle -> non-idle update previous from current in-place |
| 131 | // non-idle -> idle update previous from copy of current |
| 132 | // idle -> idle don't update previous |
| 133 | // idle -> non-idle don't update previous |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 134 | if (!(mCurrent->mCommand & FastThreadState::IDLE)) { |
| 135 | if (mCommand & FastThreadState::IDLE) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 136 | onIdle(); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 137 | mOldTsValid = false; |
Glenn Kasten | 214b406 | 2015-03-02 14:15:47 -0800 | [diff] [blame] | 138 | #ifdef FAST_THREAD_STATISTICS |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 139 | mOldLoadValid = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 140 | #endif |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 141 | mIgnoreNextOverrun = true; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 142 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 143 | mPrevious = mCurrent; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 144 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 145 | mCurrent = next; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 146 | } |
| 147 | #if !LOG_NDEBUG |
Andy Hung | f0859f3 | 2023-05-25 16:28:04 -0700 | [diff] [blame] | 148 | next = nullptr; // not referenced again |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 149 | #endif |
| 150 | |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 151 | mDumpState->mCommand = mCommand; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 152 | |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 153 | // FIXME what does this comment mean? |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 154 | // << current, previous, command, dumpState >> |
| 155 | |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 156 | switch (mCommand) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 157 | case FastThreadState::INITIAL: |
| 158 | case FastThreadState::HOT_IDLE: |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 159 | mSleepNs = FAST_HOT_IDLE_NS; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 160 | continue; |
| 161 | case FastThreadState::COLD_IDLE: |
| 162 | // only perform a cold idle command once |
| 163 | // FIXME consider checking previous state and only perform if previous != COLD_IDLE |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 164 | if (mCurrent->mColdGen != mColdGen) { |
| 165 | int32_t *coldFutexAddr = mCurrent->mColdFutexAddr; |
Andy Hung | f0859f3 | 2023-05-25 16:28:04 -0700 | [diff] [blame] | 166 | ALOG_ASSERT(coldFutexAddr != nullptr); |
| 167 | const int32_t old = android_atomic_dec(coldFutexAddr); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 168 | if (old <= 0) { |
Andy Hung | f0859f3 | 2023-05-25 16:28:04 -0700 | [diff] [blame] | 169 | syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, nullptr); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 170 | } |
Andy Hung | f0859f3 | 2023-05-25 16:28:04 -0700 | [diff] [blame] | 171 | const int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 172 | if (!(policy == SCHED_FIFO || policy == SCHED_RR)) { |
Glenn Kasten | 1bfe09a | 2017-02-21 13:05:56 -0800 | [diff] [blame] | 173 | ALOGE("did not receive expected priority boost on time"); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 174 | } |
| 175 | // This may be overly conservative; there could be times that the normal mixer |
| 176 | // requests such a brief cold idle that it doesn't require resetting this flag. |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 177 | mIsWarm = false; |
| 178 | mMeasuredWarmupTs.tv_sec = 0; |
| 179 | mMeasuredWarmupTs.tv_nsec = 0; |
| 180 | mWarmupCycles = 0; |
| 181 | mWarmupConsecutiveInRangeCycles = 0; |
| 182 | mSleepNs = -1; |
| 183 | mColdGen = mCurrent->mColdGen; |
Glenn Kasten | 214b406 | 2015-03-02 14:15:47 -0800 | [diff] [blame] | 184 | #ifdef FAST_THREAD_STATISTICS |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 185 | mBounds = 0; |
| 186 | mFull = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 187 | #endif |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 188 | mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs); |
| 189 | mTimestampStatus = INVALID_OPERATION; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 190 | } else { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 191 | mSleepNs = FAST_HOT_IDLE_NS; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 192 | } |
| 193 | continue; |
| 194 | case FastThreadState::EXIT: |
| 195 | onExit(); |
| 196 | return false; |
| 197 | default: |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 198 | LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand)); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 199 | break; |
| 200 | } |
| 201 | |
| 202 | // there is a non-idle state available to us; did the state change? |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 203 | if (mCurrent != mPrevious) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 204 | onStateChange(); |
| 205 | #if 1 // FIXME shouldn't need this |
| 206 | // only process state change once |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 207 | mPrevious = mCurrent; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 208 | #endif |
| 209 | } |
| 210 | |
| 211 | // do work using current state here |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 212 | mAttemptedWrite = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 213 | onWork(); |
| 214 | |
| 215 | // To be exactly periodic, compute the next sleep time based on current time. |
| 216 | // This code doesn't have long-term stability when the sink is non-blocking. |
| 217 | // FIXME To avoid drift, use the local audio clock or watch the sink's fill status. |
| 218 | struct timespec newTs; |
| 219 | int rc = clock_gettime(CLOCK_MONOTONIC, &newTs); |
| 220 | if (rc == 0) { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 221 | if (mOldTsValid) { |
| 222 | time_t sec = newTs.tv_sec - mOldTs.tv_sec; |
| 223 | long nsec = newTs.tv_nsec - mOldTs.tv_nsec; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 224 | ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0), |
| 225 | "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld", |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 226 | mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 227 | if (nsec < 0) { |
| 228 | --sec; |
| 229 | nsec += 1000000000; |
| 230 | } |
| 231 | // To avoid an initial underrun on fast tracks after exiting standby, |
| 232 | // do not start pulling data from tracks and mixing until warmup is complete. |
| 233 | // Warmup is considered complete after the earlier of: |
Glenn Kasten | d2123e6 | 2015-01-29 10:02:44 -0800 | [diff] [blame] | 234 | // MIN_WARMUP_CYCLES consecutive in-range write() attempts, |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 235 | // where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 236 | // MAX_WARMUP_CYCLES write() attempts. |
| 237 | // This is overly conservative, but to get better accuracy requires a new HAL API. |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 238 | if (!mIsWarm && mAttemptedWrite) { |
| 239 | mMeasuredWarmupTs.tv_sec += sec; |
| 240 | mMeasuredWarmupTs.tv_nsec += nsec; |
| 241 | if (mMeasuredWarmupTs.tv_nsec >= 1000000000) { |
| 242 | mMeasuredWarmupTs.tv_sec++; |
| 243 | mMeasuredWarmupTs.tv_nsec -= 1000000000; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 244 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 245 | ++mWarmupCycles; |
| 246 | if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) { |
| 247 | ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-9); |
| 248 | ++mWarmupConsecutiveInRangeCycles; |
Glenn Kasten | d2123e6 | 2015-01-29 10:02:44 -0800 | [diff] [blame] | 249 | } else { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 250 | ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-9); |
| 251 | mWarmupConsecutiveInRangeCycles = 0; |
Glenn Kasten | d2123e6 | 2015-01-29 10:02:44 -0800 | [diff] [blame] | 252 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 253 | if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) || |
| 254 | (mWarmupCycles >= MAX_WARMUP_CYCLES)) { |
| 255 | mIsWarm = true; |
| 256 | mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs; |
| 257 | mDumpState->mWarmupCycles = mWarmupCycles; |
Eric Tan | fefe316 | 2018-09-07 10:09:11 -0700 | [diff] [blame] | 258 | const double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1e3) + |
| 259 | (mMeasuredWarmupTs.tv_nsec * 1e-6); |
| 260 | LOG_WARMUP_TIME(measuredWarmupMs); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 261 | } |
| 262 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 263 | mSleepNs = -1; |
| 264 | if (mIsWarm) { |
| 265 | if (sec > 0 || nsec > mUnderrunNs) { |
Andy Hung | f0859f3 | 2023-05-25 16:28:04 -0700 | [diff] [blame] | 266 | ATRACE_NAME("underrun"); // NOLINT(misc-const-correctness) |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 267 | // FIXME only log occasionally |
| 268 | ALOGV("underrun: time since last cycle %d.%03ld sec", |
| 269 | (int) sec, nsec / 1000000L); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 270 | mDumpState->mUnderruns++; |
Eric Tan | fefe316 | 2018-09-07 10:09:11 -0700 | [diff] [blame] | 271 | LOG_UNDERRUN(audio_utils_ns_from_timespec(&newTs)); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 272 | mIgnoreNextOverrun = true; |
| 273 | } else if (nsec < mOverrunNs) { |
| 274 | if (mIgnoreNextOverrun) { |
| 275 | mIgnoreNextOverrun = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 276 | } else { |
| 277 | // FIXME only log occasionally |
| 278 | ALOGV("overrun: time since last cycle %d.%03ld sec", |
| 279 | (int) sec, nsec / 1000000L); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 280 | mDumpState->mOverruns++; |
Eric Tan | fefe316 | 2018-09-07 10:09:11 -0700 | [diff] [blame] | 281 | LOG_OVERRUN(audio_utils_ns_from_timespec(&newTs)); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 282 | } |
| 283 | // This forces a minimum cycle time. It: |
| 284 | // - compensates for an audio HAL with jitter due to sample rate conversion |
| 285 | // - works with a variable buffer depth audio HAL that never pulls at a |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 286 | // rate < than mOverrunNs per buffer. |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 287 | // - recovers from overrun immediately after underrun |
| 288 | // It doesn't work with a non-blocking audio HAL. |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 289 | mSleepNs = mForceNs - nsec; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 290 | } else { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 291 | mIgnoreNextOverrun = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 292 | } |
| 293 | } |
Glenn Kasten | 214b406 | 2015-03-02 14:15:47 -0800 | [diff] [blame] | 294 | #ifdef FAST_THREAD_STATISTICS |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 295 | if (mIsWarm) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 296 | // advance the FIFO queue bounds |
Andy Hung | f0859f3 | 2023-05-25 16:28:04 -0700 | [diff] [blame] | 297 | const size_t i = mBounds & (mDumpState->mSamplingN - 1); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 298 | mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF); |
| 299 | if (mFull) { |
Ivan Lozano | 7765714 | 2018-01-02 21:01:16 +0000 | [diff] [blame] | 300 | //mBounds += 0x10000; |
| 301 | __builtin_add_overflow(mBounds, 0x10000, &mBounds); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 302 | } else if (!(mBounds & (mDumpState->mSamplingN - 1))) { |
| 303 | mFull = true; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 304 | } |
| 305 | // compute the delta value of clock_gettime(CLOCK_MONOTONIC) |
| 306 | uint32_t monotonicNs = nsec; |
| 307 | if (sec > 0 && sec < 4) { |
wendy lin | cd4c724 | 2019-06-12 10:37:11 +0800 | [diff] [blame] | 308 | monotonicNs += sec * 1000000000U; // unsigned to prevent signed overflow. |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 309 | } |
| 310 | // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID) |
| 311 | uint32_t loadNs = 0; |
| 312 | struct timespec newLoad; |
| 313 | rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad); |
| 314 | if (rc == 0) { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 315 | if (mOldLoadValid) { |
| 316 | sec = newLoad.tv_sec - mOldLoad.tv_sec; |
| 317 | nsec = newLoad.tv_nsec - mOldLoad.tv_nsec; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 318 | if (nsec < 0) { |
| 319 | --sec; |
| 320 | nsec += 1000000000; |
| 321 | } |
| 322 | loadNs = nsec; |
| 323 | if (sec > 0 && sec < 4) { |
wendy lin | cd4c724 | 2019-06-12 10:37:11 +0800 | [diff] [blame] | 324 | loadNs += sec * 1000000000U; // unsigned to prevent signed overflow. |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 325 | } |
| 326 | } else { |
| 327 | // first time through the loop |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 328 | mOldLoadValid = true; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 329 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 330 | mOldLoad = newLoad; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 331 | } |
| 332 | #ifdef CPU_FREQUENCY_STATISTICS |
| 333 | // get the absolute value of CPU clock frequency in kHz |
| 334 | int cpuNum = sched_getcpu(); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 335 | uint32_t kHz = mTcu.getCpukHz(cpuNum); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 336 | kHz = (kHz << 4) | (cpuNum & 0xF); |
| 337 | #endif |
| 338 | // save values in FIFO queues for dumpsys |
| 339 | // these stores #1, #2, #3 are not atomic with respect to each other, |
| 340 | // or with respect to store #4 below |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 341 | mDumpState->mMonotonicNs[i] = monotonicNs; |
Eric Tan | cf3d82c | 2018-09-04 15:44:45 -0700 | [diff] [blame] | 342 | LOG_WORK_TIME(monotonicNs); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 343 | mDumpState->mLoadNs[i] = loadNs; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 344 | #ifdef CPU_FREQUENCY_STATISTICS |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 345 | mDumpState->mCpukHz[i] = kHz; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 346 | #endif |
| 347 | // this store #4 is not atomic with respect to stores #1, #2, #3 above, but |
| 348 | // the newest open & oldest closed halves are atomic with respect to each other |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 349 | mDumpState->mBounds = mBounds; |
Glenn Kasten | f9715e4 | 2016-07-13 14:02:03 -0700 | [diff] [blame] | 350 | ATRACE_INT(mCycleMs, monotonicNs / 1000000); |
| 351 | ATRACE_INT(mLoadUs, loadNs / 1000); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 352 | } |
| 353 | #endif |
| 354 | } else { |
| 355 | // first time through the loop |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 356 | mOldTsValid = true; |
| 357 | mSleepNs = mPeriodNs; |
| 358 | mIgnoreNextOverrun = true; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 359 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 360 | mOldTs = newTs; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 361 | } else { |
| 362 | // monotonic clock is broken |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 363 | mOldTsValid = false; |
| 364 | mSleepNs = mPeriodNs; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 365 | } |
| 366 | |
| 367 | } // for (;;) |
| 368 | |
| 369 | // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion |
| 370 | } |
| 371 | |
| 372 | } // namespace android |