/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "FastThread"
//#define LOG_NDEBUG 0

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include "Configuration.h"
#include <linux/futex.h>
#include <sys/syscall.h>
#include <audio_utils/clock.h>
#include <cutils/atomic.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include "FastThread.h"
#include "FastThreadDumpState.h"
#include <afutils/TypedLogger.h>

#define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
#define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
#define MIN_WARMUP_CYCLES          2    // minimum number of consecutive in-range loop cycles
                                        // to wait for warmup
#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup

namespace android {

FastThread::FastThread(const char *cycleMs, const char *loadUs) : Thread(false /*canCallJava*/),
    // re-initialized to &sInitial by subclass constructor
    mPrevious(nullptr), mCurrent(nullptr),
    /* mOldTs({0, 0}), */
    mOldTsValid(false),
    mSleepNs(-1),
    mPeriodNs(0),
    mUnderrunNs(0),
    mOverrunNs(0),
    mForceNs(0),
    mWarmupNsMin(0),
    mWarmupNsMax(LONG_MAX),
    // re-initialized to &mDummySubclassDumpState by subclass constructor
    mDummyDumpState(nullptr),
    mDumpState(nullptr),
    mIgnoreNextOverrun(true),
#ifdef FAST_THREAD_STATISTICS
    // mOldLoad
    mOldLoadValid(false),
    mBounds(0),
    mFull(false),
    // mTcu
#endif
    mColdGen(0),
    mIsWarm(false),
    /* mMeasuredWarmupTs({0, 0}), */
    mWarmupCycles(0),
    mWarmupConsecutiveInRangeCycles(0),
    mTimestampStatus(INVALID_OPERATION),

    mCommand(FastThreadState::INITIAL),
#if 0
    frameCount(0),
#endif
    mAttemptedWrite(false)
    // mCycleMs(cycleMs)
    // mLoadUs(loadUs)
{
    mOldTs.tv_sec = 0;
    mOldTs.tv_nsec = 0;
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
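    // Copy the trace counter names supplied by the subclass into member buffers so they
    // remain valid for the lifetime of the thread; they are used later by ATRACE_INT().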
    strlcpy(mCycleMs, cycleMs, sizeof(mCycleMs));
    strlcpy(mLoadUs, loadUs, sizeof(mLoadUs));
}

bool FastThread::threadLoop()
{
    // LOGT now works even if tlNBLogWriter is nullptr, but we're considering changing that,
    // so this initialization permits a future change to remove the check for nullptr.
    tlNBLogWriter = mDummyNBLogWriter.get();
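    // Each iteration of the loop below: sleep or yield if requested, poll the state queue for
    // a new command, handle idle/exit commands, run onWork() for a non-idle command, and then
    // measure the cycle time to drive warmup tracking, underrun/overrun detection, and the
    // optional per-cycle statistics.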
    for (;;) {

        // either nanosleep, sched_yield, or busy wait
        if (mSleepNs >= 0) {
            if (mSleepNs > 0) {
                ALOG_ASSERT(mSleepNs < 1000000000);
                const struct timespec req = {0, mSleepNs};
                nanosleep(&req, nullptr);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        mSleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastThreadState *next = poll();
        if (next == nullptr) {
            // continue to use the default initial state until a real state is available
            // FIXME &sInitial not available, should save address earlier
            //ALOG_ASSERT(mCurrent == &sInitial && previous == &sInitial);
            next = mCurrent;
        }

        mCommand = next->mCommand;
        if (next != mCurrent) {

119 // As soon as possible of learning of a new dump area, start using it
            mDumpState = next->mDumpState != nullptr ? next->mDumpState : mDummyDumpState;
            tlNBLogWriter = next->mNBLogWriter != nullptr ?
                    next->mNBLogWriter : mDummyNBLogWriter.get();
            setNBLogWriter(tlNBLogWriter); // This is used for debugging only

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(mCurrent->mCommand & FastThreadState::IDLE)) {
                if (mCommand & FastThreadState::IDLE) {
                    onIdle();
                    mOldTsValid = false;
#ifdef FAST_THREAD_STATISTICS
                    mOldLoadValid = false;
#endif
                    mIgnoreNextOverrun = true;
                }
                mPrevious = mCurrent;
            }
            mCurrent = next;
        }
#if !LOG_NDEBUG
        next = nullptr;    // not referenced again
#endif

        mDumpState->mCommand = mCommand;

        // FIXME what does this comment mean?
        // << current, previous, command, dumpState >>

        switch (mCommand) {
        case FastThreadState::INITIAL:
        case FastThreadState::HOT_IDLE:
            mSleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastThreadState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (mCurrent->mColdGen != mColdGen) {
                int32_t *coldFutexAddr = mCurrent->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != nullptr);
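                // Decrement the futex word; if its previous value was already <= 0, the
                // controlling thread has not yet signalled us, so block until the word no
                // longer equals (old - 1) and we are woken with FUTEX_WAKE.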
                const int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, nullptr);
                }
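                // sched_getscheduler() reports the policy with SCHED_RESET_ON_FORK ORed in when
                // that flag is set, so mask it off before comparing against SCHED_FIFO/SCHED_RR.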
                const int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
                    ALOGE("did not receive expected priority boost on time");
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                mIsWarm = false;
                mMeasuredWarmupTs.tv_sec = 0;
                mMeasuredWarmupTs.tv_nsec = 0;
                mWarmupCycles = 0;
                mWarmupConsecutiveInRangeCycles = 0;
                mSleepNs = -1;
                mColdGen = mCurrent->mColdGen;
#ifdef FAST_THREAD_STATISTICS
                mBounds = 0;
                mFull = false;
#endif
                mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs);
                mTimestampStatus = INVALID_OPERATION;
            } else {
                mSleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastThreadState::EXIT:
            onExit();
            return false;
        default:
            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand));
            break;
        }

        // there is a non-idle state available to us; did the state change?
        if (mCurrent != mPrevious) {
            onStateChange();
#if 1   // FIXME shouldn't need this
            // only process state change once
            mPrevious = mCurrent;
#endif
        }

        // do work using current state here
        mAttemptedWrite = false;
        onWork();

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            if (mOldTsValid) {
                time_t sec = newTs.tv_sec - mOldTs.tv_sec;
                long nsec = newTs.tv_nsec - mOldTs.tv_nsec;
                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                        "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
                        mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      MIN_WARMUP_CYCLES consecutive in-range write() attempts,
                //          where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!mIsWarm && mAttemptedWrite) {
                    mMeasuredWarmupTs.tv_sec += sec;
                    mMeasuredWarmupTs.tv_nsec += nsec;
                    if (mMeasuredWarmupTs.tv_nsec >= 1000000000) {
                        mMeasuredWarmupTs.tv_sec++;
                        mMeasuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++mWarmupCycles;
                    if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) {
                        ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-6);
                        ++mWarmupConsecutiveInRangeCycles;
                    } else {
                        ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-6);
                        mWarmupConsecutiveInRangeCycles = 0;
                    }
                    if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) ||
                            (mWarmupCycles >= MAX_WARMUP_CYCLES)) {
                        mIsWarm = true;
                        mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs;
                        mDumpState->mWarmupCycles = mWarmupCycles;
                        const double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1e3) +
                                (mMeasuredWarmupTs.tv_nsec * 1e-6);
                        LOG_WARMUP_TIME(measuredWarmupMs);
                    }
                }
                mSleepNs = -1;
                if (mIsWarm) {
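                    // Classify the measured cycle time: longer than mUnderrunNs counts as an
                    // underrun, shorter than mOverrunNs as an overrun; both thresholds are
                    // expected to be set by the subclass relative to the nominal period.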
                    if (sec > 0 || nsec > mUnderrunNs) {
                        ATRACE_NAME("underrun");   // NOLINT(misc-const-correctness)
                        // FIXME only log occasionally
                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        mDumpState->mUnderruns++;
                        LOG_UNDERRUN(audio_utils_ns_from_timespec(&newTs));
                        mIgnoreNextOverrun = true;
                    } else if (nsec < mOverrunNs) {
                        if (mIgnoreNextOverrun) {
                            mIgnoreNextOverrun = false;
                        } else {
                            // FIXME only log occasionally
                            ALOGV("overrun: time since last cycle %d.%03ld sec",
                                    (int) sec, nsec / 1000000L);
                            mDumpState->mOverruns++;
                            LOG_OVERRUN(audio_utils_ns_from_timespec(&newTs));
                        }
                        // This forces a minimum cycle time. It:
                        //  - compensates for an audio HAL with jitter due to sample rate conversion
                        //  - works with a variable buffer depth audio HAL that never pulls at an
                        //    interval shorter than mOverrunNs per buffer.
                        //  - recovers from overrun immediately after underrun
                        // It doesn't work with a non-blocking audio HAL.
                        mSleepNs = mForceNs - nsec;
                    } else {
                        mIgnoreNextOverrun = false;
                    }
                }
#ifdef FAST_THREAD_STATISTICS
                if (mIsWarm) {
                    // advance the FIFO queue bounds
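                    // mBounds packs two 16-bit indices: the low half is the "newest open" slot
                    // (advanced here on every cycle) and the high half is the "oldest closed"
                    // slot (advanced once the buffer is full), so a reader of the dump state
                    // can tell which window of samples is valid.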
                    const size_t i = mBounds & (mDumpState->mSamplingN - 1);
                    mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF);
                    if (mFull) {
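                        // advance the oldest-closed (high) half; __builtin_add_overflow wraps
                        // on overflow without undefined behavior, matching the intent of the
                        // commented-out addition below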
                        //mBounds += 0x10000;
                        __builtin_add_overflow(mBounds, 0x10000, &mBounds);
                    } else if (!(mBounds & (mDumpState->mSamplingN - 1))) {
                        mFull = true;
                    }
                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                    uint32_t monotonicNs = nsec;
                    if (sec > 0 && sec < 4) {
                        monotonicNs += sec * 1000000000U; // unsigned to prevent signed overflow.
                    }
                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                    uint32_t loadNs = 0;
                    struct timespec newLoad;
                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                    if (rc == 0) {
                        if (mOldLoadValid) {
                            sec = newLoad.tv_sec - mOldLoad.tv_sec;
                            nsec = newLoad.tv_nsec - mOldLoad.tv_nsec;
                            if (nsec < 0) {
                                --sec;
                                nsec += 1000000000;
                            }
                            loadNs = nsec;
                            if (sec > 0 && sec < 4) {
                                loadNs += sec * 1000000000U; // unsigned to prevent signed overflow.
                            }
                        } else {
                            // first time through the loop
                            mOldLoadValid = true;
                        }
                        mOldLoad = newLoad;
                    }
#ifdef CPU_FREQUENCY_STATISTICS
                    // get the absolute value of CPU clock frequency in kHz
                    int cpuNum = sched_getcpu();
                    uint32_t kHz = mTcu.getCpukHz(cpuNum);
                    kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                    // save values in FIFO queues for dumpsys
                    // these stores #1, #2, #3 are not atomic with respect to each other,
                    // or with respect to store #4 below
                    mDumpState->mMonotonicNs[i] = monotonicNs;
                    LOG_WORK_TIME(monotonicNs);
                    mDumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                    mDumpState->mCpukHz[i] = kHz;
#endif
                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                    // the newest open & oldest closed halves are atomic with respect to each other
                    mDumpState->mBounds = mBounds;
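                    // publish per-cycle wall-clock time (ms) and CPU time (us) as trace
                    // counters, using the counter names passed to the constructor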
                    ATRACE_INT(mCycleMs, monotonicNs / 1000000);
                    ATRACE_INT(mLoadUs, loadNs / 1000);
                }
#endif
            } else {
                // first time through the loop
                mOldTsValid = true;
                mSleepNs = mPeriodNs;
                mIgnoreNextOverrun = true;
            }
            mOldTs = newTs;
        } else {
            // monotonic clock is broken
            mOldTsValid = false;
            mSleepNs = mPeriodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}

}   // namespace android