path: root/services/audioflinger/FastThread.cpp
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "FastThread"
//#define LOG_NDEBUG 0

#define ATRACE_TAG ATRACE_TAG_AUDIO

#include "Configuration.h"
#include <linux/futex.h>
#include <sys/syscall.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include "FastThread.h"
#include "FastThreadDumpState.h"

#define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
#define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
#define MIN_WARMUP_CYCLES          2    // minimum number of consecutive in-range loop cycles
                                        // to wait for warmup
#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup

namespace android {

FastThread::FastThread() : Thread(false /*canCallJava*/),
    // re-initialized to &sInitial by subclass constructor
    mPrevious(NULL), mCurrent(NULL),
    /* mOldTs({0, 0}), */
    mOldTsValid(false),
    mSleepNs(-1),
    mPeriodNs(0),
    mUnderrunNs(0),
    mOverrunNs(0),
    mForceNs(0),
    mWarmupNsMin(0),
    mWarmupNsMax(LONG_MAX),
    // re-initialized to &mDummySubclassDumpState by subclass constructor
    mDummyDumpState(NULL),
    mDumpState(NULL),
    mIgnoreNextOverrun(true),
#ifdef FAST_THREAD_STATISTICS
    // mOldLoad
    mOldLoadValid(false),
    mBounds(0),
    mFull(false),
    // mTcu
#endif
    mColdGen(0),
    mIsWarm(false),
    /* mMeasuredWarmupTs({0, 0}), */
    mWarmupCycles(0),
    mWarmupConsecutiveInRangeCycles(0),
    // mDummyLogWriter
    mLogWriter(&mDummyLogWriter),
    mTimestampStatus(INVALID_OPERATION),

    mCommand(FastThreadState::INITIAL),
#if 0
    frameCount(0),
#endif
    mAttemptedWrite(false)
{
    mOldTs.tv_sec = 0;
    mOldTs.tv_nsec = 0;
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
}

FastThread::~FastThread()
{
}

bool FastThread::threadLoop()
{
    for (;;) {

        // either nanosleep, sched_yield, or busy wait
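        // mSleepNs > 0 requests a bounded nanosleep, mSleepNs == 0 requests a sched_yield,
        // and mSleepNs < 0 skips sleeping entirely so the next cycle starts immediately.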
        if (mSleepNs >= 0) {
            if (mSleepNs > 0) {
                ALOG_ASSERT(mSleepNs < 1000000000);
                const struct timespec req = {0, mSleepNs};
                nanosleep(&req, NULL);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        mSleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastThreadState *next = poll();
        if (next == NULL) {
            // continue to use the default initial state until a real state is available
            // FIXME &sInitial not available, should save address earlier
            //ALOG_ASSERT(mCurrent == &sInitial && previous == &sInitial);
            next = mCurrent;
        }

        mCommand = next->mCommand;
        if (next != mCurrent) {

            // As soon as we learn of a new dump area, start using it
            mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
            mLogWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &mDummyLogWriter;
            setLog(mLogWriter);

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(mCurrent->mCommand & FastThreadState::IDLE)) {
                if (mCommand & FastThreadState::IDLE) {
                    onIdle();
                    mOldTsValid = false;
#ifdef FAST_THREAD_STATISTICS
                    mOldLoadValid = false;
#endif
                    mIgnoreNextOverrun = true;
                }
                mPrevious = mCurrent;
            }
            mCurrent = next;
        }
#if !LOG_NDEBUG
        next = NULL;    // not referenced again
#endif

        mDumpState->mCommand = mCommand;

        // FIXME what does this comment mean?
        // << current, previous, command, dumpState >>

        switch (mCommand) {
        case FastThreadState::INITIAL:
        case FastThreadState::HOT_IDLE:
            mSleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastThreadState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (mCurrent->mColdGen != mColdGen) {
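                // Cold-idle handshake: decrement the futex word; if its previous value was <= 0
                // the wake-up has not arrived yet, so block in FUTEX_WAIT_PRIVATE until another
                // thread (presumably the normal mixer thread) raises the word and wakes us.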
                int32_t *coldFutexAddr = mCurrent->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != NULL);
                int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                }
                int policy = sched_getscheduler(0);
                if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
                    ALOGE("did not receive expected priority boost");
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                mIsWarm = false;
                mMeasuredWarmupTs.tv_sec = 0;
                mMeasuredWarmupTs.tv_nsec = 0;
                mWarmupCycles = 0;
                mWarmupConsecutiveInRangeCycles = 0;
                mSleepNs = -1;
                mColdGen = mCurrent->mColdGen;
#ifdef FAST_THREAD_STATISTICS
                mBounds = 0;
                mFull = false;
#endif
                mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs);
                mTimestampStatus = INVALID_OPERATION;
            } else {
                mSleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastThreadState::EXIT:
            onExit();
            return false;
        default:
            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand));
            break;
        }

        // there is a non-idle state available to us; did the state change?
        if (mCurrent != mPrevious) {
            onStateChange();
#if 1   // FIXME shouldn't need this
            // only process state change once
            mPrevious = mCurrent;
#endif
        }

        // do work using current state here
        mAttemptedWrite = false;
        onWork();

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            //mLogWriter->logTimestamp(newTs);
            if (mOldTsValid) {
                time_t sec = newTs.tv_sec - mOldTs.tv_sec;
                long nsec = newTs.tv_nsec - mOldTs.tv_nsec;
                ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                        "clock_gettime(CLOCK_MONOTONIC) went backwards: was %ld.%09ld but now %ld.%09ld",
                        mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      MIN_WARMUP_CYCLES consecutive in-range write() attempts,
                //          where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!mIsWarm && mAttemptedWrite) {
                    mMeasuredWarmupTs.tv_sec += sec;
                    mMeasuredWarmupTs.tv_nsec += nsec;
                    if (mMeasuredWarmupTs.tv_nsec >= 1000000000) {
                        mMeasuredWarmupTs.tv_sec++;
                        mMeasuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++mWarmupCycles;
                    if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) {
                        ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-9);
                        ++mWarmupConsecutiveInRangeCycles;
                    } else {
                        ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-9);
                        mWarmupConsecutiveInRangeCycles = 0;
                    }
                    if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) ||
                            (mWarmupCycles >= MAX_WARMUP_CYCLES)) {
                        mIsWarm = true;
                        mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs;
                        mDumpState->mWarmupCycles = mWarmupCycles;
                    }
                }
                mSleepNs = -1;
                if (mIsWarm) {
                    if (sec > 0 || nsec > mUnderrunNs) {
                        ATRACE_NAME("underrun");
                        // FIXME only log occasionally
                        ALOGV("underrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        mDumpState->mUnderruns++;
                        mIgnoreNextOverrun = true;
                    } else if (nsec < mOverrunNs) {
                        if (mIgnoreNextOverrun) {
                            mIgnoreNextOverrun = false;
                        } else {
                            // FIXME only log occasionally
                            ALOGV("overrun: time since last cycle %d.%03ld sec",
                                    (int) sec, nsec / 1000000L);
                            mDumpState->mOverruns++;
                        }
                        // This forces a minimum cycle time. It:
                        //  - compensates for an audio HAL with jitter due to sample rate conversion
                        //  - works with a variable buffer depth audio HAL that never pulls at a
                        //    rate less than mOverrunNs per buffer.
                        //  - recovers from overrun immediately after underrun
                        // It doesn't work with a non-blocking audio HAL.
                        mSleepNs = mForceNs - nsec;
                    } else {
                        mIgnoreNextOverrun = false;
                    }
                }
#ifdef FAST_THREAD_STATISTICS
                if (mIsWarm) {
                    // advance the FIFO queue bounds
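                    // mBounds packs two 16-bit indices: the low half is the newest (write)
                    // position, advanced every warm cycle; the high half is the oldest position,
                    // advanced only after the sample buffer has filled (see mFull below).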
                    size_t i = mBounds & (mDumpState->mSamplingN - 1);
                    mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF);
                    if (mFull) {
                        mBounds += 0x10000;
                    } else if (!(mBounds & (mDumpState->mSamplingN - 1))) {
                        mFull = true;
                    }
                    // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
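                    // fold whole seconds into the nanosecond delta only while the sum still fits
                    // in a uint32_t; larger gaps keep just the nanosecond remainder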
                    uint32_t monotonicNs = nsec;
                    if (sec > 0 && sec < 4) {
                        monotonicNs += sec * 1000000000;
                    }
                    // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                    uint32_t loadNs = 0;
                    struct timespec newLoad;
                    rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                    if (rc == 0) {
                        if (mOldLoadValid) {
                            sec = newLoad.tv_sec - mOldLoad.tv_sec;
                            nsec = newLoad.tv_nsec - mOldLoad.tv_nsec;
                            if (nsec < 0) {
                                --sec;
                                nsec += 1000000000;
                            }
                            loadNs = nsec;
                            if (sec > 0 && sec < 4) {
                                loadNs += sec * 1000000000;
                            }
                        } else {
                            // first time through the loop
                            mOldLoadValid = true;
                        }
                        mOldLoad = newLoad;
                    }
#ifdef CPU_FREQUENCY_STATISTICS
                    // get the absolute value of CPU clock frequency in kHz
                    int cpuNum = sched_getcpu();
                    uint32_t kHz = mTcu.getCpukHz(cpuNum);
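                    // pack the CPU number into the low 4 bits, presumably so the dump code can
                    // attribute each frequency sample to the core it was read from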
                    kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                    // save values in FIFO queues for dumpsys
                    // these stores #1, #2, #3 are not atomic with respect to each other,
                    // or with respect to store #4 below
                    mDumpState->mMonotonicNs[i] = monotonicNs;
                    mDumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                    mDumpState->mCpukHz[i] = kHz;
#endif
                    // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                    // the newest open & oldest closed halves are atomic with respect to each other
                    mDumpState->mBounds = mBounds;
                    ATRACE_INT("cycle_ms", monotonicNs / 1000000);
                    ATRACE_INT("load_us", loadNs / 1000);
                }
#endif
            } else {
                // first time through the loop
                mOldTsValid = true;
                mSleepNs = mPeriodNs;
                mIgnoreNextOverrun = true;
            }
            mOldTs = newTs;
        } else {
            // monotonic clock is broken
            mOldTsValid = false;
            mSleepNs = mPeriodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}

}   // namespace android