/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <audio_utils/format.h>
#include <aaudio/AAudio.h>
#include <media/MediaMetricsItem.h>

#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"

#undef ATRACE_TAG
#define ATRACE_TAG ATRACE_TAG_AUDIO
#include <utils/Trace.h>

// We do this after the #includes because if a header uses ALOG,
// it would fail on the reference to mInService.
#undef LOG_TAG
// This file is used in both client and server processes.
// Using a separate tag for each makes the logs easier to follow.
#define LOG_TAG (mInService ? "AudioStreamInternalCapture_Service" \
                            : "AudioStreamInternalCapture_Client")

using android::WrappingBuffer;

using namespace aaudio;

AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
                                                       bool inService)
        : AudioStreamInternal(serviceInterface, inService) {

}

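// Open the base stream, then configure the flowgraph that converts the captured data
// from the device format, channel count, and sample rate to what the app requested.
// If the flowgraph cannot be configured, the stream is released and closed.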
aaudio_result_t AudioStreamInternalCapture::open(const AudioStreamBuilder &builder) {
    aaudio_result_t result = AudioStreamInternal::open(builder);
    if (result == AAUDIO_OK) {
        result = mFlowGraph.configure(getDeviceFormat(),
                                      getDeviceSamplesPerFrame(),
                                      getDeviceSampleRate(),
                                      getFormat(),
                                      getSamplesPerFrame(),
                                      getSampleRate(),
                                      getRequireMonoBlend(),
                                      false /* useVolumeRamps */,
                                      getAudioBalance(),
                                      aaudio::resampler::MultiChannelResampler::Quality::Medium);

        if (result != AAUDIO_OK) {
            safeReleaseClose();
        }
    }
    return result;
}

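// Force the client's read counter up to the server's current write position plus an
// optional margin, adjusting mFramesOffsetFromService so that getFramesRead() does not
// appear to jump backwards.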
void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
    int64_t readCounter = mAudioEndpoint->getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = readCounter - writeCounter;
    mFramesOffsetFromService += offset;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force readCounter to match writeCounter.
    // This is because we cannot change the write counter in the hardware.
    mAudioEndpoint->setDataReadCounter(writeCounter);
}

// Read the data, blocking if needed when timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}

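// Returns the number of frames copied into the app buffer, or a negative AAudio error.
// On return, *wakeTimePtr suggests when the caller should try again.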
// Read as much data as we can without blocking.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                           int64_t currentNanoTime,
                                                           int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from server.

    if (mAudioEndpoint->isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        // Jitter in the DSP can cause late writes to the FIFO.
        // This might be caused by resampling.
        // We want to read the FIFO after the latest possible time
        // that the DSP could have written the data.
        int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint->setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition(0 /*serverMargin*/);
        mNeedCatchUp.acknowledge();
    }

    // If the capture buffer is full beyond capacity then consider it an overrun.
    // For shared streams, the xRunCount is passed up from the service.
    if (mAudioEndpoint->isFreeRunning()
            && mAudioEndpoint->getFullFramesAvailable() > mAudioEndpoint->getBufferCapacityInFrames()) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //      numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up one millisecond from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?

                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                const int64_t nextPosition = mAudioEndpoint->getDataReadCounter() +
                        getDeviceFramesPerBurst();
                wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesProcessed;
}

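// Move captured frames from the endpoint FIFO through the flowgraph, which converts
// them from the device format to the app format, and copy the result into the app buffer.
// Returns the number of app frames written into the buffer.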
aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                  int32_t numFrames) {
    WrappingBuffer wrappingBuffer;
    uint8_t *byteBuffer = (uint8_t *) buffer;
    int32_t framesLeftInByteBuffer = numFrames;

    if (framesLeftInByteBuffer > 0) {
        // Pull data from the flowgraph in case there is residual data.
        const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.pull(
                (void *)byteBuffer,
                framesLeftInByteBuffer);

        const int32_t numBytesActuallyWrittenToByteBuffer =
                framesActuallyWrittenToByteBuffer * getBytesPerFrame();
        byteBuffer += numBytesActuallyWrittenToByteBuffer;
        framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
    }

    mAudioEndpoint->getFullFramesAvailable(&wrappingBuffer);

    // Copy the data out of the wrapping buffer in one or two parts.
    int partIndex = 0;
    int framesReadFromAudioEndpoint = 0;
    while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
        const int32_t totalFramesInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
        int32_t framesAvailableInWrappingBuffer = totalFramesInWrappingBuffer;
        uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];

        // Put data from the wrapping buffer into the flowgraph 8 frames at a time.
        // Continuously pull as much data as possible from the flowgraph into the byte buffer.
        // The return value of mFlowGraph.process is the number of frames actually pulled.
        while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
            const int32_t framesToReadFromWrappingBuffer = std::min(flowgraph::kDefaultBufferSize,
                    framesAvailableInWrappingBuffer);

            const int32_t numBytesToReadFromWrappingBuffer = getBytesPerDeviceFrame() *
                    framesToReadFromWrappingBuffer;

            // If framesActuallyWrittenToByteBuffer < framesLeftInByteBuffer, it is guaranteed
            // that all the data is pulled. If there is no more space in the byteBuffer, the
            // remaining data will be pulled in the following readNowWithConversion().
            const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.process(
                    (void *)currentWrappingBuffer,
                    framesToReadFromWrappingBuffer,
                    (void *)byteBuffer,
                    framesLeftInByteBuffer);

            const int32_t numBytesActuallyWrittenToByteBuffer =
                    framesActuallyWrittenToByteBuffer * getBytesPerFrame();
            byteBuffer += numBytesActuallyWrittenToByteBuffer;
            framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
            currentWrappingBuffer += numBytesToReadFromWrappingBuffer;
            framesAvailableInWrappingBuffer -= framesToReadFromWrappingBuffer;

            //ALOGD("%s() numBytesActuallyWrittenToByteBuffer %d, framesLeftInByteBuffer %d"
            //      "framesAvailableInWrappingBuffer %d, framesReadFromAudioEndpoint %d"
            //      , __func__, numBytesActuallyWrittenToByteBuffer, framesLeftInByteBuffer,
            //      framesAvailableInWrappingBuffer, framesReadFromAudioEndpoint);
        }
        framesReadFromAudioEndpoint += totalFramesInWrappingBuffer -
                framesAvailableInWrappingBuffer;
        partIndex++;
    }

    // Advance the endpoint's read index by the number of frames consumed from the wrapping buffer.
    mAudioEndpoint->advanceReadIndex(framesReadFromAudioEndpoint);

    // Return the number of frames actually delivered to the app's buffer.
    return numFrames - framesLeftInByteBuffer;
}

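// For a capture stream, "frames written" counts frames written by the device side.
// When the clock model is in control the position is extrapolated from the current time;
// otherwise the endpoint's write counter is used.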
int64_t AudioStreamInternalCapture::getFramesWritten() {
    if (mAudioEndpoint) {
        const int64_t framesWrittenHardware = isClockModelInControl()
                ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
                : mAudioEndpoint->getDataWriteCounter();
        // Add service offset and prevent retrograde motion.
        mLastFramesWritten = std::max(mLastFramesWritten,
                                      framesWrittenHardware + mFramesOffsetFromService);
    }
    return mLastFramesWritten;
}

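// For a capture stream, "frames read" counts frames consumed by the client,
// based on the endpoint's read counter plus the offset from the service.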
int64_t AudioStreamInternalCapture::getFramesRead() {
    if (mAudioEndpoint) {
        mLastFramesRead = mAudioEndpoint->getDataReadCounter() + mFramesOffsetFromService;
    }
    return mLastFramesRead;
}

// Read data from the stream and pass it to the callback for processing.
void *AudioStreamInternalCapture::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    if (!isDataCallbackSet()) return nullptr;

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {

        // Read audio data from stream.
        int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

        // This is a BLOCKING READ!
        result = read(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
        if ((result != mCallbackFrames)) {
            ALOGE("callbackLoop: read() returned %d", result);
            if (result >= 0) {
                // Only read some of the frames requested. Must have timed out.
                result = AAUDIO_ERROR_TIMEOUT;
            }
            maybeCallErrorCallback(result);
            break;
        }

        // Call application using the AAudio callback interface.
        callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
            result = systemStopInternal();
            break;
        }
    }

    ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return nullptr;
}