AAudio: Add sample rate conversion to MMAP
This CL adds sample rate conversion to the MMAP path in AAudio.
AAudio no longer needs to use AudioFlinger's sample rate converter for
MMAP streams. This effectively reduces latency from 100ms to 30ms on
speaker, 3.5mm, and USB outputs when MMAP is enabled and sample rate
conversion is needed.
A sample rate converter is added to the AAudio flowgraph. Since the
flowgraph's input and output sample rates can differ, it must handle a
variable number of frames: source data is pushed through the flowgraph
8 frames at a time, and the flowgraph output pulls as much data as is
available.
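
As a rough sketch of the conversion loop (a hypothetical illustration;
kDefaultBufferSize is the 8-frame flowgraph buffer, the other names
are made up):

  // Illustrative only -- mirrors readNowWithConversion() /
  // writeNowWithConversion() in this change.
  while (framesAvailable > 0 && framesLeft > 0) {
      // Push up to 8 source frames in, then drain whatever the
      // resampler has produced so far.
      const int32_t framesIn = std::min(flowgraph::kDefaultBufferSize,
                                        framesAvailable);
      const int32_t framesOut = mFlowGraph.process(source, framesIn,
                                                   destination, framesLeft);
      source += framesIn * bytesPerSourceFrame;
      destination += framesOut * bytesPerSinkFrame;
      framesAvailable -= framesIn;
      framesLeft -= framesOut;
  }
  // Residual frames are drained with mFlowGraph.pull() before
  // process() is called again.
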
Burst sizes, capacity, latency, frames read, and frames written are all
scaled to reflect the sample rate of the app.
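For example (hypothetical numbers), a 96-frame device burst at 48000 Hz
with a 44100 Hz app stream yields an app-facing burst of
ceil(96 * 44100 / 48000) = 89 frames, matching the rounded-up division
in configureDataInformation().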
A flowgraph was also added to MMAP input (capture) streams.
Bug: 219533889
Test: OboeTester Test Input/Output with a variety of sample rates and formats
Test: atest AAudioTests
Change-Id: I9b6a2694921ea558fb91a231f956f638d3342837
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.cpp b/media/libaaudio/src/client/AAudioFlowGraph.cpp
index 5444565..69be050 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.cpp
+++ b/media/libaaudio/src/client/AAudioFlowGraph.cpp
@@ -39,18 +39,21 @@
aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
int32_t sourceChannelCount,
+ int32_t sourceSampleRate,
audio_format_t sinkFormat,
int32_t sinkChannelCount,
+ int32_t sinkSampleRate,
bool useMonoBlend,
+ bool useVolumeRamps,
float audioBalance,
- bool isExclusive) {
+ aaudio::resampler::MultiChannelResampler::Quality resamplerQuality) {
FlowGraphPortFloatOutput *lastOutput = nullptr;
- // TODO change back to ALOGD
- ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d, "
- "useMonoBlend = %d, audioBalance = %f, isExclusive %d",
- __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount,
- useMonoBlend, audioBalance, isExclusive);
+ ALOGD("%s() source format = 0x%08x, channels = %d, sample rate = %d, "
+ "sink format = 0x%08x, channels = %d, sample rate = %d, "
+ "useMonoBlend = %d, audioBalance = %f, useVolumeRamps %d",
+ __func__, sourceFormat, sourceChannelCount, sourceSampleRate, sinkFormat,
+ sinkChannelCount, sinkSampleRate, useMonoBlend, audioBalance, useVolumeRamps);
switch (sourceFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
@@ -85,6 +88,15 @@
lastOutput = &mLimiter->output;
}
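+    // Insert a sample rate converter when the source and sink rates differ.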
+ if (sourceSampleRate != sinkSampleRate) {
+ mResampler.reset(aaudio::resampler::MultiChannelResampler::make(sinkChannelCount,
+ sourceSampleRate, sinkSampleRate, resamplerQuality));
+ mRateConverter = std::make_unique<SampleRateConverter>(sinkChannelCount,
+ *mResampler);
+ lastOutput->connect(&mRateConverter->input);
+ lastOutput = &mRateConverter->output;
+ }
+
// Expand the number of channels if required.
if (sourceChannelCount == 1 && sinkChannelCount > 1) {
mChannelConverter = std::make_unique<MonoToMultiConverter>(sinkChannelCount);
@@ -95,8 +107,7 @@
return AAUDIO_ERROR_UNIMPLEMENTED;
}
- // Apply volume ramps for only exclusive streams.
- if (isExclusive) {
+ if (useVolumeRamps) {
// Apply volume ramps to set the left/right audio balance and target volumes.
// The signals will be decoupled, volume ramps will be applied, before the signals are
// combined again.
@@ -137,9 +148,14 @@
return AAUDIO_OK;
}
-void AAudioFlowGraph::process(const void *source, void *destination, int32_t numFrames) {
- mSource->setData(source, numFrames);
- mSink->read(destination, numFrames);
+int32_t AAudioFlowGraph::pull(void *destination, int32_t targetFramesToRead) {
+ return mSink->read(destination, targetFramesToRead);
+}
+
+int32_t AAudioFlowGraph::process(const void *source, int32_t numFramesToWrite, void *destination,
+ int32_t targetFramesToRead) {
+ mSource->setData(source, numFramesToWrite);
+ return mSink->read(destination, targetFramesToRead);
}
/**
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
index 35fef37..e1d517e 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.h
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -30,6 +30,7 @@
#include <flowgraph/MonoToMultiConverter.h>
#include <flowgraph/MultiToManyConverter.h>
#include <flowgraph/RampLinear.h>
+#include <flowgraph/SampleRateConverter.h>
class AAudioFlowGraph {
public:
@@ -38,23 +39,57 @@
*
* @param sourceFormat
* @param sourceChannelCount
+ * @param sourceSampleRate
* @param sinkFormat
* @param sinkChannelCount
+ * @param sinkSampleRate
* @param useMonoBlend
+ * @param useVolumeRamps
* @param audioBalance
- * @param channelMask
- * @param isExclusive
+ * @param resamplerQuality
* @return
*/
aaudio_result_t configure(audio_format_t sourceFormat,
int32_t sourceChannelCount,
+ int32_t sourceSampleRate,
audio_format_t sinkFormat,
int32_t sinkChannelCount,
+ int32_t sinkSampleRate,
bool useMonoBlend,
+ bool useVolumeRamps,
float audioBalance,
- bool isExclusive);
+ aaudio::resampler::MultiChannelResampler::Quality resamplerQuality);
- void process(const void *source, void *destination, int32_t numFrames);
+ /**
+ * Attempt to read targetFramesToRead from the flowgraph.
+ * This function returns the number of frames actually read.
+ *
+     * This function does nothing if process() has not been called yet.
+ *
+ * @param destination
+ * @param targetFramesToRead
+ * @return numFramesRead
+ */
+ int32_t pull(void *destination, int32_t targetFramesToRead);
+
+ /**
+ * Set numFramesToWrite frames from the source into the flowgraph.
+ * Then, attempt to read targetFramesToRead from the flowgraph.
+ * This function returns the number of frames actually read.
+ *
+     * Data may still remain in the flowgraph if targetFramesToRead is not large enough.
+     * Before calling process() again, pull() must be called until all the data is consumed.
+ *
+ * TODO: b/289510598 - Calculate the exact number of input frames needed for Y output frames.
+ *
+ * @param source
+ * @param numFramesToWrite
+ * @param destination
+ * @param targetFramesToRead
+ * @return numFramesRead
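+     *
+     * Illustrative example: process(src, 8, dst, 256) may return fewer than
+     * 256 frames; keep calling pull(dst + n, 256 - n) to drain the rest.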
+ */
+ int32_t process(const void *source, int32_t numFramesToWrite, void *destination,
+ int32_t targetFramesToRead);
/**
* @param volume between 0.0 and 1.0
@@ -73,6 +108,8 @@
private:
std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::FlowGraphSourceBuffered> mSource;
+ std::unique_ptr<RESAMPLER_OUTER_NAMESPACE::resampler::MultiChannelResampler> mResampler;
+ std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::SampleRateConverter> mRateConverter;
std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::MonoBlend> mMonoBlend;
std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::Limiter> mLimiter;
std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::MonoToMultiConverter> mChannelConverter;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 84c715f..ba4c63f 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -63,6 +63,8 @@
#define LOG_TIMESTAMPS 0
+#define ENABLE_SAMPLE_RATE_CONVERTER 1
+
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
: AudioStream()
, mClockModel()
@@ -183,7 +185,6 @@
mDeviceChannelCount = configurationOutput.getSamplesPerFrame();
- setSampleRate(configurationOutput.getSampleRate());
setDeviceId(configurationOutput.getDeviceId());
setSessionId(configurationOutput.getSessionId());
setSharingMode(configurationOutput.getSharingMode());
@@ -194,6 +195,18 @@
setIsContentSpatialized(configurationOutput.isContentSpatialized());
setInputPreset(configurationOutput.getInputPreset());
+ setDeviceSampleRate(configurationOutput.getSampleRate());
+
+ if (getSampleRate() == AAUDIO_UNSPECIFIED) {
+ setSampleRate(configurationOutput.getSampleRate());
+ }
+
+#if !ENABLE_SAMPLE_RATE_CONVERTER
+ if (getSampleRate() != getDeviceSampleRate()) {
+ goto error;
+ }
+#endif
+
// Save device format so we can do format conversion and volume scaling together.
setDeviceFormat(configurationOutput.getFormat());
@@ -233,39 +246,46 @@
}
aaudio_result_t AudioStreamInternal::configureDataInformation(int32_t callbackFrames) {
- int32_t framesPerHardwareBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+ int32_t deviceFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
// Scale up the burst size to meet the minimum equivalent in microseconds.
// This is to avoid waking the CPU too often when the HW burst is very small
- // or at high sample rates.
- int32_t framesPerBurst = framesPerHardwareBurst;
+    // or at high sample rates. The number of frames actually delivered to the
+    // app callback will satisfy 0 < N <= framesPerBurst, so round up the division.
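+    // framesPerBurst is expressed at the app sample rate, while
+    // deviceFramesPerBurst stays at the device sample rate.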
+ int32_t framesPerBurst = (static_cast<int64_t>(deviceFramesPerBurst) * getSampleRate() +
+ getDeviceSampleRate() - 1) / getDeviceSampleRate();
int32_t burstMicros = 0;
const int32_t burstMinMicros = android::AudioSystem::getAAudioHardwareBurstMinUsec();
do {
if (burstMicros > 0) { // skip first loop
+ deviceFramesPerBurst *= 2;
framesPerBurst *= 2;
}
burstMicros = framesPerBurst * static_cast<int64_t>(1000000) / getSampleRate();
} while (burstMicros < burstMinMicros);
ALOGD("%s() original HW burst = %d, minMicros = %d => SW burst = %d\n",
- __func__, framesPerHardwareBurst, burstMinMicros, framesPerBurst);
+ __func__, deviceFramesPerBurst, burstMinMicros, framesPerBurst);
// Validate final burst size.
if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
ALOGE("%s - framesPerBurst out of range = %d", __func__, framesPerBurst);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
+ setDeviceFramesPerBurst(deviceFramesPerBurst);
setFramesPerBurst(framesPerBurst); // only save good value
- mBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
+ mDeviceBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
+
+ mBufferCapacityInFrames = static_cast<int64_t>(mDeviceBufferCapacityInFrames)
+ * getSampleRate() / getDeviceSampleRate();
if (mBufferCapacityInFrames < getFramesPerBurst()
|| mBufferCapacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
ALOGE("%s - bufferCapacity out of range = %d", __func__, mBufferCapacityInFrames);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
- mClockModel.setSampleRate(getSampleRate());
- mClockModel.setFramesPerBurst(framesPerHardwareBurst);
+ mClockModel.setSampleRate(getDeviceSampleRate());
+ mClockModel.setFramesPerBurst(deviceFramesPerBurst);
if (isDataCallbackSet()) {
mCallbackFrames = callbackFrames;
@@ -315,7 +335,8 @@
mTimeOffsetNanos = offsetMicros * AAUDIO_NANOS_PER_MICROSECOND;
}
- setBufferSize(mBufferCapacityInFrames / 2); // Default buffer size to match Q
+ // Default buffer size to match Q
+ setBufferSize(mBufferCapacityInFrames / 2);
return AAUDIO_OK;
}
@@ -374,9 +395,9 @@
// Cache the buffer size which may be from client.
const int32_t previousBufferSize = mBufferSizeInFrames;
// Copy all available data from current data queue.
- uint8_t buffer[getBufferCapacity() * getBytesPerFrame()];
- android::fifo_frames_t fullFramesAvailable =
- mAudioEndpoint->read(buffer, getBufferCapacity());
+ uint8_t buffer[getDeviceBufferCapacity() * getBytesPerFrame()];
+ android::fifo_frames_t fullFramesAvailable = mAudioEndpoint->read(buffer,
+ getDeviceBufferCapacity());
mEndPointParcelable.closeDataFileDescriptor();
aaudio_result_t result = mServiceInterface.exitStandby(
mServiceStreamHandleInfo, endpointParcelable);
@@ -408,7 +429,7 @@
goto exit;
}
// Write data from previous data buffer to new endpoint.
- if (android::fifo_frames_t framesWritten =
+ if (const android::fifo_frames_t framesWritten =
mAudioEndpoint->write(buffer, fullFramesAvailable);
framesWritten != fullFramesAvailable) {
ALOGW("Some data lost after exiting standby, frames written: %d, "
@@ -448,7 +469,7 @@
ALOGD("requestStart() but DISCONNECTED");
return AAUDIO_ERROR_DISCONNECTED;
}
- aaudio_stream_state_t originalState = getState();
+ const aaudio_stream_state_t originalState = getState();
setState(AAUDIO_STREAM_STATE_STARTING);
// Clear any stale timestamps from the previous run.
@@ -605,7 +626,11 @@
// Generated in server and passed to client. Return latest.
if (mAtomicInternalTimestamp.isValid()) {
Timestamp timestamp = mAtomicInternalTimestamp.read();
- int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
+ // This should not overflow as timestamp.getPosition() should be a position in a buffer and
+ // not the actual timestamp. timestamp.getNanoseconds() below uses the actual timestamp.
+ // At 48000 Hz we can run for over 100 years before overflowing the int64_t.
+ int64_t position = (timestamp.getPosition() + mFramesOffsetFromService) * getSampleRate() /
+ getDeviceSampleRate();
if (position >= 0) {
*framePosition = position;
*timeNanoseconds = timestamp.getNanoseconds();
@@ -889,7 +914,8 @@
adjustedFrames = maximumSize;
} else {
// Round to the next highest burst size.
- int32_t numBursts = (adjustedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
+ int32_t numBursts = (static_cast<int64_t>(adjustedFrames) + getFramesPerBurst() - 1) /
+ getFramesPerBurst();
adjustedFrames = numBursts * getFramesPerBurst();
// Clip just in case maximumSize is not a multiple of getFramesPerBurst().
adjustedFrames = std::min(maximumSize, adjustedFrames);
@@ -897,23 +923,32 @@
if (mAudioEndpoint) {
// Clip against the actual size from the endpoint.
- int32_t actualFrames = 0;
+ int32_t actualFramesDevice = 0;
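+        // maximumSize is in app-rate frames; convert to device frames, rounding up.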
+ int32_t maximumFramesDevice = (static_cast<int64_t>(maximumSize) * getDeviceSampleRate()
+ + getSampleRate() - 1) / getSampleRate();
// Set to maximum size so we can write extra data when ready in order to reduce glitches.
// The amount we keep in the buffer is controlled by mBufferSizeInFrames.
- mAudioEndpoint->setBufferSizeInFrames(maximumSize, &actualFrames);
+ mAudioEndpoint->setBufferSizeInFrames(maximumFramesDevice, &actualFramesDevice);
+ int32_t actualFrames = (static_cast<int64_t>(actualFramesDevice) * getSampleRate() +
+ getDeviceSampleRate() - 1) / getDeviceSampleRate();
// actualFrames should be <= actual maximum size of endpoint
adjustedFrames = std::min(actualFrames, adjustedFrames);
}
- if (adjustedFrames != mBufferSizeInFrames) {
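+    // Track the buffer size at both the app and device sample rates.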
+ const int32_t bufferSizeInFrames = adjustedFrames;
+ const int32_t deviceBufferSizeInFrames = static_cast<int64_t>(bufferSizeInFrames) *
+ getDeviceSampleRate() / getSampleRate();
+
+ if (deviceBufferSizeInFrames != mDeviceBufferSizeInFrames) {
android::mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
- .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, adjustedFrames)
+ .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, deviceBufferSizeInFrames)
.set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getXRunCount())
.record();
}
- mBufferSizeInFrames = adjustedFrames;
+ mBufferSizeInFrames = bufferSizeInFrames;
+ mDeviceBufferSizeInFrames = deviceBufferSizeInFrames;
ALOGV("%s(%d) returns %d", __func__, requestedFrames, adjustedFrames);
return (aaudio_result_t) adjustedFrames;
}
@@ -922,10 +957,18 @@
return mBufferSizeInFrames;
}
+int32_t AudioStreamInternal::getDeviceBufferSize() const {
+ return mDeviceBufferSizeInFrames;
+}
+
int32_t AudioStreamInternal::getBufferCapacity() const {
return mBufferCapacityInFrames;
}
+int32_t AudioStreamInternal::getDeviceBufferCapacity() const {
+ return mDeviceBufferCapacityInFrames;
+}
+
bool AudioStreamInternal::isClockModelInControl() const {
return isActive() && mAudioEndpoint->isFreeRunning() && mClockModel.isRunning();
}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 9c06121..0dc9995 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -22,8 +22,9 @@
#include "binding/AudioEndpointParcelable.h"
#include "binding/AAudioServiceInterface.h"
-#include "client/IsochronousClockModel.h"
+#include "client/AAudioFlowGraph.h"
#include "client/AudioEndpoint.h"
+#include "client/IsochronousClockModel.h"
#include "core/AudioStream.h"
#include "utility/AudioClock.h"
@@ -56,8 +57,12 @@
int32_t getBufferSize() const override;
+ int32_t getDeviceBufferSize() const;
+
int32_t getBufferCapacity() const override;
+ int32_t getDeviceBufferCapacity() const override;
+
int32_t getXRunCount() const override {
return mXRunCount;
}
@@ -177,6 +182,8 @@
int64_t mLastFramesWritten = 0;
int64_t mLastFramesRead = 0;
+ AAudioFlowGraph mFlowGraph;
+
private:
/*
* Asynchronous write with data conversion.
@@ -211,8 +218,9 @@
int32_t mDeviceChannelCount = 0;
int32_t mBufferSizeInFrames = 0; // local threshold to control latency
+ int32_t mDeviceBufferSizeInFrames = 0;
int32_t mBufferCapacityInFrames = 0;
-
+ int32_t mDeviceBufferCapacityInFrames = 0;
};
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index f5cc2be..47518d7 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -47,6 +47,27 @@
}
+aaudio_result_t AudioStreamInternalCapture::open(const AudioStreamBuilder &builder) {
+ aaudio_result_t result = AudioStreamInternal::open(builder);
+ if (result == AAUDIO_OK) {
+ result = mFlowGraph.configure(getDeviceFormat(),
+ getDeviceChannelCount(),
+ getDeviceSampleRate(),
+ getFormat(),
+ getSamplesPerFrame(),
+ getSampleRate(),
+ getRequireMonoBlend(),
+ false /* useVolumeRamps */,
+ getAudioBalance(),
+ aaudio::resampler::MultiChannelResampler::Quality::Medium);
+
+ if (result != AAUDIO_OK) {
+ safeReleaseClose();
+ }
+ }
+ return result;
+}
+
void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
int64_t readCounter = mAudioEndpoint->getDataReadCounter();
int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;
@@ -149,7 +170,8 @@
// Calculate frame position based off of the readCounter because
// the writeCounter might have just advanced in the background,
// causing us to sleep until a later burst.
- int64_t nextPosition = mAudioEndpoint->getDataReadCounter() + getFramesPerBurst();
+ const int64_t nextPosition = mAudioEndpoint->getDataReadCounter() +
+ getDeviceFramesPerBurst();
wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
}
break;
@@ -166,42 +188,73 @@
aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
int32_t numFrames) {
- // ALOGD("readNowWithConversion(%p, %d)",
- // buffer, numFrames);
WrappingBuffer wrappingBuffer;
- uint8_t *destination = (uint8_t *) buffer;
- int32_t framesLeft = numFrames;
+ uint8_t *byteBuffer = (uint8_t *) buffer;
+ int32_t framesLeftInByteBuffer = numFrames;
+
+ if (framesLeftInByteBuffer > 0) {
+ // Pull data from the flowgraph in case there is residual data.
+ const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.pull(
+ (void *)byteBuffer,
+ framesLeftInByteBuffer);
+
+ const int32_t numBytesActuallyWrittenToByteBuffer =
+ framesActuallyWrittenToByteBuffer * getBytesPerFrame();
+ byteBuffer += numBytesActuallyWrittenToByteBuffer;
+ framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
+ }
mAudioEndpoint->getFullFramesAvailable(&wrappingBuffer);
- // Read data in one or two parts.
- for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
- int32_t framesToProcess = framesLeft;
- const int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- if (framesAvailable <= 0) break;
+ // Write data in one or two parts.
+ int partIndex = 0;
+ int framesReadFromAudioEndpoint = 0;
+ while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
+ const int32_t totalFramesInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
+ int32_t framesAvailableInWrappingBuffer = totalFramesInWrappingBuffer;
+ uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];
- if (framesToProcess > framesAvailable) {
- framesToProcess = framesAvailable;
+ // Put data from the wrapping buffer into the flowgraph 8 frames at a time.
+ // Continuously pull as much data as possible from the flowgraph into the byte buffer.
+ // The return value of mFlowGraph.process is the number of frames actually pulled.
+ while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
+ const int32_t framesToReadFromWrappingBuffer = std::min(flowgraph::kDefaultBufferSize,
+ framesAvailableInWrappingBuffer);
+
+ const int32_t numBytesToReadFromWrappingBuffer = getBytesPerDeviceFrame() *
+ framesToReadFromWrappingBuffer;
+
+ // If framesActuallyWrittenToByteBuffer < framesLeftInByteBuffer, it is guaranteed
+ // that all the data is pulled. If there is no more space in the byteBuffer, the
+ // remaining data will be pulled in the following readNowWithConversion().
+ const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.process(
+ (void *)currentWrappingBuffer,
+ framesToReadFromWrappingBuffer,
+ (void *)byteBuffer,
+ framesLeftInByteBuffer);
+
+ const int32_t numBytesActuallyWrittenToByteBuffer =
+ framesActuallyWrittenToByteBuffer * getBytesPerFrame();
+ byteBuffer += numBytesActuallyWrittenToByteBuffer;
+ framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
+ currentWrappingBuffer += numBytesToReadFromWrappingBuffer;
+ framesAvailableInWrappingBuffer -= framesToReadFromWrappingBuffer;
+
+ //ALOGD("%s() numBytesActuallyWrittenToByteBuffer %d, framesLeftInByteBuffer %d"
+ // "framesAvailableInWrappingBuffer %d, framesReadFromAudioEndpoint %d"
+ // , __func__, numBytesActuallyWrittenToByteBuffer, framesLeftInByteBuffer,
+ // framesAvailableInWrappingBuffer, framesReadFromAudioEndpoint);
}
-
- const int32_t numBytes = getBytesPerFrame() * framesToProcess;
- const int32_t numSamples = framesToProcess * getSamplesPerFrame();
-
- const audio_format_t sourceFormat = getDeviceFormat();
- const audio_format_t destinationFormat = getFormat();
-
- memcpy_by_audio_format(destination, destinationFormat,
- wrappingBuffer.data[partIndex], sourceFormat, numSamples);
-
- destination += numBytes;
- framesLeft -= framesToProcess;
+ framesReadFromAudioEndpoint += totalFramesInWrappingBuffer -
+ framesAvailableInWrappingBuffer;
+ partIndex++;
}
- int32_t framesProcessed = numFrames - framesLeft;
- mAudioEndpoint->advanceReadIndex(framesProcessed);
+    // The audio endpoint should advance by the number of frames read from the wrapping buffer.
+ mAudioEndpoint->advanceReadIndex(framesReadFromAudioEndpoint);
- //ALOGD("readNowWithConversion() returns %d", framesProcessed);
- return framesProcessed;
+    // The internal code should use the number of frames written to the app's buffer.
+ return numFrames - framesLeftInByteBuffer;
}
int64_t AudioStreamInternalCapture::getFramesWritten() {
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 87017de..10e247d 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -32,6 +32,8 @@
bool inService = false);
virtual ~AudioStreamInternalCapture() = default;
+ aaudio_result_t open(const AudioStreamBuilder &builder) override;
+
aaudio_result_t read(void *buffer,
int32_t numFrames,
int64_t timeoutNanoseconds) override;
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 89dd8ff..99aa910 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -48,14 +48,18 @@
aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
aaudio_result_t result = AudioStreamInternal::open(builder);
+ const bool useVolumeRamps = (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE);
if (result == AAUDIO_OK) {
result = mFlowGraph.configure(getFormat(),
getSamplesPerFrame(),
+ getSampleRate(),
getDeviceFormat(),
getDeviceChannelCount(),
+ getDeviceSampleRate(),
getRequireMonoBlend(),
+ useVolumeRamps,
getAudioBalance(),
- (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE));
+ aaudio::resampler::MultiChannelResampler::Quality::Medium);
if (result != AAUDIO_OK) {
safeReleaseClose();
@@ -186,7 +190,7 @@
// Sleep if there is too much data in the buffer.
// Calculate an ideal time to wake up.
if (wakeTimePtr != nullptr
- && (mAudioEndpoint->getFullFramesAvailable() >= getBufferSize())) {
+ && (mAudioEndpoint->getFullFramesAvailable() >= getDeviceBufferSize())) {
// By default wake up a few milliseconds from now. // TODO review
int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
aaudio_stream_state_t state = getState();
@@ -206,12 +210,12 @@
// If the appBufferSize is smaller than the endpointBufferSize then
// we will have room to write data beyond the appBufferSize.
// That is a technique used to reduce glitches without adding latency.
- const int32_t appBufferSize = getBufferSize();
+ const int64_t appBufferSize = getDeviceBufferSize();
// The endpoint buffer size is set to the maximum that can be written.
// If we use it then we must carve out some room to write data when we wake up.
- const int32_t endBufferSize = mAudioEndpoint->getBufferSizeInFrames()
- - getFramesPerBurst();
- const int32_t bestBufferSize = std::min(appBufferSize, endBufferSize);
+ const int64_t endBufferSize = mAudioEndpoint->getBufferSizeInFrames()
+ - getDeviceFramesPerBurst();
+ const int64_t bestBufferSize = std::min(appBufferSize, endBufferSize);
int64_t targetReadPosition = mAudioEndpoint->getDataWriteCounter() - bestBufferSize;
wakeTime = mClockModel.convertPositionToTime(targetReadPosition);
}
@@ -232,37 +236,78 @@
int32_t numFrames) {
WrappingBuffer wrappingBuffer;
uint8_t *byteBuffer = (uint8_t *) buffer;
- int32_t framesLeft = numFrames;
+ int32_t framesLeftInByteBuffer = numFrames;
mAudioEndpoint->getEmptyFramesAvailable(&wrappingBuffer);
// Write data in one or two parts.
int partIndex = 0;
- while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
- int32_t framesToWrite = framesLeft;
- int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- if (framesAvailable > 0) {
- if (framesToWrite > framesAvailable) {
- framesToWrite = framesAvailable;
- }
+ int framesWrittenToAudioEndpoint = 0;
+ while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
+ int32_t framesAvailableInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
+ uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];
- int32_t numBytes = getBytesPerFrame() * framesToWrite;
+ if (framesAvailableInWrappingBuffer > 0) {
+ // Pull data from the flowgraph in case there is residual data.
+ const int32_t framesActuallyWrittenToWrappingBuffer = mFlowGraph.pull(
+ (void*) currentWrappingBuffer,
+ framesAvailableInWrappingBuffer);
- mFlowGraph.process((void *)byteBuffer,
- wrappingBuffer.data[partIndex],
- framesToWrite);
+ const int32_t numBytesActuallyWrittenToWrappingBuffer =
+ framesActuallyWrittenToWrappingBuffer * getBytesPerDeviceFrame();
+ currentWrappingBuffer += numBytesActuallyWrittenToWrappingBuffer;
+ framesAvailableInWrappingBuffer -= framesActuallyWrittenToWrappingBuffer;
+ framesWrittenToAudioEndpoint += framesActuallyWrittenToWrappingBuffer;
+ }
- byteBuffer += numBytes;
- framesLeft -= framesToWrite;
- } else {
- break;
+ // Put data from byteBuffer into the flowgraph one buffer (8 frames) at a time.
+ // Continuously pull as much data as possible from the flowgraph into the wrapping buffer.
+ // The return value of mFlowGraph.process is the number of frames actually pulled.
+ while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
+ const int32_t framesToWriteFromByteBuffer = std::min(flowgraph::kDefaultBufferSize,
+ framesLeftInByteBuffer);
+
+ const int32_t numBytesToWriteFromByteBuffer = getBytesPerFrame() *
+ framesToWriteFromByteBuffer;
+
+ //ALOGD("%s() framesLeftInByteBuffer %d, framesAvailableInWrappingBuffer %d"
+ // "framesToWriteFromByteBuffer %d, numBytesToWriteFromByteBuffer %d"
+ // , __func__, framesLeftInByteBuffer, framesAvailableInWrappingBuffer,
+ // framesToWriteFromByteBuffer, numBytesToWriteFromByteBuffer);
+
+ const int32_t framesActuallyWrittenToWrappingBuffer = mFlowGraph.process(
+ (void *)byteBuffer,
+ framesToWriteFromByteBuffer,
+ (void *)currentWrappingBuffer,
+ framesAvailableInWrappingBuffer);
+
+ byteBuffer += numBytesToWriteFromByteBuffer;
+ framesLeftInByteBuffer -= framesToWriteFromByteBuffer;
+ const int32_t numBytesActuallyWrittenToWrappingBuffer =
+ framesActuallyWrittenToWrappingBuffer * getBytesPerDeviceFrame();
+ currentWrappingBuffer += numBytesActuallyWrittenToWrappingBuffer;
+ framesAvailableInWrappingBuffer -= framesActuallyWrittenToWrappingBuffer;
+ framesWrittenToAudioEndpoint += framesActuallyWrittenToWrappingBuffer;
+
+ //ALOGD("%s() numBytesActuallyWrittenToWrappingBuffer %d, framesLeftInByteBuffer %d"
+ // "framesActuallyWrittenToWrappingBuffer %d, numBytesToWriteFromByteBuffer %d"
+ // "framesWrittenToAudioEndpoint %d"
+ // , __func__, numBytesActuallyWrittenToWrappingBuffer, framesLeftInByteBuffer,
+ // framesActuallyWrittenToWrappingBuffer, numBytesToWriteFromByteBuffer,
+ // framesWrittenToAudioEndpoint);
}
partIndex++;
}
- int32_t framesWritten = numFrames - framesLeft;
- mAudioEndpoint->advanceWriteIndex(framesWritten);
+ //ALOGD("%s() framesWrittenToAudioEndpoint %d, numFrames %d"
+ // "framesLeftInByteBuffer %d"
+ // , __func__, framesWrittenToAudioEndpoint, numFrames,
+ // framesLeftInByteBuffer);
- return framesWritten;
+ // The audio endpoint should reference the number of frames written to the wrapping buffer.
+ mAudioEndpoint->advanceWriteIndex(framesWrittenToAudioEndpoint);
+
+ // The internal code should use the number of frames read from the app.
+ return numFrames - framesLeftInByteBuffer;
}
int64_t AudioStreamInternalPlay::getFramesRead() {
@@ -284,7 +329,6 @@
return mLastFramesWritten;
}
-
// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternalPlay::callbackLoop() {
ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index e761807..b51b5d0 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -21,7 +21,6 @@
#include <aaudio/AAudio.h>
#include "binding/AAudioServiceInterface.h"
-#include "client/AAudioFlowGraph.h"
#include "client/AudioStreamInternal.h"
using android::sp;
@@ -89,13 +88,11 @@
* Asynchronous write with data conversion.
* @param buffer
* @param numFrames
- * @return fdrames written or negative error
+ * @return frames written or negative error
*/
aaudio_result_t writeNowWithConversion(const void *buffer,
int32_t numFrames);
- AAudioFlowGraph mFlowGraph;
-
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 8a13a6f..1e27a81 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -571,13 +571,15 @@
AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
- return audioStream->getFramesWritten();
+ return audioStream->getFramesWritten() * audioStream->getSampleRate() /
+ audioStream->getDeviceSampleRate();
}
AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
- return audioStream->getFramesRead();
+ return audioStream->getFramesRead() * audioStream->getSampleRate() /
+ audioStream->getDeviceSampleRate();
}
AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* stream,
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 9b4b734..1649eaf 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -204,10 +204,18 @@
return mBufferCapacity;
}
+ virtual int32_t getDeviceBufferCapacity() const {
+ return mDeviceBufferCapacity;
+ }
+
virtual int32_t getFramesPerBurst() const {
return mFramesPerBurst;
}
+ virtual int32_t getDeviceFramesPerBurst() const {
+ return mDeviceFramesPerBurst;
+ }
+
virtual int32_t getXRunCount() const {
return AAUDIO_ERROR_UNIMPLEMENTED;
}
@@ -224,6 +232,10 @@
return mSampleRate;
}
+ aaudio_result_t getDeviceSampleRate() const {
+ return mDeviceSampleRate;
+ }
+
aaudio_result_t getHardwareSampleRate() const {
return mHardwareSampleRate;
}
@@ -542,6 +554,11 @@
}
// This should not be called after the open() call.
+ void setDeviceSampleRate(int32_t deviceSampleRate) {
+ mDeviceSampleRate = deviceSampleRate;
+ }
+
+ // This should not be called after the open() call.
void setHardwareSampleRate(int32_t hardwareSampleRate) {
mHardwareSampleRate = hardwareSampleRate;
}
@@ -552,11 +569,21 @@
}
// This should not be called after the open() call.
+ void setDeviceFramesPerBurst(int32_t deviceFramesPerBurst) {
+ mDeviceFramesPerBurst = deviceFramesPerBurst;
+ }
+
+ // This should not be called after the open() call.
void setBufferCapacity(int32_t bufferCapacity) {
mBufferCapacity = bufferCapacity;
}
// This should not be called after the open() call.
+ void setDeviceBufferCapacity(int32_t deviceBufferCapacity) {
+ mDeviceBufferCapacity = deviceBufferCapacity;
+ }
+
+ // This should not be called after the open() call.
void setSharingMode(aaudio_sharing_mode_t sharingMode) {
mSharingMode = sharingMode;
}
@@ -724,6 +751,7 @@
int32_t mHardwareSamplesPerFrame = AAUDIO_UNSPECIFIED;
aaudio_channel_mask_t mChannelMask = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
+ int32_t mDeviceSampleRate = AAUDIO_UNSPECIFIED;
int32_t mHardwareSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
@@ -732,7 +760,9 @@
audio_format_t mHardwareFormat = AUDIO_FORMAT_DEFAULT;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
int32_t mFramesPerBurst = 0;
+ int32_t mDeviceFramesPerBurst = 0;
int32_t mBufferCapacity = 0;
+ int32_t mDeviceBufferCapacity = 0;
aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index e760dab..7b4821f 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -208,6 +208,10 @@
setBufferCapacity(getBufferCapacityFromDevice());
setFramesPerBurst(getFramesPerBurstFromDevice());
+ setDeviceSampleRate(mAudioRecord->getSampleRate());
+ setDeviceBufferCapacity(getBufferCapacityFromDevice());
+ setDeviceFramesPerBurst(getFramesPerBurstFromDevice());
+
setHardwareSamplesPerFrame(mAudioRecord->getHalChannelCount());
setHardwareSampleRate(mAudioRecord->getHalSampleRate());
setHardwareFormat(mAudioRecord->getHalFormat());
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 67ee42e..723b419 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -202,6 +202,9 @@
setSampleRate(mAudioTrack->getSampleRate());
setBufferCapacity(getBufferCapacityFromDevice());
setFramesPerBurst(getFramesPerBurstFromDevice());
+ setDeviceSampleRate(mAudioTrack->getSampleRate());
+ setDeviceBufferCapacity(getBufferCapacityFromDevice());
+ setDeviceFramesPerBurst(getFramesPerBurstFromDevice());
setHardwareSamplesPerFrame(mAudioTrack->getHalChannelCount());
setHardwareSampleRate(mAudioTrack->getHalSampleRate());
diff --git a/media/libaaudio/src/utility/AudioClock.h b/media/libaaudio/src/utility/AudioClock.h
index d5d4ef4..37f5b39 100644
--- a/media/libaaudio/src/utility/AudioClock.h
+++ b/media/libaaudio/src/utility/AudioClock.h
@@ -33,7 +33,7 @@
public:
static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
struct timespec time;
- int result = clock_gettime(clockId, &time);
+ const int result = clock_gettime(clockId, &time);
if (result < 0) {
return -errno;
}
@@ -56,7 +56,7 @@
time.tv_sec = nanoTime / AAUDIO_NANOS_PER_SECOND;
// Calculate the fractional nanoseconds. Avoids expensive % operation.
time.tv_nsec = nanoTime - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
- int err = clock_nanosleep(clockId, TIMER_ABSTIME, &time, nullptr);
+ const int err = clock_nanosleep(clockId, TIMER_ABSTIME, &time, nullptr);
switch (err) {
case EINTR:
return 1;
@@ -86,7 +86,7 @@
// Calculate the fractional nanoseconds. Avoids expensive % operation.
time.tv_nsec = nanoseconds - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
const int flags = 0; // documented as relative sleep
- int err = clock_nanosleep(clockId, flags, &time, nullptr);
+ const int err = clock_nanosleep(clockId, flags, &time, nullptr);
switch (err) {
case EINTR:
return 1;
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 51eb69b..b58634f 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -104,7 +104,7 @@
*/
void roundUp64(int32_t period) {
if (period > 0) {
- int64_t numPeriods = (mCounter64 + period - 1) / period;
+ const int64_t numPeriods = (mCounter64 + period - 1) / period;
mCounter64 = numPeriods * period;
}
}
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 24041bc..0cfdfb2 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -145,6 +145,7 @@
srcs: ["test_flowgraph.cpp"],
shared_libs: [
"libaaudio_internal",
+ "libaudioutils",
"libbinder",
"libcutils",
"libutils",
diff --git a/media/libaaudio/tests/test_flowgraph.cpp b/media/libaaudio/tests/test_flowgraph.cpp
index 6f75f5a..7eb8b0d 100644
--- a/media/libaaudio/tests/test_flowgraph.cpp
+++ b/media/libaaudio/tests/test_flowgraph.cpp
@@ -25,6 +25,8 @@
#include <gtest/gtest.h>
+#include <aaudio/AAudio.h>
+#include "client/AAudioFlowGraph.h"
#include "flowgraph/ClipToRange.h"
#include "flowgraph/Limiter.h"
#include "flowgraph/MonoBlend.h"
@@ -37,8 +39,18 @@
#include "flowgraph/SinkI32.h"
#include "flowgraph/SourceI16.h"
#include "flowgraph/SourceI24.h"
+#include "flowgraph/resampler/IntegerRatio.h"
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
+using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
+
+using TestFlowgraphResamplerParams = std::tuple<int32_t, int32_t, MultiChannelResampler::Quality>;
+
+enum {
+ PARAM_SOURCE_SAMPLE_RATE = 0,
+ PARAM_SINK_SAMPLE_RATE,
+ PARAM_RESAMPLER_QUALITY
+};
constexpr int kBytesPerI24Packed = 3;
@@ -394,3 +406,240 @@
EXPECT_NEAR(expected[i], output[i], tolerance);
}
}
+
+TEST(test_flowgraph, module_sinki16_multiple_reads) {
+ static constexpr int kNumSamples = 8;
+ std::array<int16_t, kNumSamples + 10> output; // larger than input
+
+ SourceFloat sourceFloat{1};
+ SinkI16 sinkI16{1};
+
+ sourceFloat.setData(kInputFloat.data(), kNumSamples);
+ sourceFloat.output.connect(&sinkI16.input);
+
+ output.fill(777);
+
+ // Read the first half of the data
+ int32_t numRead = sinkI16.read(output.data(), kNumSamples / 2);
+ ASSERT_EQ(kNumSamples / 2, numRead);
+ for (int i = 0; i < numRead; i++) {
+ EXPECT_EQ(kExpectedI16.at(i), output.at(i)) << ", i = " << i;
+ }
+
+ // Read the rest of the data
+ numRead = sinkI16.read(output.data(), output.size());
+ ASSERT_EQ(kNumSamples / 2, numRead);
+ for (int i = 0; i < numRead; i++) {
+ EXPECT_EQ(kExpectedI16.at(i + kNumSamples / 2), output.at(i)) << ", i = " << i;
+ }
+}
+
+void checkSampleRateConversionVariedSizes(int32_t sourceSampleRate,
+ int32_t sinkSampleRate,
+ MultiChannelResampler::Quality resamplerQuality) {
+ AAudioFlowGraph flowgraph;
+ aaudio_result_t result = flowgraph.configure(AUDIO_FORMAT_PCM_FLOAT /* sourceFormat */,
+ 1 /* sourceChannelCount */,
+ sourceSampleRate,
+ AUDIO_FORMAT_PCM_FLOAT /* sinkFormat */,
+ 1 /* sinkChannelCount */,
+ sinkSampleRate,
+ false /* useMonoBlend */,
+ false /* useVolumeRamps */,
+ 0.0f /* audioBalance */,
+ resamplerQuality);
+
+ IntegerRatio ratio(sourceSampleRate, sinkSampleRate);
+ ratio.reduce();
+
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ const int inputSize = ratio.getNumerator();
+ const int outputSize = ratio.getDenominator();
+ float input[inputSize];
+ float output[outputSize];
+
+ for (int i = 0; i < inputSize; i++) {
+ input[i] = i * 1.0f / inputSize;
+ }
+
+ int inputUsed = 0;
+ int outputRead = 0;
+ int curInputSize = 1;
+
+ // Process the data with larger and larger input buffer sizes.
+ while (inputUsed < inputSize) {
+ outputRead += flowgraph.process((void *) (input + inputUsed),
+ curInputSize,
+ (void *) (output + outputRead),
+ outputSize - outputRead);
+ inputUsed += curInputSize;
+ curInputSize = std::min(curInputSize + 5, inputSize - inputUsed);
+ }
+
+ ASSERT_EQ(outputSize, outputRead);
+
+ for (int i = 1; i < outputSize; i++) {
+ // The first values of the flowgraph will be close to zero.
+ // Besides those, the values should be strictly increasing.
+ if (output[i - 1] > 0.01f) {
+ EXPECT_GT(output[i], output[i - 1]);
+ }
+ }
+}
+
+TEST(test_flowgraph, flowgraph_varied_sizes_all) {
+ const int rates[] = {8000, 11025, 22050, 32000, 44100, 48000, 64000, 88200, 96000};
+ const MultiChannelResampler::Quality qualities[] =
+ {
+ MultiChannelResampler::Quality::Fastest,
+ MultiChannelResampler::Quality::Low,
+ MultiChannelResampler::Quality::Medium,
+ MultiChannelResampler::Quality::High,
+ MultiChannelResampler::Quality::Best
+ };
+ for (int srcRate : rates) {
+ for (int destRate : rates) {
+ for (auto quality : qualities) {
+ if (srcRate != destRate) {
+ checkSampleRateConversionVariedSizes(srcRate, destRate, quality);
+ }
+ }
+ }
+ }
+}
+
+void checkSampleRateConversionPullLater(int32_t sourceSampleRate,
+ int32_t sinkSampleRate,
+ MultiChannelResampler::Quality resamplerQuality) {
+ AAudioFlowGraph flowgraph;
+ aaudio_result_t result = flowgraph.configure(AUDIO_FORMAT_PCM_FLOAT /* sourceFormat */,
+ 1 /* sourceChannelCount */,
+ sourceSampleRate,
+ AUDIO_FORMAT_PCM_FLOAT /* sinkFormat */,
+ 1 /* sinkChannelCount */,
+ sinkSampleRate,
+ false /* useMonoBlend */,
+ false /* useVolumeRamps */,
+ 0.0f /* audioBalance */,
+ resamplerQuality);
+
+ IntegerRatio ratio(sourceSampleRate, sinkSampleRate);
+ ratio.reduce();
+
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ const int inputSize = ratio.getNumerator();
+ const int outputSize = ratio.getDenominator();
+ float input[inputSize];
+ float output[outputSize];
+
+ for (int i = 0; i < inputSize; i++) {
+ input[i] = i * 1.0f / inputSize;
+ }
+
+ // Read half the data with process.
+ int outputRead = flowgraph.process((void *) input,
+ inputSize,
+ (void *) output,
+ outputSize / 2);
+
+ ASSERT_EQ(outputSize / 2, outputRead);
+
+ // Now read the other half of the data with pull.
+ outputRead += flowgraph.pull(
+ (void *) (output + outputRead),
+ outputSize - outputRead);
+
+ ASSERT_EQ(outputSize, outputRead);
+ for (int i = 1; i < outputSize; i++) {
+ // The first values of the flowgraph will be close to zero.
+ // Besides those, the values should be strictly increasing.
+ if (output[i - 1] > 0.01f) {
+ EXPECT_GT(output[i], output[i - 1]);
+ }
+ }
+}
+
+// TODO: b/289508408 - Remove non-parameterized tests if they get noisy.
+TEST(test_flowgraph, flowgraph_pull_later_all) {
+ const int rates[] = {8000, 11025, 22050, 32000, 44100, 48000, 64000, 88200, 96000};
+ const MultiChannelResampler::Quality qualities[] =
+ {
+ MultiChannelResampler::Quality::Fastest,
+ MultiChannelResampler::Quality::Low,
+ MultiChannelResampler::Quality::Medium,
+ MultiChannelResampler::Quality::High,
+ MultiChannelResampler::Quality::Best
+ };
+ for (int srcRate : rates) {
+ for (int destRate : rates) {
+ for (auto quality : qualities) {
+ if (srcRate != destRate) {
+ checkSampleRateConversionPullLater(srcRate, destRate, quality);
+ }
+ }
+ }
+ }
+}
+
+class TestFlowgraphSampleRateConversion : public ::testing::Test,
+ public ::testing::WithParamInterface<TestFlowgraphResamplerParams> {
+};
+
+const char* resamplerQualityToString(MultiChannelResampler::Quality quality) {
+ switch (quality) {
+ case MultiChannelResampler::Quality::Fastest: return "FASTEST";
+ case MultiChannelResampler::Quality::Low: return "LOW";
+ case MultiChannelResampler::Quality::Medium: return "MEDIUM";
+ case MultiChannelResampler::Quality::High: return "HIGH";
+ case MultiChannelResampler::Quality::Best: return "BEST";
+ }
+ return "UNKNOWN";
+}
+
+static std::string getTestName(
+ const ::testing::TestParamInfo<TestFlowgraphResamplerParams>& info) {
+ return std::string()
+ + std::to_string(std::get<PARAM_SOURCE_SAMPLE_RATE>(info.param))
+ + "__" + std::to_string(std::get<PARAM_SINK_SAMPLE_RATE>(info.param))
+ + "__" + resamplerQualityToString(std::get<PARAM_RESAMPLER_QUALITY>(info.param));
+}
+
+TEST_P(TestFlowgraphSampleRateConversion, test_flowgraph_pull_later) {
+ checkSampleRateConversionPullLater(std::get<PARAM_SOURCE_SAMPLE_RATE>(GetParam()),
+ std::get<PARAM_SINK_SAMPLE_RATE>(GetParam()),
+ std::get<PARAM_RESAMPLER_QUALITY>(GetParam()));
+}
+
+TEST_P(TestFlowgraphSampleRateConversion, test_flowgraph_varied_sizes) {
+ checkSampleRateConversionVariedSizes(std::get<PARAM_SOURCE_SAMPLE_RATE>(GetParam()),
+ std::get<PARAM_SINK_SAMPLE_RATE>(GetParam()),
+ std::get<PARAM_RESAMPLER_QUALITY>(GetParam()));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ test_flowgraph,
+ TestFlowgraphSampleRateConversion,
+ ::testing::Values(
+ TestFlowgraphResamplerParams({8000, 11025, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({8000, 48000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({8000, 44100, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({11025, 24000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({11025, 48000,
+ MultiChannelResampler::Quality::Fastest}),
+ TestFlowgraphResamplerParams({11025, 48000, MultiChannelResampler::Quality::Low}),
+ TestFlowgraphResamplerParams({11025, 48000,
+ MultiChannelResampler::Quality::Medium}),
+ TestFlowgraphResamplerParams({11025, 48000, MultiChannelResampler::Quality::High}),
+ TestFlowgraphResamplerParams({11025, 48000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({11025, 44100, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({11025, 88200, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({16000, 48000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({44100, 48000, MultiChannelResampler::Quality::Low}),
+ TestFlowgraphResamplerParams({44100, 48000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({48000, 11025, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({48000, 44100, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({44100, 11025, MultiChannelResampler::Quality::Best})),
+ &getTestName
+);
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 7f228c7..2ef6fe5 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -45,6 +45,8 @@
#define OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS (3 * AAUDIO_NANOS_PER_MILLISECOND)
#define INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS (-1 * AAUDIO_NANOS_PER_MILLISECOND)
+#define AAUDIO_MAX_OPEN_ATTEMPTS 10
+
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
@@ -77,14 +79,23 @@
{AUDIO_FORMAT_PCM_24_BIT_PACKED, AUDIO_FORMAT_PCM_16_BIT}
};
-audio_format_t getNextFormatToTry(audio_format_t curFormat, audio_format_t returnedFromAPM) {
- if (returnedFromAPM != AUDIO_FORMAT_DEFAULT) {
- return returnedFromAPM;
- }
+audio_format_t getNextFormatToTry(audio_format_t curFormat) {
const auto it = NEXT_FORMAT_TO_TRY.find(curFormat);
- return it != NEXT_FORMAT_TO_TRY.end() ? it->second : AUDIO_FORMAT_DEFAULT;
+ return it != NEXT_FORMAT_TO_TRY.end() ? it->second : curFormat;
}
+struct configComp {
+ bool operator() (const audio_config_base_t& lhs, const audio_config_base_t& rhs) const {
+ if (lhs.sample_rate != rhs.sample_rate) {
+ return lhs.sample_rate < rhs.sample_rate;
+ } else if (lhs.channel_mask != rhs.channel_mask) {
+ return lhs.channel_mask < rhs.channel_mask;
+ } else {
+ return lhs.format < rhs.format;
+ }
+ }
+};
+
} // namespace
aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
@@ -101,60 +112,66 @@
legacy2aidl_pid_t_int32_t(IPCThreadState::self()->getCallingPid()));
audio_format_t audioFormat = getFormat();
- std::set<audio_format_t> formatsTried;
- while (true) {
- if (formatsTried.find(audioFormat) != formatsTried.end()) {
+ int32_t sampleRate = getSampleRate();
+ if (sampleRate == AAUDIO_UNSPECIFIED) {
+ sampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
+ }
+
+ const aaudio_direction_t direction = getDirection();
+ audio_config_base_t config;
+ config.format = audioFormat;
+ config.sample_rate = sampleRate;
+ config.channel_mask = AAudio_getChannelMaskForOpen(
+ getChannelMask(), getSamplesPerFrame(), direction == AAUDIO_DIRECTION_INPUT);
+
+ std::set<audio_config_base_t, configComp> configsTried;
+ int32_t numberOfAttempts = 0;
+ while (numberOfAttempts < AAUDIO_MAX_OPEN_ATTEMPTS) {
+ if (configsTried.find(config) != configsTried.end()) {
            // APM returned a config that has already been tried.
- ALOGW("Have already tried to open with format=%#x, but failed before", audioFormat);
+ ALOGW("Have already tried to open with format=%#x and sr=%d, but failed before",
+ config.format, config.sample_rate);
break;
}
- formatsTried.insert(audioFormat);
+ configsTried.insert(config);
- audio_format_t nextFormatToTry = AUDIO_FORMAT_DEFAULT;
- result = openWithFormat(audioFormat, &nextFormatToTry);
+ audio_config_base_t previousConfig = config;
+ result = openWithConfig(&config);
if (result != AAUDIO_ERROR_UNAVAILABLE) {
            // Return if it is successful or if an error other than
            // AAUDIO_ERROR_UNAVAILABLE occurs.
- ALOGI("Opened format=%#x with result=%d", audioFormat, result);
+ ALOGI("Opened format=%#x sr=%d, with result=%d", previousConfig.format,
+ previousConfig.sample_rate, result);
break;
}
- nextFormatToTry = getNextFormatToTry(audioFormat, nextFormatToTry);
- ALOGD("%s() %#x failed, perhaps due to format. Try again with %#x",
- __func__, audioFormat, nextFormatToTry);
- audioFormat = nextFormatToTry;
- if (audioFormat == AUDIO_FORMAT_DEFAULT) {
- // Nothing else to try
- break;
+ // Try other formats if the config from APM is the same as our current config.
+        // Some HALs may report their format support incorrectly.
+ if ((previousConfig.format == config.format) &&
+ (previousConfig.sample_rate == config.sample_rate)) {
+ config.format = getNextFormatToTry(config.format);
}
+
+ ALOGD("%s() %#x %d failed, perhaps due to format or sample rate. Try again with %#x %d",
+ __func__, previousConfig.format, previousConfig.sample_rate, config.format,
+ config.sample_rate);
+ numberOfAttempts++;
}
return result;
}
-aaudio_result_t AAudioServiceEndpointMMAP::openWithFormat(
- audio_format_t audioFormat, audio_format_t* nextFormatToTry) {
+aaudio_result_t AAudioServiceEndpointMMAP::openWithConfig(
+ audio_config_base_t* config) {
aaudio_result_t result = AAUDIO_OK;
- audio_config_base_t config;
+ audio_config_base_t currentConfig = *config;
audio_port_handle_t deviceId;
const audio_attributes_t attributes = getAudioAttributesFrom(this);
deviceId = mRequestedDeviceId;
- // Fill in config
- config.format = audioFormat;
-
- int32_t aaudioSampleRate = getSampleRate();
- if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
- aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
- }
- config.sample_rate = aaudioSampleRate;
-
const aaudio_direction_t direction = getDirection();
- config.channel_mask = AAudio_getChannelMaskForOpen(
- getChannelMask(), getSamplesPerFrame(), direction == AAUDIO_DIRECTION_INPUT);
-
if (direction == AAUDIO_DIRECTION_OUTPUT) {
mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later
@@ -177,11 +194,11 @@
// Open HAL stream. Set mMmapStream
ALOGD("%s trying to open MMAP stream with format=%#x, "
"sample_rate=%u, channel_mask=%#x, device=%d",
- __func__, config.format, config.sample_rate,
- config.channel_mask, deviceId);
+ __func__, config->format, config->sample_rate,
+ config->channel_mask, deviceId);
const status_t status = MmapStreamInterface::openMmapStream(streamDirection,
&attributes,
- &config,
+ config,
mMmapClient,
&deviceId,
&sessionId,
@@ -195,9 +212,9 @@
// not match the hardware.
ALOGD("%s() - openMmapStream() returned status=%d, suggested format=%#x, sample_rate=%u, "
"channel_mask=%#x",
- __func__, status, config.format, config.sample_rate, config.channel_mask);
- *nextFormatToTry = config.format != audioFormat ? config.format
- : *nextFormatToTry;
+ __func__, status, config->format, config->sample_rate, config->channel_mask);
+ // Keep the channel mask of the current config
+ config->channel_mask = currentConfig.channel_mask;
return AAUDIO_ERROR_UNAVAILABLE;
}
@@ -217,7 +234,7 @@
setSessionId(actualSessionId);
ALOGD("%s(format = 0x%X) deviceId = %d, sessionId = %d",
- __func__, audioFormat, getDeviceId(), getSessionId());
+ __func__, config->format, getDeviceId(), getSessionId());
// Create MMAP/NOIRQ buffer.
result = createMmapBuffer();
@@ -227,11 +244,11 @@
// Get information about the stream and pass it back to the caller.
setChannelMask(AAudioConvert_androidToAAudioChannelMask(
- config.channel_mask, getDirection() == AAUDIO_DIRECTION_INPUT,
- AAudio_isChannelIndexMask(config.channel_mask)));
+ config->channel_mask, getDirection() == AAUDIO_DIRECTION_INPUT,
+ AAudio_isChannelIndexMask(config->channel_mask)));
- setFormat(config.format);
- setSampleRate(config.sample_rate);
+ setFormat(config->format);
+ setSampleRate(config->sample_rate);
setHardwareSampleRate(getSampleRate());
setHardwareFormat(getFormat());
setHardwareSamplesPerFrame(AAudioConvert_channelMaskToCount(getChannelMask()));
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 38cf0ba..f19005c 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -97,7 +97,7 @@
private:
- aaudio_result_t openWithFormat(audio_format_t audioFormat, audio_format_t* nextFormatToTry);
+ aaudio_result_t openWithConfig(audio_config_base_t* config);
aaudio_result_t createMmapBuffer();