aaudio: convert mono output to stereo
MMAP hardware streams are often stereo, but apps often want to
output a mono stream. Since converting from mono to stereo is easy
(see the sketch below), we can open a stereo hardware stream and
convert the app's mono data to stereo for the HW.
Add getDeviceChannelCount().
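
For reference, mono-to-stereo expansion just copies each mono sample
into both output channels. A minimal sketch (a hypothetical helper,
not the converter code added by this change):

    // Hypothetical illustration only, not part of this change:
    // duplicate each mono sample into the left and right channels.
    #include <cstdint>

    static void expandMonoToStereo(const float *monoIn, float *stereoOut,
                                   int32_t numFrames) {
        for (int32_t i = 0; i < numFrames; i++) {
            const float sample = monoIn[i];
            stereoOut[2 * i]     = sample;  // left
            stereoOut[2 * i + 1] = sample;  // right
        }
    }
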
Test: adb shell write_sine_callback -pl -s10 -c1 -m3
Test: adb shell write_sine_callback -pl -s10 -c1 -m3 -x
Change-Id: I444a38c6f5cd32d1d6113f16aacec68285a1bc82
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 5660c1b..7695dfa 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -194,7 +194,7 @@
// ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
// buffer, numFrames);
WrappingBuffer wrappingBuffer;
- uint8_t *source = (uint8_t *) buffer;
+ uint8_t *byteBuffer = (uint8_t *) buffer;
int32_t framesLeft = numFrames;
mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
@@ -208,69 +208,26 @@
if (framesToWrite > framesAvailable) {
framesToWrite = framesAvailable;
}
+
int32_t numBytes = getBytesPerFrame() * framesToWrite;
- int32_t numSamples = framesToWrite * getSamplesPerFrame();
// Data conversion.
float levelFrom;
float levelTo;
- bool ramping = mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
- // The formats are validated when the stream is opened so we do not have to
- // check for illegal combinations here.
- // TODO factor this out into a utility function
- if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
- if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- AAudio_linearRamp(
- (const float *) source,
- (float *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
- if (ramping) {
- AAudioConvert_floatToPcm16(
- (const float *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_floatToPcm16(
- (const float *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- numSamples,
- levelTo);
- }
- }
- } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
- if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- if (ramping) {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source,
- (float *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source,
- (float *) wrappingBuffer.data[partIndex],
- numSamples,
- levelTo);
- }
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
- AAudio_linearRamp(
- (const int16_t *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- }
- }
- source += numBytes;
+ mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
+
+ AAudioDataConverter::FormattedData source(
+ (void *)byteBuffer,
+ getFormat(),
+ getSamplesPerFrame());
+ AAudioDataConverter::FormattedData destination(
+ wrappingBuffer.data[partIndex],
+ getDeviceFormat(),
+ getDeviceChannelCount());
+
+ AAudioDataConverter::convert(source, destination, framesToWrite,
+ levelFrom, levelTo);
+
+ byteBuffer += numBytes;
framesLeft -= framesToWrite;
} else {
break;