Merge "AudioFlinger: Extract inner TrackHandle and RecordHandle classes"
diff --git a/media/audioaidlconversion/AidlConversionEffect.cpp b/media/audioaidlconversion/AidlConversionEffect.cpp
index 611cfab..6f55f1b 100644
--- a/media/audioaidlconversion/AidlConversionEffect.cpp
+++ b/media/audioaidlconversion/AidlConversionEffect.cpp
@@ -52,6 +52,7 @@
using ::android::status_t;
using ::android::base::unexpected;
using ::android::effect::utils::EffectParamReader;
+using ::android::effect::utils::EffectParamWrapper;
using ::android::effect::utils::EffectParamWriter;
////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -408,40 +409,7 @@
}
}
-/**
- * Copy the parameter area of effect_param_t to DefaultExtension::bytes.
- */
-ConversionResult<VendorExtension> legacy2aidl_EffectParameterReader_Param_VendorExtension(
- EffectParamReader& param) {
- size_t len = param.getParameterSize();
- DefaultExtension defaultExt;
- defaultExt.bytes.resize(len);
- RETURN_IF_ERROR(param.readFromParameter(defaultExt.bytes.data(), len));
-
- VendorExtension ext;
- ext.extension.setParcelable(defaultExt);
- return ext;
-}
-
-/**
- * Copy the data area of effect_param_t to DefaultExtension::bytes.
- */
-ConversionResult<VendorExtension> legacy2aidl_EffectParameterReader_Data_VendorExtension(
- EffectParamReader& param) {
- size_t len = param.getValueSize();
- DefaultExtension defaultExt;
- defaultExt.bytes.resize(len);
- RETURN_IF_ERROR(param.readFromValue(defaultExt.bytes.data(), len));
-
- VendorExtension ext;
- ext.extension.setParcelable(defaultExt);
- return ext;
-}
-
-/**
- * Copy DefaultExtension::bytes to the data area of effect_param_t.
- */
-ConversionResult<status_t> aidl2legacy_VendorExtension_EffectParameterWriter_Data(
+ConversionResult<status_t> aidl2legacy_VendorExtension_EffectParameterWriter(
EffectParamWriter& param, VendorExtension ext) {
std::optional<DefaultExtension> defaultExt;
RETURN_IF_ERROR(ext.extension.getParcelable(&defaultExt));
@@ -449,26 +417,47 @@
return unexpected(BAD_VALUE);
}
- RETURN_IF_ERROR(param.writeToValue(defaultExt->bytes.data(), defaultExt->bytes.size()));
+ if (defaultExt->bytes.size() < sizeof(effect_param_t)) {
+ return unexpected(BAD_VALUE);
+ }
+ // Verify the data length with EffectParamWrapper: the DefaultExtension byte array must be
+ // at least (sizeof(effect_param_t) + paddedPSize + vSize) bytes.
+ EffectParamWrapper wrapper(*(effect_param_t*)defaultExt->bytes.data());
+ if (sizeof(effect_param_t) + wrapper.getPaddedParameterSize() + wrapper.getValueSize() >
+ defaultExt->bytes.size()) {
+ return unexpected(BAD_VALUE);
+ }
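+ // Illustrative sizes: psize = 6 is padded up to 8 (a multiple of sizeof(int)), so with
+ // vsize = 16 the payload must hold at least sizeof(effect_param_t) + 24 bytes.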
+ RETURN_IF_ERROR(param.overwrite(wrapper.getEffectParam()));
return OK;
}
-ConversionResult<Parameter> legacy2aidl_EffectParameterReader_ParameterExtension(
+ConversionResult<VendorExtension> legacy2aidl_EffectParameterReader_VendorExtension(
EffectParamReader& param) {
- VendorExtension ext =
- VALUE_OR_RETURN(legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
- return UNION_MAKE(Parameter, specific, UNION_MAKE(Parameter::Specific, vendorEffect, ext));
+ size_t len = param.getTotalSize();
+ DefaultExtension defaultExt;
+ defaultExt.bytes.resize(len);
+
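+ // Copy the whole effect_param_t (header, parameter and value areas) into
+ // DefaultExtension::bytes so the receiver can reconstruct the original structure.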
+ std::memcpy(defaultExt.bytes.data(), (void *)&param.getEffectParam(), len);
+
+ VendorExtension ext;
+ ext.extension.setParcelable(defaultExt);
+ return ext;
}
-ConversionResult<::android::status_t> aidl2legacy_ParameterExtension_EffectParameterWriter(
+ConversionResult<::android::status_t> aidl2legacy_Parameter_EffectParameterWriter(
const ::aidl::android::hardware::audio::effect::Parameter& aidl,
EffectParamWriter& legacy) {
VendorExtension ext = VALUE_OR_RETURN(
(::aidl::android::getParameterSpecific<Parameter, VendorExtension,
Parameter::Specific::vendorEffect>(aidl)));
- return VALUE_OR_RETURN_STATUS(
- aidl2legacy_VendorExtension_EffectParameterWriter_Data(legacy, ext));
+ return VALUE_OR_RETURN_STATUS(aidl2legacy_VendorExtension_EffectParameterWriter(legacy, ext));
+}
+
+ConversionResult<Parameter> legacy2aidl_EffectParameterReader_Parameter(EffectParamReader& param) {
+ VendorExtension ext = VALUE_OR_RETURN(legacy2aidl_EffectParameterReader_VendorExtension(param));
+ return UNION_MAKE(Parameter, specific, UNION_MAKE(Parameter::Specific, vendorEffect, ext));
}
} // namespace android
diff --git a/media/audioaidlconversion/include/media/AidlConversionEffect.h b/media/audioaidlconversion/include/media/AidlConversionEffect.h
index 5e245a7..b03d06b 100644
--- a/media/audioaidlconversion/include/media/AidlConversionEffect.h
+++ b/media/audioaidlconversion/include/media/AidlConversionEffect.h
@@ -67,7 +67,7 @@
#define VENDOR_EXTENSION_GET_AND_RETURN(_effect, _tag, _param) \
{ \
aidl::android::hardware::audio::effect::VendorExtension _extId = VALUE_OR_RETURN_STATUS( \
- aidl::android::legacy2aidl_EffectParameterReader_Param_VendorExtension(_param)); \
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(_param)); \
aidl::android::hardware::audio::effect::Parameter::Id _id = \
MAKE_EXTENSION_PARAMETER_ID(_effect, _tag##Tag, _extId); \
aidl::android::hardware::audio::effect::Parameter _aidlParam; \
@@ -76,8 +76,7 @@
VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD( \
_aidlParam, _effect, _tag, _effect::vendor, VendorExtension)); \
return VALUE_OR_RETURN_STATUS( \
- aidl::android::aidl2legacy_ParameterExtension_EffectParameterWriter(_aidlParam, \
- _param)); \
+ aidl::android::aidl2legacy_Parameter_EffectParameterWriter(_aidlParam, _param)); \
}
ConversionResult<uint32_t> aidl2legacy_Flags_Type_uint32(
@@ -157,26 +156,26 @@
ConversionResult<::aidl::android::hardware::audio::effect::Visualizer::MeasurementMode>
legacy2aidl_Parameter_Visualizer_uint32_MeasurementMode(uint32_t legacy);
-ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
-legacy2aidl_EffectParameterReader_ParameterExtension(
- ::android::effect::utils::EffectParamReader& param);
-ConversionResult<::android::status_t> aidl2legacy_ParameterExtension_EffectParameterWriter(
- const ::aidl::android::hardware::audio::effect::Parameter& aidl,
- ::android::effect::utils::EffectParamWriter& legacy);
-
-ConversionResult<::aidl::android::hardware::audio::effect::VendorExtension>
-legacy2aidl_EffectParameterReader_Param_VendorExtension(
- ::android::effect::utils::EffectParamReader& param);
-ConversionResult<::aidl::android::hardware::audio::effect::VendorExtension>
-legacy2aidl_EffectParameterReader_Data_VendorExtension(
- ::android::effect::utils::EffectParamReader& param);
-
+/**
+ * Read DefaultExtension from VendorExtension, and overwrite to the entire effect_param_t (both
+ * parameter and data area) with EffectParamWriter::overwrite.
+ */
-ConversionResult<::android::status_t> aidl2legacy_VendorExtension_EffectParameterWriter_Data(
+ConversionResult<::android::status_t> aidl2legacy_VendorExtension_EffectParameterWriter(
::android::effect::utils::EffectParamWriter& param,
::aidl::android::hardware::audio::effect::VendorExtension ext);
-ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
-legacy2aidl_EffectParameterReader_ParameterExtension(
+/**
+ * Copy the entire effect_param_t (both parameter and data area) to DefaultExtension::bytes, and
+ * write into VendorExtension.
+ */
+ConversionResult<::aidl::android::hardware::audio::effect::VendorExtension>
+legacy2aidl_EffectParameterReader_VendorExtension(
::android::effect::utils::EffectParamReader& param);
+ConversionResult<::android::status_t> aidl2legacy_Parameter_EffectParameterWriter(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl,
+ ::android::effect::utils::EffectParamWriter& legacy);
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_EffectParameterReader_Parameter(
+ ::android::effect::utils::EffectParamReader& param);
} // namespace android
} // namespace aidl
diff --git a/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp b/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
index 332d3ac..bb6c1b8 100644
--- a/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
@@ -82,6 +82,7 @@
return false;
}
+ // Default scenario: the consumer is the display or the GPU
const AHardwareBuffer_Desc desc = {
.width = 320,
.height = 240,
@@ -96,7 +97,40 @@
.rfu1 = 0,
};
- return AHardwareBuffer_isSupported(&desc);
+ // The consumer is a HW encoder
+ const AHardwareBuffer_Desc descHwEncoder = {
+ .width = 320,
+ .height = 240,
+ .format = format,
+ .layers = 1,
+ .usage = AHARDWAREBUFFER_USAGE_CPU_READ_RARELY |
+ AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
+ AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
+ AHARDWAREBUFFER_USAGE_COMPOSER_OVERLAY |
+ AHARDWAREBUFFER_USAGE_VIDEO_ENCODE,
+ .stride = 0,
+ .rfu0 = 0,
+ .rfu1 = 0,
+ };
+
+ // The consumer is a SW encoder
+ const AHardwareBuffer_Desc descSwEncoder = {
+ .width = 320,
+ .height = 240,
+ .format = format,
+ .layers = 1,
+ .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
+ AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
+ AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
+ AHARDWAREBUFFER_USAGE_COMPOSER_OVERLAY,
+ .stride = 0,
+ .rfu0 = 0,
+ .rfu1 = 0,
+ };
+
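+ // A pixel format is only reported as supported when every expected consumer
+ // (display/GPU, HW encoder, SW encoder) can allocate a buffer with it.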
+ return AHardwareBuffer_isSupported(&desc)
+ && AHardwareBuffer_isSupported(&descHwEncoder)
+ && AHardwareBuffer_isSupported(&descSwEncoder);
}
} // namespace android
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.cpp b/media/libaaudio/src/client/AAudioFlowGraph.cpp
index 5444565..69be050 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.cpp
+++ b/media/libaaudio/src/client/AAudioFlowGraph.cpp
@@ -39,18 +39,21 @@
aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
int32_t sourceChannelCount,
+ int32_t sourceSampleRate,
audio_format_t sinkFormat,
int32_t sinkChannelCount,
+ int32_t sinkSampleRate,
bool useMonoBlend,
+ bool useVolumeRamps,
float audioBalance,
- bool isExclusive) {
+ aaudio::resampler::MultiChannelResampler::Quality resamplerQuality) {
FlowGraphPortFloatOutput *lastOutput = nullptr;
- // TODO change back to ALOGD
- ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d, "
- "useMonoBlend = %d, audioBalance = %f, isExclusive %d",
- __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount,
- useMonoBlend, audioBalance, isExclusive);
+ ALOGD("%s() source format = 0x%08x, channels = %d, sample rate = %d, "
+ "sink format = 0x%08x, channels = %d, sample rate = %d, "
+ "useMonoBlend = %d, audioBalance = %f, useVolumeRamps %d",
+ __func__, sourceFormat, sourceChannelCount, sourceSampleRate, sinkFormat,
+ sinkChannelCount, sinkSampleRate, useMonoBlend, audioBalance, useVolumeRamps);
switch (sourceFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
@@ -85,6 +88,15 @@
lastOutput = &mLimiter->output;
}
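+ // Insert a sample rate converter when the source and sink rates differ; everything
+ // downstream of it then runs at the sink sample rate.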
+ if (sourceSampleRate != sinkSampleRate) {
+ mResampler.reset(aaudio::resampler::MultiChannelResampler::make(sinkChannelCount,
+ sourceSampleRate, sinkSampleRate, resamplerQuality));
+ mRateConverter = std::make_unique<SampleRateConverter>(sinkChannelCount,
+ *mResampler);
+ lastOutput->connect(&mRateConverter->input);
+ lastOutput = &mRateConverter->output;
+ }
+
// Expand the number of channels if required.
if (sourceChannelCount == 1 && sinkChannelCount > 1) {
mChannelConverter = std::make_unique<MonoToMultiConverter>(sinkChannelCount);
@@ -95,8 +107,7 @@
return AAUDIO_ERROR_UNIMPLEMENTED;
}
- // Apply volume ramps for only exclusive streams.
- if (isExclusive) {
+ if (useVolumeRamps) {
// Apply volume ramps to set the left/right audio balance and target volumes.
// The signals will be decoupled, volume ramps will be applied, before the signals are
// combined again.
@@ -137,9 +148,14 @@
return AAUDIO_OK;
}
-void AAudioFlowGraph::process(const void *source, void *destination, int32_t numFrames) {
- mSource->setData(source, numFrames);
- mSink->read(destination, numFrames);
+int32_t AAudioFlowGraph::pull(void *destination, int32_t targetFramesToRead) {
+ return mSink->read(destination, targetFramesToRead);
+}
+
+int32_t AAudioFlowGraph::process(const void *source, int32_t numFramesToWrite, void *destination,
+ int32_t targetFramesToRead) {
+ mSource->setData(source, numFramesToWrite);
+ return mSink->read(destination, targetFramesToRead);
}
/**
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
index 35fef37..e1d517e 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.h
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -30,6 +30,7 @@
#include <flowgraph/MonoToMultiConverter.h>
#include <flowgraph/MultiToManyConverter.h>
#include <flowgraph/RampLinear.h>
+#include <flowgraph/SampleRateConverter.h>
class AAudioFlowGraph {
public:
@@ -38,23 +39,57 @@
*
* @param sourceFormat
* @param sourceChannelCount
+ * @param sourceSampleRate
* @param sinkFormat
* @param sinkChannelCount
+ * @param sinkSampleRate
* @param useMonoBlend
+ * @param useVolumeRamps
* @param audioBalance
- * @param channelMask
- * @param isExclusive
+ * @param resamplerQuality
* @return
*/
aaudio_result_t configure(audio_format_t sourceFormat,
int32_t sourceChannelCount,
+ int32_t sourceSampleRate,
audio_format_t sinkFormat,
int32_t sinkChannelCount,
+ int32_t sinkSampleRate,
bool useMonoBlend,
+ bool useVolumeRamps,
float audioBalance,
- bool isExclusive);
+ aaudio::resampler::MultiChannelResampler::Quality resamplerQuality);
- void process(const void *source, void *destination, int32_t numFrames);
+ /**
+ * Attempt to read targetFramesToRead from the flowgraph.
+ * This function returns the number of frames actually read.
+ *
+ * This function does nothing if process() has not been called first.
+ *
+ * @param destination
+ * @param targetFramesToRead
+ * @return numFramesRead
+ */
+ int32_t pull(void *destination, int32_t targetFramesToRead);
+
+ /**
+ * Set numFramesToWrite frames from the source into the flowgraph.
+ * Then, attempt to read targetFramesToRead from the flowgraph.
+ * This function returns the number of frames actually read.
+ *
+ * There may still be data left in the flowgraph if targetFramesToRead is not large enough.
+ * Before calling process() again, pull() must be called until all the data is consumed.
+ *
+ * TODO: b/289510598 - Calculate the exact number of input frames needed for Y output frames.
+ *
+ * @param source
+ * @param numFramesToWrite
+ * @param destination
+ * @param targetFramesToRead
+ * @return numFramesRead
+ */
+ int32_t process(const void *source, int32_t numFramesToWrite, void *destination,
+ int32_t targetFramesToRead);
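+
+ // Usage sketch (illustrative, assuming the destination is large enough): push one chunk
+ // of input, then drain whatever remains buffered:
+ //   int32_t framesRead = flowgraph.process(src, srcFrames, dst, dstFrames);
+ //   framesRead += flowgraph.pull((uint8_t *) dst + framesRead * bytesPerFrame,
+ //                                dstFrames - framesRead);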
/**
* @param volume between 0.0 and 1.0
@@ -73,6 +108,8 @@
private:
std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::FlowGraphSourceBuffered> mSource;
+ std::unique_ptr<RESAMPLER_OUTER_NAMESPACE::resampler::MultiChannelResampler> mResampler;
+ std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::SampleRateConverter> mRateConverter;
std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::MonoBlend> mMonoBlend;
std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::Limiter> mLimiter;
std::unique_ptr<FLOWGRAPH_OUTER_NAMESPACE::flowgraph::MonoToMultiConverter> mChannelConverter;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 93ac966..d75832f 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -63,6 +63,8 @@
#define LOG_TIMESTAMPS 0
+#define ENABLE_SAMPLE_RATE_CONVERTER 1
+
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
: AudioStream()
, mClockModel()
@@ -179,7 +181,6 @@
mDeviceChannelCount = configurationOutput.getSamplesPerFrame();
- setSampleRate(configurationOutput.getSampleRate());
setDeviceId(configurationOutput.getDeviceId());
setSessionId(configurationOutput.getSessionId());
setSharingMode(configurationOutput.getSharingMode());
@@ -190,6 +191,18 @@
setIsContentSpatialized(configurationOutput.isContentSpatialized());
setInputPreset(configurationOutput.getInputPreset());
+ setDeviceSampleRate(configurationOutput.getSampleRate());
+
+ if (getSampleRate() == AAUDIO_UNSPECIFIED) {
+ setSampleRate(configurationOutput.getSampleRate());
+ }
+
+#if !ENABLE_SAMPLE_RATE_CONVERTER
+ if (getSampleRate() != getDeviceSampleRate()) {
+ goto error;
+ }
+#endif
+
// Save device format so we can do format conversion and volume scaling together.
setDeviceFormat(configurationOutput.getFormat());
@@ -229,39 +242,46 @@
}
aaudio_result_t AudioStreamInternal::configureDataInformation(int32_t callbackFrames) {
- int32_t framesPerHardwareBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+ int32_t deviceFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
// Scale up the burst size to meet the minimum equivalent in microseconds.
// This is to avoid waking the CPU too often when the HW burst is very small
- // or at high sample rates.
- int32_t framesPerBurst = framesPerHardwareBurst;
+ // or at high sample rates. The actual number of frames that we call back to
+ // the app with will be 0 < N <= framesPerBurst so round up the division.
+ int32_t framesPerBurst = (static_cast<int64_t>(deviceFramesPerBurst) * getSampleRate() +
+ getDeviceSampleRate() - 1) / getDeviceSampleRate();
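+ // e.g. a 96-frame device burst at 48000 Hz with a 44100 Hz app stream gives
+ // ceil(96 * 44100 / 48000) = 89 app frames per burst (illustrative rates).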
int32_t burstMicros = 0;
const int32_t burstMinMicros = android::AudioSystem::getAAudioHardwareBurstMinUsec();
do {
if (burstMicros > 0) { // skip first loop
+ deviceFramesPerBurst *= 2;
framesPerBurst *= 2;
}
burstMicros = framesPerBurst * static_cast<int64_t>(1000000) / getSampleRate();
} while (burstMicros < burstMinMicros);
ALOGD("%s() original HW burst = %d, minMicros = %d => SW burst = %d\n",
- __func__, framesPerHardwareBurst, burstMinMicros, framesPerBurst);
+ __func__, deviceFramesPerBurst, burstMinMicros, framesPerBurst);
// Validate final burst size.
if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
ALOGE("%s - framesPerBurst out of range = %d", __func__, framesPerBurst);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
+ setDeviceFramesPerBurst(deviceFramesPerBurst);
setFramesPerBurst(framesPerBurst); // only save good value
- mBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
+ mDeviceBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
+
+ mBufferCapacityInFrames = static_cast<int64_t>(mDeviceBufferCapacityInFrames)
+ * getSampleRate() / getDeviceSampleRate();
if (mBufferCapacityInFrames < getFramesPerBurst()
|| mBufferCapacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
ALOGE("%s - bufferCapacity out of range = %d", __func__, mBufferCapacityInFrames);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
- mClockModel.setSampleRate(getSampleRate());
- mClockModel.setFramesPerBurst(framesPerHardwareBurst);
+ mClockModel.setSampleRate(getDeviceSampleRate());
+ mClockModel.setFramesPerBurst(deviceFramesPerBurst);
if (isDataCallbackSet()) {
mCallbackFrames = callbackFrames;
@@ -311,7 +331,8 @@
mTimeOffsetNanos = offsetMicros * AAUDIO_NANOS_PER_MICROSECOND;
}
- setBufferSize(mBufferCapacityInFrames / 2); // Default buffer size to match Q
+ // Default buffer size to match Q
+ setBufferSize(mBufferCapacityInFrames / 2);
return AAUDIO_OK;
}
@@ -370,9 +391,9 @@
// Cache the buffer size which may be from client.
const int32_t previousBufferSize = mBufferSizeInFrames;
// Copy all available data from current data queue.
- uint8_t buffer[getBufferCapacity() * getBytesPerFrame()];
- android::fifo_frames_t fullFramesAvailable =
- mAudioEndpoint->read(buffer, getBufferCapacity());
+ uint8_t buffer[getDeviceBufferCapacity() * getBytesPerFrame()];
+ android::fifo_frames_t fullFramesAvailable = mAudioEndpoint->read(buffer,
+ getDeviceBufferCapacity());
mEndPointParcelable.closeDataFileDescriptor();
aaudio_result_t result = mServiceInterface.exitStandby(
mServiceStreamHandleInfo, endpointParcelable);
@@ -404,7 +425,7 @@
goto exit;
}
// Write data from previous data buffer to new endpoint.
- if (android::fifo_frames_t framesWritten =
+ if (const android::fifo_frames_t framesWritten =
mAudioEndpoint->write(buffer, fullFramesAvailable);
framesWritten != fullFramesAvailable) {
ALOGW("Some data lost after exiting standby, frames written: %d, "
@@ -444,7 +465,7 @@
ALOGD("requestStart() but DISCONNECTED");
return AAUDIO_ERROR_DISCONNECTED;
}
- aaudio_stream_state_t originalState = getState();
+ const aaudio_stream_state_t originalState = getState();
setState(AAUDIO_STREAM_STATE_STARTING);
// Clear any stale timestamps from the previous run.
@@ -601,7 +622,11 @@
// Generated in server and passed to client. Return latest.
if (mAtomicInternalTimestamp.isValid()) {
Timestamp timestamp = mAtomicInternalTimestamp.read();
- int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
+ // This should not overflow as timestamp.getPosition() should be a position in a buffer and
+ // not the actual timestamp. timestamp.getNanoseconds() below uses the actual timestamp.
+ // At 48000 Hz we can run for over 100 years before overflowing the int64_t.
+ int64_t position = (timestamp.getPosition() + mFramesOffsetFromService) * getSampleRate() /
+ getDeviceSampleRate();
if (position >= 0) {
*framePosition = position;
*timeNanoseconds = timestamp.getNanoseconds();
@@ -885,7 +910,8 @@
adjustedFrames = maximumSize;
} else {
// Round to the next highest burst size.
- int32_t numBursts = (adjustedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
+ int32_t numBursts = (static_cast<int64_t>(adjustedFrames) + getFramesPerBurst() - 1) /
+ getFramesPerBurst();
adjustedFrames = numBursts * getFramesPerBurst();
// Clip just in case maximumSize is not a multiple of getFramesPerBurst().
adjustedFrames = std::min(maximumSize, adjustedFrames);
@@ -893,23 +919,32 @@
if (mAudioEndpoint) {
// Clip against the actual size from the endpoint.
- int32_t actualFrames = 0;
+ int32_t actualFramesDevice = 0;
+ int32_t maximumFramesDevice = (static_cast<int64_t>(maximumSize) * getDeviceSampleRate()
+ + getSampleRate() - 1) / getSampleRate();
// Set to maximum size so we can write extra data when ready in order to reduce glitches.
// The amount we keep in the buffer is controlled by mBufferSizeInFrames.
- mAudioEndpoint->setBufferSizeInFrames(maximumSize, &actualFrames);
+ mAudioEndpoint->setBufferSizeInFrames(maximumFramesDevice, &actualFramesDevice);
+ int32_t actualFrames = (static_cast<int64_t>(actualFramesDevice) * getSampleRate() +
+ getDeviceSampleRate() - 1) / getDeviceSampleRate();
// actualFrames should be <= actual maximum size of endpoint
adjustedFrames = std::min(actualFrames, adjustedFrames);
}
- if (adjustedFrames != mBufferSizeInFrames) {
+ const int32_t bufferSizeInFrames = adjustedFrames;
+ const int32_t deviceBufferSizeInFrames = static_cast<int64_t>(bufferSizeInFrames) *
+ getDeviceSampleRate() / getSampleRate();
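+ // e.g. 1024 app frames at 44100 Hz map to (1024 * 48000) / 44100 = 1114 device frames
+ // for a 48000 Hz device (illustrative rates; the integer division truncates).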
+
+ if (deviceBufferSizeInFrames != mDeviceBufferSizeInFrames) {
android::mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_SETBUFFERSIZE)
- .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, adjustedFrames)
+ .set(AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, deviceBufferSizeInFrames)
.set(AMEDIAMETRICS_PROP_UNDERRUN, (int32_t) getXRunCount())
.record();
}
- mBufferSizeInFrames = adjustedFrames;
+ mBufferSizeInFrames = bufferSizeInFrames;
+ mDeviceBufferSizeInFrames = deviceBufferSizeInFrames;
ALOGV("%s(%d) returns %d", __func__, requestedFrames, adjustedFrames);
return (aaudio_result_t) adjustedFrames;
}
@@ -918,10 +953,18 @@
return mBufferSizeInFrames;
}
+int32_t AudioStreamInternal::getDeviceBufferSize() const {
+ return mDeviceBufferSizeInFrames;
+}
+
int32_t AudioStreamInternal::getBufferCapacity() const {
return mBufferCapacityInFrames;
}
+int32_t AudioStreamInternal::getDeviceBufferCapacity() const {
+ return mDeviceBufferCapacityInFrames;
+}
+
bool AudioStreamInternal::isClockModelInControl() const {
return isActive() && mAudioEndpoint->isFreeRunning() && mClockModel.isRunning();
}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 9c06121..0dc9995 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -22,8 +22,9 @@
#include "binding/AudioEndpointParcelable.h"
#include "binding/AAudioServiceInterface.h"
-#include "client/IsochronousClockModel.h"
+#include "client/AAudioFlowGraph.h"
#include "client/AudioEndpoint.h"
+#include "client/IsochronousClockModel.h"
#include "core/AudioStream.h"
#include "utility/AudioClock.h"
@@ -56,8 +57,12 @@
int32_t getBufferSize() const override;
+ int32_t getDeviceBufferSize() const;
+
int32_t getBufferCapacity() const override;
+ int32_t getDeviceBufferCapacity() const override;
+
int32_t getXRunCount() const override {
return mXRunCount;
}
@@ -177,6 +182,8 @@
int64_t mLastFramesWritten = 0;
int64_t mLastFramesRead = 0;
+ AAudioFlowGraph mFlowGraph;
+
private:
/*
* Asynchronous write with data conversion.
@@ -211,8 +218,9 @@
int32_t mDeviceChannelCount = 0;
int32_t mBufferSizeInFrames = 0; // local threshold to control latency
+ int32_t mDeviceBufferSizeInFrames = 0;
int32_t mBufferCapacityInFrames = 0;
-
+ int32_t mDeviceBufferCapacityInFrames = 0;
};
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index f5cc2be..47518d7 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -47,6 +47,27 @@
}
+aaudio_result_t AudioStreamInternalCapture::open(const AudioStreamBuilder &builder) {
+ aaudio_result_t result = AudioStreamInternal::open(builder);
+ if (result == AAUDIO_OK) {
+ result = mFlowGraph.configure(getDeviceFormat(),
+ getDeviceChannelCount(),
+ getDeviceSampleRate(),
+ getFormat(),
+ getSamplesPerFrame(),
+ getSampleRate(),
+ getRequireMonoBlend(),
+ false /* useVolumeRamps */,
+ getAudioBalance(),
+ aaudio::resampler::MultiChannelResampler::Quality::Medium);
+
+ if (result != AAUDIO_OK) {
+ safeReleaseClose();
+ }
+ }
+ return result;
+}
+
void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
int64_t readCounter = mAudioEndpoint->getDataReadCounter();
int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;
@@ -149,7 +170,8 @@
// Calculate frame position based off of the readCounter because
// the writeCounter might have just advanced in the background,
// causing us to sleep until a later burst.
- int64_t nextPosition = mAudioEndpoint->getDataReadCounter() + getFramesPerBurst();
+ const int64_t nextPosition = mAudioEndpoint->getDataReadCounter() +
+ getDeviceFramesPerBurst();
wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
}
break;
@@ -166,42 +188,73 @@
aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
int32_t numFrames) {
- // ALOGD("readNowWithConversion(%p, %d)",
- // buffer, numFrames);
WrappingBuffer wrappingBuffer;
- uint8_t *destination = (uint8_t *) buffer;
- int32_t framesLeft = numFrames;
+ uint8_t *byteBuffer = (uint8_t *) buffer;
+ int32_t framesLeftInByteBuffer = numFrames;
+
+ if (framesLeftInByteBuffer > 0) {
+ // Pull data from the flowgraph in case there is residual data.
+ const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.pull(
+ (void *)byteBuffer,
+ framesLeftInByteBuffer);
+
+ const int32_t numBytesActuallyWrittenToByteBuffer =
+ framesActuallyWrittenToByteBuffer * getBytesPerFrame();
+ byteBuffer += numBytesActuallyWrittenToByteBuffer;
+ framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
+ }
mAudioEndpoint->getFullFramesAvailable(&wrappingBuffer);
- // Read data in one or two parts.
- for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
- int32_t framesToProcess = framesLeft;
- const int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- if (framesAvailable <= 0) break;
+ // Write data in one or two parts.
+ int partIndex = 0;
+ int framesReadFromAudioEndpoint = 0;
+ while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
+ const int32_t totalFramesInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
+ int32_t framesAvailableInWrappingBuffer = totalFramesInWrappingBuffer;
+ uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];
- if (framesToProcess > framesAvailable) {
- framesToProcess = framesAvailable;
+ // Put data from the wrapping buffer into the flowgraph 8 frames at a time.
+ // Continuously pull as much data as possible from the flowgraph into the byte buffer.
+ // The return value of mFlowGraph.process is the number of frames actually pulled.
+ while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
+ const int32_t framesToReadFromWrappingBuffer = std::min(flowgraph::kDefaultBufferSize,
+ framesAvailableInWrappingBuffer);
+
+ const int32_t numBytesToReadFromWrappingBuffer = getBytesPerDeviceFrame() *
+ framesToReadFromWrappingBuffer;
+
+ // If framesActuallyWrittenToByteBuffer < framesLeftInByteBuffer, it is guaranteed
+ // that all the data is pulled. If there is no more space in the byteBuffer, the
+ // remaining data will be pulled in the following readNowWithConversion().
+ const int32_t framesActuallyWrittenToByteBuffer = mFlowGraph.process(
+ (void *)currentWrappingBuffer,
+ framesToReadFromWrappingBuffer,
+ (void *)byteBuffer,
+ framesLeftInByteBuffer);
+
+ const int32_t numBytesActuallyWrittenToByteBuffer =
+ framesActuallyWrittenToByteBuffer * getBytesPerFrame();
+ byteBuffer += numBytesActuallyWrittenToByteBuffer;
+ framesLeftInByteBuffer -= framesActuallyWrittenToByteBuffer;
+ currentWrappingBuffer += numBytesToReadFromWrappingBuffer;
+ framesAvailableInWrappingBuffer -= framesToReadFromWrappingBuffer;
+
+ //ALOGD("%s() numBytesActuallyWrittenToByteBuffer %d, framesLeftInByteBuffer %d"
+ // "framesAvailableInWrappingBuffer %d, framesReadFromAudioEndpoint %d"
+ // , __func__, numBytesActuallyWrittenToByteBuffer, framesLeftInByteBuffer,
+ // framesAvailableInWrappingBuffer, framesReadFromAudioEndpoint);
}
-
- const int32_t numBytes = getBytesPerFrame() * framesToProcess;
- const int32_t numSamples = framesToProcess * getSamplesPerFrame();
-
- const audio_format_t sourceFormat = getDeviceFormat();
- const audio_format_t destinationFormat = getFormat();
-
- memcpy_by_audio_format(destination, destinationFormat,
- wrappingBuffer.data[partIndex], sourceFormat, numSamples);
-
- destination += numBytes;
- framesLeft -= framesToProcess;
+ framesReadFromAudioEndpoint += totalFramesInWrappingBuffer -
+ framesAvailableInWrappingBuffer;
+ partIndex++;
}
- int32_t framesProcessed = numFrames - framesLeft;
- mAudioEndpoint->advanceReadIndex(framesProcessed);
+ // Advance the audio endpoint read index by the number of frames consumed from the
+ // wrapping buffer.
+ mAudioEndpoint->advanceReadIndex(framesReadFromAudioEndpoint);
- //ALOGD("readNowWithConversion() returns %d", framesProcessed);
- return framesProcessed;
+ // The caller should see the number of frames actually written to the app's buffer.
+ return numFrames - framesLeftInByteBuffer;
}
int64_t AudioStreamInternalCapture::getFramesWritten() {
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 87017de..10e247d 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -32,6 +32,8 @@
bool inService = false);
virtual ~AudioStreamInternalCapture() = default;
+ aaudio_result_t open(const AudioStreamBuilder &builder) override;
+
aaudio_result_t read(void *buffer,
int32_t numFrames,
int64_t timeoutNanoseconds) override;
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 89dd8ff..99aa910 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -48,14 +48,18 @@
aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
aaudio_result_t result = AudioStreamInternal::open(builder);
+ const bool useVolumeRamps = (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE);
if (result == AAUDIO_OK) {
result = mFlowGraph.configure(getFormat(),
getSamplesPerFrame(),
+ getSampleRate(),
getDeviceFormat(),
getDeviceChannelCount(),
+ getDeviceSampleRate(),
getRequireMonoBlend(),
+ useVolumeRamps,
getAudioBalance(),
- (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE));
+ aaudio::resampler::MultiChannelResampler::Quality::Medium);
if (result != AAUDIO_OK) {
safeReleaseClose();
@@ -186,7 +190,7 @@
// Sleep if there is too much data in the buffer.
// Calculate an ideal time to wake up.
if (wakeTimePtr != nullptr
- && (mAudioEndpoint->getFullFramesAvailable() >= getBufferSize())) {
+ && (mAudioEndpoint->getFullFramesAvailable() >= getDeviceBufferSize())) {
// By default wake up a few milliseconds from now. // TODO review
int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
aaudio_stream_state_t state = getState();
@@ -206,12 +210,12 @@
// If the appBufferSize is smaller than the endpointBufferSize then
// we will have room to write data beyond the appBufferSize.
// That is a technique used to reduce glitches without adding latency.
- const int32_t appBufferSize = getBufferSize();
+ const int64_t appBufferSize = getDeviceBufferSize();
// The endpoint buffer size is set to the maximum that can be written.
// If we use it then we must carve out some room to write data when we wake up.
- const int32_t endBufferSize = mAudioEndpoint->getBufferSizeInFrames()
- - getFramesPerBurst();
- const int32_t bestBufferSize = std::min(appBufferSize, endBufferSize);
+ const int64_t endBufferSize = mAudioEndpoint->getBufferSizeInFrames()
+ - getDeviceFramesPerBurst();
+ const int64_t bestBufferSize = std::min(appBufferSize, endBufferSize);
int64_t targetReadPosition = mAudioEndpoint->getDataWriteCounter() - bestBufferSize;
wakeTime = mClockModel.convertPositionToTime(targetReadPosition);
}
@@ -232,37 +236,78 @@
int32_t numFrames) {
WrappingBuffer wrappingBuffer;
uint8_t *byteBuffer = (uint8_t *) buffer;
- int32_t framesLeft = numFrames;
+ int32_t framesLeftInByteBuffer = numFrames;
mAudioEndpoint->getEmptyFramesAvailable(&wrappingBuffer);
// Write data in one or two parts.
int partIndex = 0;
- while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
- int32_t framesToWrite = framesLeft;
- int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- if (framesAvailable > 0) {
- if (framesToWrite > framesAvailable) {
- framesToWrite = framesAvailable;
- }
+ int framesWrittenToAudioEndpoint = 0;
+ while (framesLeftInByteBuffer > 0 && partIndex < WrappingBuffer::SIZE) {
+ int32_t framesAvailableInWrappingBuffer = wrappingBuffer.numFrames[partIndex];
+ uint8_t *currentWrappingBuffer = (uint8_t *) wrappingBuffer.data[partIndex];
- int32_t numBytes = getBytesPerFrame() * framesToWrite;
+ if (framesAvailableInWrappingBuffer > 0) {
+ // Pull data from the flowgraph in case there is residual data.
+ const int32_t framesActuallyWrittenToWrappingBuffer = mFlowGraph.pull(
+ (void*) currentWrappingBuffer,
+ framesAvailableInWrappingBuffer);
- mFlowGraph.process((void *)byteBuffer,
- wrappingBuffer.data[partIndex],
- framesToWrite);
+ const int32_t numBytesActuallyWrittenToWrappingBuffer =
+ framesActuallyWrittenToWrappingBuffer * getBytesPerDeviceFrame();
+ currentWrappingBuffer += numBytesActuallyWrittenToWrappingBuffer;
+ framesAvailableInWrappingBuffer -= framesActuallyWrittenToWrappingBuffer;
+ framesWrittenToAudioEndpoint += framesActuallyWrittenToWrappingBuffer;
+ }
- byteBuffer += numBytes;
- framesLeft -= framesToWrite;
- } else {
- break;
+ // Put data from byteBuffer into the flowgraph one buffer (8 frames) at a time.
+ // Continuously pull as much data as possible from the flowgraph into the wrapping buffer.
+ // The return value of mFlowGraph.process is the number of frames actually pulled.
+ while (framesAvailableInWrappingBuffer > 0 && framesLeftInByteBuffer > 0) {
+ const int32_t framesToWriteFromByteBuffer = std::min(flowgraph::kDefaultBufferSize,
+ framesLeftInByteBuffer);
+
+ const int32_t numBytesToWriteFromByteBuffer = getBytesPerFrame() *
+ framesToWriteFromByteBuffer;
+
+ //ALOGD("%s() framesLeftInByteBuffer %d, framesAvailableInWrappingBuffer %d"
+ // "framesToWriteFromByteBuffer %d, numBytesToWriteFromByteBuffer %d"
+ // , __func__, framesLeftInByteBuffer, framesAvailableInWrappingBuffer,
+ // framesToWriteFromByteBuffer, numBytesToWriteFromByteBuffer);
+
+ const int32_t framesActuallyWrittenToWrappingBuffer = mFlowGraph.process(
+ (void *)byteBuffer,
+ framesToWriteFromByteBuffer,
+ (void *)currentWrappingBuffer,
+ framesAvailableInWrappingBuffer);
+
+ byteBuffer += numBytesToWriteFromByteBuffer;
+ framesLeftInByteBuffer -= framesToWriteFromByteBuffer;
+ const int32_t numBytesActuallyWrittenToWrappingBuffer =
+ framesActuallyWrittenToWrappingBuffer * getBytesPerDeviceFrame();
+ currentWrappingBuffer += numBytesActuallyWrittenToWrappingBuffer;
+ framesAvailableInWrappingBuffer -= framesActuallyWrittenToWrappingBuffer;
+ framesWrittenToAudioEndpoint += framesActuallyWrittenToWrappingBuffer;
+
+ //ALOGD("%s() numBytesActuallyWrittenToWrappingBuffer %d, framesLeftInByteBuffer %d"
+ // "framesActuallyWrittenToWrappingBuffer %d, numBytesToWriteFromByteBuffer %d"
+ // "framesWrittenToAudioEndpoint %d"
+ // , __func__, numBytesActuallyWrittenToWrappingBuffer, framesLeftInByteBuffer,
+ // framesActuallyWrittenToWrappingBuffer, numBytesToWriteFromByteBuffer,
+ // framesWrittenToAudioEndpoint);
}
partIndex++;
}
- int32_t framesWritten = numFrames - framesLeft;
- mAudioEndpoint->advanceWriteIndex(framesWritten);
+ //ALOGD("%s() framesWrittenToAudioEndpoint %d, numFrames %d"
+ // "framesLeftInByteBuffer %d"
+ // , __func__, framesWrittenToAudioEndpoint, numFrames,
+ // framesLeftInByteBuffer);
- return framesWritten;
+ // The audio endpoint should reference the number of frames written to the wrapping buffer.
+ mAudioEndpoint->advanceWriteIndex(framesWrittenToAudioEndpoint);
+
+ // The internal code should use the number of frames read from the app.
+ return numFrames - framesLeftInByteBuffer;
}
int64_t AudioStreamInternalPlay::getFramesRead() {
@@ -284,7 +329,6 @@
return mLastFramesWritten;
}
-
// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternalPlay::callbackLoop() {
ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index e761807..b51b5d0 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -21,7 +21,6 @@
#include <aaudio/AAudio.h>
#include "binding/AAudioServiceInterface.h"
-#include "client/AAudioFlowGraph.h"
#include "client/AudioStreamInternal.h"
using android::sp;
@@ -89,13 +88,11 @@
* Asynchronous write with data conversion.
* @param buffer
* @param numFrames
- * @return fdrames written or negative error
+ * @return frames written or negative error
*/
aaudio_result_t writeNowWithConversion(const void *buffer,
int32_t numFrames);
- AAudioFlowGraph mFlowGraph;
-
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 8a13a6f..1e27a81 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -571,13 +571,15 @@
AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
- return audioStream->getFramesWritten();
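+ // The stream counts frames at the device sample rate; scale the counter to the app
+ // sample rate (getFramesRead below is scaled the same way).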
+ return audioStream->getFramesWritten() * audioStream->getSampleRate() /
+ audioStream->getDeviceSampleRate();
}
AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
- return audioStream->getFramesRead();
+ return audioStream->getFramesRead() * audioStream->getSampleRate() /
+ audioStream->getDeviceSampleRate();
}
AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* stream,
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 9b4b734..1649eaf 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -204,10 +204,18 @@
return mBufferCapacity;
}
+ virtual int32_t getDeviceBufferCapacity() const {
+ return mDeviceBufferCapacity;
+ }
+
virtual int32_t getFramesPerBurst() const {
return mFramesPerBurst;
}
+ virtual int32_t getDeviceFramesPerBurst() const {
+ return mDeviceFramesPerBurst;
+ }
+
virtual int32_t getXRunCount() const {
return AAUDIO_ERROR_UNIMPLEMENTED;
}
@@ -224,6 +232,10 @@
return mSampleRate;
}
+ aaudio_result_t getDeviceSampleRate() const {
+ return mDeviceSampleRate;
+ }
+
aaudio_result_t getHardwareSampleRate() const {
return mHardwareSampleRate;
}
@@ -542,6 +554,11 @@
}
// This should not be called after the open() call.
+ void setDeviceSampleRate(int32_t deviceSampleRate) {
+ mDeviceSampleRate = deviceSampleRate;
+ }
+
+ // This should not be called after the open() call.
void setHardwareSampleRate(int32_t hardwareSampleRate) {
mHardwareSampleRate = hardwareSampleRate;
}
@@ -552,11 +569,21 @@
}
// This should not be called after the open() call.
+ void setDeviceFramesPerBurst(int32_t deviceFramesPerBurst) {
+ mDeviceFramesPerBurst = deviceFramesPerBurst;
+ }
+
+ // This should not be called after the open() call.
void setBufferCapacity(int32_t bufferCapacity) {
mBufferCapacity = bufferCapacity;
}
// This should not be called after the open() call.
+ void setDeviceBufferCapacity(int32_t deviceBufferCapacity) {
+ mDeviceBufferCapacity = deviceBufferCapacity;
+ }
+
+ // This should not be called after the open() call.
void setSharingMode(aaudio_sharing_mode_t sharingMode) {
mSharingMode = sharingMode;
}
@@ -724,6 +751,7 @@
int32_t mHardwareSamplesPerFrame = AAUDIO_UNSPECIFIED;
aaudio_channel_mask_t mChannelMask = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
+ int32_t mDeviceSampleRate = AAUDIO_UNSPECIFIED;
int32_t mHardwareSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
@@ -732,7 +760,9 @@
audio_format_t mHardwareFormat = AUDIO_FORMAT_DEFAULT;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
int32_t mFramesPerBurst = 0;
+ int32_t mDeviceFramesPerBurst = 0;
int32_t mBufferCapacity = 0;
+ int32_t mDeviceBufferCapacity = 0;
aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
index ad6d041..e79bf96 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.cpp
+++ b/media/libaaudio/src/fifo/FifoControllerBase.cpp
@@ -21,7 +21,8 @@
#include <stdint.h>
#include "FifoControllerBase.h"
-using namespace android; // TODO just import names needed
+using android::FifoControllerBase;
+using android::fifo_frames_t;
FifoControllerBase::FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold)
: mCapacity(capacity)
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index e760dab..7b4821f 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -208,6 +208,10 @@
setBufferCapacity(getBufferCapacityFromDevice());
setFramesPerBurst(getFramesPerBurstFromDevice());
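+ // Record the device-side values reported by the AudioRecord so getters that
+ // distinguish app-rate and device-rate values stay consistent.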
+ setDeviceSampleRate(mAudioRecord->getSampleRate());
+ setDeviceBufferCapacity(getBufferCapacityFromDevice());
+ setDeviceFramesPerBurst(getFramesPerBurstFromDevice());
+
setHardwareSamplesPerFrame(mAudioRecord->getHalChannelCount());
setHardwareSampleRate(mAudioRecord->getHalSampleRate());
setHardwareFormat(mAudioRecord->getHalFormat());
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 67ee42e..723b419 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -202,6 +202,9 @@
setSampleRate(mAudioTrack->getSampleRate());
setBufferCapacity(getBufferCapacityFromDevice());
setFramesPerBurst(getFramesPerBurstFromDevice());
+ setDeviceSampleRate(mAudioTrack->getSampleRate());
+ setDeviceBufferCapacity(getBufferCapacityFromDevice());
+ setDeviceFramesPerBurst(getFramesPerBurstFromDevice());
setHardwareSamplesPerFrame(mAudioTrack->getHalChannelCount());
setHardwareSampleRate(mAudioTrack->getHalSampleRate());
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index e8324a8..0cbf79d 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -383,11 +383,10 @@
return AUDIO_CHANNEL_OUT_7POINT1POINT2;
case AAUDIO_CHANNEL_7POINT1POINT4:
return AUDIO_CHANNEL_OUT_7POINT1POINT4;
- // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
- // case AAUDIO_CHANNEL_9POINT1POINT4:
- // return AUDIO_CHANNEL_OUT_9POINT1POINT4;
- // case AAUDIO_CHANNEL_9POINT1POINT6:
- // return AUDIO_CHANNEL_OUT_9POINT1POINT6;
+ case AAUDIO_CHANNEL_9POINT1POINT4:
+ return AUDIO_CHANNEL_OUT_9POINT1POINT4;
+ case AAUDIO_CHANNEL_9POINT1POINT6:
+ return AUDIO_CHANNEL_OUT_9POINT1POINT6;
default:
ALOGE("%s() %#x unrecognized", __func__, channelMask);
return AUDIO_CHANNEL_INVALID;
@@ -465,11 +464,10 @@
return AAUDIO_CHANNEL_7POINT1POINT2;
case AUDIO_CHANNEL_OUT_7POINT1POINT4:
return AAUDIO_CHANNEL_7POINT1POINT4;
- // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
- // case AUDIO_CHANNEL_OUT_9POINT1POINT4:
- // return AAUDIO_CHANNEL_9POINT1POINT4;
- // case AUDIO_CHANNEL_OUT_9POINT1POINT6:
- // return AAUDIO_CHANNEL_9POINT1POINT6;
+ case AUDIO_CHANNEL_OUT_9POINT1POINT4:
+ return AAUDIO_CHANNEL_9POINT1POINT4;
+ case AUDIO_CHANNEL_OUT_9POINT1POINT6:
+ return AAUDIO_CHANNEL_9POINT1POINT6;
default:
ALOGE("%s() %#x unrecognized", __func__, channelMask);
return AAUDIO_CHANNEL_INVALID;
diff --git a/media/libaaudio/src/utility/AudioClock.h b/media/libaaudio/src/utility/AudioClock.h
index d5d4ef4..37f5b39 100644
--- a/media/libaaudio/src/utility/AudioClock.h
+++ b/media/libaaudio/src/utility/AudioClock.h
@@ -33,7 +33,7 @@
public:
static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
struct timespec time;
- int result = clock_gettime(clockId, &time);
+ const int result = clock_gettime(clockId, &time);
if (result < 0) {
return -errno;
}
@@ -56,7 +56,7 @@
time.tv_sec = nanoTime / AAUDIO_NANOS_PER_SECOND;
// Calculate the fractional nanoseconds. Avoids expensive % operation.
time.tv_nsec = nanoTime - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
- int err = clock_nanosleep(clockId, TIMER_ABSTIME, &time, nullptr);
+ const int err = clock_nanosleep(clockId, TIMER_ABSTIME, &time, nullptr);
switch (err) {
case EINTR:
return 1;
@@ -86,7 +86,7 @@
// Calculate the fractional nanoseconds. Avoids expensive % operation.
time.tv_nsec = nanoseconds - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
const int flags = 0; // documented as relative sleep
- int err = clock_nanosleep(clockId, flags, &time, nullptr);
+ const int err = clock_nanosleep(clockId, flags, &time, nullptr);
switch (err) {
case EINTR:
return 1;
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 51eb69b..b58634f 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -104,7 +104,7 @@
*/
void roundUp64(int32_t period) {
if (period > 0) {
- int64_t numPeriods = (mCounter64 + period - 1) / period;
+ const int64_t numPeriods = (mCounter64 + period - 1) / period;
mCounter64 = numPeriods * period;
}
}
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 24041bc..0cfdfb2 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -145,6 +145,7 @@
srcs: ["test_flowgraph.cpp"],
shared_libs: [
"libaaudio_internal",
+ "libaudioutils",
"libbinder",
"libcutils",
"libutils",
diff --git a/media/libaaudio/tests/test_flowgraph.cpp b/media/libaaudio/tests/test_flowgraph.cpp
index 6f75f5a..7eb8b0d 100644
--- a/media/libaaudio/tests/test_flowgraph.cpp
+++ b/media/libaaudio/tests/test_flowgraph.cpp
@@ -25,6 +25,8 @@
#include <gtest/gtest.h>
+#include <aaudio/AAudio.h>
+#include "client/AAudioFlowGraph.h"
#include "flowgraph/ClipToRange.h"
#include "flowgraph/Limiter.h"
#include "flowgraph/MonoBlend.h"
@@ -37,8 +39,18 @@
#include "flowgraph/SinkI32.h"
#include "flowgraph/SourceI16.h"
#include "flowgraph/SourceI24.h"
+#include "flowgraph/resampler/IntegerRatio.h"
using namespace FLOWGRAPH_OUTER_NAMESPACE::flowgraph;
+using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
+
+using TestFlowgraphResamplerParams = std::tuple<int32_t, int32_t, MultiChannelResampler::Quality>;
+
+enum {
+ PARAM_SOURCE_SAMPLE_RATE = 0,
+ PARAM_SINK_SAMPLE_RATE,
+ PARAM_RESAMPLER_QUALITY
+};
constexpr int kBytesPerI24Packed = 3;
@@ -394,3 +406,240 @@
EXPECT_NEAR(expected[i], output[i], tolerance);
}
}
+
+TEST(test_flowgraph, module_sinki16_multiple_reads) {
+ static constexpr int kNumSamples = 8;
+ std::array<int16_t, kNumSamples + 10> output; // larger than input
+
+ SourceFloat sourceFloat{1};
+ SinkI16 sinkI16{1};
+
+ sourceFloat.setData(kInputFloat.data(), kNumSamples);
+ sourceFloat.output.connect(&sinkI16.input);
+
+ output.fill(777);
+
+ // Read the first half of the data
+ int32_t numRead = sinkI16.read(output.data(), kNumSamples / 2);
+ ASSERT_EQ(kNumSamples / 2, numRead);
+ for (int i = 0; i < numRead; i++) {
+ EXPECT_EQ(kExpectedI16.at(i), output.at(i)) << ", i = " << i;
+ }
+
+ // Read the rest of the data
+ numRead = sinkI16.read(output.data(), output.size());
+ ASSERT_EQ(kNumSamples / 2, numRead);
+ for (int i = 0; i < numRead; i++) {
+ EXPECT_EQ(kExpectedI16.at(i + kNumSamples / 2), output.at(i)) << ", i = " << i;
+ }
+}
+
+void checkSampleRateConversionVariedSizes(int32_t sourceSampleRate,
+ int32_t sinkSampleRate,
+ MultiChannelResampler::Quality resamplerQuality) {
+ AAudioFlowGraph flowgraph;
+ aaudio_result_t result = flowgraph.configure(AUDIO_FORMAT_PCM_FLOAT /* sourceFormat */,
+ 1 /* sourceChannelCount */,
+ sourceSampleRate,
+ AUDIO_FORMAT_PCM_FLOAT /* sinkFormat */,
+ 1 /* sinkChannelCount */,
+ sinkSampleRate,
+ false /* useMonoBlend */,
+ false /* useVolumeRamps */,
+ 0.0f /* audioBalance */,
+ resamplerQuality);
+
+ IntegerRatio ratio(sourceSampleRate, sinkSampleRate);
+ ratio.reduce();
+
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ const int inputSize = ratio.getNumerator();
+ const int outputSize = ratio.getDenominator();
+ float input[inputSize];
+ float output[outputSize];
+
+ for (int i = 0; i < inputSize; i++) {
+ input[i] = i * 1.0f / inputSize;
+ }
+
+ int inputUsed = 0;
+ int outputRead = 0;
+ int curInputSize = 1;
+
+ // Process the data with larger and larger input buffer sizes.
+ while (inputUsed < inputSize) {
+ outputRead += flowgraph.process((void *) (input + inputUsed),
+ curInputSize,
+ (void *) (output + outputRead),
+ outputSize - outputRead);
+ inputUsed += curInputSize;
+ curInputSize = std::min(curInputSize + 5, inputSize - inputUsed);
+ }
+
+ ASSERT_EQ(outputSize, outputRead);
+
+ for (int i = 1; i < outputSize; i++) {
+ // The first values of the flowgraph will be close to zero.
+ // Besides those, the values should be strictly increasing.
+ if (output[i - 1] > 0.01f) {
+ EXPECT_GT(output[i], output[i - 1]);
+ }
+ }
+}
+
+TEST(test_flowgraph, flowgraph_varied_sizes_all) {
+ const int rates[] = {8000, 11025, 22050, 32000, 44100, 48000, 64000, 88200, 96000};
+ const MultiChannelResampler::Quality qualities[] =
+ {
+ MultiChannelResampler::Quality::Fastest,
+ MultiChannelResampler::Quality::Low,
+ MultiChannelResampler::Quality::Medium,
+ MultiChannelResampler::Quality::High,
+ MultiChannelResampler::Quality::Best
+ };
+ for (int srcRate : rates) {
+ for (int destRate : rates) {
+ for (auto quality : qualities) {
+ if (srcRate != destRate) {
+ checkSampleRateConversionVariedSizes(srcRate, destRate, quality);
+ }
+ }
+ }
+ }
+}
+
+void checkSampleRateConversionPullLater(int32_t sourceSampleRate,
+ int32_t sinkSampleRate,
+ MultiChannelResampler::Quality resamplerQuality) {
+ AAudioFlowGraph flowgraph;
+ aaudio_result_t result = flowgraph.configure(AUDIO_FORMAT_PCM_FLOAT /* sourceFormat */,
+ 1 /* sourceChannelCount */,
+ sourceSampleRate,
+ AUDIO_FORMAT_PCM_FLOAT /* sinkFormat */,
+ 1 /* sinkChannelCount */,
+ sinkSampleRate,
+ false /* useMonoBlend */,
+ false /* useVolumeRamps */,
+ 0.0f /* audioBalance */,
+ resamplerQuality);
+
+ IntegerRatio ratio(sourceSampleRate, sinkSampleRate);
+ ratio.reduce();
+
+ ASSERT_EQ(AAUDIO_OK, result);
+
+ const int inputSize = ratio.getNumerator();
+ const int outputSize = ratio.getDenominator();
+ float input[inputSize];
+ float output[outputSize];
+
+ for (int i = 0; i < inputSize; i++) {
+ input[i] = i * 1.0f / inputSize;
+ }
+
+ // Read half the data with process.
+ int outputRead = flowgraph.process((void *) input,
+ inputSize,
+ (void *) output,
+ outputSize / 2);
+
+ ASSERT_EQ(outputSize / 2, outputRead);
+
+ // Now read the other half of the data with pull.
+ outputRead += flowgraph.pull(
+ (void *) (output + outputRead),
+ outputSize - outputRead);
+
+ ASSERT_EQ(outputSize, outputRead);
+ for (int i = 1; i < outputSize; i++) {
+ // The first values of the flowgraph will be close to zero.
+ // Besides those, the values should be strictly increasing.
+ if (output[i - 1] > 0.01f) {
+ EXPECT_GT(output[i], output[i - 1]);
+ }
+ }
+}
+
+// TODO: b/289508408 - Remove non-parameterized tests if they get noisy.
+TEST(test_flowgraph, flowgraph_pull_later_all) {
+ const int rates[] = {8000, 11025, 22050, 32000, 44100, 48000, 64000, 88200, 96000};
+ const MultiChannelResampler::Quality qualities[] =
+ {
+ MultiChannelResampler::Quality::Fastest,
+ MultiChannelResampler::Quality::Low,
+ MultiChannelResampler::Quality::Medium,
+ MultiChannelResampler::Quality::High,
+ MultiChannelResampler::Quality::Best
+ };
+ for (int srcRate : rates) {
+ for (int destRate : rates) {
+ for (auto quality : qualities) {
+ if (srcRate != destRate) {
+ checkSampleRateConversionPullLater(srcRate, destRate, quality);
+ }
+ }
+ }
+ }
+}
+
+class TestFlowgraphSampleRateConversion : public ::testing::Test,
+ public ::testing::WithParamInterface<TestFlowgraphResamplerParams> {
+};
+
+const char* resamplerQualityToString(MultiChannelResampler::Quality quality) {
+ switch (quality) {
+ case MultiChannelResampler::Quality::Fastest: return "FASTEST";
+ case MultiChannelResampler::Quality::Low: return "LOW";
+ case MultiChannelResampler::Quality::Medium: return "MEDIUM";
+ case MultiChannelResampler::Quality::High: return "HIGH";
+ case MultiChannelResampler::Quality::Best: return "BEST";
+ }
+ return "UNKNOWN";
+}
+
+static std::string getTestName(
+ const ::testing::TestParamInfo<TestFlowgraphResamplerParams>& info) {
+ return std::string()
+ + std::to_string(std::get<PARAM_SOURCE_SAMPLE_RATE>(info.param))
+ + "__" + std::to_string(std::get<PARAM_SINK_SAMPLE_RATE>(info.param))
+ + "__" + resamplerQualityToString(std::get<PARAM_RESAMPLER_QUALITY>(info.param));
+}
+
+TEST_P(TestFlowgraphSampleRateConversion, test_flowgraph_pull_later) {
+ checkSampleRateConversionPullLater(std::get<PARAM_SOURCE_SAMPLE_RATE>(GetParam()),
+ std::get<PARAM_SINK_SAMPLE_RATE>(GetParam()),
+ std::get<PARAM_RESAMPLER_QUALITY>(GetParam()));
+}
+
+TEST_P(TestFlowgraphSampleRateConversion, test_flowgraph_varied_sizes) {
+ checkSampleRateConversionVariedSizes(std::get<PARAM_SOURCE_SAMPLE_RATE>(GetParam()),
+ std::get<PARAM_SINK_SAMPLE_RATE>(GetParam()),
+ std::get<PARAM_RESAMPLER_QUALITY>(GetParam()));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ test_flowgraph,
+ TestFlowgraphSampleRateConversion,
+ ::testing::Values(
+ TestFlowgraphResamplerParams({8000, 11025, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({8000, 48000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({8000, 44100, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({11025, 24000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({11025, 48000,
+ MultiChannelResampler::Quality::Fastest}),
+ TestFlowgraphResamplerParams({11025, 48000, MultiChannelResampler::Quality::Low}),
+ TestFlowgraphResamplerParams({11025, 48000,
+ MultiChannelResampler::Quality::Medium}),
+ TestFlowgraphResamplerParams({11025, 48000, MultiChannelResampler::Quality::High}),
+ TestFlowgraphResamplerParams({11025, 48000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({11025, 44100, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({11025, 88200, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({16000, 48000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({44100, 48000, MultiChannelResampler::Quality::Low}),
+ TestFlowgraphResamplerParams({44100, 48000, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({48000, 11025, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({48000, 44100, MultiChannelResampler::Quality::Best}),
+ TestFlowgraphResamplerParams({44100, 11025, MultiChannelResampler::Quality::Best})),
+ &getTestName
+);
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
index 57395fa..87aaeac 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
@@ -287,7 +287,7 @@
if (const auto uuid =
::aidl::android::aidl2legacy_AudioUuid_audio_uuid_t(desc.common.id.uuid);
uuid.ok()) {
- static_cast<effectsConfig::EffectImpl>(effect).uuid = uuid.value();
+ static_cast<effectsConfig::EffectImpl&>(effect).uuid = uuid.value();
return std::make_shared<const effectsConfig::Effect>(effect);
} else {
return nullptr;
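The added '&' in the hunk above is the whole fix: casting the derived 'effect' object to 'effectsConfig::EffectImpl' by value copies the base subobject into a temporary, so the uuid assignment lands on that temporary and is discarded, while casting to a reference writes through to 'effect' itself. A minimal standalone sketch of the difference, using hypothetical EffectBase/EffectDerived stand-ins rather than the real effectsConfig types:

    #include <cstdint>

    struct Uuid { uint32_t timeLow = 0; };   // stand-in for the uuid member type
    struct EffectBase { Uuid uuid; };        // stand-in for effectsConfig::EffectImpl
    struct EffectDerived : EffectBase {};    // stand-in for effectsConfig::Effect

    int main() {
        EffectDerived effect;
        const Uuid newId{0x42};

        // Value cast: the assignment targets a sliced temporary copy and is lost.
        static_cast<EffectBase>(effect).uuid = newId;
        const bool lost = (effect.uuid.timeLow == 0);        // true

        // Reference cast: the assignment modifies the base subobject of 'effect'.
        static_cast<EffectBase&>(effect).uuid = newId;
        const bool applied = (effect.uuid.timeLow == 0x42);  // true

        return (lost && applied) ? 0 : 1;
    }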
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp
index 92b77d8..f5640b8 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp
@@ -67,7 +67,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(AcousticEchoCanceler, acousticEchoCanceler, vendor,
ext);
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->setParameter(aidlParam)));
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.cpp
index 1363ba4..03606f4 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.cpp
@@ -90,7 +90,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
Parameter aidlParam = MAKE_SPECIFIC_PARAMETER(AutomaticGainControlV1,
automaticGainControlV1, vendor, ext);
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp
index b35a1c6..b9a5b02 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp
@@ -68,7 +68,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(AutomaticGainControlV2, automaticGainControlV2,
vendor, ext);
break;
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp
index 7c6a5a2..8df56b0 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp
@@ -66,7 +66,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(BassBoost, bassBoost, vendor, ext);
break;
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp
index b57971c..2b98e21 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp
@@ -60,7 +60,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(Downmix, downmix, vendor, ext);
}
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp
index fe845ab..89f8b83 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp
@@ -119,7 +119,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam =
MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, vendor, ext);
break;
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp
index 754da43..fcade68 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp
@@ -168,7 +168,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
Parameter aidlParam = MAKE_SPECIFIC_PARAMETER(EnvironmentalReverb,
environmentalReverb, vendor, ext);
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->setParameter(aidlParam)));
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp
index fc867c7..ca6ff88 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp
@@ -103,7 +103,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(Equalizer, equalizer, vendor, ext);
break;
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp
index 73430ba..bdee7b6 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp
@@ -79,7 +79,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(HapticGenerator, hapticGenerator, vendor, ext);
break;
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp
index 31eec65..a0526e6 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp
@@ -58,7 +58,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(LoudnessEnhancer, loudnessEnhancer, vendor, ext);
break;
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp
index 7c34ed7..bf75e4a 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp
@@ -63,7 +63,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(NoiseSuppression, noiseSuppression, vendor, ext);
break;
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp
index e936aef..3cac591 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp
@@ -61,7 +61,7 @@
} else {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(PresetReverb, presetReverb, vendor, ext);
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp
index eadd6c3..ff0c32b 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp
@@ -44,7 +44,7 @@
status_t AidlConversionSpatializer::setParameter(EffectParamReader& param) {
Parameter aidlParam = VALUE_OR_RETURN_STATUS(
- ::aidl::android::legacy2aidl_EffectParameterReader_ParameterExtension(param));
+ ::aidl::android::legacy2aidl_EffectParameterReader_Parameter(param));
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
}
@@ -64,8 +64,7 @@
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
// copy the AIDL extension data back to effect_param_t
return VALUE_OR_RETURN_STATUS(
- ::aidl::android::aidl2legacy_ParameterExtension_EffectParameterWriter(aidlParam,
- param));
+ ::aidl::android::aidl2legacy_Parameter_EffectParameterWriter(aidlParam, param));
}
} // namespace effect
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp
index 488d5cd..2c29c7d 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp
@@ -52,20 +52,19 @@
*/
status_t AidlConversionVendorExtension::setParameter(EffectParamReader& param) {
Parameter aidlParam = VALUE_OR_RETURN_STATUS(
- ::aidl::android::legacy2aidl_EffectParameterReader_ParameterExtension(param));
+ ::aidl::android::legacy2aidl_EffectParameterReader_Parameter(param));
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
}
status_t AidlConversionVendorExtension::getParameter(EffectParamWriter& param) {
VendorExtension extId = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Param_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
Parameter::Id id = UNION_MAKE(Parameter::Id, vendorEffectTag, extId);
Parameter aidlParam;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
// copy the AIDL extension data back to effect_param_t
return VALUE_OR_RETURN_STATUS(
- ::aidl::android::aidl2legacy_ParameterExtension_EffectParameterWriter(aidlParam,
- param));
+ ::aidl::android::aidl2legacy_Parameter_EffectParameterWriter(aidlParam, param));
}
} // namespace effect
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp
index c95c3a9..cad0068 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp
@@ -77,7 +77,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(Virtualizer, virtualizer, vendor, ext);
break;
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp
index b4440ee..18d0d95 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp
@@ -75,7 +75,7 @@
default: {
// for vendor extension, copy data area to the DefaultExtension, parameter ignored
VendorExtension ext = VALUE_OR_RETURN_STATUS(
- aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidl::android::legacy2aidl_EffectParameterReader_VendorExtension(param));
aidlParam = MAKE_SPECIFIC_PARAMETER(Visualizer, visualizer, vendor, ext);
break;
}
diff --git a/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
index 63f895f..2854496 100644
--- a/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
+++ b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
@@ -16,6 +16,7 @@
//#define LOG_NDEBUG 0
#include <algorithm>
+#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
@@ -24,8 +25,11 @@
#define LOG_TAG "EffectsFactoryHalInterfaceTest"
#include <aidl/android/media/audio/common/AudioUuid.h>
+#include <gtest/gtest.h>
#include <media/AidlConversionCppNdk.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <system/audio_aidl_utils.h>
+#include <system/audio_effect.h>
#include <system/audio_effects/audio_effects_utils.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio_effects/effect_agc.h>
@@ -36,17 +40,15 @@
#include <system/audio_effects/effect_hapticgenerator.h>
#include <system/audio_effects/effect_loudnessenhancer.h>
#include <system/audio_effects/effect_ns.h>
-#include <system/audio_effect.h>
-
-#include <gtest/gtest.h>
#include <utils/RefBase.h>
#include <vibrator/ExternalVibrationUtils.h>
namespace android {
+using ::aidl::android::media::audio::common::AudioUuid;
+using ::android::audio::utils::toString;
using effect::utils::EffectParamReader;
using effect::utils::EffectParamWriter;
-using ::aidl::android::media::audio::common::AudioUuid;
// EffectsFactoryHalInterface
TEST(libAudioHalTest, createEffectsFactoryHalInterface) {
@@ -195,7 +197,8 @@
static const effect_uuid_t EXTEND_EFFECT_TYPE_UUID = {
0xfa81dbde, 0x588b, 0x11ed, 0x9b6a, {0x02, 0x42, 0xac, 0x12, 0x00, 0x02}};
-
+constexpr std::array<uint8_t, 10> kVendorExtensionData({0xff, 0x5, 0x50, 0xab, 0xcd, 0x00, 0xbd,
+ 0xdb, 0xee, 0xff});
std::vector<EffectParamTestTuple> testPairs = {
std::make_tuple(FX_IID_AEC,
createEffectParamCombination(AEC_PARAM_ECHO_DELAY, 0xff /* echoDelayMs */,
@@ -203,12 +206,9 @@
std::make_tuple(FX_IID_AGC,
createEffectParamCombination(AGC_PARAM_TARGET_LEVEL, 20 /* targetLevel */,
sizeof(int16_t) /* returnValueSize */)),
- std::make_tuple(FX_IID_AGC2, createEffectParamCombination(
- AGC2_PARAM_FIXED_DIGITAL_GAIN, 15 /* digitalGainDb */,
- sizeof(int32_t) /* returnValueSize */)),
std::make_tuple(SL_IID_BASSBOOST,
createEffectParamCombination(BASSBOOST_PARAM_STRENGTH, 20 /* strength */,
- sizeof(int32_t) /* returnValueSize */)),
+ sizeof(int16_t) /* returnValueSize */)),
std::make_tuple(EFFECT_UIID_DOWNMIX,
createEffectParamCombination(DOWNMIX_PARAM_TYPE, DOWNMIX_TYPE_FOLD,
sizeof(int16_t) /* returnValueSize */)),
@@ -217,13 +217,6 @@
std::array<uint32_t, 2>({DP_PARAM_INPUT_GAIN, 0 /* channel */}),
30 /* gainDb */, sizeof(int32_t) /* returnValueSize */)),
std::make_tuple(
- FX_IID_HAPTICGENERATOR,
- createEffectParamCombination(
- HG_PARAM_HAPTIC_INTENSITY,
- std::array<uint32_t, 2>(
- {1, uint32_t(::android::os::HapticScale::HIGH) /* scale */}),
- 0 /* returnValueSize */)),
- std::make_tuple(
FX_IID_LOUDNESS_ENHANCER,
createEffectParamCombination(LOUDNESS_ENHANCER_PARAM_TARGET_GAIN_MB, 5 /* gain */,
sizeof(int32_t) /* returnValueSize */)),
@@ -231,7 +224,8 @@
createEffectParamCombination(NS_PARAM_LEVEL, 1 /* level */,
sizeof(int32_t) /* returnValueSize */)),
std::make_tuple(&EXTEND_EFFECT_TYPE_UUID,
- createEffectParamCombination(1, 0xbead, sizeof(int32_t)))};
+ createEffectParamCombination(8, kVendorExtensionData,
+ sizeof(kVendorExtensionData)))};
class libAudioHalEffectParamTest : public ::testing::TestWithParam<EffectParamTestTuple> {
public:
@@ -255,7 +249,9 @@
}()) {}
void SetUp() override {
- ASSERT_NE(0ul, mDescs.size());
+ if (0ul == mDescs.size()) {
+ GTEST_SKIP() << "Effect type not available on device, skipping";
+ }
for (const auto& desc : mDescs) {
sp<EffectHalInterface> interface = createEffectHal(desc);
ASSERT_NE(nullptr, interface);
@@ -264,9 +260,11 @@
}
void initEffect(const sp<EffectHalInterface>& interface) {
- uint32_t initReply = 0;
- uint32_t initReplySize = sizeof(initReply);
- ASSERT_EQ(OK, interface->command(EFFECT_CMD_INIT, 0, nullptr, &initReplySize, &initReply));
+ uint32_t reply = 0;
+ uint32_t replySize = sizeof(reply);
+ ASSERT_EQ(OK, interface->command(EFFECT_CMD_INIT, 0, nullptr, &replySize, &reply));
+ ASSERT_EQ(OK, interface->command(EFFECT_CMD_SET_CONFIG, sizeof(mEffectConfig),
+ &mEffectConfig, &replySize, &reply));
}
void TearDown() override {
@@ -311,7 +309,7 @@
std::vector<uint8_t> response(mCombination->valueSize);
EXPECT_EQ(OK, parameterGet.readFromValue(response.data(), mCombination->valueSize))
<< " try get valueSize " << mCombination->valueSize << " from "
- << parameterGet.toString();
+ << parameterGet.toString() << " set " << parameterSet->toString();
EXPECT_EQ(response, mExpectedValue);
}
}
@@ -323,6 +321,23 @@
const std::vector<uint8_t> mExpectedValue;
const std::vector<effect_descriptor_t> mDescs;
std::vector<sp<EffectHalInterface>> mHalInterfaces;
+ effect_config_t mEffectConfig = {.inputCfg = {.accessMode = EFFECT_BUFFER_ACCESS_READ,
+ .format = AUDIO_FORMAT_PCM_FLOAT,
+ .bufferProvider.getBuffer = nullptr,
+ .bufferProvider.releaseBuffer = nullptr,
+ .bufferProvider.cookie = nullptr,
+ .mask = EFFECT_CONFIG_ALL,
+ .samplingRate = 48000,
+ .channels = AUDIO_CHANNEL_IN_STEREO},
+
+ .outputCfg = {.accessMode = EFFECT_BUFFER_ACCESS_WRITE,
+ .format = AUDIO_FORMAT_PCM_FLOAT,
+ .bufferProvider.getBuffer = nullptr,
+ .bufferProvider.releaseBuffer = nullptr,
+ .bufferProvider.cookie = nullptr,
+ .mask = EFFECT_CONFIG_ALL,
+ .samplingRate = 48000,
+ .channels = AUDIO_CHANNEL_OUT_STEREO}};
};
TEST_P(libAudioHalEffectParamTest, setAndGetParam) {
@@ -338,7 +353,7 @@
AudioUuid uuid = ::aidl::android::legacy2aidl_audio_uuid_t_AudioUuid(
*std::get<TUPLE_UUID>(info.param))
.value();
- std::string name = "UUID_" + uuid.toString();
+ std::string name = "UUID_" + toString(uuid);
std::replace_if(
name.begin(), name.end(), [](const char c) { return !std::isalnum(c); }, '_');
return name;
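The SetUp() change above leans on GTEST_SKIP() being legal inside a fixture's SetUp(): the test is reported as SKIPPED and its body never runs, instead of failing an ASSERT when an effect type is simply absent from the device. A minimal self-contained sketch of that pattern (featureAvailable() is a hypothetical probe standing in for the mDescs.size() check):

    #include <gtest/gtest.h>

    class MaybeUnsupportedTest : public ::testing::Test {
      protected:
        void SetUp() override {
            if (!featureAvailable()) {
                // Marks the test as SKIPPED; the test body below never executes.
                GTEST_SKIP() << "feature not available on this device";
            }
        }
        // Hypothetical availability probe.
        static bool featureAvailable() { return false; }
    };

    TEST_F(MaybeUnsupportedTest, runsOnlyWhenAvailable) {
        SUCCEED();
    }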
diff --git a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
index eb7ab1a..3148d36 100644
--- a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
@@ -14,21 +14,21 @@
* limitations under the License.
*/
-#include "BundleTypes.h"
-#define LOG_TAG "EffectBundleAidl"
-#include <Utils.h>
#include <algorithm>
+#include <limits.h>
#include <unordered_set>
+#define LOG_TAG "EffectBundleAidl"
-#include <android-base/logging.h>
-#include <fmq/AidlMessageQueue.h>
#include <audio_effects/effect_bassboost.h>
#include <audio_effects/effect_equalizer.h>
#include <audio_effects/effect_virtualizer.h>
-
-#include "EffectBundleAidl.h"
+#include <android-base/logging.h>
+#include <fmq/AidlMessageQueue.h>
#include <LVM.h>
-#include <limits.h>
+#include <Utils.h>
+
+#include "BundleTypes.h"
+#include "EffectBundleAidl.h"
using aidl::android::hardware::audio::effect::getEffectImplUuidBassBoostBundle;
using aidl::android::hardware::audio::effect::Descriptor;
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp
index 73141b6..b49d109 100644
--- a/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp
@@ -14,20 +14,21 @@
* limitations under the License.
*/
-#include "ReverbTypes.h"
-#define LOG_TAG "EffectReverb"
-#include <Utils.h>
#include <algorithm>
+#include <limits.h>
#include <unordered_set>
-#include <android-base/logging.h>
-#include <fmq/AidlMessageQueue.h>
+#define LOG_TAG "EffectReverb"
+
#include <audio_effects/effect_bassboost.h>
#include <audio_effects/effect_equalizer.h>
#include <audio_effects/effect_virtualizer.h>
+#include <android-base/logging.h>
+#include <fmq/AidlMessageQueue.h>
+#include <Utils.h>
#include "EffectReverb.h"
-#include <limits.h>
+#include "ReverbTypes.h"
using aidl::android::hardware::audio::effect::Descriptor;
using aidl::android::hardware::audio::effect::EffectReverb;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index d8c356a..28d554f 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1253,7 +1253,7 @@
void MediaCodec::updateMediametrics() {
if (mMetricsHandle == 0) {
- ALOGW("no metrics handle found");
+ ALOGV("no metrics handle found");
return;
}
@@ -1615,7 +1615,7 @@
}
void MediaCodec::flushMediametrics() {
- ALOGD("flushMediametrics");
+ ALOGV("flushMediametrics");
// update does its own mutex locking
updateMediametrics();
diff --git a/media/libstagefright/RemoteMediaExtractor.cpp b/media/libstagefright/RemoteMediaExtractor.cpp
index 574fc1a..5ade29b 100644
--- a/media/libstagefright/RemoteMediaExtractor.cpp
+++ b/media/libstagefright/RemoteMediaExtractor.cpp
@@ -102,7 +102,12 @@
static std::mutex pending_mutex;
static std::condition_variable pending_added;
-static void* closing_thread_func(void *arg) {
+static void* closingThreadWorker(void *arg) {
+ // naming the thread simplifies debugging
+ if (pthread_setname_np(pthread_self(), "mediaCloser")) {
+ ALOGW("Failed to set thread name on thread for closing data sources");
+ }
+
while (true) {
sp<DataSource> ds = nullptr;
std::unique_lock _lk(pending_mutex);
@@ -124,7 +129,7 @@
// this can be '&ds' as long as the pending.push_back() bumps the
// reference counts to ensure the object lives long enough
-static void start_close_thread(sp<DataSource> &ds) {
+static void asyncDataSourceClose(sp<DataSource> &ds) {
// make sure we have our (single) worker thread
static std::once_flag sCheckOnce;
@@ -132,7 +137,7 @@
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
- pthread_create(&myThread, &attr, closing_thread_func, nullptr);
+ pthread_create(&myThread, &attr, closingThreadWorker, nullptr);
pthread_attr_destroy(&attr);
});
@@ -149,7 +154,7 @@
int8_t new_scheme = property_get_bool("debug.mediaextractor.delayedclose", 1);
if (new_scheme != 0) {
ALOGV("deferred close()");
- start_close_thread(mSource);
+ asyncDataSourceClose(mSource);
mSource.clear();
} else {
ALOGV("immediate close()");
diff --git a/media/utils/BatteryNotifier.cpp b/media/utils/BatteryNotifier.cpp
index 09bc042..7762c24 100644
--- a/media/utils/BatteryNotifier.cpp
+++ b/media/utils/BatteryNotifier.cpp
@@ -85,8 +85,8 @@
void BatteryNotifier::noteStopAudio(uid_t uid) {
Mutex::Autolock _l(mLock);
- if (mAudioRefCounts.find(uid) == mAudioRefCounts.end()) {
- ALOGW("%s: audio refcount is broken for uid(%d).", __FUNCTION__, (int)uid);
+ if (mAudioRefCounts.find(uid) == mAudioRefCounts.end() || (mAudioRefCounts[uid] == 0)) {
+ ALOGE("%s: audio refcount is broken for uid(%d).", __FUNCTION__, (int)uid);
return;
}
diff --git a/media/utils/include/mediautils/BatteryNotifier.h b/media/utils/include/mediautils/BatteryNotifier.h
index 3812d7a..73bed4a 100644
--- a/media/utils/include/mediautils/BatteryNotifier.h
+++ b/media/utils/include/mediautils/BatteryNotifier.h
@@ -68,6 +68,38 @@
sp<IBatteryStats> getBatteryService_l();
};
+namespace mediautils {
+class BatteryStatsAudioHandle {
+ public:
+ static constexpr uid_t INVALID_UID = static_cast<uid_t>(-1);
+
+ explicit BatteryStatsAudioHandle(uid_t uid) : mUid(uid) {
+ if (uid != INVALID_UID) {
+ BatteryNotifier::getInstance().noteStartAudio(mUid);
+ }
+ }
+
+ BatteryStatsAudioHandle(BatteryStatsAudioHandle&& other) : mUid(other.mUid) {
+ other.mUid = INVALID_UID;
+ }
+
+ BatteryStatsAudioHandle(const BatteryStatsAudioHandle& other) = delete;
+
+ BatteryStatsAudioHandle& operator=(const BatteryStatsAudioHandle& other) = delete;
+
+ BatteryStatsAudioHandle& operator=(BatteryStatsAudioHandle&& other) = delete;
+
+ ~BatteryStatsAudioHandle() {
+ if (mUid != INVALID_UID) {
+ BatteryNotifier::getInstance().noteStopAudio(mUid);
+ }
+ }
+
+ private:
+ // Logically const
+ uid_t mUid = INVALID_UID;
+};
+} // namespace mediautils
} // namespace android
#endif // MEDIA_BATTERY_NOTIFIER_H
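The new handle ties noteStartAudio()/noteStopAudio() to object lifetime, and the TrackBase.h hunk further down keeps one in a std::optional so that activation maps to emplace() and deactivation to reset(). A minimal usage sketch under that assumption (FakeTrack is hypothetical, not an AudioFlinger class):

    #include <optional>
    #include <sys/types.h>
    #include <mediautils/BatteryNotifier.h>

    class FakeTrack {
      public:
        explicit FakeTrack(uid_t uid) : mUid(uid) {}

        // Entering the active state starts battery attribution for mUid.
        void onActive() { mBatteryStatsHolder.emplace(mUid); }

        // Leaving the active state, or destroying the track while engaged,
        // ends the attribution exactly once.
        void onInactive() { mBatteryStatsHolder.reset(); }

      private:
        const uid_t mUid;
        std::optional<android::mediautils::BatteryStatsAudioHandle> mBatteryStatsHolder;
    };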
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index d1235e2..d459e62 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -74,6 +74,7 @@
#include <media/DeviceDescriptorBase.h>
#include <media/ExtendedAudioBufferProvider.h>
#include <media/VolumeShaper.h>
+#include <mediautils/BatteryNotifier.h>
#include <mediautils/ServiceUtilities.h>
#include <mediautils/SharedMemoryAllocator.h>
#include <mediautils/Synchronization.h>
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index b4b8e19..c8998ad 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -3340,6 +3340,9 @@
}
if (status == NO_ERROR || status == ALREADY_EXISTS) {
Mutex::Autolock _l(mProxyLock);
+ size_t erasedHandle = mEffectHandles.erase(patchHandle);
+ ALOGV("%s %s effecthandle %p for patch %d",
+ __func__, (erasedHandle == 0 ? "adding" : "replacing"), handle.get(), patchHandle);
mEffectHandles.emplace(patchHandle, handle);
}
ALOGW_IF(status == BAD_VALUE,
@@ -3372,18 +3375,21 @@
if (mDescriptor.flags & EFFECT_FLAG_HW_ACC_TUNNEL) {
Mutex::Autolock _l(mProxyLock);
- mDevicePort = *port;
- mHalEffect = new EffectModule(mMyCallback,
+ if (mHalEffect != nullptr && mDevicePort.id == port->id) {
+ ALOGV("%s reusing HAL effect", __func__);
+ } else {
+ mDevicePort = *port;
+ mHalEffect = new EffectModule(mMyCallback,
const_cast<effect_descriptor_t *>(&mDescriptor),
mMyCallback->newEffectId(), AUDIO_SESSION_DEVICE,
false /* pinned */, port->id);
- if (audio_is_input_device(mDevice.mType)) {
- mHalEffect->setInputDevice(mDevice);
- } else {
- mHalEffect->setDevices({mDevice});
+ if (audio_is_input_device(mDevice.mType)) {
+ mHalEffect->setInputDevice(mDevice);
+ } else {
+ mHalEffect->setDevices({mDevice});
+ }
+ mHalEffect->configure();
}
- mHalEffect->configure();
-
*handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
mNotifyFramesProcessed);
status = (*handle)->initCheck();
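The erase added before emplace in the first Effects.cpp hunk matters because std::map::emplace never overwrites an existing key; erasing first is what lets a new handle replace an old one for the same patch, and the returned count (0 or 1) is what distinguishes "adding" from "replacing" in the log. A standalone illustration with a plain int-to-string map rather than the AudioFlinger types:

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
        std::map<int, std::string> handles;
        handles.emplace(1, "first");

        // emplace() is a no-op when the key already exists.
        auto [it, inserted] = handles.emplace(1, "second");
        assert(!inserted && it->second == "first");

        // Erasing first makes the replacement stick, and the 0/1 return value
        // tells an "adding" case apart from a "replacing" one.
        const size_t erased = handles.erase(1);
        handles.emplace(1, "second");
        assert(erased == 1 && handles.at(1) == "second");
        return 0;
    }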
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 397f5bd..af15cfc 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1858,7 +1858,7 @@
logTrack("add", track);
mActiveTracksGeneration++;
mLatestActiveTrack = track;
- ++mBatteryCounter[track->uid()].second;
+ track->beginBatteryAttribution();
mHasChanged = true;
return mActiveTracks.add(track);
}
@@ -1872,7 +1872,7 @@
}
logTrack("remove", track);
mActiveTracksGeneration++;
- --mBatteryCounter[track->uid()].second;
+ track->endBatteryAttribution();
// mLatestActiveTrack is not cleared even if it is the same as track.
mHasChanged = true;
#ifdef TEE_SINK
@@ -1885,14 +1885,13 @@
template <typename T>
void AudioFlinger::ThreadBase::ActiveTracks<T>::clear() {
for (const sp<T> &track : mActiveTracks) {
- BatteryNotifier::getInstance().noteStopAudio(track->uid());
+ track->endBatteryAttribution();
logTrack("clear", track);
}
mLastActiveTracksGeneration = mActiveTracksGeneration;
if (!mActiveTracks.empty()) { mHasChanged = true; }
mActiveTracks.clear();
mLatestActiveTrack.clear();
- mBatteryCounter.clear();
}
template <typename T>
@@ -1903,27 +1902,6 @@
thread->updateWakeLockUids_l(getWakeLockUids());
mLastActiveTracksGeneration = mActiveTracksGeneration;
}
-
- // Updates BatteryNotifier uids
- for (auto it = mBatteryCounter.begin(); it != mBatteryCounter.end();) {
- const uid_t uid = it->first;
- ssize_t &previous = it->second.first;
- ssize_t ¤t = it->second.second;
- if (current > 0) {
- if (previous == 0) {
- BatteryNotifier::getInstance().noteStartAudio(uid);
- }
- previous = current;
- ++it;
- } else if (current == 0) {
- if (previous > 0) {
- BatteryNotifier::getInstance().noteStopAudio(uid);
- }
- it = mBatteryCounter.erase(it); // std::map<> is stable on iterator erase.
- } else /* (current < 0) */ {
- LOG_ALWAYS_FATAL("negative battery count %zd", current);
- }
- }
}
template <typename T>
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index a5c1048..8b420c0 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -829,8 +829,6 @@
return wakeLockUids; // moved by underlying SharedBuffer
}
- std::map<uid_t, std::pair<ssize_t /* previous */, ssize_t /* current */>>
- mBatteryCounter;
SortedVector<sp<T>> mActiveTracks;
int mActiveTracksGeneration;
int mLastActiveTracksGeneration;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index d5b6a98..dec49ba 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -270,6 +270,23 @@
/** Set that a metadata has changed and needs to be notified to backend. Thread safe. */
void setMetadataHasChanged() { mChangeNotified.clear(); }
+ /**
+ * Called when a track moves to the active state to record its contribution to battery usage.
+ * Track state transitions should eventually be handled within the track class.
+ */
+ void beginBatteryAttribution() {
+ mBatteryStatsHolder.emplace(uid());
+ }
+
+ /**
+ * Called when a track moves out of the active state to record its contribution
+ * to battery usage.
+ */
+ void endBatteryAttribution() {
+ mBatteryStatsHolder.reset();
+ }
+
+
protected:
DISALLOW_COPY_AND_ASSIGN(TrackBase);
@@ -413,6 +430,8 @@
// If the last track change was notified to the client with readAndClearHasChanged
std::atomic_flag mChangeNotified = ATOMIC_FLAG_INIT;
+ // RAII object for battery stats book-keeping
+ std::optional<mediautils::BatteryStatsAudioHandle> mBatteryStatsHolder;
};
// PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 234104d..5d22ed4 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -1930,7 +1930,6 @@
// since it controls the mic permission for legacy apps.
mAppOpsManager.startWatchingMode(mAppOp, VALUE_OR_FATAL(aidl2legacy_string_view_String16(
mAttributionSource.packageName.value_or(""))),
- AppOpsManager::WATCH_FOREGROUND_CHANGES,
mOpCallback);
}
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 7f228c7..2ef6fe5 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -45,6 +45,8 @@
#define OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS (3 * AAUDIO_NANOS_PER_MILLISECOND)
#define INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS (-1 * AAUDIO_NANOS_PER_MILLISECOND)
+#define AAUDIO_MAX_OPEN_ATTEMPTS 10
+
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
@@ -77,14 +79,23 @@
{AUDIO_FORMAT_PCM_24_BIT_PACKED, AUDIO_FORMAT_PCM_16_BIT}
};
-audio_format_t getNextFormatToTry(audio_format_t curFormat, audio_format_t returnedFromAPM) {
- if (returnedFromAPM != AUDIO_FORMAT_DEFAULT) {
- return returnedFromAPM;
- }
+audio_format_t getNextFormatToTry(audio_format_t curFormat) {
const auto it = NEXT_FORMAT_TO_TRY.find(curFormat);
- return it != NEXT_FORMAT_TO_TRY.end() ? it->second : AUDIO_FORMAT_DEFAULT;
+ return it != NEXT_FORMAT_TO_TRY.end() ? it->second : curFormat;
}
+struct configComp {
+ bool operator() (const audio_config_base_t& lhs, const audio_config_base_t& rhs) const {
+ if (lhs.sample_rate != rhs.sample_rate) {
+ return lhs.sample_rate < rhs.sample_rate;
+ } else if (lhs.channel_mask != rhs.channel_mask) {
+ return lhs.channel_mask < rhs.channel_mask;
+ } else {
+ return lhs.format < rhs.format;
+ }
+ }
+};
+
} // namespace
aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
@@ -101,60 +112,66 @@
legacy2aidl_pid_t_int32_t(IPCThreadState::self()->getCallingPid()));
audio_format_t audioFormat = getFormat();
- std::set<audio_format_t> formatsTried;
- while (true) {
- if (formatsTried.find(audioFormat) != formatsTried.end()) {
+ int32_t sampleRate = getSampleRate();
+ if (sampleRate == AAUDIO_UNSPECIFIED) {
+ sampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
+ }
+
+ const aaudio_direction_t direction = getDirection();
+ audio_config_base_t config;
+ config.format = audioFormat;
+ config.sample_rate = sampleRate;
+ config.channel_mask = AAudio_getChannelMaskForOpen(
+ getChannelMask(), getSamplesPerFrame(), direction == AAUDIO_DIRECTION_INPUT);
+
+ std::set<audio_config_base_t, configComp> configsTried;
+ int32_t numberOfAttempts = 0;
+ while (numberOfAttempts < AAUDIO_MAX_OPEN_ATTEMPTS) {
+ if (configsTried.find(config) != configsTried.end()) {
// APM returned something that has already been tried.
- ALOGW("Have already tried to open with format=%#x, but failed before", audioFormat);
+ ALOGW("Have already tried to open with format=%#x and sr=%d, but failed before",
+ config.format, config.sample_rate);
break;
}
- formatsTried.insert(audioFormat);
+ configsTried.insert(config);
- audio_format_t nextFormatToTry = AUDIO_FORMAT_DEFAULT;
- result = openWithFormat(audioFormat, &nextFormatToTry);
+ audio_config_base_t previousConfig = config;
+ result = openWithConfig(&config);
if (result != AAUDIO_ERROR_UNAVAILABLE) {
// Return if it succeeded or if the error is anything other than
// AAUDIO_ERROR_UNAVAILABLE.
- ALOGI("Opened format=%#x with result=%d", audioFormat, result);
+ ALOGI("Opened format=%#x sr=%d, with result=%d", previousConfig.format,
+ previousConfig.sample_rate, result);
break;
}
- nextFormatToTry = getNextFormatToTry(audioFormat, nextFormatToTry);
- ALOGD("%s() %#x failed, perhaps due to format. Try again with %#x",
- __func__, audioFormat, nextFormatToTry);
- audioFormat = nextFormatToTry;
- if (audioFormat == AUDIO_FORMAT_DEFAULT) {
- // Nothing else to try
- break;
+ // Try other formats if the config from APM is the same as our current config.
+ // Some HALs may report their format support incorrectly.
+ if ((previousConfig.format == config.format) &&
+ (previousConfig.sample_rate == config.sample_rate)) {
+ config.format = getNextFormatToTry(config.format);
}
+
+ ALOGD("%s() %#x %d failed, perhaps due to format or sample rate. Try again with %#x %d",
+ __func__, previousConfig.format, previousConfig.sample_rate, config.format,
+ config.sample_rate);
+ numberOfAttempts++;
}
return result;
}
-aaudio_result_t AAudioServiceEndpointMMAP::openWithFormat(
- audio_format_t audioFormat, audio_format_t* nextFormatToTry) {
+aaudio_result_t AAudioServiceEndpointMMAP::openWithConfig(
+ audio_config_base_t* config) {
aaudio_result_t result = AAUDIO_OK;
- audio_config_base_t config;
+ audio_config_base_t currentConfig = *config;
audio_port_handle_t deviceId;
const audio_attributes_t attributes = getAudioAttributesFrom(this);
deviceId = mRequestedDeviceId;
- // Fill in config
- config.format = audioFormat;
-
- int32_t aaudioSampleRate = getSampleRate();
- if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
- aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
- }
- config.sample_rate = aaudioSampleRate;
-
const aaudio_direction_t direction = getDirection();
- config.channel_mask = AAudio_getChannelMaskForOpen(
- getChannelMask(), getSamplesPerFrame(), direction == AAUDIO_DIRECTION_INPUT);
-
if (direction == AAUDIO_DIRECTION_OUTPUT) {
mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later
@@ -177,11 +194,11 @@
// Open HAL stream. Set mMmapStream
ALOGD("%s trying to open MMAP stream with format=%#x, "
"sample_rate=%u, channel_mask=%#x, device=%d",
- __func__, config.format, config.sample_rate,
- config.channel_mask, deviceId);
+ __func__, config->format, config->sample_rate,
+ config->channel_mask, deviceId);
const status_t status = MmapStreamInterface::openMmapStream(streamDirection,
&attributes,
- &config,
+ config,
mMmapClient,
&deviceId,
&sessionId,
@@ -195,9 +212,9 @@
// not match the hardware.
ALOGD("%s() - openMmapStream() returned status=%d, suggested format=%#x, sample_rate=%u, "
"channel_mask=%#x",
- __func__, status, config.format, config.sample_rate, config.channel_mask);
- *nextFormatToTry = config.format != audioFormat ? config.format
- : *nextFormatToTry;
+ __func__, status, config->format, config->sample_rate, config->channel_mask);
+ // Keep the channel mask of the current config
+ config->channel_mask = currentConfig.channel_mask;
return AAUDIO_ERROR_UNAVAILABLE;
}
@@ -217,7 +234,7 @@
setSessionId(actualSessionId);
ALOGD("%s(format = 0x%X) deviceId = %d, sessionId = %d",
- __func__, audioFormat, getDeviceId(), getSessionId());
+ __func__, config->format, getDeviceId(), getSessionId());
// Create MMAP/NOIRQ buffer.
result = createMmapBuffer();
@@ -227,11 +244,11 @@
// Get information about the stream and pass it back to the caller.
setChannelMask(AAudioConvert_androidToAAudioChannelMask(
- config.channel_mask, getDirection() == AAUDIO_DIRECTION_INPUT,
- AAudio_isChannelIndexMask(config.channel_mask)));
+ config->channel_mask, getDirection() == AAUDIO_DIRECTION_INPUT,
+ AAudio_isChannelIndexMask(config->channel_mask)));
- setFormat(config.format);
- setSampleRate(config.sample_rate);
+ setFormat(config->format);
+ setSampleRate(config->sample_rate);
setHardwareSampleRate(getSampleRate());
setHardwareFormat(getFormat());
setHardwareSamplesPerFrame(AAudioConvert_channelMaskToCount(getChannelMask()));
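The configComp comparator added near the top of this file gives the std::set of audio_config_base_t the strict weak ordering it needs, comparing (sample_rate, channel_mask, format) lexicographically so the retry loop can recognize configurations it has already attempted. A sketch of an equivalent formulation with std::tie, assuming <system/audio.h> provides audio_config_base_t and the constants used below; it is an illustration, not a drop-in replacement for the patch:

    #include <set>
    #include <tuple>
    #include <system/audio.h>

    struct ConfigLess {
        bool operator()(const audio_config_base_t& lhs, const audio_config_base_t& rhs) const {
            // Lexicographic comparison over the same three fields as configComp.
            return std::tie(lhs.sample_rate, lhs.channel_mask, lhs.format)
                 < std::tie(rhs.sample_rate, rhs.channel_mask, rhs.format);
        }
    };

    int main() {
        std::set<audio_config_base_t, ConfigLess> configsTried;
        audio_config_base_t config{};
        config.sample_rate = 48000;
        config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
        config.format = AUDIO_FORMAT_PCM_FLOAT;
        configsTried.insert(config);
        // A second attempt with the identical config is detected as a repeat.
        return configsTried.find(config) != configsTried.end() ? 0 : 1;
    }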
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 38cf0ba..f19005c 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -97,7 +97,7 @@
private:
- aaudio_result_t openWithFormat(audio_format_t audioFormat, audio_format_t* nextFormatToTry);
+ aaudio_result_t openWithConfig(audio_config_base_t* config);
aaudio_result_t createMmapBuffer();