Merge "C2SoftDav1dDec: Fix performance issues in frame parallel multi-threading" into main
diff --git a/media/codec2/core/include/C2ParamDef.h b/media/codec2/core/include/C2ParamDef.h
index d578820..86dfe65 100644
--- a/media/codec2/core/include/C2ParamDef.h
+++ b/media/codec2/core/include/C2ParamDef.h
@@ -404,6 +404,7 @@
/// Specialization for an input port parameter.
struct input : public T, public S,
public _C2StructCheck<S, ParamIndex, T::PARAM_KIND | T::Index::DIR_INPUT> {
+ using T::operator!=;
_C2_CORE_INDEX_OVERRIDE(ParamIndex)
/// Wrapper around base structure's constructor.
template<typename ...Args>
@@ -416,6 +417,7 @@
/// Specialization for an output port parameter.
struct output : public T, public S,
public _C2StructCheck<S, ParamIndex, T::PARAM_KIND | T::Index::DIR_OUTPUT> {
+ using T::operator!=;
_C2_CORE_INDEX_OVERRIDE(ParamIndex)
/// Wrapper around base structure's constructor.
template<typename ...Args>
@@ -470,6 +472,7 @@
/// Specialization for an input port parameter.
struct input : public T,
public _C2FlexStructCheck<S, ParamIndex, T::PARAM_KIND | T::Index::DIR_INPUT> {
+ using T::operator!=;
private:
/// Wrapper around base structure's constructor while also specifying port/direction.
template<typename ...Args>
@@ -486,6 +489,7 @@
/// Specialization for an output port parameter.
struct output : public T,
public _C2FlexStructCheck<S, ParamIndex, T::PARAM_KIND | T::Index::DIR_OUTPUT> {
+ using T::operator!=;
private:
/// Wrapper around base structure's constructor while also specifying port/direction.
template<typename ...Args>
@@ -549,6 +553,7 @@
struct input : public T, public S,
public _C2StructCheck<S, ParamIndex,
T::PARAM_KIND | T::Index::IS_STREAM_FLAG | T::Type::DIR_INPUT> {
+ using T::operator!=;
_C2_CORE_INDEX_OVERRIDE(ParamIndex)
/// Default constructor. Stream-ID is undefined.
@@ -567,6 +572,7 @@
struct output : public T, public S,
public _C2StructCheck<S, ParamIndex,
T::PARAM_KIND | T::Index::IS_STREAM_FLAG | T::Type::DIR_OUTPUT> {
+ using T::operator!=;
_C2_CORE_INDEX_OVERRIDE(ParamIndex)
/// Default constructor. Stream-ID is undefined.
@@ -634,6 +640,7 @@
struct input : public T,
public _C2FlexStructCheck<S, ParamIndex,
T::PARAM_KIND | T::Index::IS_STREAM_FLAG | T::Type::DIR_INPUT> {
+ using T::operator!=;
private:
/// Default constructor. Stream-ID is undefined.
inline input(size_t flexCount) : T(_Type::CalcSize(flexCount), input::PARAM_TYPE) { }
@@ -656,6 +663,7 @@
struct output : public T,
public _C2FlexStructCheck<S, ParamIndex,
T::PARAM_KIND | T::Index::IS_STREAM_FLAG | T::Type::DIR_OUTPUT> {
+ using T::operator!=;
private:
/// Default constructor. Stream-ID is undefined.
inline output(size_t flexCount) : T(_Type::CalcSize(flexCount), output::PARAM_TYPE) { }
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index a5259aa..37633ae 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -56,9 +56,7 @@
":effectCommonFile",
],
defaults: [
- "aidlaudioservice_defaults",
- "latest_android_hardware_audio_effect_ndk_shared",
- "latest_android_media_audio_common_types_ndk_shared",
+ "aidlaudioeffectservice_defaults",
],
header_libs: [
"libaudioeffects",
diff --git a/media/libeffects/downmix/aidl/DownmixContext.cpp b/media/libeffects/downmix/aidl/DownmixContext.cpp
index 0e76d1d..13e0e5a 100644
--- a/media/libeffects/downmix/aidl/DownmixContext.cpp
+++ b/media/libeffects/downmix/aidl/DownmixContext.cpp
@@ -20,12 +20,60 @@
#include "DownmixContext.h"
-using aidl::android::hardware::audio::effect::IEffect;
using aidl::android::hardware::audio::common::getChannelCount;
+using aidl::android::hardware::audio::effect::IEffect;
using aidl::android::media::audio::common::AudioChannelLayout;
+using aidl::android::media::audio::common::AudioConfig;
namespace aidl::android::hardware::audio::effect {
+namespace {
+
+inline bool isChannelMaskValid(const AudioChannelLayout& channelMask) {
+ if (channelMask.getTag() != AudioChannelLayout::layoutMask) return false;
+ int chMask = channelMask.get<AudioChannelLayout::layoutMask>();
+ // check against unsupported channels (up to FCC_26)
+ constexpr uint32_t MAXIMUM_CHANNEL_MASK = AudioChannelLayout::LAYOUT_22POINT2 |
+ AudioChannelLayout::CHANNEL_FRONT_WIDE_LEFT |
+ AudioChannelLayout::CHANNEL_FRONT_WIDE_RIGHT;
+ if (chMask & ~MAXIMUM_CHANNEL_MASK) {
+ LOG(ERROR) << "Unsupported channels in " << (chMask & ~MAXIMUM_CHANNEL_MASK);
+ return false;
+ }
+ return true;
+}
+
+inline bool isStereoChannelMask(const AudioChannelLayout& channelMask) {
+ if (channelMask.getTag() != AudioChannelLayout::layoutMask) return false;
+
+ return channelMask.get<AudioChannelLayout::layoutMask>() == AudioChannelLayout::LAYOUT_STEREO;
+}
+
+} // namespace
+
+bool DownmixContext::validateCommonConfig(const Parameter::Common& common) {
+ const AudioConfig& input = common.input;
+ const AudioConfig& output = common.output;
+ if (input.base.sampleRate != output.base.sampleRate) {
+ LOG(ERROR) << __func__ << ": SRC not supported, input: " << input.toString()
+ << " output: " << output.toString();
+ return false;
+ }
+
+ if (!isStereoChannelMask(output.base.channelMask)) {
+ LOG(ERROR) << __func__ << ": output should be stereo, not "
+ << output.base.channelMask.toString();
+ return false;
+ }
+
+ if (!isChannelMaskValid(input.base.channelMask)) {
+ LOG(ERROR) << __func__ << ": invalid input channel, " << input.base.channelMask.toString();
+ return false;
+ }
+
+ return true;
+}
+
DownmixContext::DownmixContext(int statusDepth, const Parameter::Common& common)
: EffectContext(statusDepth, common) {
LOG(DEBUG) << __func__;
@@ -62,7 +110,7 @@
resetBuffer();
}
-IEffect::Status DownmixContext::lvmProcess(float* in, float* out, int samples) {
+IEffect::Status DownmixContext::downmixProcess(float* in, float* out, int samples) {
LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << samples;
IEffect::Status status = {EX_ILLEGAL_ARGUMENT, 0, 0};
@@ -122,18 +170,4 @@
}
}
-bool DownmixContext::isChannelMaskValid(AudioChannelLayout channelMask) {
- if (channelMask.getTag() != AudioChannelLayout::layoutMask) return false;
- int chMask = channelMask.get<AudioChannelLayout::layoutMask>();
- // check against unsupported channels (up to FCC_26)
- constexpr uint32_t MAXIMUM_CHANNEL_MASK = AudioChannelLayout::LAYOUT_22POINT2 |
- AudioChannelLayout::CHANNEL_FRONT_WIDE_LEFT |
- AudioChannelLayout::CHANNEL_FRONT_WIDE_RIGHT;
- if (chMask & ~MAXIMUM_CHANNEL_MASK) {
- LOG(ERROR) << "Unsupported channels in " << (chMask & ~MAXIMUM_CHANNEL_MASK);
- return false;
- }
- return true;
-}
-
} // namespace aidl::android::hardware::audio::effect
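For reference, a minimal standalone sketch of the bitmask check that the new isChannelMaskValid() helper above performs, written with plain integers instead of the AIDL AudioChannelLayout type; the constant values below are illustrative placeholders, not the real layout definitions.

#include <cstdint>
#include <cstdio>

// Placeholder bit values; the real constants come from
// android.media.audio.common.AudioChannelLayout.
constexpr uint32_t LAYOUT_22POINT2          = 0x00FFFFFFu;
constexpr uint32_t CHANNEL_FRONT_WIDE_LEFT  = 0x01000000u;
constexpr uint32_t CHANNEL_FRONT_WIDE_RIGHT = 0x02000000u;
constexpr uint32_t MAXIMUM_CHANNEL_MASK =
        LAYOUT_22POINT2 | CHANNEL_FRONT_WIDE_LEFT | CHANNEL_FRONT_WIDE_RIGHT;

// Same shape as the check above: any bit outside the supported set (up to FCC_26)
// makes the layout mask invalid.
bool isLayoutMaskSupported(uint32_t chMask) {
    return (chMask & ~MAXIMUM_CHANNEL_MASK) == 0;
}

int main() {
    std::printf("stereo-like mask supported: %d\n", isLayoutMaskSupported(0x3u));
    std::printf("out-of-range bit supported: %d\n", isLayoutMaskSupported(0x80000000u));
    return 0;
}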
diff --git a/media/libeffects/downmix/aidl/DownmixContext.h b/media/libeffects/downmix/aidl/DownmixContext.h
index 1571c38..a381d7f 100644
--- a/media/libeffects/downmix/aidl/DownmixContext.h
+++ b/media/libeffects/downmix/aidl/DownmixContext.h
@@ -50,7 +50,9 @@
return RetCode::SUCCESS;
}
- IEffect::Status lvmProcess(float* in, float* out, int samples);
+ IEffect::Status downmixProcess(float* in, float* out, int samples);
+
+ static bool validateCommonConfig(const Parameter::Common& common);
private:
DownmixState mState;
@@ -60,7 +62,6 @@
// Common Params
void init_params(const Parameter::Common& common);
- bool isChannelMaskValid(::aidl::android::media::audio::common::AudioChannelLayout channelMask);
};
} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/downmix/aidl/EffectDownmix.cpp b/media/libeffects/downmix/aidl/EffectDownmix.cpp
index 7068c5c..702a6f0 100644
--- a/media/libeffects/downmix/aidl/EffectDownmix.cpp
+++ b/media/libeffects/downmix/aidl/EffectDownmix.cpp
@@ -193,6 +193,8 @@
return mContext;
}
+ if (!DownmixContext::validateCommonConfig(common)) return nullptr;
+
mContext = std::make_shared<DownmixContext>(1 /* statusFmqDepth */, common);
return mContext;
}
@@ -210,7 +212,7 @@
LOG(ERROR) << __func__ << " nullContext";
return {EX_NULL_POINTER, 0, 0};
}
- return mContext->lvmProcess(in, out, sampleToProcess);
+ return mContext->downmixProcess(in, out, sampleToProcess);
}
} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/dynamicsproc/Android.bp b/media/libeffects/dynamicsproc/Android.bp
index 7838117..9e154cf 100644
--- a/media/libeffects/dynamicsproc/Android.bp
+++ b/media/libeffects/dynamicsproc/Android.bp
@@ -86,9 +86,7 @@
],
defaults: [
- "aidlaudioservice_defaults",
- "latest_android_hardware_audio_effect_ndk_shared",
- "latest_android_media_audio_common_types_ndk_shared",
+ "aidlaudioeffectservice_defaults",
"dynamicsprocessingdefaults",
],
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
index e5e5368..f3a3860 100644
--- a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
@@ -313,8 +313,8 @@
void DynamicsProcessingContext::init() {
std::lock_guard lg(mMutex);
mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
- mChannelCount = ::aidl::android::hardware::audio::common::getChannelCount(
- mCommon.input.base.channelMask);
+ mChannelCount = static_cast<int>(::aidl::android::hardware::audio::common::getChannelCount(
+ mCommon.input.base.channelMask));
}
dp_fx::DPChannel* DynamicsProcessingContext::getChannel_l(int channel) {
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h
index ced7f19..839c6dd 100644
--- a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h
@@ -74,7 +74,7 @@
static constexpr float kPreferredProcessingDurationMs = 10.0f;
static constexpr int kBandCount = 5;
std::mutex mMutex;
- size_t mChannelCount GUARDED_BY(mMutex) = 0;
+ int mChannelCount GUARDED_BY(mMutex) = 0;
DynamicsProcessingState mState GUARDED_BY(mMutex) = DYNAMICS_PROCESSING_STATE_UNINITIALIZED;
std::unique_ptr<dp_fx::DPFrequency> mDpFreq GUARDED_BY(mMutex) = nullptr;
bool mEngineInited GUARDED_BY(mMutex) = false;
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
index fc80211..cc19a80 100644
--- a/media/libeffects/hapticgenerator/Android.bp
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -75,9 +75,7 @@
],
defaults: [
- "aidlaudioservice_defaults",
- "latest_android_hardware_audio_effect_ndk_shared",
- "latest_android_media_audio_common_types_ndk_shared",
+ "aidlaudioeffectservice_defaults",
"hapticgeneratordefaults",
],
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
index de44e05..354ee00 100644
--- a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <cstddef>
#define LOG_TAG "AHAL_HapticGeneratorContext"
#include <Utils.h>
@@ -162,8 +163,8 @@
}
// Construct input buffer according to haptic channel source
- for (size_t i = 0; i < mFrameCount; ++i) {
- for (size_t j = 0; j < mParams.mHapticChannelCount; ++j) {
+ for (int64_t i = 0; i < mFrameCount; ++i) {
+ for (int j = 0; j < mParams.mHapticChannelCount; ++j) {
mInputBuffer[i * mParams.mHapticChannelCount + j] =
in[i * mParams.mAudioChannelCount + mParams.mHapticChannelSource[j]];
}
@@ -180,8 +181,7 @@
// buffer, which contains haptic data at the end of the buffer, directly to sink buffer.
// In that case, copy haptic data to input buffer instead of output buffer.
// Note: this may not work with rpc/binder calls
- int offset = samples;
- for (int i = 0; i < hapticSampleCount; ++i) {
+ for (size_t i = 0; i < hapticSampleCount; ++i) {
in[samples + i] = hapticOutBuffer[i];
}
return {STATUS_OK, samples, static_cast<int32_t>(samples + hapticSampleCount)};
@@ -199,7 +199,7 @@
mParams.mHapticChannelCount = ::aidl::android::hardware::audio::common::getChannelCount(
outputChMask, media::audio::common::AudioChannelLayout::LAYOUT_HAPTIC_AB);
LOG_ALWAYS_FATAL_IF(mParams.mHapticChannelCount > 2, "haptic channel count is too large");
- for (size_t i = 0; i < mParams.mHapticChannelCount; ++i) {
+ for (int i = 0; i < mParams.mHapticChannelCount; ++i) {
// By default, use the first audio channel to generate haptic channels.
mParams.mHapticChannelSource[i] = 0;
}
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
index a0a0a4c..26e69e4 100644
--- a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
@@ -92,7 +92,7 @@
HapticGeneratorState mState;
HapticGeneratorParam mParams GUARDED_BY(mMutex);
int mSampleRate;
- int mFrameCount = 0;
+ int64_t mFrameCount = 0;
// A cache for all shared pointers of the HapticGenerator
struct HapticGeneratorProcessorsRecord mProcessorsRecord;
diff --git a/media/libeffects/loudness/Android.bp b/media/libeffects/loudness/Android.bp
index 7acba11..05bbec3 100644
--- a/media/libeffects/loudness/Android.bp
+++ b/media/libeffects/loudness/Android.bp
@@ -54,9 +54,7 @@
":effectCommonFile",
],
defaults: [
- "aidlaudioservice_defaults",
- "latest_android_hardware_audio_effect_ndk_shared",
- "latest_android_media_audio_common_types_ndk_shared",
+ "aidlaudioeffectservice_defaults",
],
header_libs: [
"libaudioeffects",
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
index a163f4b..71bb2ef 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
@@ -227,7 +227,7 @@
bool viEnabled = params.VirtualizerOperatingMode == LVM_MODE_ON;
if (eqEnabled) {
- for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+ for (size_t i = 0; i < lvm::MAX_NUM_BANDS; i++) {
float bandFactor = mBandGainMdB[i] / 1500.0;
float bandCoefficient = lvm::kBandEnergyCoefficient[i];
float bandEnergy = bandFactor * bandCoefficient * bandCoefficient;
@@ -236,7 +236,7 @@
// cross EQ coefficients
float bandFactorSum = 0;
- for (int i = 0; i < lvm::MAX_NUM_BANDS - 1; i++) {
+ for (size_t i = 0; i < lvm::MAX_NUM_BANDS - 1; i++) {
float bandFactor1 = mBandGainMdB[i] / 1500.0;
float bandFactor2 = mBandGainMdB[i + 1] / 1500.0;
@@ -259,7 +259,7 @@
energyContribution += boostFactor * boostCoefficient * boostCoefficient;
if (eqEnabled) {
- for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+ for (size_t i = 0; i < lvm::MAX_NUM_BANDS; i++) {
float bandFactor = mBandGainMdB[i] / 1500.0;
float bandCrossCoefficient = lvm::kBassBoostEnergyCrossCoefficient[i];
float bandEnergy = boostFactor * bandFactor * bandCrossCoefficient;
@@ -421,7 +421,6 @@
RetCode BundleContext::setVolumeStereo(const Parameter::VolumeStereo& volume) {
LVM_ControlParams_t params;
- LVM_ReturnStatus_en status = LVM_SUCCESS;
// Convert volume to dB
float leftdB = VolToDb(volume.left);
@@ -512,7 +511,7 @@
const auto [min, max] =
std::minmax_element(bandLevels.begin(), bandLevels.end(),
[](const auto& a, const auto& b) { return a.index < b.index; });
- return min->index >= 0 && max->index < lvm::MAX_NUM_BANDS;
+ return min->index >= 0 && static_cast<size_t>(max->index) < lvm::MAX_NUM_BANDS;
}
RetCode BundleContext::updateControlParameter(const std::vector<Equalizer::BandLevel>& bandLevels) {
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index fa300d2..5fb3966 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -110,9 +110,7 @@
],
static_libs: ["libmusicbundle"],
defaults: [
- "aidlaudioservice_defaults",
- "latest_android_hardware_audio_effect_ndk_shared",
- "latest_android_media_audio_common_types_ndk_shared",
+ "aidlaudioeffectservice_defaults",
],
local_include_dirs: ["Aidl"],
header_libs: [
@@ -140,9 +138,7 @@
],
static_libs: ["libreverb"],
defaults: [
- "aidlaudioservice_defaults",
- "latest_android_hardware_audio_effect_ndk_shared",
- "latest_android_media_audio_common_types_ndk_shared",
+ "aidlaudioeffectservice_defaults",
],
local_include_dirs: ["Reverb/aidl"],
header_libs: [
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
index 79e67f2..468b268 100644
--- a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
@@ -329,7 +329,7 @@
*/
int ReverbContext::convertLevel(int level) {
- for (int i = 0; i < kLevelMapping.size(); i++) {
+ for (std::size_t i = 0; i < kLevelMapping.size(); i++) {
if (level <= kLevelMapping[i]) {
return i;
}
diff --git a/media/libeffects/preprocessing/Android.bp b/media/libeffects/preprocessing/Android.bp
index d018c47..564eb36 100644
--- a/media/libeffects/preprocessing/Android.bp
+++ b/media/libeffects/preprocessing/Android.bp
@@ -67,9 +67,7 @@
":effectCommonFile",
],
defaults: [
- "aidlaudioservice_defaults",
- "latest_android_hardware_audio_effect_ndk_shared",
- "latest_android_media_audio_common_types_ndk_shared",
+ "aidlaudioeffectservice_defaults",
],
local_include_dirs: ["aidl"],
shared_libs: [
diff --git a/media/libeffects/visualizer/Android.bp b/media/libeffects/visualizer/Android.bp
index cf782f7..a8b665b 100644
--- a/media/libeffects/visualizer/Android.bp
+++ b/media/libeffects/visualizer/Android.bp
@@ -60,8 +60,6 @@
],
defaults: [
"aidlaudioeffectservice_defaults",
- "latest_android_hardware_audio_effect_ndk_shared",
- "latest_android_media_audio_common_types_ndk_shared",
"visualizer_defaults",
],
cflags: [
diff --git a/media/utils/MethodStatistics.cpp b/media/utils/MethodStatistics.cpp
index 086757b..80f0fc4 100644
--- a/media/utils/MethodStatistics.cpp
+++ b/media/utils/MethodStatistics.cpp
@@ -20,6 +20,8 @@
// Repository for MethodStatistics Objects
+// It's important that the HAL class names are defined with the suffix "Hidl" or "Aidl" because
+// TimerThread::isRequestFromHal uses this string to match binder calls to/from the HAL.
std::shared_ptr<std::vector<std::string>>
getStatisticsClassesForModule(std::string_view moduleName) {
static const std::map<std::string, std::shared_ptr<std::vector<std::string>>,
@@ -34,6 +36,15 @@
"StreamOutHalHidl",
})
},
+ {
+ METHOD_STATISTICS_MODULE_NAME_AUDIO_AIDL,
+ std::shared_ptr<std::vector<std::string>>(
+ new std::vector<std::string>{
+ "DeviceHalAidl",
+ "EffectHalAidl",
+ "StreamHalAidl",
+ })
+ },
};
auto it = m.find(moduleName);
if (it == m.end()) return {};
@@ -61,6 +72,9 @@
addClassesToMap(
getStatisticsClassesForModule(METHOD_STATISTICS_MODULE_NAME_AUDIO_HIDL),
m);
+ addClassesToMap(
+ getStatisticsClassesForModule(METHOD_STATISTICS_MODULE_NAME_AUDIO_AIDL),
+ m);
return m;
}();
diff --git a/media/utils/TimerThread.cpp b/media/utils/TimerThread.cpp
index 3966103..25852e4 100644
--- a/media/utils/TimerThread.cpp
+++ b/media/utils/TimerThread.cpp
@@ -104,11 +104,16 @@
//
/* static */
bool TimerThread::isRequestFromHal(const std::shared_ptr<const Request>& request) {
- const size_t hidlPos = request->tag.asStringView().find("Hidl");
- if (hidlPos == std::string::npos) return false;
- // should be a separator afterwards Hidl which indicates the string was in the class.
- const size_t separatorPos = request->tag.asStringView().find("::", hidlPos);
- return separatorPos != std::string::npos;
+ for (const auto& s : {"Hidl", "Aidl"}) {
+ const auto& tagSV = request->tag.asStringView();
+ const size_t halStrPos = tagSV.find(s);
+ // There should be a separator after "Hidl"/"Aidl", which indicates the marker was part of the class name.
+ if (halStrPos != std::string::npos && tagSV.find("::", halStrPos) != std::string::npos) {
+ return true;
+ }
+ }
+
+ return false;
}
struct TimerThread::SnapshotAnalysis TimerThread::getSnapshotAnalysis(size_t retiredCount) const {
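A minimal sketch of the tag matching performed by the rewritten isRequestFromHal() above, assuming tags of the form "ClassName::method" (the method names in the examples are made up for illustration).

#include <cstdio>
#include <string_view>

// A tag counts as a HAL request when it contains "Hidl" or "Aidl" followed
// later by a "::" separator, i.e. the marker is part of the class name.
bool isRequestFromHalTag(std::string_view tag) {
    for (const auto* marker : {"Hidl", "Aidl"}) {
        const size_t pos = tag.find(marker);
        if (pos != std::string_view::npos && tag.find("::", pos) != std::string_view::npos) {
            return true;
        }
    }
    return false;
}

int main() {
    std::printf("%d\n", isRequestFromHalTag("EffectHalAidl::processImpl"));   // 1
    std::printf("%d\n", isRequestFromHalTag("StreamOutHalHidl::write"));      // 1
    std::printf("%d\n", isRequestFromHalTag("AudioFlinger::createTrack"));    // 0
    return 0;
}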
diff --git a/media/utils/include/mediautils/MethodStatistics.h b/media/utils/include/mediautils/MethodStatistics.h
index c8b36d8..2543dfa 100644
--- a/media/utils/include/mediautils/MethodStatistics.h
+++ b/media/utils/include/mediautils/MethodStatistics.h
@@ -124,6 +124,7 @@
// Managed Statistics support.
// Supported Modules
#define METHOD_STATISTICS_MODULE_NAME_AUDIO_HIDL "AudioHidl"
+#define METHOD_STATISTICS_MODULE_NAME_AUDIO_AIDL "AudioAidl"
// Returns a vector of class names for the module, or a nullptr if module not found.
std::shared_ptr<std::vector<std::string>>
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 98b6c27..0ebb8ed 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -859,12 +859,15 @@
dprintf(fd, "\nIEffect binder call profile:\n");
write(fd, timeCheckStats.c_str(), timeCheckStats.size());
- // Automatically fetch HIDL statistics.
- std::shared_ptr<std::vector<std::string>> hidlClassNames =
- mediautils::getStatisticsClassesForModule(
- METHOD_STATISTICS_MODULE_NAME_AUDIO_HIDL);
- if (hidlClassNames) {
- for (const auto& className : *hidlClassNames) {
+ // Automatically fetch HIDL or AIDL statistics.
+ const std::string_view halType = (mDevicesFactoryHal->getHalVersion().getType() ==
+ AudioHalVersionInfo::Type::HIDL)
+ ? METHOD_STATISTICS_MODULE_NAME_AUDIO_HIDL
+ : METHOD_STATISTICS_MODULE_NAME_AUDIO_AIDL;
+ const std::shared_ptr<std::vector<std::string>> halClassNames =
+ mediautils::getStatisticsClassesForModule(halType);
+ if (halClassNames) {
+ for (const auto& className : *halClassNames) {
auto stats = mediautils::getStatisticsForClass(className);
if (stats) {
timeCheckStats = stats->dump();
@@ -3196,41 +3199,19 @@
return 0;
}
- audio_config_t halconfig = *config;
- sp<DeviceHalInterface> inHwHal = inHwDev->hwDevice();
- sp<StreamInHalInterface> inStream;
- status_t status = inHwHal->openInputStream(
- *input, devices, &halconfig, flags, address, source,
- outputDevice, outputDeviceAddress, &inStream);
- ALOGV("openInput_l() openInputStream returned input %p, devices %#x, SamplingRate %d"
- ", Format %#x, Channels %#x, flags %#x, status %d addr %s",
- inStream.get(),
+ AudioStreamIn *inputStream = nullptr;
+ status_t status = inHwDev->openInputStream(
+ &inputStream,
+ *input,
devices,
- halconfig.sample_rate,
- halconfig.format,
- halconfig.channel_mask,
flags,
- status, address);
+ config,
+ address,
+ source,
+ outputDevice,
+ outputDeviceAddress.c_str());
- // If the input could not be opened with the requested parameters and we can handle the
- // conversion internally, try to open again with the proposed parameters.
- if (status == BAD_VALUE &&
- audio_is_linear_pcm(config->format) &&
- audio_is_linear_pcm(halconfig.format) &&
- (halconfig.sample_rate <= AUDIO_RESAMPLER_DOWN_RATIO_MAX * config->sample_rate) &&
- (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_LIMIT) &&
- (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_LIMIT)) {
- // FIXME describe the change proposed by HAL (save old values so we can log them here)
- ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
- inStream.clear();
- status = inHwHal->openInputStream(
- *input, devices, &halconfig, flags, address, source,
- outputDevice, outputDeviceAddress, &inStream);
- // FIXME log this new status; HAL should not propose any further changes
- }
-
- if (status == NO_ERROR && inStream != 0) {
- AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream, flags);
+ if (status == NO_ERROR) {
if ((flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
const sp<IAfMmapCaptureThread> thread =
IAfMmapCaptureThread::create(this, *input, inHwDev, inputStream, mSystemReady);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index d8d727c..4c6b02c 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -9544,10 +9544,24 @@
void RecordThread::readInputParameters_l()
{
- status_t result = mInput->stream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
- LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
- mFormat = mHALFormat;
+ const audio_config_base_t audioConfig = mInput->getAudioProperties();
+ mSampleRate = audioConfig.sample_rate;
+ mChannelMask = audioConfig.channel_mask;
+ if (!audio_is_input_channel(mChannelMask)) {
+ LOG_ALWAYS_FATAL("Channel mask %#x not valid for input", mChannelMask);
+ }
+
mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+
+ // Get actual HAL format.
+ status_t result = mInput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving input stream format: %d", result);
+ // Get format from the shim, which will be different than the HAL format
+ // if recording compressed audio from IEC61937 wrapped sources.
+ mFormat = audioConfig.format;
+ if (!audio_is_valid_format(mFormat)) {
+ LOG_ALWAYS_FATAL("Format %#x not valid for input", mFormat);
+ }
if (audio_is_linear_pcm(mFormat)) {
LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_LIMIT, "HAL channel count %d > %d",
mChannelCount, FCC_LIMIT);
@@ -9555,8 +9569,7 @@
// Can have more that FCC_LIMIT channels in encoded streams.
ALOGI("HAL format %#x is not linear pcm", mFormat);
}
- result = mInput->stream->getFrameSize(&mFrameSize);
- LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
+ mFrameSize = mInput->getFrameSize();
LOG_ALWAYS_FATAL_IF(mFrameSize <= 0, "Error frame size was %zu but must be greater than zero",
mFrameSize);
result = mInput->stream->getBufferSize(&mBufferSize);
diff --git a/services/audioflinger/datapath/Android.bp b/services/audioflinger/datapath/Android.bp
index ee98aef..4235f14 100644
--- a/services/audioflinger/datapath/Android.bp
+++ b/services/audioflinger/datapath/Android.bp
@@ -43,11 +43,14 @@
srcs: [
"AudioHwDevice.cpp",
+ "AudioStreamIn.cpp",
"AudioStreamOut.cpp",
+ "SpdifStreamIn.cpp",
"SpdifStreamOut.cpp",
],
header_libs: [
+ "libaudioclient_headers",
"libaudiohal_headers",
"liberror_headers",
],
diff --git a/services/audioflinger/datapath/AudioHwDevice.cpp b/services/audioflinger/datapath/AudioHwDevice.cpp
index 67e9991..95e9ecc 100644
--- a/services/audioflinger/datapath/AudioHwDevice.cpp
+++ b/services/audioflinger/datapath/AudioHwDevice.cpp
@@ -1,19 +1,19 @@
/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
+ *
+ * Copyright 2007, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
#define LOG_TAG "AudioHwDevice"
//#define LOG_NDEBUG 0
@@ -21,10 +21,13 @@
#include <system/audio.h>
#include <utils/Log.h>
+#include <audio_utils/spdif/SPDIFDecoder.h>
#include <audio_utils/spdif/SPDIFEncoder.h>
+#include <media/AudioResamplerPublic.h>
#include "AudioHwDevice.h"
#include "AudioStreamOut.h"
+#include "SpdifStreamIn.h"
#include "SpdifStreamOut.h"
namespace android {
@@ -47,12 +50,8 @@
auto outputStream = new AudioStreamOut(this, flags);
// Try to open the HAL first using the current format.
- ALOGV("openOutputStream(), try "
- " sampleRate %d, Format %#x, "
- "channelMask %#x",
- config->sample_rate,
- config->format,
- config->channel_mask);
+ ALOGV("openOutputStream(), try sampleRate %d, format %#x, channelMask %#x", config->sample_rate,
+ config->format, config->channel_mask);
status_t status = outputStream->open(handle, deviceType, config, address);
if (status != NO_ERROR) {
@@ -62,13 +61,8 @@
// FIXME Look at any modification to the config.
// The HAL might modify the config to suggest a wrapped format.
// Log this so we can see what the HALs are doing.
- ALOGI("openOutputStream(), HAL returned"
- " sampleRate %d, Format %#x, "
- "channelMask %#x, status %d",
- config->sample_rate,
- config->format,
- config->channel_mask,
- status);
+ ALOGI("openOutputStream(), HAL returned sampleRate %d, format %#x, channelMask %#x,"
+ " status %d", config->sample_rate, config->format, config->channel_mask, status);
// If the data is encoded then try again using wrapped PCM.
const bool wrapperNeeded = !audio_has_proportional_frames(originalConfig.format)
@@ -96,6 +90,79 @@
return status;
}
+status_t AudioHwDevice::openInputStream(
+ AudioStreamIn **ppStreamIn,
+ audio_io_handle_t handle,
+ audio_devices_t deviceType,
+ audio_input_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ audio_source_t source,
+ audio_devices_t outputDevice,
+ const char *outputDeviceAddress) {
+
+ struct audio_config originalConfig = *config;
+ auto inputStream = new AudioStreamIn(this, flags);
+
+ // Try to open the HAL first using the current format.
+ ALOGV("openInputStream(), try sampleRate %d, format %#x, channelMask %#x", config->sample_rate,
+ config->format, config->channel_mask);
+ status_t status = inputStream->open(handle, deviceType, config, address, source, outputDevice,
+ outputDeviceAddress);
+
+ // If the input could not be opened with the requested parameters and we can handle the
+ // conversion internally, try to open again with the proposed parameters.
+ if (status == BAD_VALUE &&
+ audio_is_linear_pcm(originalConfig.format) &&
+ audio_is_linear_pcm(config->format) &&
+ (config->sample_rate <= AUDIO_RESAMPLER_DOWN_RATIO_MAX * originalConfig.sample_rate) &&
+ (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_LIMIT) &&
+ (audio_channel_count_from_in_mask(originalConfig.channel_mask) <= FCC_LIMIT)) {
+ // FIXME describe the change proposed by HAL (save old values so we can log them here)
+ ALOGV("openInputStream() reopening with proposed sampling rate and channel mask");
+ status = inputStream->open(handle, deviceType, config, address, source,
+ outputDevice, outputDeviceAddress);
+ // FIXME log this new status; HAL should not propose any further changes
+ if (status != NO_ERROR) {
+ delete inputStream;
+ inputStream = nullptr;
+ }
+ } else if (status != NO_ERROR) {
+ delete inputStream;
+ inputStream = nullptr;
+
+ // FIXME Look at any modification to the config.
+ // The HAL might modify the config to suggest a wrapped format.
+ // Log this so we can see what the HALs are doing.
+ ALOGI("openInputStream(), HAL returned sampleRate %d, format %#x, channelMask %#x,"
+ " status %d", config->sample_rate, config->format, config->channel_mask, status);
+
+ // If the data is encoded then try again using wrapped PCM.
+ const bool unwrapperNeeded = !audio_has_proportional_frames(originalConfig.format)
+ && ((flags & AUDIO_INPUT_FLAG_DIRECT) != 0);
+
+ if (unwrapperNeeded) {
+ if (SPDIFDecoder::isFormatSupported(originalConfig.format)) {
+ inputStream = new SpdifStreamIn(this, flags, originalConfig.format);
+ status = inputStream->open(handle, deviceType, &originalConfig, address, source,
+ outputDevice, outputDeviceAddress);
+ if (status != NO_ERROR) {
+ ALOGE("ERROR - openInputStream(), SPDIF open returned %d",
+ status);
+ delete inputStream;
+ inputStream = nullptr;
+ }
+ } else {
+ ALOGE("ERROR - openInputStream(), SPDIFDecoder does not support format 0x%08x",
+ originalConfig.format);
+ }
+ }
+ }
+
+ *ppStreamIn = inputStream;
+ return status;
+}
+
bool AudioHwDevice::supportsAudioPatches() const {
bool result;
return mHwDevice->supportsAudioPatches(&result) == OK ? result : false;
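A simplified decision sketch for the fallback logic in the new AudioHwDevice::openInputStream() above; the real code also checks the resampler down-ratio and FCC_LIMIT channel counts, and the error values used here (0 for NO_ERROR, -22 for BAD_VALUE) are assumed for illustration only.

#include <cstdio>

enum class Retry { None, ProposedConfig, SpdifUnwrap };

// First open with the requested config; on BAD_VALUE with PCM on both sides,
// accept the HAL's proposed config; for encoded direct inputs, fall back to
// an SPDIF-unwrapping stream (SpdifStreamIn) with the original config.
Retry retryStrategy(int status, bool requestedIsPcm, bool proposedIsPcm, bool directFlag) {
    if (status == 0) return Retry::None;  // first open succeeded
    if (status == -22 && requestedIsPcm && proposedIsPcm) return Retry::ProposedConfig;
    if (!requestedIsPcm && directFlag) return Retry::SpdifUnwrap;
    return Retry::None;
}

int main() {
    std::printf("%d\n", static_cast<int>(retryStrategy(-22, true, true, false)));  // ProposedConfig
    std::printf("%d\n", static_cast<int>(retryStrategy(-22, false, false, true))); // SpdifUnwrap
    return 0;
}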
diff --git a/services/audioflinger/datapath/AudioHwDevice.h b/services/audioflinger/datapath/AudioHwDevice.h
index cfb6fbd..80c1473 100644
--- a/services/audioflinger/datapath/AudioHwDevice.h
+++ b/services/audioflinger/datapath/AudioHwDevice.h
@@ -1,22 +1,21 @@
/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
+ *
+ * Copyright 2007, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
-#ifndef ANDROID_AUDIO_HW_DEVICE_H
-#define ANDROID_AUDIO_HW_DEVICE_H
+#pragma once
#include <stdint.h>
#include <stdlib.h>
@@ -30,6 +29,7 @@
namespace android {
+class AudioStreamIn;
class AudioStreamOut;
class AudioHwDevice {
@@ -89,6 +89,17 @@
struct audio_config *config,
const char *address);
+ status_t openInputStream(
+ AudioStreamIn **ppStreamIn,
+ audio_io_handle_t handle,
+ audio_devices_t deviceType,
+ audio_input_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ audio_source_t source,
+ audio_devices_t outputDevice,
+ const char *outputDeviceAddress);
+
[[nodiscard]] bool supportsAudioPatches() const;
[[nodiscard]] status_t getAudioPort(struct audio_port_v7 *port) const;
@@ -112,5 +123,3 @@
};
} // namespace android
-
-#endif // ANDROID_AUDIO_HW_DEVICE_H
diff --git a/services/audioflinger/datapath/AudioStreamIn.cpp b/services/audioflinger/datapath/AudioStreamIn.cpp
new file mode 100644
index 0000000..24f3bb9
--- /dev/null
+++ b/services/audioflinger/datapath/AudioStreamIn.cpp
@@ -0,0 +1,137 @@
+/*
+ *
+ * Copyright 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+#include "AudioStreamIn.h"
+
+#include <media/audiohal/DeviceHalInterface.h>
+#include <media/audiohal/StreamHalInterface.h>
+#include <system/audio.h>
+#include <utils/Log.h>
+
+#include "AudioHwDevice.h"
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+AudioStreamIn::AudioStreamIn(AudioHwDevice *dev, audio_input_flags_t flags)
+ : audioHwDev(dev)
+ , flags(flags)
+{
+}
+
+// This must be defined here together with the HAL includes above and
+// not solely in the header.
+AudioStreamIn::~AudioStreamIn() = default;
+
+sp<DeviceHalInterface> AudioStreamIn::hwDev() const
+{
+ return audioHwDev->hwDevice();
+}
+
+status_t AudioStreamIn::getCapturePosition(int64_t* frames, int64_t* time)
+{
+ if (stream == nullptr) {
+ return NO_INIT;
+ }
+
+ int64_t halPosition = 0;
+ const status_t status = stream->getCapturePosition(&halPosition, time);
+ if (status != NO_ERROR) {
+ return status;
+ }
+
+ // Adjust for standby using HAL rate frames.
+ // Only apply this correction if the HAL is getting PCM frames.
+ if (mHalFormatHasProportionalFrames) {
+ const uint64_t adjustedPosition = (halPosition <= mFramesReadAtStandby) ?
+ 0 : (halPosition - mFramesReadAtStandby);
+ // Scale from HAL sample rate to application rate.
+ *frames = adjustedPosition / mRateMultiplier;
+ } else {
+ // For compressed formats.
+ *frames = halPosition;
+ }
+
+ return status;
+}
+
+status_t AudioStreamIn::open(
+ audio_io_handle_t handle,
+ audio_devices_t deviceType,
+ struct audio_config *config,
+ const char *address,
+ audio_source_t source,
+ audio_devices_t outputDevice,
+ const char *outputDeviceAddress)
+{
+ sp<StreamInHalInterface> inStream;
+
+ int status = hwDev()->openInputStream(
+ handle,
+ deviceType,
+ config,
+ flags,
+ address,
+ source,
+ outputDevice,
+ outputDeviceAddress,
+ &inStream);
+ ALOGV("AudioStreamIn::open(), HAL returned stream %p, sampleRate %d, format %#x,"
+ " channelMask %#x, status %d", inStream.get(), config->sample_rate, config->format,
+ config->channel_mask, status);
+
+ if (status == NO_ERROR) {
+ stream = inStream;
+ mHalFormatHasProportionalFrames = audio_has_proportional_frames(config->format);
+ status = stream->getFrameSize(&mHalFrameSize);
+ LOG_ALWAYS_FATAL_IF(status != OK, "Error retrieving frame size from HAL: %d", status);
+ LOG_ALWAYS_FATAL_IF(mHalFrameSize == 0, "Error frame size was %zu but must be greater than"
+ " zero", mHalFrameSize);
+ }
+
+ return status;
+}
+
+audio_config_base_t AudioStreamIn::getAudioProperties() const
+{
+ audio_config_base_t result = AUDIO_CONFIG_BASE_INITIALIZER;
+ if (stream->getAudioProperties(&result) != OK) {
+ result.sample_rate = 0;
+ result.channel_mask = AUDIO_CHANNEL_INVALID;
+ result.format = AUDIO_FORMAT_INVALID;
+ }
+ return result;
+}
+
+status_t AudioStreamIn::standby()
+{
+ mFramesReadAtStandby = mFramesRead;
+ return stream->standby();
+}
+
+status_t AudioStreamIn::read(void* buffer, size_t bytes, size_t* read)
+{
+ const status_t result = stream->read(buffer, bytes, read);
+ if (result == OK && *read > 0 && mHalFrameSize > 0) {
+ mFramesRead += *read / mHalFrameSize;
+ }
+ return result;
+}
+
+} // namespace android
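For the proportional-frames path in getCapturePosition() above, a small worked sketch with made-up numbers showing the standby and rate adjustment.

#include <cstdint>
#include <cstdio>

// Subtract the frames already read when the stream last went to standby, then
// scale the HAL-rate position down to the application rate.
int64_t applicationFrames(int64_t halPosition, int64_t framesReadAtStandby, int rateMultiplier) {
    const int64_t adjusted =
            (halPosition <= framesReadAtStandby) ? 0 : (halPosition - framesReadAtStandby);
    return adjusted / rateMultiplier;
}

int main() {
    // 96000 HAL frames, 16000 read before the last standby, 4x HAL rate
    // (e.g. an IEC61937 burst carrying E-AC3): (96000 - 16000) / 4 = 20000.
    std::printf("%lld\n", static_cast<long long>(applicationFrames(96000, 16000, 4)));
    return 0;
}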
diff --git a/services/audioflinger/datapath/AudioStreamIn.h b/services/audioflinger/datapath/AudioStreamIn.h
index 604a4e4..6d1c6a7 100644
--- a/services/audioflinger/datapath/AudioStreamIn.h
+++ b/services/audioflinger/datapath/AudioStreamIn.h
@@ -31,30 +31,57 @@
virtual status_t standby() = 0;
};
-// AudioStreamIn is immutable, so its fields are const.
-// The methods must not be const to match StreamHalInterface signature.
-
-struct AudioStreamIn : public Source {
+/**
+ * Managed access to a HAL input stream.
+ */
+class AudioStreamIn : public Source {
+public:
const AudioHwDevice* const audioHwDev;
- const sp<StreamInHalInterface> stream;
+ sp<StreamInHalInterface> stream;
const audio_input_flags_t flags;
- AudioStreamIn(
- const AudioHwDevice* dev, const sp<StreamInHalInterface>& in,
- audio_input_flags_t flags)
- : audioHwDev(dev), stream(in), flags(flags) {}
+ [[nodiscard]] sp<DeviceHalInterface> hwDev() const;
- status_t read(void* buffer, size_t bytes, size_t* read) final {
- return stream->read(buffer, bytes, read);
- }
+ AudioStreamIn(AudioHwDevice *dev, audio_input_flags_t flags);
- status_t getCapturePosition(int64_t* frames, int64_t* time) final {
- return stream->getCapturePosition(frames, time);
- }
+ virtual status_t open(
+ audio_io_handle_t handle,
+ audio_devices_t deviceType,
+ struct audio_config *config,
+ const char *address,
+ audio_source_t source,
+ audio_devices_t outputDevice,
+ const char *outputDeviceAddress);
- status_t standby() final { return stream->standby(); }
+ ~AudioStreamIn() override;
- sp<DeviceHalInterface> hwDev() const { return audioHwDev->hwDevice(); }
+ status_t getCapturePosition(int64_t* frames, int64_t* time) override;
+
+ status_t read(void* buffer, size_t bytes, size_t* read) override;
+
+ /**
+ * @return frame size from the perspective of the application and the AudioFlinger.
+ */
+ [[nodiscard]] virtual size_t getFrameSize() const { return mHalFrameSize; }
+
+ /**
+ * @return audio stream configuration: channel mask, format, sample rate:
+ * - channel mask from the perspective of the application and the AudioFlinger;
+ * the HAL is in stereo mode when capturing multi-channel compressed audio over HDMI;
+ * - format from the perspective of the application and the AudioFlinger;
+ * - sample rate from the perspective of the application and the AudioFlinger;
+ * the HAL may be running at a higher sample rate if, for example, reading wrapped EAC3.
+ */
+ [[nodiscard]] virtual audio_config_base_t getAudioProperties() const;
+
+ status_t standby() override;
+
+protected:
+ uint64_t mFramesRead = 0;
+ int64_t mFramesReadAtStandby = 0;
+ int mRateMultiplier = 1;
+ bool mHalFormatHasProportionalFrames = false;
+ size_t mHalFrameSize = 0;
};
} // namespace android
diff --git a/services/audioflinger/datapath/AudioStreamOut.cpp b/services/audioflinger/datapath/AudioStreamOut.cpp
index 6fa82e5..1830d15 100644
--- a/services/audioflinger/datapath/AudioStreamOut.cpp
+++ b/services/audioflinger/datapath/AudioStreamOut.cpp
@@ -1,30 +1,31 @@
/*
-**
-** Copyright 2015, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
+ *
+ * Copyright 2015, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
+#include "AudioStreamOut.h"
+
#include <media/audiohal/DeviceHalInterface.h>
#include <media/audiohal/StreamHalInterface.h>
#include <system/audio.h>
#include <utils/Log.h>
#include "AudioHwDevice.h"
-#include "AudioStreamOut.h"
namespace android {
@@ -132,14 +133,9 @@
config,
address,
&outStream);
- ALOGV("AudioStreamOut::open(), HAL returned "
- " stream %p, sampleRate %d, Format %#x, "
- "channelMask %#x, status %d",
- outStream.get(),
- config->sample_rate,
- config->format,
- config->channel_mask,
- status);
+ ALOGV("AudioStreamOut::open(), HAL returned stream %p, sampleRate %d, format %#x,"
+ " channelMask %#x, status %d", outStream.get(), config->sample_rate, config->format,
+ config->channel_mask, status);
// Some HALs may not recognize AUDIO_FORMAT_IEC61937. But if we declare
// it as PCM then it will probably work.
@@ -162,7 +158,7 @@
mHalFormatHasProportionalFrames = audio_has_proportional_frames(config->format);
status = stream->getFrameSize(&mHalFrameSize);
LOG_ALWAYS_FATAL_IF(status != OK, "Error retrieving frame size from HAL: %d", status);
- LOG_ALWAYS_FATAL_IF(mHalFrameSize <= 0, "Error frame size was %zu but must be greater than"
+ LOG_ALWAYS_FATAL_IF(mHalFrameSize == 0, "Error frame size was %zu but must be greater than"
" zero", mHalFrameSize);
}
diff --git a/services/audioflinger/datapath/AudioStreamOut.h b/services/audioflinger/datapath/AudioStreamOut.h
index ce00f8c..ea41bba 100644
--- a/services/audioflinger/datapath/AudioStreamOut.h
+++ b/services/audioflinger/datapath/AudioStreamOut.h
@@ -1,27 +1,28 @@
/*
-**
-** Copyright 2015, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
+ *
+ * Copyright 2015, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
-#ifndef ANDROID_AUDIO_STREAM_OUT_H
-#define ANDROID_AUDIO_STREAM_OUT_H
+#pragma once
#include <stdint.h>
#include <sys/types.h>
#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
namespace android {
@@ -34,9 +35,6 @@
*/
class AudioStreamOut {
public:
-// AudioStreamOut is immutable, so its fields are const.
-// For emphasis, we could also make all pointers to them be "const *",
-// but that would clutter the code unnecessarily.
AudioHwDevice * const audioHwDev;
sp<StreamOutHalInterface> stream;
const audio_output_flags_t flags;
@@ -101,15 +99,13 @@
virtual void presentationComplete() { mExpectRetrograde = true; }
protected:
- uint64_t mFramesWritten = 0; // reset by flush
- uint64_t mFramesWrittenAtStandby = 0;
- uint64_t mRenderPosition = 0; // reset by flush, standby, or presentation complete
- int mRateMultiplier = 1;
- bool mHalFormatHasProportionalFrames = false;
- size_t mHalFrameSize = 0;
- bool mExpectRetrograde = false; // see presentationComplete
+ uint64_t mFramesWritten = 0; // reset by flush
+ uint64_t mFramesWrittenAtStandby = 0;
+ uint64_t mRenderPosition = 0; // reset by flush, standby, or presentation complete
+ int mRateMultiplier = 1;
+ bool mHalFormatHasProportionalFrames = false;
+ size_t mHalFrameSize = 0;
+ bool mExpectRetrograde = false; // see presentationComplete
};
} // namespace android
-
-#endif // ANDROID_AUDIO_STREAM_OUT_H
diff --git a/services/audioflinger/datapath/SpdifStreamIn.cpp b/services/audioflinger/datapath/SpdifStreamIn.cpp
new file mode 100644
index 0000000..98ce712
--- /dev/null
+++ b/services/audioflinger/datapath/SpdifStreamIn.cpp
@@ -0,0 +1,128 @@
+/*
+ *
+ * Copyright 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+#include "Configuration.h"
+#include <system/audio.h>
+#include <utils/Log.h>
+
+#include <audio_utils/spdif/SPDIFDecoder.h>
+
+#include "AudioHwDevice.h"
+#include "SpdifStreamIn.h"
+
+namespace android {
+
+/**
+ * If the HAL is generating IEC61937 data and the AudioFlinger expects an elementary stream,
+ * then we need to extract the data using an SPDIF decoder.
+ */
+SpdifStreamIn::SpdifStreamIn(AudioHwDevice *dev,
+ audio_input_flags_t flags,
+ audio_format_t format)
+ : AudioStreamIn(dev, flags)
+ , mSpdifDecoder(this, format)
+{
+}
+
+status_t SpdifStreamIn::open(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ const char *address,
+ audio_source_t source,
+ audio_devices_t outputDevice,
+ const char* outputDeviceAddress)
+{
+ struct audio_config customConfig = *config;
+
+ mApplicationConfig.format = config->format;
+ mApplicationConfig.sample_rate = config->sample_rate;
+ mApplicationConfig.channel_mask = config->channel_mask;
+
+ mRateMultiplier = spdif_rate_multiplier(config->format);
+ if (mRateMultiplier <= 0) {
+ ALOGE("ERROR SpdifStreamIn::open() unrecognized format 0x%08X\n", config->format);
+ return BAD_VALUE;
+ }
+ customConfig.sample_rate = config->sample_rate * mRateMultiplier;
+ customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ customConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
+
+ // Always print this because otherwise it could be very confusing if the
+ // HAL and AudioFlinger are using different formats.
+ // Print before open() because HAL may modify customConfig.
+ ALOGI("SpdifStreamIn::open() AudioFlinger requested sampleRate %d, format %#x, channelMask %#x",
+ config->sample_rate, config->format, config->channel_mask);
+ ALOGI("SpdifStreamIn::open() HAL configured for sampleRate %d, format %#x, channelMask %#x",
+ customConfig.sample_rate, customConfig.format, customConfig.channel_mask);
+
+ const status_t status = AudioStreamIn::open(
+ handle,
+ devices,
+ &customConfig,
+ address,
+ source,
+ outputDevice,
+ outputDeviceAddress);
+
+ ALOGI("SpdifStreamIn::open() status = %d", status);
+
+#ifdef TEE_SINK
+ if (status == OK) {
+ // Don't use PCM 16-bit format to avoid WAV encoding IEC61937 data.
+ mTee.set(customConfig.sample_rate,
+ audio_channel_count_from_in_mask(customConfig.channel_mask),
+ AUDIO_FORMAT_IEC61937, NBAIO_Tee::TEE_FLAG_INPUT_THREAD);
+ mTee.setId(std::string("_") + std::to_string(handle) + "_C");
+ }
+#endif
+
+ return status;
+}
+
+int SpdifStreamIn::standby()
+{
+ mSpdifDecoder.reset();
+ return AudioStreamIn::standby();
+}
+
+status_t SpdifStreamIn::readDataBurst(void* buffer, size_t bytes, size_t* read)
+{
+ status_t status = AudioStreamIn::read(buffer, bytes, read);
+
+#ifdef TEE_SINK
+ if (*read > 0) {
+ mTee.write(reinterpret_cast<const char *>(buffer), *read / AudioStreamIn::getFrameSize());
+ }
+#endif
+ return status;
+}
+
+status_t SpdifStreamIn::read(void* buffer, size_t numBytes, size_t* read)
+{
+ // Read from SPDIF extractor. It will call back to readDataBurst().
+ const auto bytesRead = mSpdifDecoder.read(buffer, numBytes);
+ if (bytesRead >= 0) {
+ *read = bytesRead;
+ return OK;
+ }
+ return NOT_ENOUGH_DATA;
+}
+
+} // namespace android
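A sketch of the config translation done in SpdifStreamIn::open() above: the application keeps the encoded format at its nominal rate, while the HAL is opened as 16-bit stereo PCM at rate * multiplier. The multiplier normally comes from spdif_rate_multiplier() in audio_utils; the value 4 used below is the E-AC3 factor from the table removed from SpdifStreamOut in this change.

#include <cstdio>

struct Config {
    int sampleRate;
    int channelCount;
    const char* format;
};

// Application-facing config in, HAL-facing config out.
Config halConfigFor(const Config& app, int rateMultiplier) {
    return Config{app.sampleRate * rateMultiplier, 2, "PCM_16_BIT"};
}

int main() {
    const Config app{48000, 6, "E_AC3"};
    const Config hal = halConfigFor(app, 4 /* E-AC3 data bursts run at 4x */);
    std::printf("app: %d Hz %s -> hal: %d Hz, %d ch, %s\n",
                app.sampleRate, app.format, hal.sampleRate, hal.channelCount, hal.format);
    return 0;
}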
diff --git a/services/audioflinger/datapath/SpdifStreamIn.h b/services/audioflinger/datapath/SpdifStreamIn.h
new file mode 100644
index 0000000..78832ee
--- /dev/null
+++ b/services/audioflinger/datapath/SpdifStreamIn.h
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <system/audio.h>
+
+#include "AudioStreamIn.h"
+
+#include <audio_utils/spdif/SPDIFDecoder.h>
+#include <afutils/NBAIO_Tee.h>
+
+namespace android {
+
+/**
+ * Stream that is a PCM data burst in the HAL but looks like an encoded stream
+ * to the AudioFlinger. Extracts the encoded data from IEC 61937-3 (SPDIF) data bursts.
+ */
+class SpdifStreamIn : public AudioStreamIn {
+public:
+
+ SpdifStreamIn(AudioHwDevice *dev, audio_input_flags_t flags,
+ audio_format_t format);
+
+ status_t open(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ const char *address,
+ audio_source_t source,
+ audio_devices_t outputDevice,
+ const char* outputDeviceAddress) override;
+
+ /**
+ * Read an audio buffer from the driver. If at least one frame was read successfully
+ * prior to an error, it is suggested that the driver return that successful (short)
+ * byte count and then return an error on the subsequent call.
+ *
+ * If set_callback() has previously been called to enable non-blocking mode,
+ * read() is not allowed to block. It must read only the number of bytes that are
+ * currently available in the driver/hardware buffer and then return this byte
+ * count. If this is less than the requested read size, the callback function
+ * must be called when more data is available in the driver/hardware buffer.
+ */
+ status_t read(void* buffer, size_t bytes, size_t* read) override;
+
+ /**
+ * @return frame size from the perspective of the application and the AudioFlinger.
+ */
+ [[nodiscard]] size_t getFrameSize() const override { return sizeof(int8_t); }
+
+ /**
+ * @return audio_config_base_t from the perspective of the application and the AudioFlinger.
+ */
+ [[nodiscard]] audio_config_base_t getAudioProperties() const override {
+ return mApplicationConfig;
+ }
+
+ /**
+ * @return format from the perspective of the application and the AudioFlinger.
+ */
+ [[nodiscard]] virtual audio_format_t getFormat() const { return mApplicationConfig.format; }
+
+ /**
+ * The HAL may be running at a higher sample rate if, for example, reading wrapped EAC3.
+ * @return sample rate from the perspective of the application and the AudioFlinger.
+ */
+ [[nodiscard]] virtual uint32_t getSampleRate() const { return mApplicationConfig.sample_rate; }
+
+ /**
+ * The HAL is in stereo mode when reading multi-channel compressed audio.
+ * @return channel mask from the perspective of the application and the AudioFlinger.
+ */
+ [[nodiscard]] virtual audio_channel_mask_t getChannelMask() const {
+ return mApplicationConfig.channel_mask;
+ }
+
+ status_t standby() override;
+
+private:
+
+ class MySPDIFDecoder : public SPDIFDecoder
+ {
+ public:
+ MySPDIFDecoder(SpdifStreamIn *spdifStreamIn, audio_format_t format)
+ : SPDIFDecoder(format)
+ , mSpdifStreamIn(spdifStreamIn)
+ {
+ }
+
+ ssize_t readInput(void* buffer, size_t bytes) override
+ {
+ size_t bytesRead = 0;
+ const auto result = mSpdifStreamIn->readDataBurst(buffer, bytes, &bytesRead);
+ if (result < 0) {
+ return result;
+ }
+ return bytesRead;
+ }
+
+ protected:
+ SpdifStreamIn * const mSpdifStreamIn;
+ };
+
+ MySPDIFDecoder mSpdifDecoder;
+ audio_config_base_t mApplicationConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+
+ status_t readDataBurst(void* data, size_t bytes, size_t* read);
+
+#ifdef TEE_SINK
+ NBAIO_Tee mTee;
+#endif
+
+};
+
+} // namespace android
diff --git a/services/audioflinger/datapath/SpdifStreamOut.cpp b/services/audioflinger/datapath/SpdifStreamOut.cpp
index 0c6a5a1..65a4eec 100644
--- a/services/audioflinger/datapath/SpdifStreamOut.cpp
+++ b/services/audioflinger/datapath/SpdifStreamOut.cpp
@@ -1,19 +1,19 @@
/*
-**
-** Copyright 2015, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
+ *
+ * Copyright 2015, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
@@ -42,10 +42,10 @@
}
status_t SpdifStreamOut::open(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- const char *address)
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ const char *address)
{
struct audio_config customConfig = *config;
@@ -53,22 +53,10 @@
mApplicationConfig.sample_rate = config->sample_rate;
mApplicationConfig.channel_mask = config->channel_mask;
- // Some data bursts run at a higher sample rate.
- // TODO Move this into the audio_utils as a static method.
- switch(config->format) {
- case AUDIO_FORMAT_E_AC3:
- case AUDIO_FORMAT_E_AC3_JOC:
- mRateMultiplier = 4;
- break;
- case AUDIO_FORMAT_AC3:
- case AUDIO_FORMAT_DTS:
- case AUDIO_FORMAT_DTS_HD:
- mRateMultiplier = 1;
- break;
- default:
- ALOGE("ERROR SpdifStreamOut::open() unrecognized format 0x%08X\n",
- config->format);
- return BAD_VALUE;
+ mRateMultiplier = spdif_rate_multiplier(config->format);
+ if (mRateMultiplier <= 0) {
+ ALOGE("ERROR SpdifStreamOut::open() unrecognized format 0x%08X\n", config->format);
+ return BAD_VALUE;
}
customConfig.sample_rate = config->sample_rate * mRateMultiplier;
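The switch deleted above now lives behind the shared spdif_rate_multiplier() helper. Judging only from the removed branches and the new error check, its mapping is equivalent to the following hypothetical re-statement, shown to keep the format-to-rate relationship visible in this review, not as the helper's actual source:

#include <system/audio.h>  // audio_format_t and the AUDIO_FORMAT_* constants

// Hypothetical re-statement of the mapping the deleted switch encoded:
// E-AC3 data bursts are carried at 4x the audio sample rate, AC3/DTS/DTS-HD
// at 1x, and anything else is rejected by the caller (multiplier <= 0).
static int spdifRateMultiplierSketch(audio_format_t format) {
    switch (format) {
    case AUDIO_FORMAT_E_AC3:
    case AUDIO_FORMAT_E_AC3_JOC:
        return 4;
    case AUDIO_FORMAT_AC3:
    case AUDIO_FORMAT_DTS:
    case AUDIO_FORMAT_DTS_HD:
        return 1;
    default:
        return 0;  // SpdifStreamOut::open() treats <= 0 as BAD_VALUE
    }
}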
@@ -78,16 +66,10 @@
// Always print this because otherwise it could be very confusing if the
// HAL and AudioFlinger are using different formats.
// Print before open() because HAL may modify customConfig.
- ALOGI("SpdifStreamOut::open() AudioFlinger requested"
- " sampleRate %d, format %#x, channelMask %#x",
- config->sample_rate,
- config->format,
- config->channel_mask);
- ALOGI("SpdifStreamOut::open() HAL configured for"
- " sampleRate %d, format %#x, channelMask %#x",
- customConfig.sample_rate,
- customConfig.format,
- customConfig.channel_mask);
+ ALOGI("SpdifStreamOut::open() AudioFlinger requested sampleRate %d, format %#x,"
+ " channelMask %#x", config->sample_rate, config->format, config->channel_mask);
+ ALOGI("SpdifStreamOut::open() HAL configured for sampleRate %d, format %#x, channelMask %#x",
+ customConfig.sample_rate, customConfig.format, customConfig.channel_mask);
const status_t status = AudioStreamOut::open(
handle,
diff --git a/services/audioflinger/datapath/SpdifStreamOut.h b/services/audioflinger/datapath/SpdifStreamOut.h
index 56d57f6..c6d27ba 100644
--- a/services/audioflinger/datapath/SpdifStreamOut.h
+++ b/services/audioflinger/datapath/SpdifStreamOut.h
@@ -1,22 +1,21 @@
/*
-**
-** Copyright 2015, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
+ *
+ * Copyright 2015, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
-#ifndef ANDROID_SPDIF_STREAM_OUT_H
-#define ANDROID_SPDIF_STREAM_OUT_H
+#pragma once
#include <stdint.h>
#include <sys/types.h>
@@ -40,8 +39,6 @@
SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags,
audio_format_t format);
- ~SpdifStreamOut() override = default;
-
status_t open(
audio_io_handle_t handle,
audio_devices_t devices,
@@ -116,10 +113,10 @@
SpdifStreamOut * const mSpdifStreamOut;
};
- MySPDIFEncoder mSpdifEncoder;
- audio_config_base_t mApplicationConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ MySPDIFEncoder mSpdifEncoder;
+ audio_config_base_t mApplicationConfig = AUDIO_CONFIG_BASE_INITIALIZER;
- ssize_t writeDataBurst(const void* data, size_t bytes);
+ ssize_t writeDataBurst(const void* data, size_t bytes);
#ifdef TEE_SINK
NBAIO_Tee mTee;
@@ -128,5 +125,3 @@
};
} // namespace android
-
-#endif // ANDROID_SPDIF_STREAM_OUT_H
diff --git a/services/audioflinger/datapath/ThreadMetrics.h b/services/audioflinger/datapath/ThreadMetrics.h
index c643a57..4eb8aa0 100644
--- a/services/audioflinger/datapath/ThreadMetrics.h
+++ b/services/audioflinger/datapath/ThreadMetrics.h
@@ -14,8 +14,7 @@
* limitations under the License.
*/
-#ifndef ANDROID_AUDIO_THREADMETRICS_H
-#define ANDROID_AUDIO_THREADMETRICS_H
+#pragma once
#include <media/MediaMetricsItem.h>
@@ -210,5 +209,3 @@
};
} // namespace android
-
-#endif // ANDROID_AUDIO_THREADMETRICS_H
diff --git a/services/audioflinger/datapath/TrackMetrics.h b/services/audioflinger/datapath/TrackMetrics.h
index 2b44acb..ad5d3db 100644
--- a/services/audioflinger/datapath/TrackMetrics.h
+++ b/services/audioflinger/datapath/TrackMetrics.h
@@ -14,8 +14,7 @@
* limitations under the License.
*/
-#ifndef ANDROID_AUDIO_TRACKMETRICS_H
-#define ANDROID_AUDIO_TRACKMETRICS_H
+#pragma once
#include <binder/IActivityManager.h>
#include <binder/IPCThreadState.h>
@@ -306,5 +305,3 @@
};
} // namespace android
-
-#endif // ANDROID_AUDIO_TRACKMETRICS_H
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index ab3d323..58e5311 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -4401,8 +4401,8 @@
if (!mAvailableOutputDevices.containsAtLeastOne(curProfile->getSupportedDevices())) {
continue;
}
- if ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
- != AUDIO_OUTPUT_FLAG_NONE) {
+ if (offloadPossible && ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
+ != AUDIO_OUTPUT_FLAG_NONE)) {
if ((directMode & AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED)
!= AUDIO_DIRECT_NOT_SUPPORTED) {
// Already reports offload gapless supported. No need to report offload support.
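For readers of the hunk above: directMode accumulates direct-playback capability bits, and the new offloadPossible guard stops compress-offload profiles from being counted when offload cannot actually be used for the request. A simplified, hypothetical distillation of that condition follows; the parameter names and the audio_direct_mode_t/audio_output_flags_t types are assumptions about the surrounding code, which is not shown in full here:

#include <system/audio.h>

// Hypothetical distillation of the gating added above; when this returns true
// the surrounding code goes on to report plain offload support.
static bool shouldReportOffloadSupport(audio_output_flags_t profileFlags,
                                       bool offloadPossible,
                                       audio_direct_mode_t directMode) {
    const bool isOffloadProfile =
            (profileFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != AUDIO_OUTPUT_FLAG_NONE;
    // Gapless offload already being reported implies offload support, so in that
    // case there is nothing further to add.
    const bool gaplessAlreadyReported =
            (directMode & AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED) != AUDIO_DIRECT_NOT_SUPPORTED;
    return offloadPossible && isOffloadProfile && !gaplessAlreadyReported;
}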
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 7584632..6de71a3 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -164,6 +164,8 @@
status_t status = af->openInput(request, &response);
if (status == OK) {
*input = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_module_handle_t(response.input));
+ *config = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioConfig_audio_config_t(response.config, true /*isInput*/));
}
return status;
}
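The two added lines write the HAL-chosen input configuration back into the caller's config, since openInput() may adjust sample rate, format, or channel mask, and they bail out with the conversion error if the AIDL-to-legacy translation fails. The control-flow pattern, reduced to a self-contained sketch with hypothetical stand-in types (the real code uses the aidl2legacy_* helpers and the VALUE_OR_RETURN_STATUS macro shown above):

#include <optional>

// Hypothetical stand-ins so the pattern reads on its own; the real code uses the
// aidl2legacy_* helpers and VALUE_OR_RETURN_STATUS shown in the hunk above.
using status_t = int;
constexpr status_t OK = 0;
constexpr status_t BAD_VALUE = -22;

struct AidlConfig   { int sampleRateHz = 0; };       // stand-in for response.config
struct LegacyConfig { unsigned sampleRate = 0; };    // stand-in for audio_config_t

// In the spirit of aidl2legacy_AudioConfig_audio_config_t(): produce either a
// converted value or "no value" when the input cannot be represented.
static std::optional<LegacyConfig> aidl2legacySketch(const AidlConfig& in) {
    if (in.sampleRateHz <= 0) return std::nullopt;
    return LegacyConfig{static_cast<unsigned>(in.sampleRateHz)};
}

// Mirrors the added lines: on success the HAL-updated config is copied back to
// the caller's struct; on conversion failure an error is propagated instead.
static status_t writeBackConfig(const AidlConfig& response, LegacyConfig* config) {
    const auto converted = aidl2legacySketch(response);
    if (!converted.has_value()) return BAD_VALUE;
    *config = *converted;
    return OK;
}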