audio: Add microphone settings to IModule, IStreamIn
Add 'MicrophoneInfo' and 'MicrophoneDynamicInfo' parcelables.
Add IModule.getMicrophones method.
Add the following methods to IStreamIn:
- getActiveMicrophones;
- get/setMicrophoneDirection;
- get/setMicrophoneFieldDimension.
Provide trivial default implementations and VTS tests.
Also slightly refactor port retrieval from ModuleConfig
to unify common queries.
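
For illustration, a minimal client-side sketch of how the new active
microphone query might be exercised. The helper name, the already-opened
'stream' proxy, and the package of MicrophoneDynamicInfo are assumptions
made for this sketch, not part of the change:

    // Hypothetical client helper, illustrative only (NDK AIDL backend assumed).
    #include <memory>
    #include <vector>

    #include <aidl/android/hardware/audio/core/IStreamIn.h>
    #include <android-base/logging.h>

    using aidl::android::hardware::audio::core::IStreamIn;
    // The package of the new parcelable is assumed; it is not shown in this diff.
    using aidl::android::media::audio::common::MicrophoneDynamicInfo;

    void logActiveMicrophones(const std::shared_ptr<IStreamIn>& stream) {
        std::vector<MicrophoneDynamicInfo> activeMics;
        if (ndk::ScopedAStatus status = stream->getActiveMicrophones(&activeMics);
            status.isOk()) {
            for (const auto& mic : activeMics) {
                LOG(INFO) << "active microphone id: " << mic.id;
            }
        }
    }

Note that the trivial implementations return EX_UNSUPPORTED_OPERATION from
the microphone direction and field dimension accessors, so clients have to
treat those capabilities as optional.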
Bug: 205884982
Test: atest VtsHalAudioCoreTargetTest
Change-Id: I472c7733e2a331a67cea613cd9218889eff06a43
diff --git a/audio/aidl/default/Stream.cpp b/audio/aidl/default/Stream.cpp
index d7c352f..be5887c 100644
--- a/audio/aidl/default/Stream.cpp
+++ b/audio/aidl/default/Stream.cpp
@@ -18,12 +18,17 @@
#include <android-base/logging.h>
#include <utils/SystemClock.h>
+#include <Utils.h>
+
#include "core-impl/Module.h"
#include "core-impl/Stream.h"
using aidl::android::hardware::audio::common::SinkMetadata;
using aidl::android::hardware::audio::common::SourceMetadata;
+using aidl::android::media::audio::common::AudioDevice;
using aidl::android::media::audio::common::AudioOffloadInfo;
+using android::hardware::audio::common::getChannelCount;
+using android::hardware::audio::common::getFrameSizeInBytes;
namespace aidl::android::hardware::audio::core {
@@ -35,13 +40,17 @@
desc->reply = mReplyMQ->dupeDesc();
}
if (mDataMQ) {
- desc->frameSizeBytes = mFrameSize;
- desc->bufferSizeFrames =
- mDataMQ->getQuantumCount() * mDataMQ->getQuantumSize() / mFrameSize;
+ const size_t frameSize = getFrameSize();
+ desc->frameSizeBytes = frameSize;
+ desc->bufferSizeFrames = mDataMQ->getQuantumCount() * mDataMQ->getQuantumSize() / frameSize;
desc->audio.set<StreamDescriptor::AudioBuffer::Tag::fmq>(mDataMQ->dupeDesc());
}
}
+size_t StreamContext::getFrameSize() const {
+ return getFrameSizeInBytes(mFormat, mChannelLayout);
+}
+
bool StreamContext::isValid() const {
if (mCommandMQ && !mCommandMQ->isValid()) {
LOG(ERROR) << "command FMQ is invalid";
@@ -51,8 +60,8 @@
LOG(ERROR) << "reply FMQ is invalid";
return false;
}
- if (mFrameSize == 0) {
- LOG(ERROR) << "frame size is not set";
+ if (getFrameSize() == 0) {
+ LOG(ERROR) << "frame size is invalid";
return false;
}
if (mDataMQ && !mDataMQ->isValid()) {
@@ -530,11 +539,64 @@
return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_STATE);
}
-StreamIn::StreamIn(const SinkMetadata& sinkMetadata, StreamContext context)
- : StreamCommon<SinkMetadata, StreamInWorker>(sinkMetadata, std::move(context)) {
+namespace {
+static std::map<AudioDevice, std::string> transformMicrophones(
+ const std::vector<MicrophoneInfo>& microphones) {
+ std::map<AudioDevice, std::string> result;
+ std::transform(microphones.begin(), microphones.end(), std::inserter(result, result.begin()),
+ [](const auto& mic) { return std::make_pair(mic.device, mic.id); });
+ return result;
+}
+} // namespace
+
+StreamIn::StreamIn(const SinkMetadata& sinkMetadata, StreamContext context,
+ const std::vector<MicrophoneInfo>& microphones)
+ : StreamCommon<SinkMetadata, StreamInWorker>(sinkMetadata, std::move(context)),
+ mMicrophones(transformMicrophones(microphones)) {
LOG(DEBUG) << __func__;
}
+ndk::ScopedAStatus StreamIn::getActiveMicrophones(
+ std::vector<MicrophoneDynamicInfo>* _aidl_return) {
+ std::vector<MicrophoneDynamicInfo> result;
+ std::vector<MicrophoneDynamicInfo::ChannelMapping> channelMapping{
+ getChannelCount(mContext.getChannelLayout()),
+ MicrophoneDynamicInfo::ChannelMapping::DIRECT};
+ for (auto it = mConnectedDevices.begin(); it != mConnectedDevices.end(); ++it) {
+ if (auto micIt = mMicrophones.find(*it); micIt != mMicrophones.end()) {
+ MicrophoneDynamicInfo dynMic;
+ dynMic.id = micIt->second;
+ dynMic.channelMapping = channelMapping;
+ result.push_back(std::move(dynMic));
+ }
+ }
+ *_aidl_return = std::move(result);
+ LOG(DEBUG) << __func__ << ": returning " << ::android::internal::ToString(*_aidl_return);
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus StreamIn::getMicrophoneDirection(MicrophoneDirection* _aidl_return) {
+ LOG(DEBUG) << __func__;
+ (void)_aidl_return;
+ return ndk::ScopedAStatus::fromExceptionCode(EX_UNSUPPORTED_OPERATION);
+}
+
+ndk::ScopedAStatus StreamIn::setMicrophoneDirection(MicrophoneDirection in_direction) {
+ LOG(DEBUG) << __func__ << ": direction " << toString(in_direction);
+ return ndk::ScopedAStatus::fromExceptionCode(EX_UNSUPPORTED_OPERATION);
+}
+
+ndk::ScopedAStatus StreamIn::getMicrophoneFieldDimension(float* _aidl_return) {
+ LOG(DEBUG) << __func__;
+ (void)_aidl_return;
+ return ndk::ScopedAStatus::fromExceptionCode(EX_UNSUPPORTED_OPERATION);
+}
+
+ndk::ScopedAStatus StreamIn::setMicrophoneFieldDimension(float in_zoom) {
+ LOG(DEBUG) << __func__ << ": zoom " << in_zoom;
+ return ndk::ScopedAStatus::fromExceptionCode(EX_UNSUPPORTED_OPERATION);
+}
+
StreamOut::StreamOut(const SourceMetadata& sourceMetadata, StreamContext context,
const std::optional<AudioOffloadInfo>& offloadInfo)
: StreamCommon<SourceMetadata, StreamOutWorker>(sourceMetadata, std::move(context)),