Merge "camera: Add crop and metering region correction for max resolution requests." into sc-dev am: 2a7630de12 am: a4cead956d
Original change: https://googleplex-android-review.googlesource.com/c/platform/frameworks/av/+/15301357
Change-Id: I397a9aab9a3b12fb4d72b855bc7956358aba2755
diff --git a/apex/OWNERS b/apex/OWNERS
index 5587f5f..54802d4 100644
--- a/apex/OWNERS
+++ b/apex/OWNERS
@@ -1,6 +1,7 @@
-chz@google.com
-dwkang@google.com
+essick@google.com
jiyong@google.com
lajos@google.com
-marcone@google.com
-wjia@google.com
+nchalko@google.com
+
+include platform/packages/modules/common:/MODULES_OWNERS
+
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index e6e3473..cd8fe97 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -1067,7 +1067,7 @@
std::optional<PhysicalDisplayId> displayId = SurfaceComposerClient::getInternalDisplayId();
if (!displayId) {
- fprintf(stderr, "Failed to get token for internal display\n");
+ fprintf(stderr, "Failed to get ID for internal display\n");
return 1;
}
@@ -1168,17 +1168,14 @@
}
break;
case 'd':
- gPhysicalDisplayId = PhysicalDisplayId(atoll(optarg));
- if (gPhysicalDisplayId.value == 0) {
- fprintf(stderr, "Please specify a valid physical display id\n");
- return 2;
- } else if (SurfaceComposerClient::
- getPhysicalDisplayToken(gPhysicalDisplayId) == nullptr) {
- fprintf(stderr, "Invalid physical display id: %s\n",
- to_string(gPhysicalDisplayId).c_str());
- return 2;
+ if (const auto id = android::DisplayId::fromValue<PhysicalDisplayId>(atoll(optarg));
+ id && SurfaceComposerClient::getPhysicalDisplayToken(*id)) {
+ gPhysicalDisplayId = *id;
+ break;
}
- break;
+
+ fprintf(stderr, "Invalid physical display ID\n");
+ return 2;
default:
if (ic != '?') {
fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
index 7bd1568..87becb6 100644
--- a/drm/mediadrm/plugins/TEST_MAPPING
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -11,6 +11,9 @@
},
{
"include-filter": "android.media.cts.MediaDrmMetricsTest"
+ },
+ {
+ "include-filter": "android.media.cts.NativeMediaDrmClearkeyTest"
}
]
}
diff --git a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
index 6ac3510..089eb1c 100644
--- a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
@@ -207,6 +207,7 @@
}
infoMap.clear();
+ android::Mutex::Autolock lock(mPlayPolicyLock);
for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
infoMap.add(mPlayPolicy.keyAt(i), mPlayPolicy.valueAt(i));
}
diff --git a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
index aa9b59d..95f15ca 100644
--- a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
@@ -262,7 +262,7 @@
void initProperties();
void setPlayPolicy();
- android::Mutex mPlayPolicyLock;
+ mutable android::Mutex mPlayPolicyLock;
android::KeyedVector<String8, String8> mPlayPolicy;
android::KeyedVector<String8, String8> mStringProperties;
android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
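An aside on the mutable change above: the lock becomes mutable so that const accessors (such as the policy query that now takes the lock) can still acquire it. A minimal standalone sketch, using std::mutex and made-up names rather than the plugin's actual types:

// Illustrative only (std::mutex and made-up names, not the plugin's types):
// a mutable lock lets a const accessor serialize reads of shared state.
#include <map>
#include <mutex>
#include <string>

class PolicyStore {
public:
    void set(const std::string& key, const std::string& value) {
        std::lock_guard<std::mutex> lock(mLock);
        mPolicy[key] = value;
    }

    // Without "mutable" on mLock this method would not compile,
    // because a const method cannot lock a non-mutable member.
    std::map<std::string, std::string> snapshot() const {
        std::lock_guard<std::mutex> lock(mLock);
        return mPolicy;  // copy taken while holding the lock
    }

private:
    mutable std::mutex mLock;
    std::map<std::string, std::string> mPolicy;
};

int main() {
    PolicyStore store;
    store.set("PlayAllowed", "True");
    return store.snapshot().count("PlayAllowed") == 1 ? 0 : 1;
}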
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index bc7c3f2..0cd9375 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -221,7 +221,6 @@
if (requestString.find(kOfflineLicense) != std::string::npos) {
std::string emptyResponse;
std::string keySetIdString(keySetId.begin(), keySetId.end());
- Mutex::Autolock lock(mFileHandleLock);
if (!mFileHandle.StoreLicense(keySetIdString,
DeviceFiles::kLicenseStateReleasing,
emptyResponse)) {
@@ -337,7 +336,6 @@
}
*keySetId = kKeySetIdPrefix + ByteArrayToHexString(
reinterpret_cast<const uint8_t*>(randomData.data()), randomData.size());
- Mutex::Autolock lock(mFileHandleLock);
if (mFileHandle.LicenseExists(*keySetId)) {
// collision, regenerate
ALOGV("Retry generating KeySetId");
@@ -395,7 +393,6 @@
if (status == Status::OK) {
if (isOfflineLicense) {
if (isRelease) {
- Mutex::Autolock lock(mFileHandleLock);
mFileHandle.DeleteLicense(keySetId);
mSessionLibrary->destroySession(session);
} else {
@@ -404,7 +401,6 @@
return Void();
}
- Mutex::Autolock lock(mFileHandleLock);
bool ok = mFileHandle.StoreLicense(
keySetId,
DeviceFiles::kLicenseStateActive,
@@ -459,7 +455,6 @@
DeviceFiles::LicenseState licenseState;
std::string offlineLicense;
Status status = Status::OK;
- Mutex::Autolock lock(mFileHandleLock);
if (!mFileHandle.RetrieveLicense(std::string(keySetId.begin(), keySetId.end()),
&licenseState, &offlineLicense)) {
ALOGE("Failed to restore offline license");
@@ -582,7 +577,6 @@
Return<void> DrmPlugin::queryKeyStatus(
const hidl_vec<uint8_t>& sessionId,
queryKeyStatus_cb _hidl_cb) {
-
if (sessionId.size() == 0) {
// Returns empty key status KeyValue pair
_hidl_cb(Status::BAD_VALUE, hidl_vec<KeyValue>());
@@ -592,12 +586,14 @@
std::vector<KeyValue> infoMapVec;
infoMapVec.clear();
+ mPlayPolicyLock.lock();
KeyValue keyValuePair;
for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
keyValuePair.key = mPlayPolicy[i].key;
keyValuePair.value = mPlayPolicy[i].value;
infoMapVec.push_back(keyValuePair);
}
+ mPlayPolicyLock.unlock();
_hidl_cb(Status::OK, toHidlVec(infoMapVec));
return Void();
}
@@ -768,8 +764,6 @@
}
Return<void> DrmPlugin::getOfflineLicenseKeySetIds(getOfflineLicenseKeySetIds_cb _hidl_cb) {
- Mutex::Autolock lock(mFileHandleLock);
-
std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
std::vector<KeySetId> keySetIds;
if (mMockError != Status_V1_2::OK) {
@@ -790,7 +784,6 @@
return toStatus_1_0(mMockError);
}
std::string licenseName(keySetId.begin(), keySetId.end());
- Mutex::Autolock lock(mFileHandleLock);
if (mFileHandle.DeleteLicense(licenseName)) {
return Status::OK;
}
@@ -799,8 +792,6 @@
Return<void> DrmPlugin::getOfflineLicenseState(const KeySetId& keySetId,
getOfflineLicenseState_cb _hidl_cb) {
- Mutex::Autolock lock(mFileHandleLock);
-
std::string licenseName(keySetId.begin(), keySetId.end());
DeviceFiles::LicenseState state;
std::string license;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
index e61db3f..56910be 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
@@ -24,13 +24,11 @@
}
bool MemoryFileSystem::FileExists(const std::string& fileName) const {
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
return result != mMemoryFileSystem.end();
}
ssize_t MemoryFileSystem::GetFileSize(const std::string& fileName) const {
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
if (result != mMemoryFileSystem.end()) {
return static_cast<ssize_t>(result->second.getFileSize());
@@ -42,7 +40,6 @@
std::vector<std::string> MemoryFileSystem::ListFiles() const {
std::vector<std::string> list;
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
for (const auto& filename : mMemoryFileSystem) {
list.push_back(filename.first);
}
@@ -51,7 +48,6 @@
size_t MemoryFileSystem::Read(const std::string& path, std::string* buffer) {
std::string key = GetFileName(path);
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(key);
if (result != mMemoryFileSystem.end()) {
std::string serializedHashFile = result->second.getContent();
@@ -65,7 +61,6 @@
size_t MemoryFileSystem::Write(const std::string& path, const MemoryFile& memoryFile) {
std::string key = GetFileName(path);
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(key);
if (result != mMemoryFileSystem.end()) {
mMemoryFileSystem.erase(key);
@@ -75,7 +70,6 @@
}
bool MemoryFileSystem::RemoveFile(const std::string& fileName) {
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
auto result = mMemoryFileSystem.find(fileName);
if (result != mMemoryFileSystem.end()) {
mMemoryFileSystem.erase(result);
@@ -87,7 +81,6 @@
}
bool MemoryFileSystem::RemoveAllFiles() {
- std::lock_guard<std::mutex> lock(mMemoryFileSystemLock);
mMemoryFileSystem.clear();
return mMemoryFileSystem.empty();
}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index 5d6e3da..cb5c9fe 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -432,8 +432,7 @@
mMockError = Status_V1_2::OK;
}
- DeviceFiles mFileHandle GUARDED_BY(mFileHandleLock);
- Mutex mFileHandleLock;
+ DeviceFiles mFileHandle;
Mutex mSecureStopLock;
CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
index a90d818..1d98860 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
@@ -5,9 +5,7 @@
#ifndef CLEARKEY_MEMORY_FILE_SYSTEM_H_
#define CLEARKEY_MEMORY_FILE_SYSTEM_H_
-#include <android-base/thread_annotations.h>
#include <map>
-#include <mutex>
#include <string>
#include "ClearKeyTypes.h"
@@ -51,12 +49,10 @@
size_t Write(const std::string& pathName, const MemoryFile& memoryFile);
private:
- mutable std::mutex mMemoryFileSystemLock;
-
// License file name is made up of a unique keySetId, therefore,
// the filename can be used as the key to locate licenses in the
// memory file system.
- std::map<std::string, MemoryFile> mMemoryFileSystem GUARDED_BY(mMemoryFileSystemLock);
+ std::map<std::string, MemoryFile> mMemoryFileSystem;
std::string GetFileName(const std::string& path);
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index 4bc1777..b7a5686 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -245,6 +245,19 @@
})
.withSetter(CodedColorAspectsSetter, mColorAspects)
.build());
+
+ addParameter(
+ DefineParam(mPictureQuantization, C2_PARAMKEY_PICTURE_QUANTIZATION)
+ .withDefault(C2StreamPictureQuantizationTuning::output::AllocShared(
+ 0 /* flexCount */, 0u /* stream */))
+ .withFields({C2F(mPictureQuantization, m.values[0].type_).oneOf(
+ {C2Config::picture_type_t(I_FRAME),
+ C2Config::picture_type_t(P_FRAME),
+ C2Config::picture_type_t(B_FRAME)}),
+ C2F(mPictureQuantization, m.values[0].min).any(),
+ C2F(mPictureQuantization, m.values[0].max).any()})
+ .withSetter(PictureQuantizationSetter)
+ .build());
}
static C2R InputDelaySetter(
@@ -464,9 +477,69 @@
me.set().matrix = coded.v.matrix;
return C2R::Ok();
}
+ static C2R PictureQuantizationSetter(bool mayBlock,
+ C2P<C2StreamPictureQuantizationTuning::output> &me) {
+ (void)mayBlock;
+
+ // these are the ones we're going to set, so we want them to default
+ // to the DEFAULT values for the codec
+ int32_t iMin = HEVC_QP_MIN, pMin = HEVC_QP_MIN, bMin = HEVC_QP_MIN;
+ int32_t iMax = HEVC_QP_MAX, pMax = HEVC_QP_MAX, bMax = HEVC_QP_MAX;
+
+ for (size_t i = 0; i < me.v.flexCount(); ++i) {
+ const C2PictureQuantizationStruct &layer = me.v.m.values[i];
+
+ // layerMin is clamped to [HEVC_QP_MIN, layerMax] to avoid error
+ // cases where layer.min > layer.max
+ int32_t layerMax = std::clamp(layer.max, HEVC_QP_MIN, HEVC_QP_MAX);
+ int32_t layerMin = std::clamp(layer.min, HEVC_QP_MIN, layerMax);
+ if (layer.type_ == C2Config::picture_type_t(I_FRAME)) {
+ iMax = layerMax;
+ iMin = layerMin;
+ ALOGV("iMin %d iMax %d", iMin, iMax);
+ } else if (layer.type_ == C2Config::picture_type_t(P_FRAME)) {
+ pMax = layerMax;
+ pMin = layerMin;
+ ALOGV("pMin %d pMax %d", pMin, pMax);
+ } else if (layer.type_ == C2Config::picture_type_t(B_FRAME)) {
+ bMax = layerMax;
+ bMin = layerMin;
+ ALOGV("bMin %d bMax %d", bMin, bMax);
+ }
+ }
+
+ ALOGV("PictureQuantizationSetter(entry): i %d-%d p %d-%d b %d-%d",
+ iMin, iMax, pMin, pMax, bMin, bMax);
+
+ int32_t maxFrameQP = std::min(std::min(iMax, pMax), bMax);
+ int32_t minFrameQP = std::max(std::max(iMin, pMin), bMin);
+ if (minFrameQP > maxFrameQP) {
+ minFrameQP = maxFrameQP;
+ }
+
+ // put them back into the structure
+ for (size_t i = 0; i < me.v.flexCount(); ++i) {
+ const C2PictureQuantizationStruct &layer = me.v.m.values[i];
+
+ if (layer.type_ == C2Config::picture_type_t(I_FRAME) ||
+ layer.type_ == C2Config::picture_type_t(P_FRAME) ||
+ layer.type_ == C2Config::picture_type_t(B_FRAME)) {
+ me.set().m.values[i].max = maxFrameQP;
+ me.set().m.values[i].min = minFrameQP;
+ }
+ }
+
+ ALOGV("PictureQuantizationSetter(exit): i = p = b = %d-%d",
+ minFrameQP, maxFrameQP);
+
+ return C2R::Ok();
+ }
std::shared_ptr<C2StreamColorAspectsInfo::output> getCodedColorAspects_l() {
return mCodedColorAspects;
}
+ std::shared_ptr<C2StreamPictureQuantizationTuning::output> getPictureQuantization_l() const {
+ return mPictureQuantization;
+ }
private:
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
@@ -482,6 +555,7 @@
std::shared_ptr<C2StreamGopTuning::output> mGop;
std::shared_ptr<C2StreamColorAspectsInfo::input> mColorAspects;
std::shared_ptr<C2StreamColorAspectsInfo::output> mCodedColorAspects;
+ std::shared_ptr<C2StreamPictureQuantizationTuning::output> mPictureQuantization;
};
static size_t GetCPUCoreCount() {
@@ -654,12 +728,41 @@
mEncParams.s_coding_tools_prms.i4_max_temporal_layers = 3;
}
- switch (mBitrateMode->value) {
- case C2Config::BITRATE_IGNORE:
- mEncParams.s_config_prms.i4_rate_control_mode = 3;
- mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
- getQpFromQuality(mQuality->value);
+ // we resolved out-of-bounds and unspecified values in PictureQuantizationSetter()
+ // so we can start with defaults that are overridden as needed.
+ int32_t maxFrameQP = mEncParams.s_config_prms.i4_max_frame_qp;
+ int32_t minFrameQP = mEncParams.s_config_prms.i4_min_frame_qp;
+
+ for (size_t i = 0; i < mQpBounds->flexCount(); ++i) {
+ const C2PictureQuantizationStruct &layer = mQpBounds->m.values[i];
+
+ // no need to loop, the hevc library takes the same range for I/P/B picture types
+ if (layer.type_ == C2Config::picture_type_t(I_FRAME) ||
+ layer.type_ == C2Config::picture_type_t(P_FRAME) ||
+ layer.type_ == C2Config::picture_type_t(B_FRAME)) {
+
+ maxFrameQP = layer.max;
+ minFrameQP = layer.min;
break;
+ }
+ }
+ mEncParams.s_config_prms.i4_max_frame_qp = maxFrameQP;
+ mEncParams.s_config_prms.i4_min_frame_qp = minFrameQP;
+
+ ALOGV("MaxFrameQp: %d MinFrameQp: %d", maxFrameQP, minFrameQP);
+
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
+ std::clamp(kDefaultInitQP, minFrameQP, maxFrameQP);
+
+ switch (mBitrateMode->value) {
+ case C2Config::BITRATE_IGNORE: {
+ mEncParams.s_config_prms.i4_rate_control_mode = 3;
+ // ensure initial qp values are within our newly configured bounds
+ int32_t frameQp = getQpFromQuality(mQuality->value);
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
+ std::clamp(frameQp, minFrameQP, maxFrameQP);
+ break;
+ }
case C2Config::BITRATE_CONST:
mEncParams.s_config_prms.i4_rate_control_mode = 5;
break;
@@ -723,6 +826,7 @@
mGop = mIntf->getGop_l();
mRequestSync = mIntf->getRequestSync_l();
mColorAspects = mIntf->getCodedColorAspects_l();
+ mQpBounds = mIntf->getPictureQuantization_l();
}
c2_status_t status = initEncParams();
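For reference, a rough standalone sketch of the clamping that PictureQuantizationSetter() performs, outside the Codec2 parameter machinery (constants mirror HEVC_QP_MIN/HEVC_QP_MAX; helper names are illustrative):

// Illustrative only: mirrors the clamping in PictureQuantizationSetter()
// outside the Codec2 parameter machinery.
#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr int32_t kQpMin = 1;   // HEVC_QP_MIN
constexpr int32_t kQpMax = 51;  // HEVC_QP_MAX

struct QpRange { int32_t min; int32_t max; };

// Clamp one layer's requested range into the legal QP range,
// keeping min <= max even if the caller passed min > max.
QpRange clampLayer(int32_t reqMin, int32_t reqMax) {
    const int32_t max = std::clamp(reqMax, kQpMin, kQpMax);
    const int32_t min = std::clamp(reqMin, kQpMin, max);
    return {min, max};
}

int main() {
    // Per-type requests collapse to one range because the encoder
    // library takes a single min/max for all picture types.
    const QpRange i = clampLayer(-4, 30);   // -> {1, 30}
    const QpRange p = clampLayer(20, 60);   // -> {20, 51}
    const QpRange b = clampLayer(25, 40);   // -> {25, 40}
    int32_t maxFrameQP = std::min({i.max, p.max, b.max});  // 30
    int32_t minFrameQP = std::max({i.min, p.min, b.min});  // 25
    if (minFrameQP > maxFrameQP) minFrameQP = maxFrameQP;
    std::printf("frame QP range: %d-%d\n", minFrameQP, maxFrameQP);
    return 0;
}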
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
index 9dbf682..4217a8b 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.h
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -42,6 +42,11 @@
#define DEFAULT_B_FRAMES 0
#define DEFAULT_RC_LOOKAHEAD 0
+#define HEVC_QP_MIN 1
+#define HEVC_QP_MAX 51
+
+constexpr int32_t kDefaultInitQP = 32;
+
struct C2SoftHevcEnc : public SimpleC2Component {
class IntfImpl;
@@ -90,6 +95,7 @@
std::shared_ptr<C2StreamGopTuning::output> mGop;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamColorAspectsInfo::output> mColorAspects;
+ std::shared_ptr<C2StreamPictureQuantizationTuning::output> mQpBounds;
#ifdef FILE_DUMP_ENABLE
char mInFile[200];
char mOutFile[200];
diff --git a/media/codec2/fuzzer/C2Fuzzer.cpp b/media/codec2/fuzzer/C2Fuzzer.cpp
index 51e1013..e35ee48 100644
--- a/media/codec2/fuzzer/C2Fuzzer.cpp
+++ b/media/codec2/fuzzer/C2Fuzzer.cpp
@@ -194,12 +194,12 @@
}
std::vector<C2Param*> configParams;
+ C2StreamPictureSizeInfo::input inputSize(0u, kWidthOfVideo, kHeightOfVideo);
+ C2StreamSampleRateInfo::output sampleRateInfo(0u, kSamplingRateOfAudio);
+ C2StreamChannelCountInfo::output channelCountInfo(0u, kChannelsOfAudio);
if (domain.value == DOMAIN_VIDEO) {
- C2StreamPictureSizeInfo::input inputSize(0u, kWidthOfVideo, kHeightOfVideo);
configParams.push_back(&inputSize);
} else if (domain.value == DOMAIN_AUDIO) {
- C2StreamSampleRateInfo::output sampleRateInfo(0u, kSamplingRateOfAudio);
- C2StreamChannelCountInfo::output channelCountInfo(0u, kChannelsOfAudio);
configParams.push_back(&sampleRateInfo);
configParams.push_back(&channelCountInfo);
}
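A hedged illustration of the lifetime issue this hunk addresses: the C2Param objects are collected as raw pointers, so declaring them inside the if/else blocks would leave configParams holding dangling pointers once those blocks end (assuming, as in the fuzzer, that the collected pointers are used after the branch):

// Illustrative only: why the C2Param objects are hoisted out of the
// if/else blocks. Raw pointers stored in a vector must not outlive
// the objects they point to.
#include <cstdio>
#include <vector>

struct Param { int value; };

int main() {
    std::vector<Param*> configParams;

    // Sketch of the old pattern: 'inputSize' would die at the end of the
    // block, leaving configParams with a dangling pointer.
    // if (isVideo) {
    //     Param inputSize{42};
    //     configParams.push_back(&inputSize);
    // }

    // New pattern: declare the object in the enclosing scope so it is
    // still alive when the collected pointers are consumed later.
    Param inputSize{42};
    const bool isVideo = true;
    if (isVideo) {
        configParams.push_back(&inputSize);
    }

    for (const Param* p : configParams) {
        std::printf("param value = %d\n", p->value);
    }
    return 0;
}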
diff --git a/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp b/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
index 234faef..b295258 100644
--- a/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
+++ b/media/codecs/m4v_h263/enc/src/mp4enc_api.cpp
@@ -501,13 +501,16 @@
/* check frame rate */
for (i = 0; i < encParams->nLayers; i++)
{
+ if (encOption->encFrameRate[i] <= 0. || encOption->encFrameRate[i] > 120)
+ {
+ goto CLEAN_UP;
+ }
encParams->LayerFrameRate[i] = encOption->encFrameRate[i];
}
if (encParams->nLayers > 1)
{
- if (encOption->encFrameRate[0] == encOption->encFrameRate[1] ||
- encOption->encFrameRate[0] == 0. || encOption->encFrameRate[1] == 0.) /* 7/31/03 */
+ if (encOption->encFrameRate[0] == encOption->encFrameRate[1])
goto CLEAN_UP;
}
/* set max frame rate */
@@ -610,18 +613,19 @@
/* Find the maximum width*height for memory allocation of the VOPs */
for (idx = 0; idx < nLayers; idx++)
{
- temp_w = video->encParams->LayerWidth[idx];
- temp_h = video->encParams->LayerHeight[idx];
+ temp_w = ((video->encParams->LayerWidth[idx] + 15) >> 4) << 4;
+ temp_h = ((video->encParams->LayerHeight[idx] + 15) >> 4) << 4;
+
+ if (temp_w > 2048 || temp_h > 2048) {
+ goto CLEAN_UP;
+ }
if ((temp_w*temp_h) > max)
{
max = temp_w * temp_h;
- max_width = ((temp_w + 15) >> 4) << 4;
- max_height = ((temp_h + 15) >> 4) << 4;
- if (((uint64_t)max_width * max_height) > (uint64_t)INT32_MAX
- || temp_w > INT32_MAX - 15 || temp_h > INT32_MAX - 15) {
- goto CLEAN_UP;
- }
+ max_width = temp_w;
+ max_height = temp_h;
+
nTotalMB = ((max_width * max_height) >> 8);
}
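A small sketch of the macroblock alignment applied above: each layer dimension is rounded up to a multiple of 16 before the size check, and anything larger than 2048 in either direction is rejected (constants mirror the hunk; function names are illustrative):

// Illustrative only: round layer dimensions up to the 16-pixel macroblock
// grid and reject oversized layers, as the hunk above does.
#include <cstdint>
#include <cstdio>

constexpr int32_t kMaxDim = 2048;

int32_t alignTo16(int32_t dim) {
    return ((dim + 15) >> 4) << 4;
}

bool layerFits(int32_t width, int32_t height, int32_t* totalMBs) {
    const int32_t w = alignTo16(width);   // e.g. 1919 -> 1920
    const int32_t h = alignTo16(height);  // e.g. 1081 -> 1088
    if (w > kMaxDim || h > kMaxDim) {
        return false;                     // reject before allocating VOPs
    }
    *totalMBs = (w * h) >> 8;             // 16x16 pixels per macroblock
    return true;
}

int main() {
    int32_t mbs = 0;
    if (layerFits(1919, 1081, &mbs)) {
        std::printf("macroblocks: %d\n", mbs);  // 1920 * 1088 / 256 = 8160
    }
    return 0;
}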
diff --git a/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp b/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
index 4338c43..c04f7f3 100644
--- a/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
+++ b/media/codecs/mp3dec/src/pvmp3_stereo_proc.cpp
@@ -219,6 +219,9 @@
; FUNCTION CODE
----------------------------------------------------------------------------*/
+#if __has_attribute(no_sanitize)
+__attribute__((no_sanitize("integer")))
+#endif
void pvmp3_st_intensity(int32 xr[SUBBANDS_NUMBER*FILTERBANK_BANDS],
int32 xl[SUBBANDS_NUMBER*FILTERBANK_BANDS],
int32 is_pos,
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 4b08295..fc86001 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -565,6 +565,145 @@
};
typedef int32_t aaudio_session_id_t;
+/**
+ * Defines the audio channel mask.
+ * Channel masks are used to describe the samples and their
+ * arrangement in the audio frame. They are also used in the endpoint
+ * (e.g. a USB audio interface, a DAC connected to headphones) to
+ * specify allowable configurations of a particular device.
+ *
+ * Added in API level 32.
+ */
+enum {
+ /**
+ * Invalid channel mask
+ */
+ AAUDIO_CHANNEL_INVALID = -1,
+
+ /**
+ * Output audio channel mask
+ */
+ AAUDIO_CHANNEL_FRONT_LEFT = 1 << 0,
+ AAUDIO_CHANNEL_FRONT_RIGHT = 1 << 1,
+ AAUDIO_CHANNEL_FRONT_CENTER = 1 << 2,
+ AAUDIO_CHANNEL_LOW_FREQUENCY = 1 << 3,
+ AAUDIO_CHANNEL_BACK_LEFT = 1 << 4,
+ AAUDIO_CHANNEL_BACK_RIGHT = 1 << 5,
+ AAUDIO_CHANNEL_FRONT_LEFT_OF_CENTER = 1 << 6,
+ AAUDIO_CHANNEL_FRONT_RIGHT_OF_CENTER = 1 << 7,
+ AAUDIO_CHANNEL_BACK_CENTER = 1 << 8,
+ AAUDIO_CHANNEL_SIDE_LEFT = 1 << 9,
+ AAUDIO_CHANNEL_SIDE_RIGHT = 1 << 10,
+ AAUDIO_CHANNEL_TOP_CENTER = 1 << 11,
+ AAUDIO_CHANNEL_TOP_FRONT_LEFT = 1 << 12,
+ AAUDIO_CHANNEL_TOP_FRONT_CENTER = 1 << 13,
+ AAUDIO_CHANNEL_TOP_FRONT_RIGHT = 1 << 14,
+ AAUDIO_CHANNEL_TOP_BACK_LEFT = 1 << 15,
+ AAUDIO_CHANNEL_TOP_BACK_CENTER = 1 << 16,
+ AAUDIO_CHANNEL_TOP_BACK_RIGHT = 1 << 17,
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT = 1 << 18,
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT = 1 << 19,
+ AAUDIO_CHANNEL_BOTTOM_FRONT_LEFT = 1 << 20,
+ AAUDIO_CHANNEL_BOTTOM_FRONT_CENTER = 1 << 21,
+ AAUDIO_CHANNEL_BOTTOM_FRONT_RIGHT = 1 << 22,
+ AAUDIO_CHANNEL_LOW_FREQUENCY_2 = 1 << 23,
+ AAUDIO_CHANNEL_FRONT_WIDE_LEFT = 1 << 24,
+ AAUDIO_CHANNEL_FRONT_WIDE_RIGHT = 1 << 25,
+
+ AAUDIO_CHANNEL_MONO = AAUDIO_CHANNEL_FRONT_LEFT,
+ AAUDIO_CHANNEL_STEREO = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT,
+ AAUDIO_CHANNEL_2POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_TRI = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER,
+ AAUDIO_CHANNEL_TRI_BACK = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_BACK_CENTER,
+ AAUDIO_CHANNEL_3POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_2POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_2POINT1POINT2 = AAUDIO_CHANNEL_2POINT0POINT2 |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_3POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_3POINT1POINT2 = AAUDIO_CHANNEL_3POINT0POINT2 |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_QUAD = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_BACK_LEFT |
+ AAUDIO_CHANNEL_BACK_RIGHT,
+ AAUDIO_CHANNEL_QUAD_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_SIDE_LEFT |
+ AAUDIO_CHANNEL_SIDE_RIGHT,
+ AAUDIO_CHANNEL_SURROUND = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_BACK_CENTER,
+ AAUDIO_CHANNEL_PENTA = AAUDIO_CHANNEL_QUAD |
+ AAUDIO_CHANNEL_FRONT_CENTER,
+ // aka 5POINT1_BACK
+ AAUDIO_CHANNEL_5POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY |
+ AAUDIO_CHANNEL_BACK_LEFT |
+ AAUDIO_CHANNEL_BACK_RIGHT,
+ AAUDIO_CHANNEL_5POINT1_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY |
+ AAUDIO_CHANNEL_SIDE_LEFT |
+ AAUDIO_CHANNEL_SIDE_RIGHT,
+ AAUDIO_CHANNEL_6POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY |
+ AAUDIO_CHANNEL_BACK_LEFT |
+ AAUDIO_CHANNEL_BACK_RIGHT |
+ AAUDIO_CHANNEL_BACK_CENTER,
+ AAUDIO_CHANNEL_7POINT1 = AAUDIO_CHANNEL_5POINT1 |
+ AAUDIO_CHANNEL_SIDE_LEFT |
+ AAUDIO_CHANNEL_SIDE_RIGHT,
+ AAUDIO_CHANNEL_5POINT1POINT2 = AAUDIO_CHANNEL_5POINT1 |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_5POINT1POINT4 = AAUDIO_CHANNEL_5POINT1 |
+ AAUDIO_CHANNEL_TOP_FRONT_LEFT |
+ AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
+ AAUDIO_CHANNEL_TOP_BACK_LEFT |
+ AAUDIO_CHANNEL_TOP_BACK_RIGHT,
+ AAUDIO_CHANNEL_7POINT1POINT2 = AAUDIO_CHANNEL_7POINT1 |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_7POINT1POINT4 = AAUDIO_CHANNEL_7POINT1 |
+ AAUDIO_CHANNEL_TOP_FRONT_LEFT |
+ AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
+ AAUDIO_CHANNEL_TOP_BACK_LEFT |
+ AAUDIO_CHANNEL_TOP_BACK_RIGHT,
+ AAUDIO_CHANNEL_9POINT1POINT4 = AAUDIO_CHANNEL_7POINT1POINT4 |
+ AAUDIO_CHANNEL_FRONT_WIDE_LEFT |
+ AAUDIO_CHANNEL_FRONT_WIDE_RIGHT,
+ AAUDIO_CHANNEL_9POINT1POINT6 = AAUDIO_CHANNEL_9POINT1POINT4 |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+
+ AAUDIO_CHANNEL_FRONT_BACK = AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_BACK_CENTER,
+};
+typedef uint32_t aaudio_channel_mask_t;
+
typedef struct AAudioStreamStruct AAudioStream;
typedef struct AAudioStreamBuilderStruct AAudioStreamBuilder;
@@ -643,8 +782,11 @@
* This is usually {@code Context#getPackageName()}.
*
* The default, if you do not call this function, is a random package in the calling uid.
- * The vast majority of apps have only one package per calling UID. If the package
- * name does not match the calling UID, then requests will be rejected.
+ * The vast majority of apps have only one package per calling UID.
+ * If an invalid package name is set, input streams may not be given permission to
+ * record when started.
+ *
+ * The package name is usually the applicationId in your app's build.gradle file.
*
* Available since API level 31.
*
@@ -699,6 +841,11 @@
* If an exact value is specified then an opened stream will use that value.
* If a stream cannot be opened with the specified value then the open will fail.
*
+ * The channel count set here may differ from the channel count implied by a channel mask
+ * passed to {@link AAudioStreamBuilder_setChannelMask}. If both this function and
+ * {@link AAudioStreamBuilder_setChannelMask} are called, whichever function is called last
+ * takes effect.
+ *
* Available since API level 26.
*
* @param builder reference provided by AAudio_createStreamBuilder()
@@ -714,6 +861,8 @@
*
* @param builder reference provided by AAudio_createStreamBuilder()
* @param samplesPerFrame Number of samples in a frame.
+ *
+ * @deprecated use {@link AAudioStreamBuilder_setChannelCount}
*/
AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
int32_t samplesPerFrame) __INTRODUCED_IN(26);
@@ -1136,6 +1285,32 @@
AAUDIO_API aaudio_result_t AAudioStreamBuilder_delete(AAudioStreamBuilder* builder)
__INTRODUCED_IN(26);
+/**
+ * Set audio channel mask for the stream.
+ *
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
+ * If neither the channel mask nor the channel count is set, stereo will be chosen when the
+ * stream is opened.
+ * After opening a stream with an unspecified value, the application must query for the
+ * actual value, which may vary by device.
+ *
+ * If an exact value is specified then an opened stream will use that value.
+ * If a stream cannot be opened with the specified value then the open will fail.
+ *
+ * The channel count implied by the channel mask set here may differ from the channel
+ * count passed to {@link AAudioStreamBuilder_setChannelCount} or
+ * {@link AAudioStreamBuilder_setSamplesPerFrame}. If more than one of these
+ * functions is called, whichever function is called last takes effect for the
+ * opened stream.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param channelMask Audio channel mask desired.
+ */
+AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
+ aaudio_channel_mask_t channelMask) __INTRODUCED_IN(32);
+
// ============================================================
// Stream Control
// ============================================================
@@ -1652,6 +1827,18 @@
AAUDIO_API bool AAudioStream_isPrivacySensitive(AAudioStream* stream)
__INTRODUCED_IN(30);
+/**
+ * Return the channel mask for the stream. This will be the mask set using
+ * {@link #AAudioStreamBuilder_setChannelMask}, or {@link #AAUDIO_UNSPECIFIED} otherwise.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual channel mask
+ */
+AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
+ __INTRODUCED_IN(32);
+
#ifdef __cplusplus
}
#endif
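A hedged usage sketch for the channel-mask APIs declared above, requesting a 5.1 output stream and reading back the granted mask (error handling trimmed; not part of this change):

// Illustrative only: request a 5.1 output stream with the new channel-mask
// API, then read back the mask that was actually granted.
#include <aaudio/AAudio.h>
#include <cstdio>

int main() {
    AAudioStreamBuilder* builder = nullptr;
    if (AAudio_createStreamBuilder(&builder) != AAUDIO_OK) return 1;

    // setChannelMask() and setChannelCount() override each other;
    // whichever is called last takes effect.
    AAudioStreamBuilder_setChannelMask(builder, AAUDIO_CHANNEL_5POINT1);
    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);

    AAudioStream* stream = nullptr;
    if (AAudioStreamBuilder_openStream(builder, &stream) == AAUDIO_OK) {
        const aaudio_channel_mask_t mask = AAudioStream_getChannelMask(stream);
        std::printf("granted channel mask: 0x%08x\n", static_cast<unsigned>(mask));
        AAudioStream_close(stream);
    }
    AAudioStreamBuilder_delete(builder);
    return 0;
}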
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 33a5c7f..da59da6 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -7,6 +7,65 @@
default_applicable_licenses: ["frameworks_av_license"],
}
+tidy_errors = [
+ // https://clang.llvm.org/extra/clang-tidy/checks/list.html
+ // For many categories, the checks are too many to specify individually.
+ // Feel free to disable as needed - as warnings are generally ignored,
+ // we treat warnings as errors.
+ "android-*",
+ "bugprone-*",
+ "cert-*",
+ "clang-analyzer-security*",
+ "google-*",
+ "misc-*",
+ //"modernize-*", // explicitly list the modernize as they can be subjective.
+ "modernize-avoid-bind",
+ //"modernize-avoid-c-arrays", // std::array<> can be verbose
+ "modernize-concat-nested-namespaces",
+ //"modernize-deprecated-headers", // C headers still ok even if there is C++ equivalent.
+ "modernize-deprecated-ios-base-aliases",
+ "modernize-loop-convert",
+ "modernize-make-shared",
+ "modernize-make-unique",
+ "modernize-pass-by-value",
+ "modernize-raw-string-literal",
+ "modernize-redundant-void-arg",
+ "modernize-replace-auto-ptr",
+ "modernize-replace-random-shuffle",
+ "modernize-return-braced-init-list",
+ "modernize-shrink-to-fit",
+ "modernize-unary-static-assert",
+ // "modernize-use-auto", // found in AAudioAudio.cpp
+ "modernize-use-bool-literals",
+ "modernize-use-default-member-init",
+ "modernize-use-emplace",
+ "modernize-use-equals-default",
+ "modernize-use-equals-delete",
+ // "modernize-use-nodiscard", // found in aidl generated files
+ "modernize-use-noexcept",
+ "modernize-use-nullptr",
+ // "modernize-use-override", // found in aidl generated files
+ // "modernize-use-trailing-return-type", // not necessarily more readable
+ "modernize-use-transparent-functors",
+ "modernize-use-uncaught-exceptions",
+ // "modernize-use-using", // found typedef in several files
+ "performance-*",
+
+ // Remove some pedantic stylistic requirements.
+ "-android-cloexec-dup", // found in SharedMemoryParcelable.cpp
+ "-bugprone-macro-parentheses", // found in SharedMemoryParcelable.h
+ "-bugprone-narrowing-conversions", // found in several interface from size_t to int32_t
+
+ "-google-readability-casting", // C++ casts not always necessary and may be verbose
+ "-google-readability-todo", // do not require TODO(info)
+ "-google-build-using-namespace", // Reenable and fix later.
+ "-google-global-names-in-headers", // found in several files
+
+ "-misc-non-private-member-variables-in-classes", // found in aidl generated files
+
+ "-performance-no-int-to-ptr", // found in SharedMemoryParcelable.h
+]
+
cc_library {
name: "libaaudio",
@@ -52,7 +111,7 @@
"libcutils",
"libutils",
"libbinder",
- "libpermission",
+ "framework-permission-aidl-cpp",
],
sanitize: {
@@ -64,6 +123,13 @@
symbol_file: "libaaudio.map.txt",
versions: ["28"],
},
+
+ tidy: true,
+ tidy_checks: tidy_errors,
+ tidy_checks_as_errors: tidy_errors,
+ tidy_flags: [
+ "-format-style=file",
+ ]
}
cc_library {
@@ -102,6 +168,7 @@
"libbinder",
"framework-permission-aidl-cpp",
"aaudio-aidl-cpp",
+ "audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
],
@@ -156,6 +223,13 @@
integer_overflow: true,
misc_undefined: ["bounds"],
},
+
+ tidy: true,
+ tidy_checks: tidy_errors,
+ tidy_checks_as_errors: tidy_errors,
+ tidy_flags: [
+ "-format-style=file",
+ ]
}
aidl_interface {
@@ -172,19 +246,14 @@
"binding/aidl/aaudio/IAAudioService.aidl",
],
imports: [
- "audio_common-aidl",
+ "audioclient-types-aidl",
"shared-file-region-aidl",
- "framework-permission-aidl"
+ "framework-permission-aidl",
],
backend:
{
- cpp: {
- enabled: true,
- },
java: {
- // TODO: need to have audio_common-aidl available in Java to enable
- // this.
- enabled: false,
+ sdk_version: "module_current",
},
},
}
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index fa5a2da..135bac3 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -36,13 +36,10 @@
using android::IServiceManager;
using android::defaultServiceManager;
using android::interface_cast;
-using android::IInterface;
using android::Mutex;
using android::ProcessState;
using android::sp;
using android::status_t;
-using android::wp;
-using android::binder::Status;
using namespace aaudio;
@@ -93,7 +90,7 @@
ALOGE("%s() - linkToDeath() returned %d", __func__, status);
}
aaudioService = interface_cast<IAAudioService>(binder);
- mAdapter.reset(new Adapter(aaudioService, mAAudioClient));
+ mAdapter = std::make_shared<Adapter>(aaudioService, mAAudioClient);
needToRegister = true;
// Make sure callbacks can be received by mAAudioClient
ProcessState::self()->startThreadPool();
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
index 6a7b639..557ced5 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.h
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -108,7 +108,7 @@
return AAUDIO_ERROR_UNAVAILABLE;
}
- void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) {
+ void onStreamChange(aaudio_handle_t /*handle*/, int32_t /*opcode*/, int32_t /*value*/) {
// TODO This is just a stub so we can have a client Binder to pass to the service.
// TODO Implemented in a later CL.
ALOGW("onStreamChange called!");
@@ -116,7 +116,7 @@
class AAudioClient : public android::IBinder::DeathRecipient, public BnAAudioClient {
public:
- AAudioClient(android::wp<AAudioBinderClient> aaudioBinderClient)
+ explicit AAudioClient(const android::wp<AAudioBinderClient>& aaudioBinderClient)
: mBinderClient(aaudioBinderClient) {
}
@@ -150,10 +150,10 @@
class Adapter : public AAudioBinderAdapter {
public:
Adapter(const android::sp<IAAudioService>& delegate,
- const android::sp<AAudioClient>& aaudioClient)
+ android::sp<AAudioClient> aaudioClient)
: AAudioBinderAdapter(delegate.get()),
mDelegate(delegate),
- mAAudioClient(aaudioClient) {}
+ mAAudioClient(std::move(aaudioClient)) {}
virtual ~Adapter() {
if (mDelegate != nullptr) {
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
index 5d11512..bf94774 100644
--- a/media/libaaudio/src/binding/AAudioServiceInterface.h
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -37,7 +37,7 @@
class AAudioServiceInterface {
public:
- AAudioServiceInterface() {};
+ AAudioServiceInterface() = default;
virtual ~AAudioServiceInterface() = default;
virtual void registerClient(const android::sp<IAAudioClient>& client) = 0;
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 2d501ef..cf76225 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -23,20 +23,23 @@
#include <sys/mman.h>
#include <aaudio/AAudio.h>
+#include <media/AidlConversion.h>
+
#include "binding/AAudioStreamConfiguration.h"
using namespace aaudio;
-using android::media::audio::common::AudioFormat;
+using android::media::AudioFormatDescription;
AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
- setSamplesPerFrame(parcelable.samplesPerFrame);
+ setChannelMask(parcelable.channelMask);
setSampleRate(parcelable.sampleRate);
setDeviceId(parcelable.deviceId);
static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(parcelable.sharingMode));
setSharingMode(parcelable.sharingMode);
- static_assert(sizeof(audio_format_t) == sizeof(parcelable.audioFormat));
- setFormat(static_cast<audio_format_t>(parcelable.audioFormat));
+ auto convFormat = android::aidl2legacy_AudioFormatDescription_audio_format_t(
+ parcelable.audioFormat);
+ setFormat(convFormat.ok() ? convFormat.value() : AUDIO_FORMAT_INVALID);
static_assert(sizeof(aaudio_direction_t) == sizeof(parcelable.direction));
setDirection(parcelable.direction);
static_assert(sizeof(audio_usage_t) == sizeof(parcelable.usage));
@@ -63,13 +66,18 @@
StreamParameters AAudioStreamConfiguration::parcelable() const {
StreamParameters result;
- result.samplesPerFrame = getSamplesPerFrame();
+ result.channelMask = getChannelMask();
result.sampleRate = getSampleRate();
result.deviceId = getDeviceId();
static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(result.sharingMode));
result.sharingMode = getSharingMode();
- static_assert(sizeof(audio_format_t) == sizeof(result.audioFormat));
- result.audioFormat = static_cast<AudioFormat>(getFormat());
+ auto convAudioFormat = android::legacy2aidl_audio_format_t_AudioFormatDescription(getFormat());
+ if (convAudioFormat.ok()) {
+ result.audioFormat = convAudioFormat.value();
+ } else {
+ result.audioFormat = AudioFormatDescription{};
+ result.audioFormat.type = android::media::AudioFormatType::SYS_RESERVED_INVALID;
+ }
static_assert(sizeof(aaudio_direction_t) == sizeof(result.direction));
result.direction = getDirection();
static_assert(sizeof(audio_usage_t) == sizeof(result.usage));
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index 8d90034..a4cc2bd 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -30,7 +30,7 @@
using namespace aaudio;
AAudioStreamRequest::AAudioStreamRequest(const StreamRequest& parcelable) :
- mConfiguration(std::move(parcelable.params)),
+ mConfiguration(parcelable.params),
mAttributionSource(parcelable.attributionSource),
mSharingModeMatchRequired(parcelable.sharingModeMatchRequired),
mInService(parcelable.inService) {
@@ -38,7 +38,7 @@
StreamRequest AAudioStreamRequest::parcelable() const {
StreamRequest result;
- result.params = std::move(mConfiguration).parcelable();
+ result.params = mConfiguration.parcelable();
result.attributionSource = mAttributionSource;
result.sharingModeMatchRequired = mSharingModeMatchRequired;
result.inService = mInService;
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index aa4ac27..dea3e4a 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -29,17 +29,15 @@
#include "binding/AudioEndpointParcelable.h"
using android::base::unique_fd;
-using android::media::SharedFileRegion;
-using android::NO_ERROR;
using android::status_t;
using namespace aaudio;
AudioEndpointParcelable::AudioEndpointParcelable(Endpoint&& parcelable)
- : mUpMessageQueueParcelable(std::move(parcelable.upMessageQueueParcelable)),
- mDownMessageQueueParcelable(std::move(parcelable.downMessageQueueParcelable)),
- mUpDataQueueParcelable(std::move(parcelable.upDataQueueParcelable)),
- mDownDataQueueParcelable(std::move(parcelable.downDataQueueParcelable)),
+ : mUpMessageQueueParcelable(parcelable.upMessageQueueParcelable),
+ mDownMessageQueueParcelable(parcelable.downMessageQueueParcelable),
+ mUpDataQueueParcelable(parcelable.upDataQueueParcelable),
+ mDownDataQueueParcelable(parcelable.downDataQueueParcelable),
mNumSharedMemories(parcelable.sharedMemories.size()) {
for (size_t i = 0; i < parcelable.sharedMemories.size() && i < MAX_SHARED_MEMORIES; ++i) {
// Re-construct.
@@ -56,10 +54,10 @@
Endpoint AudioEndpointParcelable::parcelable()&& {
Endpoint result;
- result.upMessageQueueParcelable = std::move(mUpMessageQueueParcelable).parcelable();
- result.downMessageQueueParcelable = std::move(mDownMessageQueueParcelable).parcelable();
- result.upDataQueueParcelable = std::move(mUpDataQueueParcelable).parcelable();
- result.downDataQueueParcelable = std::move(mDownDataQueueParcelable).parcelable();
+ result.upMessageQueueParcelable = mUpMessageQueueParcelable.parcelable();
+ result.downMessageQueueParcelable = mDownMessageQueueParcelable.parcelable();
+ result.upDataQueueParcelable = mUpDataQueueParcelable.parcelable();
+ result.downDataQueueParcelable = mDownDataQueueParcelable.parcelable();
result.sharedMemories.reserve(std::min(mNumSharedMemories, MAX_SHARED_MEMORIES));
for (size_t i = 0; i < mNumSharedMemories && i < MAX_SHARED_MEMORIES; ++i) {
result.sharedMemories.emplace_back(std::move(mSharedMemories[i]).parcelable());
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
index 5237a1a..544aa92 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -43,7 +43,7 @@
// Ctor/assignment from a parcelable representation.
// Since the parcelable object owns unique FDs (for shared memory blocks), move semantics are
// provided to avoid the need to dupe.
- AudioEndpointParcelable(Endpoint&& parcelable);
+ explicit AudioEndpointParcelable(Endpoint&& parcelable);
AudioEndpointParcelable& operator=(Endpoint&& parcelable);
/**
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
index a4b3cec..fa7ca72 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -30,9 +30,9 @@
using namespace aaudio;
RingBufferParcelable::RingBufferParcelable(const RingBuffer& parcelable)
- : mReadCounterParcelable(std::move(parcelable.readCounterParcelable)),
- mWriteCounterParcelable(std::move(parcelable.writeCounterParcelable)),
- mDataParcelable(std::move(parcelable.dataParcelable)),
+ : mReadCounterParcelable(parcelable.readCounterParcelable),
+ mWriteCounterParcelable(parcelable.writeCounterParcelable),
+ mDataParcelable(parcelable.dataParcelable),
mBytesPerFrame(parcelable.bytesPerFrame),
mFramesPerBurst(parcelable.framesPerBurst),
mCapacityInFrames(parcelable.capacityInFrames),
@@ -42,9 +42,9 @@
RingBuffer RingBufferParcelable::parcelable() const {
RingBuffer result;
- result.readCounterParcelable = std::move(mReadCounterParcelable).parcelable();
- result.writeCounterParcelable = std::move(mWriteCounterParcelable).parcelable();
- result.dataParcelable = std::move(mDataParcelable).parcelable();
+ result.readCounterParcelable = mReadCounterParcelable.parcelable();
+ result.writeCounterParcelable = mWriteCounterParcelable.parcelable();
+ result.dataParcelable = mDataParcelable.parcelable();
result.bytesPerFrame = mBytesPerFrame;
result.framesPerBurst = mFramesPerBurst;
result.capacityInFrames = mCapacityInFrames;
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index eef238f..3a49655 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -32,7 +32,6 @@
#include "binding/SharedMemoryParcelable.h"
using android::base::unique_fd;
-using android::NO_ERROR;
using android::status_t;
using android::media::SharedFileRegion;
@@ -78,7 +77,7 @@
}
aaudio_result_t SharedMemoryParcelable::resolveSharedMemory(const unique_fd& fd) {
- mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ | PROT_WRITE,
+ mResolvedAddress = (uint8_t *) mmap(nullptr, mSizeInBytes, PROT_READ | PROT_WRITE,
MAP_SHARED, fd.get(), 0);
if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
ALOGE("mmap() failed for fd = %d, nBytes = %" PRId64 ", errno = %s",
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
index 56b99c0..6fa109b 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -29,10 +29,7 @@
#include "binding/SharedMemoryParcelable.h"
#include "binding/SharedRegionParcelable.h"
-using android::NO_ERROR;
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
using namespace aaudio;
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
index b7c4f70..3b274f2 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -16,14 +16,14 @@
package aaudio;
-import android.media.audio.common.AudioFormat;
+import android.media.AudioFormatDescription;
parcelable StreamParameters {
- int samplesPerFrame; // = AAUDIO_UNSPECIFIED;
+ int channelMask; // = AAUDIO_UNSPECIFIED;
int sampleRate; // = AAUDIO_UNSPECIFIED;
int deviceId; // = AAUDIO_UNSPECIFIED;
int /* aaudio_sharing_mode_t */ sharingMode; // = AAUDIO_SHARING_MODE_SHARED;
- AudioFormat audioFormat; // = AUDIO_FORMAT_DEFAULT;
+ AudioFormatDescription audioFormat; // = AUDIO_FORMAT_DEFAULT;
int /* aaudio_direction_t */ direction; // = AAUDIO_DIRECTION_OUTPUT;
int /* aaudio_usage_t */ usage; // = AAUDIO_UNSPECIFIED;
int /* aaudio_content_type_t */ contentType; // = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index ebc9f2b..24888de 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -31,13 +31,6 @@
#define RIDICULOUSLY_LARGE_BUFFER_CAPACITY (256 * 1024)
#define RIDICULOUSLY_LARGE_FRAME_SIZE 4096
-AudioEndpoint::AudioEndpoint()
- : mFreeRunning(false)
- , mDataReadCounter(0)
- , mDataWriteCounter(0)
-{
-}
-
// TODO Consider moving to a method in RingBufferDescriptor
static aaudio_result_t AudioEndpoint_validateQueueDescriptor(const char *type,
const RingBufferDescriptor *descriptor) {
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 4c8d60f..b3dbc20 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -34,7 +34,7 @@
class AudioEndpoint {
public:
- AudioEndpoint();
+ AudioEndpoint() = default;
/**
* Configure based on the EndPointDescriptor_t.
@@ -95,9 +95,9 @@
private:
std::unique_ptr<android::FifoBufferIndirect> mUpCommandQueue;
std::unique_ptr<android::FifoBufferIndirect> mDataQueue;
- bool mFreeRunning;
- android::fifo_counter_t mDataReadCounter; // only used if free-running
- android::fifo_counter_t mDataWriteCounter; // only used if free-running
+ bool mFreeRunning{false};
+ android::fifo_counter_t mDataReadCounter{0}; // only used if free-running
+ android::fifo_counter_t mDataWriteCounter{0}; // only used if free-running
};
} // namespace aaudio
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index cf2abe8..91f3004 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -49,8 +49,6 @@
// This is needed to make sense of the logs more easily.
#define LOG_TAG (mInService ? "AudioStreamInternal_Service" : "AudioStreamInternal_Client")
-using android::Mutex;
-using android::WrappingBuffer;
using android::content::AttributionSourceState;
using namespace aaudio;
@@ -123,9 +121,9 @@
request.getConfiguration().setDeviceId(getDeviceId());
request.getConfiguration().setSampleRate(getSampleRate());
- request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
request.getConfiguration().setDirection(getDirection());
request.getConfiguration().setSharingMode(getSharingMode());
+ request.getConfiguration().setChannelMask(getChannelMask());
request.getConfiguration().setUsage(getUsage());
request.getConfiguration().setContentType(getContentType());
@@ -138,7 +136,8 @@
mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
if (mServiceStreamHandle < 0
- && request.getConfiguration().getSamplesPerFrame() == 1 // mono?
+ && (request.getConfiguration().getSamplesPerFrame() == 1
+ || request.getConfiguration().getChannelMask() == AAUDIO_CHANNEL_MONO)
&& getDirection() == AAUDIO_DIRECTION_OUTPUT
&& !isInService()) {
// if that failed then try switching from mono to stereo if OUTPUT.
@@ -146,7 +145,7 @@
// that writes to a stereo MMAP stream.
ALOGD("%s() - openStream() returned %d, try switching from MONO to STEREO",
__func__, mServiceStreamHandle);
- request.getConfiguration().setSamplesPerFrame(2); // stereo
+ request.getConfiguration().setChannelMask(AAUDIO_CHANNEL_STEREO);
mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
}
if (mServiceStreamHandle < 0) {
@@ -171,9 +170,10 @@
goto error;
}
// Save results of the open.
- if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
- setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
+ if (getChannelMask() == AAUDIO_UNSPECIFIED) {
+ setChannelMask(configurationOutput.getChannelMask());
}
+
mDeviceChannelCount = configurationOutput.getSamplesPerFrame();
setSampleRate(configurationOutput.getSampleRate());
@@ -329,10 +329,10 @@
{
AudioStreamInternal *stream = (AudioStreamInternal *)context;
//LOGD("oboe_callback_thread, stream = %p", stream);
- if (stream != NULL) {
+ if (stream != nullptr) {
return stream->callbackLoop();
} else {
- return NULL;
+ return nullptr;
}
}
@@ -421,7 +421,7 @@
if (isDataCallbackSet()
&& (isActive() || getState() == AAUDIO_STREAM_STATE_DISCONNECTED)) {
mCallbackEnabled.store(false);
- aaudio_result_t result = joinThread_l(NULL); // may temporarily unlock mStreamLock
+ aaudio_result_t result = joinThread_l(nullptr); // may temporarily unlock mStreamLock
if (result == AAUDIO_ERROR_INVALID_HANDLE) {
ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
result = AAUDIO_OK;
@@ -508,7 +508,7 @@
return result;
}
-aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
+aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t /*clockId*/,
int64_t *framePosition,
int64_t *timeNanoseconds) {
// Generated in server and passed to client. Return latest.
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index fbe4c13..eab1382 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -116,7 +116,7 @@
virtual void prepareBuffersForStart() {}
- virtual void advanceClientToMatchServerPosition(int32_t serverMargin = 0) = 0;
+ virtual void advanceClientToMatchServerPosition(int32_t serverMargin) = 0;
virtual void onFlushFromServer() {}
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 2da5406..1efccb1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -46,8 +46,6 @@
}
-AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
-
void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
int64_t readCounter = mAudioEndpoint->getDataReadCounter();
int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;
@@ -109,7 +107,7 @@
if (mNeedCatchUp.isRequested()) {
// Catch an MMAP pointer that is already advancing.
// This will avoid initial underruns caused by a slow cold start.
- advanceClientToMatchServerPosition();
+ advanceClientToMatchServerPosition(0 /*serverMargin*/);
mNeedCatchUp.acknowledge();
}
@@ -228,7 +226,7 @@
void *AudioStreamInternalCapture::callbackLoop() {
aaudio_result_t result = AAUDIO_OK;
aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
- if (!isDataCallbackSet()) return NULL;
+ if (!isDataCallbackSet()) return nullptr;
// result might be a frame count
while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
@@ -260,5 +258,5 @@
ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
result, (int) isActive());
- return NULL;
+ return nullptr;
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 251a7f2..87017de 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -28,8 +28,9 @@
class AudioStreamInternalCapture : public AudioStreamInternal {
public:
- AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface, bool inService = false);
- virtual ~AudioStreamInternalCapture();
+ explicit AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
+ bool inService = false);
+ virtual ~AudioStreamInternalCapture() = default;
aaudio_result_t read(void *buffer,
int32_t numFrames,
@@ -45,7 +46,7 @@
}
protected:
- void advanceClientToMatchServerPosition(int32_t serverOffset = 0) override;
+ void advanceClientToMatchServerPosition(int32_t serverOffset) override;
/**
* Low level data processing that will not block. It will just read or write as much as it can.
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 71bde90..5921799 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -44,8 +44,6 @@
}
-AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
-
constexpr int kRampMSec = 10; // time to apply a change in volume
aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
@@ -115,7 +113,7 @@
}
void AudioStreamInternalPlay::onFlushFromServer() {
- advanceClientToMatchServerPosition();
+ advanceClientToMatchServerPosition(0 /*serverMargin*/);
}
// Write the data, block if needed and timeoutMillis > 0
@@ -281,7 +279,7 @@
ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
aaudio_result_t result = AAUDIO_OK;
aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
- if (!isDataCallbackSet()) return NULL;
+ if (!isDataCallbackSet()) return nullptr;
int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
// result might be a frame count
@@ -309,7 +307,7 @@
ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
__func__, result, (int) isActive());
- return NULL;
+ return nullptr;
}
//------------------------------------------------------------------------------
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index 03c957d..e761807 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -30,8 +30,9 @@
class AudioStreamInternalPlay : public AudioStreamInternal {
public:
- AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface, bool inService = false);
- virtual ~AudioStreamInternalPlay();
+ explicit AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
+ bool inService = false);
+ virtual ~AudioStreamInternalPlay() = default;
aaudio_result_t open(const AudioStreamBuilder &builder) override;
@@ -66,7 +67,7 @@
void prepareBuffersForStart() override;
- void advanceClientToMatchServerPosition(int32_t serverMargin = 0) override;
+ void advanceClientToMatchServerPosition(int32_t serverMargin) override;
void onFlushFromServer() override;
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index f0dcd44..6921271 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -43,14 +43,7 @@
// and dumped to the log when the stream is stopped.
IsochronousClockModel::IsochronousClockModel()
- : mMarkerFramePosition(0)
- , mMarkerNanoTime(0)
- , mSampleRate(48000)
- , mFramesPerBurst(48)
- , mBurstPeriodNanos(0) // this will be updated before use
- , mMaxMeasuredLatenessNanos(0)
- , mLatenessForDriftNanos(kInitialLatenessForDriftNanos)
- , mState(STATE_STOPPED)
+ : mLatenessForDriftNanos(kInitialLatenessForDriftNanos)
{
if ((AAudioProperty_getLogMask() & AAUDIO_LOG_CLOCK_MODEL_HISTOGRAM) != 0) {
mHistogramMicros = std::make_unique<Histogram>(kHistogramBinCount,
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 6280013..3007237 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -149,16 +149,16 @@
static constexpr int32_t kHistogramBinWidthMicros = 50;
static constexpr int32_t kHistogramBinCount = 128;
- int64_t mMarkerFramePosition; // Estimated HW position.
- int64_t mMarkerNanoTime; // Estimated HW time.
- int32_t mSampleRate;
- int32_t mFramesPerBurst; // number of frames transferred at one time.
- int32_t mBurstPeriodNanos; // Time between HW bursts.
+ int64_t mMarkerFramePosition{0}; // Estimated HW position.
+ int64_t mMarkerNanoTime{0}; // Estimated HW time.
+ int32_t mSampleRate{48000};
+ int32_t mFramesPerBurst{48}; // number of frames transferred at one time.
+ int32_t mBurstPeriodNanos{0}; // Time between HW bursts.
// Includes mBurstPeriodNanos because we sample randomly over time.
- int32_t mMaxMeasuredLatenessNanos;
+ int32_t mMaxMeasuredLatenessNanos{0};
// Threshold for lateness that triggers a drift later in time.
int32_t mLatenessForDriftNanos;
- clock_model_state_t mState; // State machine handles startup sequence.
+ clock_model_state_t mState{STATE_STOPPED}; // State machine handles startup sequence.
int32_t mTimestampCount = 0; // For logging.
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index d103aca..02e7f5f 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -128,7 +128,8 @@
int32_t samplesPerFrame)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- streamBuilder->setSamplesPerFrame(samplesPerFrame);
+ const aaudio_channel_mask_t channelMask = AAudioConvert_channelCountToMask(samplesPerFrame);
+ streamBuilder->setChannelMask(channelMask);
}
AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
@@ -223,6 +224,13 @@
streamBuilder->setFramesPerDataCallback(frames);
}
+AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
+ aaudio_channel_mask_t channelMask)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setChannelMask(channelMask);
+}
+
AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
AAudioStream** streamPtr)
{
@@ -562,3 +570,11 @@
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
return audioStream->isPrivacySensitive();
}
+
+AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ const aaudio_channel_mask_t channelMask = audioStream->getChannelMask();
+ // Do not return channel index masks as they are not public.
+ return AAudio_isChannelIndexMask(channelMask) ? AAUDIO_UNSPECIFIED : channelMask;
+}
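
For context, a minimal client-side sketch of how the new builder/stream entry points would typically be used (assumes only the public NDK AAudio header plus the AAUDIO_CHANNEL_* constants introduced by this change; the helper name openStereoOutput is hypothetical and error handling is trimmed):

    // Sketch only, not part of this change: open an output stream by layout
    // instead of by raw channel count, then query the negotiated mask.
    #include <aaudio/AAudio.h>

    aaudio_result_t openStereoOutput(AAudioStream **streamOut) {
        AAudioStreamBuilder *builder = nullptr;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;
        AAudioStreamBuilder_setChannelMask(builder, AAUDIO_CHANNEL_STEREO);
        result = AAudioStreamBuilder_openStream(builder, streamOut);
        if (result == AAUDIO_OK) {
            // Index masks are filtered out by AAudioStream_getChannelMask(),
            // so this is either a layout mask or AAUDIO_UNSPECIFIED.
            aaudio_channel_mask_t mask = AAudioStream_getChannelMask(*streamOut);
            (void) mask;
        }
        AAudioStreamBuilder_delete(builder);
        return result;
    }
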
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index acfac24..84133f4 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -30,9 +30,6 @@
// HDMI supports up to 32 channels at 1536000 Hz.
#define SAMPLE_RATE_HZ_MAX 1600000
-AAudioStreamParameters::AAudioStreamParameters() {}
-AAudioStreamParameters::~AAudioStreamParameters() {}
-
void AAudioStreamParameters::copyFrom(const AAudioStreamParameters &other) {
mSamplesPerFrame = other.mSamplesPerFrame;
mSampleRate = other.mSampleRate;
@@ -49,6 +46,7 @@
mIsPrivacySensitive = other.mIsPrivacySensitive;
mOpPackageName = other.mOpPackageName;
mAttributionTag = other.mAttributionTag;
+ mChannelMask = other.mChannelMask;
}
static aaudio_result_t isFormatValid(audio_format_t format) {
@@ -187,7 +185,94 @@
// break;
}
- return AAUDIO_OK;
+ return validateChannelMask();
+}
+
+aaudio_result_t AAudioStreamParameters::validateChannelMask() const {
+ if (mChannelMask == AAUDIO_UNSPECIFIED) {
+ return AAUDIO_OK;
+ }
+
+ if (mChannelMask & AAUDIO_CHANNEL_BIT_INDEX) {
+ switch (mChannelMask) {
+ case AAUDIO_CHANNEL_INDEX_MASK_1:
+ case AAUDIO_CHANNEL_INDEX_MASK_2:
+ case AAUDIO_CHANNEL_INDEX_MASK_3:
+ case AAUDIO_CHANNEL_INDEX_MASK_4:
+ case AAUDIO_CHANNEL_INDEX_MASK_5:
+ case AAUDIO_CHANNEL_INDEX_MASK_6:
+ case AAUDIO_CHANNEL_INDEX_MASK_7:
+ case AAUDIO_CHANNEL_INDEX_MASK_8:
+ case AAUDIO_CHANNEL_INDEX_MASK_9:
+ case AAUDIO_CHANNEL_INDEX_MASK_10:
+ case AAUDIO_CHANNEL_INDEX_MASK_11:
+ case AAUDIO_CHANNEL_INDEX_MASK_12:
+ case AAUDIO_CHANNEL_INDEX_MASK_13:
+ case AAUDIO_CHANNEL_INDEX_MASK_14:
+ case AAUDIO_CHANNEL_INDEX_MASK_15:
+ case AAUDIO_CHANNEL_INDEX_MASK_16:
+ case AAUDIO_CHANNEL_INDEX_MASK_17:
+ case AAUDIO_CHANNEL_INDEX_MASK_18:
+ case AAUDIO_CHANNEL_INDEX_MASK_19:
+ case AAUDIO_CHANNEL_INDEX_MASK_20:
+ case AAUDIO_CHANNEL_INDEX_MASK_21:
+ case AAUDIO_CHANNEL_INDEX_MASK_22:
+ case AAUDIO_CHANNEL_INDEX_MASK_23:
+ case AAUDIO_CHANNEL_INDEX_MASK_24:
+ return AAUDIO_OK;
+ default:
+ ALOGD("Invalid channel index mask %#x", mChannelMask);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ }
+
+ if (getDirection() == AAUDIO_DIRECTION_INPUT) {
+ switch (mChannelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ case AAUDIO_CHANNEL_STEREO:
+ case AAUDIO_CHANNEL_FRONT_BACK:
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ case AAUDIO_CHANNEL_5POINT1:
+ return AAUDIO_OK;
+ default:
+ ALOGD("Invalid channel mask %#x, IN", mChannelMask);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ } else {
+ switch (mChannelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ case AAUDIO_CHANNEL_STEREO:
+ case AAUDIO_CHANNEL_2POINT1:
+ case AAUDIO_CHANNEL_TRI:
+ case AAUDIO_CHANNEL_TRI_BACK:
+ case AAUDIO_CHANNEL_3POINT1:
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ case AAUDIO_CHANNEL_QUAD:
+ case AAUDIO_CHANNEL_QUAD_SIDE:
+ case AAUDIO_CHANNEL_SURROUND:
+ case AAUDIO_CHANNEL_PENTA:
+ case AAUDIO_CHANNEL_5POINT1:
+ case AAUDIO_CHANNEL_5POINT1_SIDE:
+ case AAUDIO_CHANNEL_5POINT1POINT2:
+ case AAUDIO_CHANNEL_5POINT1POINT4:
+ case AAUDIO_CHANNEL_6POINT1:
+ case AAUDIO_CHANNEL_7POINT1:
+ case AAUDIO_CHANNEL_7POINT1POINT2:
+ case AAUDIO_CHANNEL_7POINT1POINT4:
+ case AAUDIO_CHANNEL_9POINT1POINT4:
+ case AAUDIO_CHANNEL_9POINT1POINT6:
+ return AAUDIO_OK;
+ default:
+ ALOGD("Invalid channel mask %#x. OUT", mChannelMask);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ }
}
void AAudioStreamParameters::dump() const {
@@ -195,6 +280,7 @@
ALOGD("mSessionId = %6d", mSessionId);
ALOGD("mSampleRate = %6d", mSampleRate);
ALOGD("mSamplesPerFrame = %6d", mSamplesPerFrame);
+ ALOGD("mChannelMask = %#x", mChannelMask);
ALOGD("mSharingMode = %6d", (int)mSharingMode);
ALOGD("mAudioFormat = %6d", (int)mAudioFormat);
ALOGD("mDirection = %6d", mDirection);
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index 5737052..7a6ed3e 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -26,8 +26,8 @@
class AAudioStreamParameters {
public:
- AAudioStreamParameters();
- virtual ~AAudioStreamParameters();
+ AAudioStreamParameters() = default;
+ virtual ~AAudioStreamParameters() = default;
int32_t getDeviceId() const {
return mDeviceId;
@@ -49,13 +49,6 @@
return mSamplesPerFrame;
}
- /**
- * This is also known as channelCount.
- */
- void setSamplesPerFrame(int32_t samplesPerFrame) {
- mSamplesPerFrame = samplesPerFrame;
- }
-
audio_format_t getFormat() const {
return mAudioFormat;
}
@@ -141,7 +134,7 @@
}
// TODO b/182392769: reexamine if Identity can be used
- void setOpPackageName(const std::optional<std::string> opPackageName) {
+ void setOpPackageName(const std::optional<std::string>& opPackageName) {
mOpPackageName = opPackageName;
}
@@ -149,10 +142,19 @@
return mAttributionTag;
}
- void setAttributionTag(const std::optional<std::string> attributionTag) {
+ void setAttributionTag(const std::optional<std::string>& attributionTag) {
mAttributionTag = attributionTag;
}
+ aaudio_channel_mask_t getChannelMask() const {
+ return mChannelMask;
+ }
+
+ void setChannelMask(aaudio_channel_mask_t channelMask) {
+ mChannelMask = channelMask;
+ mSamplesPerFrame = AAudioConvert_channelMaskToCount(channelMask);
+ }
+
/**
* @return bytes per frame of getFormat()
*/
@@ -171,6 +173,8 @@
void dump() const;
private:
+ aaudio_result_t validateChannelMask() const;
+
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
@@ -186,6 +190,7 @@
bool mIsPrivacySensitive = false;
std::optional<std::string> mOpPackageName = {};
std::optional<std::string> mAttributionTag = {};
+ aaudio_channel_mask_t mChannelMask = AAUDIO_UNSPECIFIED;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/core/AudioGlobal.h b/media/libaaudio/src/core/AudioGlobal.h
index 1e88d15..6c22744 100644
--- a/media/libaaudio/src/core/AudioGlobal.h
+++ b/media/libaaudio/src/core/AudioGlobal.h
@@ -31,7 +31,8 @@
const char* AudioGlobal_convertResultToText(aaudio_result_t returnCode);
const char* AudioGlobal_convertSharingModeToText(aaudio_sharing_mode_t mode);
const char* AudioGlobal_convertStreamStateToText(aaudio_stream_state_t state);
-}
+
+} // namespace aaudio
#endif // AAUDIO_AUDIOGLOBAL_H
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 09d9535..ffc3b9d 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -76,6 +76,7 @@
// Copy parameters from the Builder because the Builder may be deleted after this call.
// TODO AudioStream should be a subclass of AudioStreamParameters
mSamplesPerFrame = builder.getSamplesPerFrame();
+ mChannelMask = builder.getChannelMask();
mSampleRate = builder.getSampleRate();
mDeviceId = builder.getDeviceId();
mFormat = builder.getFormat();
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 9835c8c..8bb9b19 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -270,7 +270,8 @@
}
/**
- * This is only valid after setSamplesPerFrame() and setFormat() have been called.
+ * This is only valid after setChannelMask() and setFormat()
+ * have been called.
*/
int32_t getBytesPerFrame() const {
return mSamplesPerFrame * getBytesPerSample();
@@ -284,7 +285,7 @@
}
/**
- * This is only valid after setSamplesPerFrame() and setDeviceFormat() have been called.
+ * This is only valid after setChannelMask() and setDeviceFormat() have been called.
*/
int32_t getBytesPerDeviceFrame() const {
return getSamplesPerFrame() * audio_bytes_per_sample(getDeviceFormat());
@@ -318,6 +319,15 @@
return mFramesPerDataCallback;
}
+ aaudio_channel_mask_t getChannelMask() const {
+ return mChannelMask;
+ }
+
+ void setChannelMask(aaudio_channel_mask_t channelMask) {
+ mChannelMask = channelMask;
+ mSamplesPerFrame = AAudioConvert_channelMaskToCount(channelMask);
+ }
+
/**
* @return true if data callback has been specified
*/
@@ -429,7 +439,7 @@
// PlayerBase allows the system to control the stream volume.
class MyPlayerBase : public android::PlayerBase {
public:
- MyPlayerBase() {};
+ MyPlayerBase() = default;
virtual ~MyPlayerBase() = default;
@@ -495,11 +505,6 @@
}
// This should not be called after the open() call.
- void setSamplesPerFrame(int32_t samplesPerFrame) {
- mSamplesPerFrame = samplesPerFrame;
- }
-
- // This should not be called after the open() call.
void setFramesPerBurst(int32_t framesPerBurst) {
mFramesPerBurst = framesPerBurst;
}
@@ -563,7 +568,7 @@
* @param numFrames
* @return original pointer or the conversion buffer
*/
- virtual const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+ virtual const void * maybeConvertDeviceData(const void *audioData, int32_t /*numFrames*/) {
return audioData;
}
@@ -633,6 +638,7 @@
// These do not change after open().
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+ aaudio_channel_mask_t mChannelMask = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index e015592..a8fd0d9 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -53,16 +53,10 @@
/*
* AudioStreamBuilder
*/
-AudioStreamBuilder::AudioStreamBuilder() {
-}
-
-AudioStreamBuilder::~AudioStreamBuilder() {
-}
-
static aaudio_result_t builder_createStream(aaudio_direction_t direction,
- aaudio_sharing_mode_t sharingMode,
- bool tryMMap,
- android::sp<AudioStream> &stream) {
+ aaudio_sharing_mode_t /*sharingMode*/,
+ bool tryMMap,
+ android::sp<AudioStream> &stream) {
aaudio_result_t result = AAUDIO_OK;
switch (direction) {
@@ -268,8 +262,8 @@
void AudioStreamBuilder::logParameters() const {
// This is very helpful for debugging in the future. Please leave it in.
- ALOGI("rate = %6d, channels = %d, format = %d, sharing = %s, dir = %s",
- getSampleRate(), getSamplesPerFrame(), getFormat(),
+ ALOGI("rate = %6d, channels = %d, channelMask = %#x, format = %d, sharing = %s, dir = %s",
+ getSampleRate(), getSamplesPerFrame(), getChannelMask(), getFormat(),
AAudio_convertSharingModeToShortText(getSharingMode()),
AAudio_convertDirectionToText(getDirection()));
ALOGI("device = %6d, sessionId = %d, perfMode = %d, callback: %s with frames = %d",
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index 9f93341..f91c25a 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -31,9 +31,9 @@
*/
class AudioStreamBuilder : public AAudioStreamParameters {
public:
- AudioStreamBuilder();
+ AudioStreamBuilder() = default;
- ~AudioStreamBuilder();
+ ~AudioStreamBuilder() = default;
bool isSharingModeMatchRequired() const {
return mSharingModeMatchRequired;
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index 37548f0..7b0aca1 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -38,7 +38,7 @@
class FifoBuffer {
public:
- FifoBuffer(int32_t bytesPerFrame);
+ explicit FifoBuffer(int32_t bytesPerFrame);
virtual ~FifoBuffer() = default;
@@ -162,6 +162,6 @@
uint8_t *mExternalStorage = nullptr;
};
-} // android
+} // namespace android
#endif //FIFO_FIFO_BUFFER_H
diff --git a/media/libaaudio/src/fifo/FifoController.h b/media/libaaudio/src/fifo/FifoController.h
index 057a94e..e15d444 100644
--- a/media/libaaudio/src/fifo/FifoController.h
+++ b/media/libaaudio/src/fifo/FifoController.h
@@ -36,7 +36,7 @@
, mWriteCounter(0)
{}
- virtual ~FifoController() {}
+ virtual ~FifoController() = default;
// TODO review use of memory barriers, probably incorrect
virtual fifo_counter_t getReadCounter() override {
@@ -57,6 +57,6 @@
std::atomic<fifo_counter_t> mWriteCounter;
};
-} // android
+} // namespace android
#endif //FIFO_FIFO_CONTROLLER_H
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
index 1dece0e..ad6d041 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.cpp
+++ b/media/libaaudio/src/fifo/FifoControllerBase.cpp
@@ -29,9 +29,6 @@
{
}
-FifoControllerBase::~FifoControllerBase() {
-}
-
fifo_frames_t FifoControllerBase::getFullFramesAvailable() {
fifo_frames_t temp = 0;
__builtin_sub_overflow(getWriteCounter(), getReadCounter(), &temp);
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.h b/media/libaaudio/src/fifo/FifoControllerBase.h
index 1edb8a3..2a6173b 100644
--- a/media/libaaudio/src/fifo/FifoControllerBase.h
+++ b/media/libaaudio/src/fifo/FifoControllerBase.h
@@ -43,7 +43,7 @@
*/
FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold);
- virtual ~FifoControllerBase();
+ virtual ~FifoControllerBase() = default;
// Abstract methods to be implemented in subclasses.
/**
@@ -123,6 +123,6 @@
fifo_frames_t mThreshold;
};
-} // android
+} // namespace android
#endif // FIFO_FIFO_CONTROLLER_BASE_H
diff --git a/media/libaaudio/src/fifo/FifoControllerIndirect.h b/media/libaaudio/src/fifo/FifoControllerIndirect.h
index ec48e57..a59225a 100644
--- a/media/libaaudio/src/fifo/FifoControllerIndirect.h
+++ b/media/libaaudio/src/fifo/FifoControllerIndirect.h
@@ -44,7 +44,7 @@
setReadCounter(0);
setWriteCounter(0);
}
- virtual ~FifoControllerIndirect() {};
+ virtual ~FifoControllerIndirect() = default;
// TODO review use of memory barriers, probably incorrect
virtual fifo_counter_t getReadCounter() override {
@@ -68,6 +68,6 @@
std::atomic<fifo_counter_t> * mWriteCounterAddress;
};
-} // android
+} // namespace android
#endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp b/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
index 5667fdb..d8ffd00 100644
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
+++ b/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
@@ -34,8 +34,7 @@
int32_t samplesPerFrame,
int32_t framesPerBlock)
: AudioPort(parent, samplesPerFrame)
- , mFramesPerBlock(framesPerBlock)
- , mSampleBlock(NULL) {
+ , mFramesPerBlock(framesPerBlock) {
int32_t numFloats = framesPerBlock * getSamplesPerFrame();
mSampleBlock = new float[numFloats]{0.0f};
}
@@ -61,13 +60,13 @@
/***************************************************************************/
int32_t AudioFloatInputPort::pullData(int64_t framePosition, int32_t numFrames) {
- return (mConnected == NULL)
+ return (mConnected == nullptr)
? std::min(getFramesPerBlock(), numFrames)
: mConnected->pullData(framePosition, numFrames);
}
float *AudioFloatInputPort::getBlock() {
- if (mConnected == NULL) {
+ if (mConnected == nullptr) {
return AudioFloatBlockPort::getBlock(); // loaded using setValue()
} else {
return mConnected->getBlock();
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
index 78aad52..c6fcac6 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
@@ -26,8 +26,6 @@
, output(*this, channelCount) {
}
-MonoToMultiConverter::~MonoToMultiConverter() { }
-
int32_t MonoToMultiConverter::onProcess(int64_t framePosition, int32_t numFrames) {
int32_t framesToProcess = input.pullData(framePosition, numFrames);
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
index 34d53c7..5058ae0 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
@@ -29,7 +29,7 @@
public:
explicit MonoToMultiConverter(int32_t channelCount);
- virtual ~MonoToMultiConverter();
+ virtual ~MonoToMultiConverter() = default;
int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
diff --git a/media/libaaudio/src/flowgraph/RampLinear.cpp b/media/libaaudio/src/flowgraph/RampLinear.cpp
index a260828..0cc32e5 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.cpp
+++ b/media/libaaudio/src/flowgraph/RampLinear.cpp
@@ -37,6 +37,10 @@
void RampLinear::setTarget(float target) {
mTarget.store(target);
+ // If the ramp has not been used then start immediately at this level.
+ if (mLastFramePosition < 0) {
+ forceCurrent(target);
+ }
}
float RampLinear::interpolateCurrent() {
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.cpp b/media/libaaudio/src/flowgraph/SourceFloat.cpp
index 4bb674f..5b3a51e 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SourceFloat.cpp
@@ -25,7 +25,7 @@
: AudioSource(channelCount) {
}
-int32_t SourceFloat::onProcess(int64_t framePosition, int32_t numFrames) {
+int32_t SourceFloat::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
float *outputBuffer = output.getBlock();
int32_t channelCount = output.getSamplesPerFrame();
diff --git a/media/libaaudio/src/flowgraph/SourceI16.cpp b/media/libaaudio/src/flowgraph/SourceI16.cpp
index c3fcec2..a645cc2 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI16.cpp
@@ -30,7 +30,7 @@
: AudioSource(channelCount) {
}
-int32_t SourceI16::onProcess(int64_t framePosition, int32_t numFrames) {
+int32_t SourceI16::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
float *floatData = output.getBlock();
int32_t channelCount = output.getSamplesPerFrame();
diff --git a/media/libaaudio/src/flowgraph/SourceI24.cpp b/media/libaaudio/src/flowgraph/SourceI24.cpp
index 097954e..50fb98e 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI24.cpp
@@ -32,7 +32,7 @@
: AudioSource(channelCount) {
}
-int32_t SourceI24::onProcess(int64_t framePosition, int32_t numFrames) {
+int32_t SourceI24::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
float *floatData = output.getBlock();
int32_t channelCount = output.getSamplesPerFrame();
diff --git a/media/libaaudio/src/flowgraph/SourceI32.cpp b/media/libaaudio/src/flowgraph/SourceI32.cpp
index e8177ad..95bfd8f 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI32.cpp
@@ -30,7 +30,7 @@
: AudioSource(channelCount) {
}
-int32_t SourceI32::onProcess(int64_t framePosition, int32_t numFrames) {
+int32_t SourceI32::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
float *floatData = output.getBlock();
int32_t channelCount = output.getSamplesPerFrame();
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index e96e134..0f24771 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -37,9 +37,6 @@
: AudioStream() {
}
-AudioStreamLegacy::~AudioStreamLegacy() {
-}
-
// Called from AudioTrack.cpp or AudioRecord.cpp
static void AudioStreamLegacy_callback(int event, void* userData, void *info) {
AudioStreamLegacy *streamLegacy = (AudioStreamLegacy *) userData;
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 88ef270..d9ba990 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -60,7 +60,7 @@
public:
AudioStreamLegacy();
- virtual ~AudioStreamLegacy();
+ virtual ~AudioStreamLegacy() = default;
aaudio_legacy_callback_t getLegacyCallback();
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index dc66742..12771cc 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -65,11 +65,8 @@
const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
// TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
- int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
- ? 2 : getSamplesPerFrame();
- audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
- audio_channel_in_mask_from_count(samplesPerFrame) :
- audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);
+ audio_channel_mask_t channelMask =
+ AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), true /*isInput*/);
size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
: builder.getBufferCapacity();
@@ -115,7 +112,7 @@
constexpr int32_t kMostLikelySampleRateForFast = 48000;
if (getFormat() == AUDIO_FORMAT_PCM_FLOAT
&& perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
- && (samplesPerFrame <= 2) // FAST only for mono and stereo
+ && (audio_channel_count_from_in_mask(channelMask) <= 2) // FAST only for mono and stereo
&& (getSampleRate() == kMostLikelySampleRateForFast
|| getSampleRate() == AAUDIO_UNSPECIFIED)) {
setDeviceFormat(AUDIO_FORMAT_PCM_16_BIT);
@@ -228,7 +225,9 @@
.set(AMEDIAMETRICS_PROP_ENCODINGCLIENT, toString(requestedFormat).c_str()).record();
// Get the actual values from the AudioRecord.
- setSamplesPerFrame(mAudioRecord->channelCount());
+ setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+ mAudioRecord->channelMask(), true /*isInput*/,
+ AAudio_isChannelIndexMask(getChannelMask())));
setSampleRate(mAudioRecord->getSampleRate());
setBufferCapacity(getBufferCapacityFromDevice());
setFramesPerBurst(getFramesPerBurstFromDevice());
@@ -505,7 +504,7 @@
return (aaudio_result_t) framesRead;
}
-aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
+aaudio_result_t AudioStreamRecord::setBufferSize(int32_t /*requestedFrames*/)
{
return getBufferSize();
}
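
For reference, a non-authoritative sketch of what AAudio_getChannelMaskForOpen() (added to AAudioUtilities.cpp later in this diff) is expected to resolve to for a few representative inputs on the record path; the checking function and the include path are assumptions for illustration:

    // Illustration only: expected resolutions of AAudio_getChannelMaskForOpen().
    #include "utility/AAudioUtilities.h"   // assumed relative include path
    #include <cassert>

    static void checkOpenMaskExamples() {
        // Unspecified falls back to stereo.
        assert(AAudio_getChannelMaskForOpen(AAUDIO_UNSPECIFIED, 0, true /*isInput*/)
               == AUDIO_CHANNEL_IN_STEREO);
        // A 2-channel index mask is opened with a position mask.
        assert(AAudio_getChannelMaskForOpen(AAUDIO_CHANNEL_INDEX_MASK_2, 2, true /*isInput*/)
               == AUDIO_CHANNEL_IN_STEREO);
        // A named layout converts to the matching Android layout mask.
        assert(AAudio_getChannelMaskForOpen(AAUDIO_CHANNEL_5POINT1, 6, false /*isInput*/)
               == AUDIO_CHANNEL_OUT_5POINT1);
    }
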
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 1d412c0..118c004 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -66,13 +66,8 @@
const aaudio_session_id_t requestedSessionId = builder.getSessionId();
const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
- // Try to create an AudioTrack
- // Use stereo if unspecified.
- int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
- ? 2 : getSamplesPerFrame();
- audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
- audio_channel_out_mask_from_count(samplesPerFrame) :
- audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);
+ audio_channel_mask_t channelMask =
+ AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), false /*isInput*/);
audio_output_flags_t flags;
aaudio_performance_mode_t perfMode = getPerformanceMode();
@@ -162,11 +157,11 @@
callback,
callbackData,
notificationFrames,
- 0, // DEFAULT sharedBuffer*/,
+ nullptr, // DEFAULT sharedBuffer*/,
false, // DEFAULT threadCanCallJava
sessionId,
streamTransferType,
- NULL, // DEFAULT audio_offload_info_t
+ nullptr, // DEFAULT audio_offload_info_t
AttributionSourceState(), // DEFAULT uid and pid
&attributes,
// WARNING - If doNotReconnect set true then audio stops after plugging and unplugging
@@ -199,7 +194,9 @@
doSetVolume();
// Get the actual values from the AudioTrack.
- setSamplesPerFrame(mAudioTrack->channelCount());
+ setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+ mAudioTrack->channelMask(), false /*isInput*/,
+ AAudio_isChannelIndexMask(getChannelMask())));
setFormat(mAudioTrack->format());
setDeviceFormat(mAudioTrack->format());
setSampleRate(mAudioTrack->getSampleRate());
diff --git a/media/libaaudio/src/libaaudio.map.txt b/media/libaaudio/src/libaaudio.map.txt
index 1dd44d1..8fa9e38 100644
--- a/media/libaaudio/src/libaaudio.map.txt
+++ b/media/libaaudio/src/libaaudio.map.txt
@@ -25,6 +25,7 @@
AAudioStreamBuilder_setPrivacySensitive; # introduced=30
AAudioStreamBuilder_setPackageName; # introduced=31
AAudioStreamBuilder_setAttributionTag; # introduced=31
+ AAudioStreamBuilder_setChannelMask; # introduced=32
AAudioStreamBuilder_openStream;
AAudioStreamBuilder_delete;
AAudioStream_close;
@@ -61,6 +62,7 @@
AAudioStream_isMMapUsed;
AAudioStream_isPrivacySensitive; # introduced=30
AAudioStream_release; # introduced=30
+ AAudioStream_getChannelMask; # introduced=32
local:
*;
};
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index d795725..d829934 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -256,6 +256,248 @@
return privacySensitive ? AUDIO_FLAG_CAPTURE_PRIVATE : AUDIO_FLAG_NONE;
}
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelLayoutMask(
+ aaudio_channel_mask_t channelMask, bool isInput) {
+ if (isInput) {
+ switch (channelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ return AUDIO_CHANNEL_IN_MONO;
+ case AAUDIO_CHANNEL_STEREO:
+ return AUDIO_CHANNEL_IN_STEREO;
+ case AAUDIO_CHANNEL_FRONT_BACK:
+ return AUDIO_CHANNEL_IN_FRONT_BACK;
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ return AUDIO_CHANNEL_IN_2POINT0POINT2;
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ return AUDIO_CHANNEL_IN_2POINT1POINT2;
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ return AUDIO_CHANNEL_IN_3POINT0POINT2;
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ return AUDIO_CHANNEL_IN_3POINT1POINT2;
+ case AAUDIO_CHANNEL_5POINT1:
+ return AUDIO_CHANNEL_IN_5POINT1;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AUDIO_CHANNEL_INVALID;
+ }
+ } else {
+ switch (channelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ return AUDIO_CHANNEL_OUT_MONO;
+ case AAUDIO_CHANNEL_STEREO:
+ return AUDIO_CHANNEL_OUT_STEREO;
+ case AAUDIO_CHANNEL_2POINT1:
+ return AUDIO_CHANNEL_OUT_2POINT1;
+ case AAUDIO_CHANNEL_TRI:
+ return AUDIO_CHANNEL_OUT_TRI;
+ case AAUDIO_CHANNEL_TRI_BACK:
+ return AUDIO_CHANNEL_OUT_TRI_BACK;
+ case AAUDIO_CHANNEL_3POINT1:
+ return AUDIO_CHANNEL_OUT_3POINT1;
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ return AUDIO_CHANNEL_OUT_2POINT0POINT2;
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_2POINT1POINT2;
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ return AUDIO_CHANNEL_OUT_3POINT0POINT2;
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_3POINT1POINT2;
+ case AAUDIO_CHANNEL_QUAD:
+ return AUDIO_CHANNEL_OUT_QUAD;
+ case AAUDIO_CHANNEL_QUAD_SIDE:
+ return AUDIO_CHANNEL_OUT_QUAD_SIDE;
+ case AAUDIO_CHANNEL_SURROUND:
+ return AUDIO_CHANNEL_OUT_SURROUND;
+ case AAUDIO_CHANNEL_PENTA:
+ return AUDIO_CHANNEL_OUT_PENTA;
+ case AAUDIO_CHANNEL_5POINT1:
+ return AUDIO_CHANNEL_OUT_5POINT1;
+ case AAUDIO_CHANNEL_5POINT1_SIDE:
+ return AUDIO_CHANNEL_OUT_5POINT1_SIDE;
+ case AAUDIO_CHANNEL_5POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_5POINT1POINT2;
+ case AAUDIO_CHANNEL_5POINT1POINT4:
+ return AUDIO_CHANNEL_OUT_5POINT1POINT4;
+ case AAUDIO_CHANNEL_6POINT1:
+ return AUDIO_CHANNEL_OUT_6POINT1;
+ case AAUDIO_CHANNEL_7POINT1:
+ return AUDIO_CHANNEL_OUT_7POINT1;
+ case AAUDIO_CHANNEL_7POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_7POINT1POINT2;
+ case AAUDIO_CHANNEL_7POINT1POINT4:
+ return AUDIO_CHANNEL_OUT_7POINT1POINT4;
+ // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
+ // case AAUDIO_CHANNEL_9POINT1POINT4:
+ // return AUDIO_CHANNEL_OUT_9POINT1POINT4;
+ // case AAUDIO_CHANNEL_9POINT1POINT6:
+ // return AUDIO_CHANNEL_OUT_9POINT1POINT6;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AUDIO_CHANNEL_INVALID;
+ }
+ }
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelLayoutMask(
+ audio_channel_mask_t channelMask, bool isInput) {
+ if (isInput) {
+ switch (channelMask) {
+ case AUDIO_CHANNEL_IN_MONO:
+ return AAUDIO_CHANNEL_MONO;
+ case AUDIO_CHANNEL_IN_STEREO:
+ return AAUDIO_CHANNEL_STEREO;
+ case AUDIO_CHANNEL_IN_FRONT_BACK:
+ return AAUDIO_CHANNEL_FRONT_BACK;
+ case AUDIO_CHANNEL_IN_2POINT0POINT2:
+ return AAUDIO_CHANNEL_2POINT0POINT2;
+ case AUDIO_CHANNEL_IN_2POINT1POINT2:
+ return AAUDIO_CHANNEL_2POINT1POINT2;
+ case AUDIO_CHANNEL_IN_3POINT0POINT2:
+ return AAUDIO_CHANNEL_3POINT0POINT2;
+ case AUDIO_CHANNEL_IN_3POINT1POINT2:
+ return AAUDIO_CHANNEL_3POINT1POINT2;
+ case AUDIO_CHANNEL_IN_5POINT1:
+ return AAUDIO_CHANNEL_5POINT1;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AAUDIO_CHANNEL_INVALID;
+ }
+ } else {
+ switch (channelMask) {
+ case AUDIO_CHANNEL_OUT_MONO:
+ return AAUDIO_CHANNEL_MONO;
+ case AUDIO_CHANNEL_OUT_STEREO:
+ return AAUDIO_CHANNEL_STEREO;
+ case AUDIO_CHANNEL_OUT_2POINT1:
+ return AAUDIO_CHANNEL_2POINT1;
+ case AUDIO_CHANNEL_OUT_TRI:
+ return AAUDIO_CHANNEL_TRI;
+ case AUDIO_CHANNEL_OUT_TRI_BACK:
+ return AAUDIO_CHANNEL_TRI_BACK;
+ case AUDIO_CHANNEL_OUT_3POINT1:
+ return AAUDIO_CHANNEL_3POINT1;
+ case AUDIO_CHANNEL_OUT_2POINT0POINT2:
+ return AAUDIO_CHANNEL_2POINT0POINT2;
+ case AUDIO_CHANNEL_OUT_2POINT1POINT2:
+ return AAUDIO_CHANNEL_2POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_3POINT0POINT2:
+ return AAUDIO_CHANNEL_3POINT0POINT2;
+ case AUDIO_CHANNEL_OUT_3POINT1POINT2:
+ return AAUDIO_CHANNEL_3POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_QUAD:
+ return AAUDIO_CHANNEL_QUAD;
+ case AUDIO_CHANNEL_OUT_QUAD_SIDE:
+ return AAUDIO_CHANNEL_QUAD_SIDE;
+ case AUDIO_CHANNEL_OUT_SURROUND:
+ return AAUDIO_CHANNEL_SURROUND;
+ case AUDIO_CHANNEL_OUT_PENTA:
+ return AAUDIO_CHANNEL_PENTA;
+ case AUDIO_CHANNEL_OUT_5POINT1:
+ return AAUDIO_CHANNEL_5POINT1;
+ case AUDIO_CHANNEL_OUT_5POINT1_SIDE:
+ return AAUDIO_CHANNEL_5POINT1_SIDE;
+ case AUDIO_CHANNEL_OUT_5POINT1POINT2:
+ return AAUDIO_CHANNEL_5POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_5POINT1POINT4:
+ return AAUDIO_CHANNEL_5POINT1POINT4;
+ case AUDIO_CHANNEL_OUT_6POINT1:
+ return AAUDIO_CHANNEL_6POINT1;
+ case AUDIO_CHANNEL_OUT_7POINT1:
+ return AAUDIO_CHANNEL_7POINT1;
+ case AUDIO_CHANNEL_OUT_7POINT1POINT2:
+ return AAUDIO_CHANNEL_7POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_7POINT1POINT4:
+ return AAUDIO_CHANNEL_7POINT1POINT4;
+ // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
+ // case AUDIO_CHANNEL_OUT_9POINT1POINT4:
+ // return AAUDIO_CHANNEL_9POINT1POINT4;
+ // case AUDIO_CHANNEL_OUT_9POINT1POINT6:
+ // return AAUDIO_CHANNEL_9POINT1POINT6;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AAUDIO_CHANNEL_INVALID;
+ }
+ }
+}
+
+int32_t AAudioConvert_channelMaskToCount(aaudio_channel_mask_t channelMask) {
+ return __builtin_popcount(channelMask & ~AAUDIO_CHANNEL_BIT_INDEX);
+}
+
+aaudio_channel_mask_t AAudioConvert_channelCountToMask(int32_t channelCount) {
+ if (channelCount < 0 || channelCount > AUDIO_CHANNEL_COUNT_MAX) {
+ return AAUDIO_CHANNEL_INVALID;
+ }
+
+ if (channelCount == 0) {
+ return AAUDIO_UNSPECIFIED;
+ }
+
+ // Return an index mask containing the lowest `channelCount` bits.
+ return AAUDIO_CHANNEL_BIT_INDEX | ((1 << channelCount) - 1);
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelIndexMask(
+ audio_channel_mask_t channelMask) {
+ if (audio_channel_mask_get_representation(channelMask) != AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ ALOGE("%s() %#x not an index mask", __func__, channelMask);
+ return AAUDIO_CHANNEL_INVALID;
+ }
+ return (channelMask & ~AUDIO_CHANNEL_INDEX_HDR) | AAUDIO_CHANNEL_BIT_INDEX;
+}
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelIndexMask(
+ aaudio_channel_mask_t channelMask) {
+ if (!AAudio_isChannelIndexMask(channelMask)) {
+ ALOGE("%s() %#x not an index mask", __func__, channelMask);
+ return AUDIO_CHANNEL_INVALID;
+ }
+ return audio_channel_mask_for_index_assignment_from_count(
+ AAudioConvert_channelMaskToCount(channelMask));
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelMask(
+ audio_channel_mask_t channelMask, bool isInput, bool indexMaskRequired) {
+ if (audio_channel_mask_get_representation(channelMask) == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ return AAudioConvert_androidToAAudioChannelIndexMask(channelMask);
+ }
+ if (indexMaskRequired) {
+ // An index mask is required, but `channelMask` here is a position mask.
+ const int channelCount = isInput ? audio_channel_count_from_in_mask(channelMask)
+ : audio_channel_count_from_out_mask(channelMask);
+ return AAudioConvert_channelCountToMask(channelCount);
+ }
+ return AAudioConvert_androidToAAudioChannelLayoutMask(channelMask, isInput);
+}
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelMask(
+ aaudio_channel_mask_t channelMask, bool isInput) {
+ return AAudio_isChannelIndexMask(channelMask)
+ ? AAudioConvert_aaudioToAndroidChannelIndexMask(channelMask)
+ : AAudioConvert_aaudioToAndroidChannelLayoutMask(channelMask, isInput);
+}
+
+bool AAudio_isChannelIndexMask(aaudio_channel_mask_t channelMask) {
+ return (channelMask & AAUDIO_CHANNEL_BIT_INDEX) == AAUDIO_CHANNEL_BIT_INDEX;
+}
+
+audio_channel_mask_t AAudio_getChannelMaskForOpen(
+ aaudio_channel_mask_t channelMask, int32_t samplesPerFrame, bool isInput) {
+ if (channelMask != AAUDIO_UNSPECIFIED) {
+ if (AAudio_isChannelIndexMask(channelMask) && samplesPerFrame <= 2) {
+ // When the mask is an index mask and the count is less than 3, use a position
+ // mask instead of an index mask when opening the stream. This may need to be
+ // revisited when channel index masks are made public.
+ return isInput ? audio_channel_in_mask_from_count(samplesPerFrame)
+ : audio_channel_out_mask_from_count(samplesPerFrame);
+ }
+ return AAudioConvert_aaudioToAndroidChannelMask(channelMask, isInput);
+ }
+
+ // Return stereo when unspecified.
+ return isInput ? AUDIO_CHANNEL_IN_STEREO : AUDIO_CHANNEL_OUT_STEREO;
+}
+
int32_t AAudioConvert_framesToBytes(int32_t numFrames,
int32_t bytesPerFrame,
int32_t *sizeInBytes) {
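
A quick sanity sketch of the count/mask round trip implemented above; the assertions follow directly from the definitions in this change, and the checking function is hypothetical rather than an actual test in the tree:

    // Illustration only: round-tripping a channel count through an index mask.
    #include "utility/AAudioUtilities.h"   // assumed relative include path
    #include <cassert>

    static void checkChannelCountRoundTrip() {
        const aaudio_channel_mask_t mask = AAudioConvert_channelCountToMask(6);
        // 6 channels -> AAUDIO_CHANNEL_BIT_INDEX | 0b111111
        assert(AAudio_isChannelIndexMask(mask));
        assert(AAudioConvert_channelMaskToCount(mask) == 6);
        // 0 maps to AAUDIO_UNSPECIFIED; out-of-range counts map to AAUDIO_CHANNEL_INVALID.
        assert(AAudioConvert_channelCountToMask(0) == AAUDIO_UNSPECIFIED);
        assert(AAudioConvert_channelCountToMask(-1) == AAUDIO_CHANNEL_INVALID);
    }
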
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 82eb77d..52e57da 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -96,6 +96,33 @@
audio_flags_mask_t AAudioConvert_privacySensitiveToAudioFlagsMask(
bool privacySensitive);
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelLayoutMask(
+ aaudio_channel_mask_t channelMask, bool isInput);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelLayoutMask(
+ audio_channel_mask_t channelMask, bool isInput);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelIndexMask(
+ audio_channel_mask_t channelMask);
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelIndexMask(
+ aaudio_channel_mask_t channelMask);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelMask(
+ audio_channel_mask_t channelMask, bool isInput, bool indexMaskRequired);
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelMask(
+ aaudio_channel_mask_t channelMask, bool isInput);
+
+bool AAudio_isChannelIndexMask(aaudio_channel_mask_t channelMask);
+
+int32_t AAudioConvert_channelMaskToCount(aaudio_channel_mask_t channelMask);
+
+aaudio_channel_mask_t AAudioConvert_channelCountToMask(int32_t channelCount);
+
+audio_channel_mask_t AAudio_getChannelMaskForOpen(
+ aaudio_channel_mask_t channelMask, int32_t samplesPerFrame, bool isInput);
+
// Note that this code may be replaced by Settings or by some other system configuration tool.
/**
@@ -198,7 +225,7 @@
* @return true if f() eventually returns true.
*/
static inline bool AAudio_tryUntilTrue(
- std::function<bool()> f, int times, int sleepMs) {
+ const std::function<bool()>& f, int times, int sleepMs) {
static const useconds_t US_PER_MS = 1000;
sleepMs = std::max(sleepMs, 0);
@@ -270,9 +297,7 @@
class Timestamp {
public:
- Timestamp()
- : mPosition(0)
- , mNanoseconds(0) {}
+ Timestamp() = default;
Timestamp(int64_t position, int64_t nanoseconds)
: mPosition(position)
, mNanoseconds(nanoseconds) {}
@@ -283,8 +308,8 @@
private:
// These cannot be const because we need to implement the copy assignment operator.
- int64_t mPosition;
- int64_t mNanoseconds;
+ int64_t mPosition{0};
+ int64_t mNanoseconds{0};
};
@@ -318,4 +343,36 @@
std::atomic<int> mRequested{0};
std::atomic<int> mAcknowledged{0};
};
+
+enum {
+ /**
+ * Audio channel index mask, only used internally.
+ */
+ AAUDIO_CHANNEL_BIT_INDEX = 0x80000000,
+ AAUDIO_CHANNEL_INDEX_MASK_1 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 1) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_2 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 2) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_3 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 3) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_4 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 4) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_5 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 5) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_6 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 6) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_7 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 7) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_8 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 8) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_9 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 9) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_10 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 10) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_11 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 11) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_12 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 12) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_13 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 13) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_14 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 14) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_15 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 15) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_16 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 16) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_17 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 17) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_18 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 18) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_19 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 19) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_20 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 20) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_21 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 21) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_22 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 22) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_23 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 23) - 1,
+ AAUDIO_CHANNEL_INDEX_MASK_24 = AAUDIO_CHANNEL_BIT_INDEX | (1 << 24) - 1,
+};
+
#endif //UTILITY_AAUDIO_UTILITIES_H
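
Since binary `-` binds more tightly than `|`, the enum definitions above expand as intended without extra parentheses; a compile-time illustration of that expansion (not part of the change):

    // Illustration only: INDEX_MASK_3 expands to the flag bit plus the three low bits.
    static_assert((0x80000000u | (1 << 3) - 1) == 0x80000007u,
                  "index mask is the index flag OR'd with a contiguous run of low bits");
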
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.h b/media/libaaudio/src/utility/FixedBlockAdapter.h
index 4dc7e68..290e473 100644
--- a/media/libaaudio/src/utility/FixedBlockAdapter.h
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.h
@@ -35,7 +35,7 @@
class FixedBlockAdapter
{
public:
- FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
+ explicit FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
: mFixedBlockProcessor(fixedBlockProcessor) {}
virtual ~FixedBlockAdapter() = default;
diff --git a/media/libaaudio/src/utility/FixedBlockReader.h b/media/libaaudio/src/utility/FixedBlockReader.h
index 128dd52..dc82416 100644
--- a/media/libaaudio/src/utility/FixedBlockReader.h
+++ b/media/libaaudio/src/utility/FixedBlockReader.h
@@ -30,7 +30,7 @@
class FixedBlockReader : public FixedBlockAdapter
{
public:
- FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
+ explicit FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
virtual ~FixedBlockReader() = default;
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.h b/media/libaaudio/src/utility/FixedBlockWriter.h
index f1d917c..3e89b5d 100644
--- a/media/libaaudio/src/utility/FixedBlockWriter.h
+++ b/media/libaaudio/src/utility/FixedBlockWriter.h
@@ -28,7 +28,7 @@
class FixedBlockWriter : public FixedBlockAdapter
{
public:
- FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
+ explicit FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
virtual ~FixedBlockWriter() = default;
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 63add4e..313ccbd 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -30,8 +30,8 @@
class MonotonicCounter {
public:
- MonotonicCounter() {};
- virtual ~MonotonicCounter() {};
+ MonotonicCounter() = default;
+ virtual ~MonotonicCounter() = default;
/**
* @return current value of the counter
diff --git a/media/libaaudio/tests/test_flowgraph.cpp b/media/libaaudio/tests/test_flowgraph.cpp
index d563a7e..611cbf7 100644
--- a/media/libaaudio/tests/test_flowgraph.cpp
+++ b/media/libaaudio/tests/test_flowgraph.cpp
@@ -76,31 +76,40 @@
}
TEST(test_flowgraph, module_ramp_linear) {
+ constexpr int singleNumOutput = 1;
constexpr int rampSize = 5;
constexpr int numOutput = 100;
constexpr float value = 1.0f;
- constexpr float target = 100.0f;
+ constexpr float initialTarget = 10.0f;
+ constexpr float finalTarget = 100.0f;
+ constexpr float tolerance = 0.0001f; // arbitrary
float output[numOutput] = {};
RampLinear rampLinear{1};
SinkFloat sinkFloat{1};
rampLinear.input.setValue(value);
rampLinear.setLengthInFrames(rampSize);
- rampLinear.setTarget(target);
- rampLinear.forceCurrent(0.0f);
-
rampLinear.output.connect(&sinkFloat.input);
+ // Check that the values go to the initial target instantly.
+ rampLinear.setTarget(initialTarget);
+ int32_t singleNumRead = sinkFloat.read(output, singleNumOutput);
+ ASSERT_EQ(singleNumRead, singleNumOutput);
+ EXPECT_NEAR(value * initialTarget, output[0], tolerance);
+
+ // Now set target and check that the linear ramp works as expected.
+ rampLinear.setTarget(finalTarget);
int32_t numRead = sinkFloat.read(output, numOutput);
+ const float incrementSize = (finalTarget - initialTarget) / rampSize;
ASSERT_EQ(numOutput, numRead);
- constexpr float tolerance = 0.0001f; // arbitrary
+
int i = 0;
for (; i < rampSize; i++) {
- float expected = i * value * target / rampSize;
+ float expected = value * (initialTarget + i * incrementSize);
EXPECT_NEAR(expected, output[i], tolerance);
}
for (; i < numOutput; i++) {
- float expected = value * target;
+ float expected = value * finalTarget;
EXPECT_NEAR(expected, output[i], tolerance);
}
}
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 321e7f9..4c155fe 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -14,6 +14,11 @@
* limitations under the License.
*/
+#include <algorithm>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
#define LOG_TAG "AidlConversion"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -21,6 +26,7 @@
#include "media/AidlConversion.h"
#include <media/ShmemCompat.h>
+#include <media/stagefright/foundation/MediaDefs.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
// Utilities
@@ -273,21 +279,7 @@
enumToMask_index<int32_t, media::AudioPortConfigType>);
}
-ConversionResult<audio_channel_mask_t> aidl2legacy_int32_t_audio_channel_mask_t(int32_t aidl) {
- // TODO(ytai): should we convert bit-by-bit?
- // One problem here is that the representation is both opaque and is different based on the
- // context (input vs. output). Can determine based on type and role, as per useInChannelMask().
- return convertReinterpret<audio_channel_mask_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_channel_mask_t_int32_t(audio_channel_mask_t legacy) {
- // TODO(ytai): should we convert bit-by-bit?
- // One problem here is that the representation is both opaque and is different based on the
- // context (input vs. output). Can determine based on type and role, as per useInChannelMask().
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_io_config_event> aidl2legacy_AudioIoConfigEvent_audio_io_config_event(
+ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
media::AudioIoConfigEvent aidl) {
switch (aidl) {
case media::AudioIoConfigEvent::OUTPUT_REGISTERED:
@@ -312,8 +304,8 @@
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
- audio_io_config_event legacy) {
+ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
+ audio_io_config_event_t legacy) {
switch (legacy) {
case AUDIO_OUTPUT_REGISTERED:
return media::AudioIoConfigEvent::OUTPUT_REGISTERED;
@@ -393,18 +385,836 @@
return unexpected(BAD_VALUE);
}
-ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
- media::audio::common::AudioFormat aidl) {
- // This relies on AudioFormat being kept in sync with audio_format_t.
- static_assert(sizeof(media::audio::common::AudioFormat) == sizeof(audio_format_t));
- return static_cast<audio_format_t>(aidl);
+namespace {
+
+namespace detail {
+using AudioChannelPair = std::pair<audio_channel_mask_t, media::AudioChannelLayout>;
+using AudioChannelPairs = std::vector<AudioChannelPair>;
+using AudioDevicePair = std::pair<audio_devices_t, media::AudioDeviceDescription>;
+using AudioDevicePairs = std::vector<AudioDevicePair>;
+using AudioFormatPair = std::pair<audio_format_t, media::AudioFormatDescription>;
+using AudioFormatPairs = std::vector<AudioFormatPair>;
}
-ConversionResult<media::audio::common::AudioFormat> legacy2aidl_audio_format_t_AudioFormat(
+const detail::AudioChannelPairs& getIndexAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_INDEX_MASK(n) \
+ { \
+ AUDIO_CHANNEL_INDEX_MASK_##n, \
+ media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::indexMask>( \
+ media::AudioChannelLayout::INDEX_MASK_##n) \
+ }
+
+ DEFINE_INDEX_MASK(1),
+ DEFINE_INDEX_MASK(2),
+ DEFINE_INDEX_MASK(3),
+ DEFINE_INDEX_MASK(4),
+ DEFINE_INDEX_MASK(5),
+ DEFINE_INDEX_MASK(6),
+ DEFINE_INDEX_MASK(7),
+ DEFINE_INDEX_MASK(8),
+ DEFINE_INDEX_MASK(9),
+ DEFINE_INDEX_MASK(10),
+ DEFINE_INDEX_MASK(11),
+ DEFINE_INDEX_MASK(12),
+ DEFINE_INDEX_MASK(13),
+ DEFINE_INDEX_MASK(14),
+ DEFINE_INDEX_MASK(15),
+ DEFINE_INDEX_MASK(16),
+ DEFINE_INDEX_MASK(17),
+ DEFINE_INDEX_MASK(18),
+ DEFINE_INDEX_MASK(19),
+ DEFINE_INDEX_MASK(20),
+ DEFINE_INDEX_MASK(21),
+ DEFINE_INDEX_MASK(22),
+ DEFINE_INDEX_MASK(23),
+ DEFINE_INDEX_MASK(24)
+#undef DEFINE_INDEX_MASK
+ };
+ return pairs;
+}
+
+const detail::AudioChannelPairs& getInAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_INPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_##n, \
+ media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>( \
+ media::AudioChannelLayout::LAYOUT_##n) \
+ }
+
+ DEFINE_INPUT_LAYOUT(MONO),
+ DEFINE_INPUT_LAYOUT(STEREO),
+ DEFINE_INPUT_LAYOUT(FRONT_BACK),
+ // AUDIO_CHANNEL_IN_6 not supported
+ DEFINE_INPUT_LAYOUT(2POINT0POINT2),
+ DEFINE_INPUT_LAYOUT(2POINT1POINT2),
+ DEFINE_INPUT_LAYOUT(3POINT0POINT2),
+ DEFINE_INPUT_LAYOUT(3POINT1POINT2),
+ DEFINE_INPUT_LAYOUT(5POINT1)
+#undef DEFINE_INPUT_LAYOUT
+ };
+ return pairs;
+}
+
+const detail::AudioChannelPairs& getOutAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_OUTPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_OUT_##n, \
+ media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>( \
+ media::AudioChannelLayout::LAYOUT_##n) \
+ }
+
+ DEFINE_OUTPUT_LAYOUT(MONO),
+ DEFINE_OUTPUT_LAYOUT(STEREO),
+ DEFINE_OUTPUT_LAYOUT(2POINT1),
+ DEFINE_OUTPUT_LAYOUT(TRI),
+ DEFINE_OUTPUT_LAYOUT(TRI_BACK),
+ DEFINE_OUTPUT_LAYOUT(3POINT1),
+ DEFINE_OUTPUT_LAYOUT(2POINT0POINT2),
+ DEFINE_OUTPUT_LAYOUT(2POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(3POINT0POINT2),
+ DEFINE_OUTPUT_LAYOUT(3POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(QUAD),
+ DEFINE_OUTPUT_LAYOUT(QUAD_SIDE),
+ DEFINE_OUTPUT_LAYOUT(SURROUND),
+ DEFINE_OUTPUT_LAYOUT(PENTA),
+ DEFINE_OUTPUT_LAYOUT(5POINT1),
+ DEFINE_OUTPUT_LAYOUT(5POINT1_SIDE),
+ DEFINE_OUTPUT_LAYOUT(5POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(5POINT1POINT4),
+ DEFINE_OUTPUT_LAYOUT(6POINT1),
+ DEFINE_OUTPUT_LAYOUT(7POINT1),
+ DEFINE_OUTPUT_LAYOUT(7POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(7POINT1POINT4),
+ DEFINE_OUTPUT_LAYOUT(13POINT_360RA),
+ DEFINE_OUTPUT_LAYOUT(22POINT2),
+ DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_A),
+ DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_A),
+ DEFINE_OUTPUT_LAYOUT(HAPTIC_AB),
+ DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_AB),
+ DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_AB)
+#undef DEFINE_OUTPUT_LAYOUT
+ };
+ return pairs;
+}
+
+const detail::AudioChannelPairs& getVoiceAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_VOICE_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_VOICE_##n, \
+ media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::voiceMask>( \
+ media::AudioChannelLayout::VOICE_##n) \
+ }
+ DEFINE_VOICE_LAYOUT(UPLINK_MONO),
+ DEFINE_VOICE_LAYOUT(DNLINK_MONO),
+ DEFINE_VOICE_LAYOUT(CALL_MONO)
+#undef DEFINE_VOICE_LAYOUT
+ };
+ return pairs;
+}
+
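
These tables are plain value pairs, so the forward and reverse conversions presumably reduce to a linear search on either element. A minimal generic sketch of that pattern follows; the function name findAidlForLegacy is hypothetical and is not the helper actually used later in this file:

    // Illustration only: generic one-direction lookup over the pair tables above.
    #include <optional>

    template <typename Pairs>
    std::optional<typename Pairs::value_type::second_type> findAidlForLegacy(
            const Pairs& pairs, const typename Pairs::value_type::first_type& legacy) {
        for (const auto& p : pairs) {
            if (p.first == legacy) return p.second;
        }
        return std::nullopt;
    }
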
+media::AudioDeviceDescription make_AudioDeviceDescription(media::AudioDeviceType type,
+ const std::string& connection = "") {
+ media::AudioDeviceDescription result;
+ result.type = type;
+ result.connection = connection;
+ return result;
+}
+
+void append_AudioDeviceDescription(detail::AudioDevicePairs& pairs,
+ audio_devices_t inputType, audio_devices_t outputType,
+ media::AudioDeviceType inType, media::AudioDeviceType outType,
+ const std::string& connection = "") {
+ pairs.push_back(std::make_pair(inputType, make_AudioDeviceDescription(inType, connection)));
+ pairs.push_back(std::make_pair(outputType, make_AudioDeviceDescription(outType, connection)));
+}
+
+const detail::AudioDevicePairs& getAudioDevicePairs() {
+ static const detail::AudioDevicePairs pairs = []() {
+ detail::AudioDevicePairs pairs = {{
+ {
+ AUDIO_DEVICE_NONE, media::AudioDeviceDescription{}
+ },
+ {
+ AUDIO_DEVICE_OUT_EARPIECE, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_SPEAKER_EARPIECE)
+ },
+ {
+ AUDIO_DEVICE_OUT_SPEAKER, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_SPEAKER)
+ },
+ {
+ AUDIO_DEVICE_OUT_WIRED_HEADPHONE, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_HEADPHONE,
+ media::AudioDeviceDescription::CONNECTION_ANALOG())
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_BT_SCO())
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_CARKIT,
+ media::AudioDeviceDescription::CONNECTION_BT_SCO())
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_HEADPHONE,
+ media::AudioDeviceDescription::CONNECTION_BT_A2DP())
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_SPEAKER,
+ media::AudioDeviceDescription::CONNECTION_BT_A2DP())
+ },
+ {
+ AUDIO_DEVICE_OUT_TELEPHONY_TX, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_TELEPHONY_TX)
+ },
+ {
+ AUDIO_DEVICE_OUT_AUX_LINE, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_LINE_AUX)
+ },
+ {
+ AUDIO_DEVICE_OUT_SPEAKER_SAFE, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_SPEAKER_SAFE)
+ },
+ {
+ AUDIO_DEVICE_OUT_HEARING_AID, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_HEARING_AID,
+ media::AudioDeviceDescription::CONNECTION_WIRELESS())
+ },
+ {
+ AUDIO_DEVICE_OUT_ECHO_CANCELLER, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_ECHO_CANCELLER)
+ },
+ {
+ AUDIO_DEVICE_OUT_BLE_SPEAKER, make_AudioDeviceDescription(
+ media::AudioDeviceType::OUT_SPEAKER,
+ media::AudioDeviceDescription::CONNECTION_BT_LE())
+ },
+ // AUDIO_DEVICE_IN_AMBIENT and IN_COMMUNICATION are removed since they were deprecated.
+ {
+ AUDIO_DEVICE_IN_BUILTIN_MIC, make_AudioDeviceDescription(
+ media::AudioDeviceType::IN_MICROPHONE)
+ },
+ {
+ AUDIO_DEVICE_IN_BACK_MIC, make_AudioDeviceDescription(
+ media::AudioDeviceType::IN_MICROPHONE_BACK)
+ },
+ {
+ AUDIO_DEVICE_IN_TELEPHONY_RX, make_AudioDeviceDescription(
+ media::AudioDeviceType::IN_TELEPHONY_RX)
+ },
+ {
+ AUDIO_DEVICE_IN_TV_TUNER, make_AudioDeviceDescription(
+ media::AudioDeviceType::IN_TV_TUNER)
+ },
+ {
+ AUDIO_DEVICE_IN_LOOPBACK, make_AudioDeviceDescription(
+ media::AudioDeviceType::IN_LOOPBACK)
+ },
+ {
+ AUDIO_DEVICE_IN_BLUETOOTH_BLE, make_AudioDeviceDescription(
+ media::AudioDeviceType::IN_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_BT_LE())
+ },
+ {
+ AUDIO_DEVICE_IN_ECHO_REFERENCE, make_AudioDeviceDescription(
+ media::AudioDeviceType::IN_ECHO_REFERENCE)
+ }
+ }};
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_DEFAULT, AUDIO_DEVICE_OUT_DEFAULT,
+ media::AudioDeviceType::IN_DEFAULT, media::AudioDeviceType::OUT_DEFAULT);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_OUT_WIRED_HEADSET,
+ media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
+ media::AudioDeviceDescription::CONNECTION_ANALOG());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
+ media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
+ media::AudioDeviceDescription::CONNECTION_BT_SCO());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_HDMI, AUDIO_DEVICE_OUT_HDMI,
+ media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_HDMI());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ media::AudioDeviceType::IN_SUBMIX, media::AudioDeviceType::OUT_SUBMIX);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,
+ media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
+ media::AudioDeviceDescription::CONNECTION_ANALOG_DOCK());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
+ media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
+ media::AudioDeviceDescription::CONNECTION_DIGITAL_DOCK());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_ACCESSORY,
+ media::AudioDeviceType::IN_ACCESSORY, media::AudioDeviceType::OUT_ACCESSORY,
+ media::AudioDeviceDescription::CONNECTION_USB());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_OUT_USB_DEVICE,
+ media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_USB());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_FM_TUNER, AUDIO_DEVICE_OUT_FM,
+ media::AudioDeviceType::IN_FM_TUNER, media::AudioDeviceType::OUT_FM);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_LINE, AUDIO_DEVICE_OUT_LINE,
+ media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_ANALOG());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_SPDIF, AUDIO_DEVICE_OUT_SPDIF,
+ media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_SPDIF());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
+ media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_BT_A2DP());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_IP, AUDIO_DEVICE_OUT_IP,
+ media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_IP_V4());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_OUT_BUS,
+ media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_BUS());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_PROXY, AUDIO_DEVICE_OUT_PROXY,
+ media::AudioDeviceType::IN_AFE_PROXY, media::AudioDeviceType::OUT_AFE_PROXY);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_OUT_USB_HEADSET,
+ media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
+ media::AudioDeviceDescription::CONNECTION_USB());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_ARC,
+ media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_HDMI_ARC());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_HDMI_EARC, AUDIO_DEVICE_OUT_HDMI_EARC,
+ media::AudioDeviceType::IN_DEVICE, media::AudioDeviceType::OUT_DEVICE,
+ media::AudioDeviceDescription::CONNECTION_HDMI_EARC());
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_OUT_BLE_HEADSET,
+ media::AudioDeviceType::IN_HEADSET, media::AudioDeviceType::OUT_HEADSET,
+ media::AudioDeviceDescription::CONNECTION_BT_LE());
+ return pairs;
+ }();
+ return pairs;
+}
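The table above is built once, on first use, by an immediately invoked lambda stored in a function-local static. A minimal sketch of the same idiom, using a hypothetical pair type rather than detail::AudioDevicePairs (all names below are illustrative, not part of this change):

    #include <string>
    #include <utility>
    #include <vector>

    using ExamplePairs = std::vector<std::pair<int, std::string>>;  // stand-in table type

    const ExamplePairs& getExamplePairs() {
        // The lambda body runs exactly once; later calls return the cached table.
        static const ExamplePairs pairs = []() {
            ExamplePairs p = {{0, "default"}, {1, "speaker"}};
            p.push_back({2, "headset"});  // entries can also be appended after the initializer list
            return p;
        }();
        return pairs;
    }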
+
+media::AudioFormatDescription make_AudioFormatDescription(media::AudioFormatType type) {
+ media::AudioFormatDescription result;
+ result.type = type;
+ return result;
+}
+
+media::AudioFormatDescription make_AudioFormatDescription(media::PcmType pcm) {
+ auto result = make_AudioFormatDescription(media::AudioFormatType::PCM);
+ result.pcm = pcm;
+ return result;
+}
+
+media::AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+ media::AudioFormatDescription result;
+ result.encoding = encoding;
+ return result;
+}
+
+media::AudioFormatDescription make_AudioFormatDescription(media::PcmType transport,
+ const std::string& encoding) {
+ auto result = make_AudioFormatDescription(encoding);
+ result.pcm = transport;
+ return result;
+}
+
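For illustration, the four overloads above compose as follows; this is a usage sketch only, with literal MIME-style strings standing in for the MEDIA_MIMETYPE_* constants used in the table below:

    // PCM-only: type == PCM, pcm carries the sample layout, encoding stays empty.
    const auto pcm16 = make_AudioFormatDescription(media::PcmType::INT_16_BIT);

    // Bitstream: only the encoding (MIME-style) string is set.
    const auto ac3 = make_AudioFormatDescription("audio/ac3");

    // Encapsulated bitstream: both the PCM transport and the encoding are set,
    // as is done for AUDIO_FORMAT_IEC61937 below.
    const auto iec61937 =
            make_AudioFormatDescription(media::PcmType::INT_16_BIT, "audio/x-iec61937");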
+const detail::AudioFormatPairs& getAudioFormatPairs() {
+ static const detail::AudioFormatPairs pairs = {{
+ {
+ AUDIO_FORMAT_INVALID,
+ make_AudioFormatDescription(media::AudioFormatType::SYS_RESERVED_INVALID)
+ },
+ {
+ AUDIO_FORMAT_DEFAULT, media::AudioFormatDescription{}
+ },
+ {
+ AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(media::PcmType::INT_16_BIT)
+ },
+ {
+ AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(media::PcmType::UINT_8_BIT)
+ },
+ {
+ AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(media::PcmType::INT_32_BIT)
+ },
+ {
+ AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(media::PcmType::FIXED_Q_8_24)
+ },
+ {
+ AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(media::PcmType::FLOAT_32_BIT)
+ },
+ {
+ AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(media::PcmType::INT_24_BIT)
+ },
+ {
+ // See the comment in MediaDefs.h.
+ AUDIO_FORMAT_MP3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG)
+ },
+ {
+ AUDIO_FORMAT_AMR_NB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_NB)
+ },
+ {
+ AUDIO_FORMAT_AMR_WB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB)
+ },
+ {
+ // Note: in MediaDefs.cpp MEDIA_MIMETYPE_AUDIO_AAC = "audio/mp4a-latm".
+ AUDIO_FORMAT_AAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_FORMAT)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_MAIN, make_AudioFormatDescription("audio/aac.main")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_LC, make_AudioFormatDescription("audio/aac.lc")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_SSR, make_AudioFormatDescription("audio/aac.ssr")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_LTP, make_AudioFormatDescription("audio/aac.ltp")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_HE_V1, make_AudioFormatDescription("audio/aac.he.v1")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_SCALABLE, make_AudioFormatDescription("audio/aac.scalable")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ERLC, make_AudioFormatDescription("audio/aac.erlc")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_LD, make_AudioFormatDescription("audio/aac.ld")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_HE_V2, make_AudioFormatDescription("audio/aac.he.v2")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ELD, make_AudioFormatDescription("audio/aac.eld")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_XHE, make_AudioFormatDescription("audio/aac.xhe")
+ },
+        // AUDIO_FORMAT_HE_AAC_V1 and HE_AAC_V2 are removed since they were deprecated a long
+        // time ago.
+ {
+ AUDIO_FORMAT_VORBIS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_VORBIS)
+ },
+ {
+ AUDIO_FORMAT_OPUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_OPUS)
+ },
+ {
+ AUDIO_FORMAT_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC3)
+ },
+ {
+ AUDIO_FORMAT_E_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_E_AC3_JOC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3_JOC)
+ },
+ {
+ AUDIO_FORMAT_DTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS)
+ },
+ {
+ AUDIO_FORMAT_DTS_HD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_HD)
+ },
+ // In the future, we would like to represent encapsulated bitstreams as
+ // nested AudioFormatDescriptions. The legacy 'AUDIO_FORMAT_IEC61937' type doesn't
+ // specify the format of the encapsulated bitstream.
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_IEC61937,
+ make_AudioFormatDescription(media::PcmType::INT_16_BIT, "audio/x-iec61937")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_DOLBY_TRUEHD, make_AudioFormatDescription("audio/vnd.dolby.truehd")
+ },
+ {
+ AUDIO_FORMAT_EVRC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRC)
+ },
+ {
+ AUDIO_FORMAT_EVRCB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCB)
+ },
+ {
+ AUDIO_FORMAT_EVRCWB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCWB)
+ },
+ {
+ AUDIO_FORMAT_EVRCNW, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCNW)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADIF, make_AudioFormatDescription("audio/aac.adif")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_WMA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_WMA)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_WMA_PRO, make_AudioFormatDescription("audio/x-ms-wma.pro")
+ },
+ {
+ AUDIO_FORMAT_AMR_WB_PLUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MP2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)
+ },
+ {
+ AUDIO_FORMAT_QCELP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_QCELP)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_DSD, make_AudioFormatDescription("audio/vnd.sony.dsd")
+ },
+ {
+ AUDIO_FORMAT_FLAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_FLAC)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_ALAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_ALAC)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_APE, make_AudioFormatDescription("audio/x-ape")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_MAIN, make_AudioFormatDescription("audio/aac-adts.main")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_LC, make_AudioFormatDescription("audio/aac-adts.lc")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_SSR, make_AudioFormatDescription("audio/aac-adts.ssr")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_LTP, make_AudioFormatDescription("audio/aac-adts.ltp")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_HE_V1, make_AudioFormatDescription("audio/aac-adts.he.v1")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_SCALABLE, make_AudioFormatDescription("audio/aac-adts.scalable")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_ERLC, make_AudioFormatDescription("audio/aac-adts.erlc")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_LD, make_AudioFormatDescription("audio/aac-adts.ld")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_HE_V2, make_AudioFormatDescription("audio/aac-adts.he.v2")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_ELD, make_AudioFormatDescription("audio/aac-adts.eld")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_ADTS_XHE, make_AudioFormatDescription("audio/aac-adts.xhe")
+ },
+ {
+ // Note: not in the IANA registry. "vnd.octel.sbc" is not BT SBC.
+ AUDIO_FORMAT_SBC, make_AudioFormatDescription("audio/x-sbc")
+ },
+ {
+ AUDIO_FORMAT_APTX, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_APTX)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_HD, make_AudioFormatDescription("audio/vnd.qcom.aptx.hd")
+ },
+ {
+ // Note: not in the IANA registry. Matches MediaDefs.cpp.
+ AUDIO_FORMAT_AC4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC4)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_LDAC, make_AudioFormatDescription("audio/vnd.sony.ldac")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT, make_AudioFormatDescription("audio/vnd.dolby.mat")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT_1_0, make_AudioFormatDescription("audio/vnd.dolby.mat.1.0")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT_2_0, make_AudioFormatDescription("audio/vnd.dolby.mat.2.0")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT_2_1, make_AudioFormatDescription("audio/vnd.dolby.mat.2.1")
+ },
+ {
+ AUDIO_FORMAT_AAC_LATM, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC)
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_LATM_LC, make_AudioFormatDescription("audio/mp4a-latm.lc")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_LATM_HE_V1, make_AudioFormatDescription("audio/mp4a-latm.he.v1")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_AAC_LATM_HE_V2, make_AudioFormatDescription("audio/mp4a-latm.he.v2")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_CELT, make_AudioFormatDescription("audio/x-celt")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_ADAPTIVE, make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_LHDC, make_AudioFormatDescription("audio/vnd.savitech.lhdc")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_LHDC_LL, make_AudioFormatDescription("audio/vnd.savitech.lhdc.ll")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_TWSP, make_AudioFormatDescription("audio/vnd.qcom.aptx.twsp")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_LC3, make_AudioFormatDescription("audio/x-lc3")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MPEGH, make_AudioFormatDescription("audio/x-mpegh")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MPEGH_BL_L3, make_AudioFormatDescription("audio/x-mpegh.bl.l3")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MPEGH_BL_L4, make_AudioFormatDescription("audio/x-mpegh.bl.l4")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MPEGH_LC_L3, make_AudioFormatDescription("audio/x-mpegh.lc.l3")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_MPEGH_LC_L4, make_AudioFormatDescription("audio/x-mpegh.lc.l4")
+ },
+ {
+ // Note: not in the IANA registry.
+ AUDIO_FORMAT_IEC60958,
+ make_AudioFormatDescription(media::PcmType::INT_24_BIT, "audio/x-iec60958")
+ },
+ {
+ AUDIO_FORMAT_DTS_UHD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_UHD)
+ },
+ {
+ AUDIO_FORMAT_DRA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DRA)
+ },
+ }};
+ return pairs;
+}
+
+template<typename S, typename T>
+std::unordered_map<S, T> make_DirectMap(const std::vector<std::pair<S, T>>& v) {
+ std::unordered_map<S, T> result(v.begin(), v.end());
+ LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+ return result;
+}
+
+template<typename S, typename T>
+std::unordered_map<S, T> make_DirectMap(
+ const std::vector<std::pair<S, T>>& v1, const std::vector<std::pair<S, T>>& v2) {
+ std::unordered_map<S, T> result(v1.begin(), v1.end());
+ LOG_ALWAYS_FATAL_IF(result.size() != v1.size(), "Duplicate key elements detected in v1");
+ result.insert(v2.begin(), v2.end());
+ LOG_ALWAYS_FATAL_IF(result.size() != v1.size() + v2.size(),
+ "Duplicate key elements detected in v1+v2");
+ return result;
+}
+
+template<typename S, typename T>
+std::unordered_map<T, S> make_ReverseMap(const std::vector<std::pair<S, T>>& v) {
+ std::unordered_map<T, S> result;
+ std::transform(v.begin(), v.end(), std::inserter(result, result.begin()),
+ [](const std::pair<S, T>& p) {
+ return std::make_pair(p.second, p.first);
+ });
+ LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+ return result;
+}
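These two helpers are generic; a self-contained sketch of their behavior with plain int/std::string pairs (LOG_ALWAYS_FATAL_IF aborts the process, so duplicate keys in either direction are programming errors caught on first use):

    const std::vector<std::pair<int, std::string>> v = {{1, "one"}, {2, "two"}};

    // Forward lookup: first -> second. Would abort if two pairs shared the same first element.
    const auto direct = make_DirectMap(v);    // {1: "one", 2: "two"}

    // Reverse lookup: second -> first. Would abort if two pairs shared the same second
    // element, which is why every table above has to be one-to-one in both directions.
    const auto reverse = make_ReverseMap(v);  // {"one": 1, "two": 2}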
+
+} // namespace
+
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ const media::AudioChannelLayout& aidl, bool isInput) {
+ using ReverseMap = std::unordered_map<media::AudioChannelLayout, audio_channel_mask_t>;
+ using Tag = media::AudioChannelLayout::Tag;
+ static const ReverseMap mIdx = make_ReverseMap(getIndexAudioChannelPairs());
+ static const ReverseMap mIn = make_ReverseMap(getInAudioChannelPairs());
+ static const ReverseMap mOut = make_ReverseMap(getOutAudioChannelPairs());
+ static const ReverseMap mVoice = make_ReverseMap(getVoiceAudioChannelPairs());
+
+ auto convert = [](const media::AudioChannelLayout& aidl, const ReverseMap& m,
+ const char* func, const char* type) -> ConversionResult<audio_channel_mask_t> {
+ if (auto it = m.find(aidl); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no legacy %s audio_channel_mask_t found for %s", func, type,
+ aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+ };
+
+ switch (aidl.getTag()) {
+ case Tag::none:
+ return AUDIO_CHANNEL_NONE;
+ case Tag::invalid:
+ return AUDIO_CHANNEL_INVALID;
+ case Tag::indexMask:
+ return convert(aidl, mIdx, __func__, "index");
+ case Tag::layoutMask:
+ return convert(aidl, isInput ? mIn : mOut, __func__, isInput ? "input" : "output");
+ case Tag::voiceMask:
+ return convert(aidl, mVoice, __func__, "voice");
+ }
+ ALOGE("%s: unexpected tag value %d", __func__, aidl.getTag());
+ return unexpected(BAD_VALUE);
+}
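A usage sketch only: the helper below is hypothetical and not part of this change, but it chains the two channel-mask converters with the same VALUE_OR_RETURN macro used throughout this file. A valid mask is expected to survive the round trip for the direction it was converted with:

    ConversionResult<audio_channel_mask_t> roundTripOutputMask(audio_channel_mask_t legacy) {
        const media::AudioChannelLayout aidl = VALUE_OR_RETURN(
                legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy, false /*isInput*/));
        return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl, false /*isInput*/);
    }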
+
+ConversionResult<media::AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ audio_channel_mask_t legacy, bool isInput) {
+ using DirectMap = std::unordered_map<audio_channel_mask_t, media::AudioChannelLayout>;
+ using Tag = media::AudioChannelLayout::Tag;
+ static const DirectMap mIdx = make_DirectMap(getIndexAudioChannelPairs());
+ static const DirectMap mInAndVoice = make_DirectMap(
+ getInAudioChannelPairs(), getVoiceAudioChannelPairs());
+ static const DirectMap mOut = make_DirectMap(getOutAudioChannelPairs());
+
+ auto convert = [](const audio_channel_mask_t legacy, const DirectMap& m,
+ const char* func, const char* type) -> ConversionResult<media::AudioChannelLayout> {
+ if (auto it = m.find(legacy); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no AudioChannelLayout found for legacy %s audio_channel_mask_t value 0x%x",
+ func, type, legacy);
+ return unexpected(BAD_VALUE);
+ }
+ };
+
+ if (legacy == AUDIO_CHANNEL_NONE) {
+ return media::AudioChannelLayout{};
+ } else if (legacy == AUDIO_CHANNEL_INVALID) {
+ return media::AudioChannelLayout::make<Tag::invalid>(0);
+ }
+
+ const audio_channel_representation_t repr = audio_channel_mask_get_representation(legacy);
+ if (repr == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ return convert(legacy, mIdx, __func__, "index");
+ } else if (repr == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+ return convert(legacy, isInput ? mInAndVoice : mOut, __func__,
+ isInput ? "input / voice" : "output");
+ }
+
+ ALOGE("%s: unknown representation %d in audio_channel_mask_t value 0x%x",
+ __func__, repr, legacy);
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+ const media::AudioDeviceDescription& aidl) {
+ static const std::unordered_map<media::AudioDeviceDescription, audio_devices_t> m =
+ make_ReverseMap(getAudioDevicePairs());
+ if (auto it = m.find(aidl); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no legacy audio_devices_t found for %s", __func__, aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+}
+
+ConversionResult<media::AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
+ audio_devices_t legacy) {
+ static const std::unordered_map<audio_devices_t, media::AudioDeviceDescription> m =
+ make_DirectMap(getAudioDevicePairs());
+ if (auto it = m.find(legacy); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no AudioDeviceDescription found for legacy audio_devices_t value 0x%x",
+ __func__, legacy);
+ return unexpected(BAD_VALUE);
+ }
+}
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+ const media::AudioFormatDescription& aidl) {
+ static const std::unordered_map<media::AudioFormatDescription, audio_format_t> m =
+ make_ReverseMap(getAudioFormatPairs());
+ if (auto it = m.find(aidl); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no legacy audio_format_t found for %s", __func__, aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+}
+
+ConversionResult<media::AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
audio_format_t legacy) {
- // This relies on AudioFormat being kept in sync with audio_format_t.
- static_assert(sizeof(media::audio::common::AudioFormat) == sizeof(audio_format_t));
- return static_cast<media::audio::common::AudioFormat>(legacy);
+ static const std::unordered_map<audio_format_t, media::AudioFormatDescription> m =
+ make_DirectMap(getAudioFormatPairs());
+ if (auto it = m.find(legacy); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no AudioFormatDescription found for legacy audio_format_t value 0x%x",
+ __func__, legacy);
+ return unexpected(BAD_VALUE);
+ }
}
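Caller-side sketch (hypothetical helper, not part of this change): values with no table entry surface as unexpected(BAD_VALUE), which the VALUE_OR_RETURN_STATUS macro used elsewhere in this patch turns into an early status_t return; a mapped value can then be logged via the generated toString():

    status_t describeFormatForLogging(audio_format_t legacy, std::string* out) {
        const media::AudioFormatDescription aidl = VALUE_OR_RETURN_STATUS(
                legacy2aidl_audio_format_t_AudioFormatDescription(legacy));
        *out = aidl.toString();
        return OK;
    }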
ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl) {
@@ -449,24 +1259,14 @@
enumToMask_index<int32_t, media::AudioGainMode>);
}
-ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl) {
- // TODO(ytai): bitfield?
- return convertReinterpret<audio_devices_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy) {
- // TODO(ytai): bitfield?
- return convertReinterpret<int32_t>(legacy);
-}
-
ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type) {
audio_gain_config legacy;
legacy.index = VALUE_OR_RETURN(convertIntegral<int>(aidl.index));
legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
- legacy.channel_mask =
- VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
+ legacy.channel_mask = VALUE_OR_RETURN(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
const bool isJoint = bitmaskIsSet(aidl.mode, media::AudioGainMode::JOINT);
size_t numValues = isJoint ? 1
: isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
@@ -486,9 +1286,9 @@
media::AudioGainConfig aidl;
aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
- aidl.channelMask =
- VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
+ aidl.channelMask = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
const bool isJoint = (legacy.mode & AUDIO_GAIN_MODE_JOINT) != 0;
size_t numValues = isJoint ? 1
: isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
@@ -722,7 +1522,7 @@
const media::AudioPortConfigDeviceExt& aidl) {
audio_port_config_device_ext legacy;
legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
- legacy.type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.type));
+ legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
RETURN_IF_ERROR(aidl2legacy_string(aidl.address, legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
return legacy;
}
@@ -732,7 +1532,7 @@
const audio_port_config_device_ext& legacy) {
media::AudioPortConfigDeviceExt aidl;
aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.type));
+ aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_AudioDeviceDescription(legacy.type));
aidl.address = VALUE_OR_RETURN(
legacy2aidl_string(legacy.address, AUDIO_DEVICE_MAX_ADDRESS_LEN));
return aidl;
@@ -1053,11 +1853,15 @@
legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.sampleRate));
}
if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::CHANNEL_MASK)) {
+ const bool isInput = VALUE_OR_RETURN(direction(aidl.role, aidl.type)) == Direction::INPUT;
legacy.channel_mask =
- VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
+ VALUE_OR_RETURN(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ aidl.channelMask, isInput));
}
if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::FORMAT)) {
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+ legacy.format = VALUE_OR_RETURN(
+ aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
}
if (bitmaskIsSet(aidl.configMask, media::AudioPortConfigType::GAIN)) {
legacy.gain = VALUE_OR_RETURN(
@@ -1082,11 +1886,14 @@
aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
}
if (legacy.config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
- aidl.channelMask =
- VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
+ const bool isInput = VALUE_OR_RETURN(
+ direction(legacy.role, legacy.type)) == Direction::INPUT;
+ aidl.channelMask = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
}
if (legacy.config_mask & AUDIO_PORT_CONFIG_FORMAT) {
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ aidl.format = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
}
if (legacy.config_mask & AUDIO_PORT_CONFIG_GAIN) {
aidl.gain = VALUE_OR_RETURN(legacy2aidl_audio_gain_config_AudioGainConfig(
@@ -1148,33 +1955,40 @@
ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
const media::AudioIoDescriptor& aidl) {
- sp<AudioIoDescriptor> legacy(new AudioIoDescriptor());
- legacy->mIoHandle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.ioHandle));
- legacy->mPatch = VALUE_OR_RETURN(aidl2legacy_AudioPatch_audio_patch(aidl.patch));
- legacy->mSamplingRate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.samplingRate));
- legacy->mFormat = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
- legacy->mChannelMask =
- VALUE_OR_RETURN(aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
- legacy->mFrameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
- legacy->mFrameCountHAL = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCountHAL));
- legacy->mLatency = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.latency));
- legacy->mPortId = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
- return legacy;
+ const audio_io_handle_t io_handle = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_io_handle_t(aidl.ioHandle));
+ const struct audio_patch patch = VALUE_OR_RETURN(
+ aidl2legacy_AudioPatch_audio_patch(aidl.patch));
+ const bool isInput = aidl.isInput;
+ const uint32_t sampling_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.samplingRate));
+ const audio_format_t format = VALUE_OR_RETURN(
+ aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
+ const audio_channel_mask_t channel_mask = VALUE_OR_RETURN(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+ const size_t frame_count = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
+ const size_t frame_count_hal = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCountHAL));
+ const uint32_t latency = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.latency));
+ const audio_port_handle_t port_id = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_port_handle_t(aidl.portId));
+ return sp<AudioIoDescriptor>::make(io_handle, patch, isInput, sampling_rate, format,
+ channel_mask, frame_count, frame_count_hal, latency, port_id);
}
ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
const sp<AudioIoDescriptor>& legacy) {
media::AudioIoDescriptor aidl;
- aidl.ioHandle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy->mIoHandle));
- aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatch(legacy->mPatch));
- aidl.samplingRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->mSamplingRate));
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy->mFormat));
- aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy->mChannelMask));
- aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->mFrameCount));
- aidl.frameCountHAL = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->mFrameCountHAL));
- aidl.latency = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->mLatency));
- aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy->mPortId));
+ aidl.ioHandle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy->getIoHandle()));
+ aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatch(legacy->getPatch()));
+ aidl.isInput = legacy->getIsInput();
+ aidl.samplingRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->getSamplingRate()));
+ aidl.format = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormatDescription(legacy->getFormat()));
+ aidl.channelMask = VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ legacy->getChannelMask(), legacy->getIsInput()));
+ aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->getFrameCount()));
+ aidl.frameCountHAL = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy->getFrameCountHAL()));
+ aidl.latency = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->getLatency()));
+ aidl.portId = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy->getPortId()));
return aidl;
}
@@ -1473,7 +2287,7 @@
legacy.version = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.version));
legacy.size = sizeof(audio_offload_info_t);
audio_config_base_t config = VALUE_OR_RETURN(
- aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
+ aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config, false /*isInput*/));
legacy.sample_rate = config.sample_rate;
legacy.channel_mask = config.channel_mask;
legacy.format = config.format;
@@ -1502,9 +2316,10 @@
}
aidl.version = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.version));
aidl.config.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
- aidl.config.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
- aidl.config.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ aidl.config.channelMask = VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ legacy.channel_mask, false /*isInput*/));
+ aidl.config.format = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
aidl.streamType = VALUE_OR_RETURN(
legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream_type));
aidl.bitRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
@@ -1531,12 +2346,12 @@
}
ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl) {
+aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl, bool isInput) {
audio_config_t legacy;
legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
legacy.channel_mask = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+ legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
legacy.offload_info = VALUE_OR_RETURN(
aidl2legacy_AudioOffloadInfo_audio_offload_info_t(aidl.offloadInfo));
legacy.frame_count = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.frameCount));
@@ -1544,12 +2359,12 @@
}
ConversionResult<media::AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy) {
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput) {
media::AudioConfig aidl;
aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
aidl.offloadInfo = VALUE_OR_RETURN(
legacy2aidl_audio_offload_info_t_AudioOffloadInfo(legacy.offload_info));
aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.frame_count));
@@ -1557,22 +2372,22 @@
}
ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl) {
+aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl, bool isInput) {
audio_config_base_t legacy;
legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
legacy.channel_mask = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+ legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
return legacy;
}
ConversionResult<media::AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy) {
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput) {
media::AudioConfigBase aidl;
aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
return aidl;
}
@@ -1785,7 +2600,8 @@
aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const media::AudioPortDeviceExt& aidl) {
audio_port_device_ext legacy;
legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
- legacy.type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.device.type));
+ legacy.type = VALUE_OR_RETURN(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.device.type));
RETURN_IF_ERROR(
aidl2legacy_string(aidl.device.address, legacy.address, sizeof(legacy.address)));
legacy.encapsulation_modes = VALUE_OR_RETURN(
@@ -1799,7 +2615,8 @@
legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(const audio_port_device_ext& legacy) {
media::AudioPortDeviceExt aidl;
aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl.device.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.type));
+ aidl.device.type = VALUE_OR_RETURN(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(legacy.type));
aidl.device.address = VALUE_OR_RETURN(
legacy2aidl_string(legacy.address, sizeof(legacy.address)));
aidl.encapsulationModes = VALUE_OR_RETURN(
@@ -1899,9 +2716,9 @@
}
ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl) {
+aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl, bool isInput) {
audio_profile legacy;
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(aidl.format));
+ legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
if (aidl.samplingRates.size() > std::size(legacy.sample_rates)) {
return unexpected(BAD_VALUE);
@@ -1916,7 +2733,9 @@
}
RETURN_IF_ERROR(
convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
- aidl2legacy_int32_t_audio_channel_mask_t));
+ [isInput](const media::AudioChannelLayout& l) {
+ return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
+ }));
legacy.num_channel_masks = aidl.channelMasks.size();
legacy.encapsulation_type = VALUE_OR_RETURN(
@@ -1925,9 +2744,9 @@
}
ConversionResult<media::AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy) {
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput) {
media::AudioProfile aidl;
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(legacy.format));
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
if (legacy.num_sample_rates > std::size(legacy.sample_rates)) {
return unexpected(BAD_VALUE);
@@ -1943,7 +2762,9 @@
RETURN_IF_ERROR(
convertRange(legacy.channel_masks, legacy.channel_masks + legacy.num_channel_masks,
std::back_inserter(aidl.channelMasks),
- legacy2aidl_audio_channel_mask_t_int32_t));
+ [isInput](audio_channel_mask_t m) {
+ return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
+ }));
aidl.encapsulationType = VALUE_OR_RETURN(
legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
@@ -1955,8 +2776,8 @@
aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl) {
audio_gain legacy;
legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
- legacy.channel_mask = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_channel_mask_t(aidl.channelMask));
+ legacy.channel_mask = VALUE_OR_RETURN(aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ aidl.channelMask, aidl.isInput));
legacy.min_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.minValue));
legacy.max_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.maxValue));
legacy.default_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.defaultValue));
@@ -1967,11 +2788,12 @@
}
ConversionResult<media::AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy) {
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput) {
media::AudioGain aidl;
aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
+ aidl.isInput = isInput;
aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
aidl.maxValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_value));
aidl.defaultValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.default_value));
@@ -1992,8 +2814,11 @@
if (aidl.profiles.size() > std::size(legacy.audio_profiles)) {
return unexpected(BAD_VALUE);
}
+ const bool isInput = VALUE_OR_RETURN(direction(aidl.role, aidl.type)) == Direction::INPUT;
RETURN_IF_ERROR(convertRange(aidl.profiles.begin(), aidl.profiles.end(), legacy.audio_profiles,
- aidl2legacy_AudioProfile_audio_profile));
+ [isInput](const media::AudioProfile& p) {
+ return aidl2legacy_AudioProfile_audio_profile(p, isInput);
+ }));
legacy.num_audio_profiles = aidl.profiles.size();
if (aidl.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
@@ -2029,10 +2854,13 @@
if (legacy.num_audio_profiles > std::size(legacy.audio_profiles)) {
return unexpected(BAD_VALUE);
}
+ const bool isInput = VALUE_OR_RETURN(direction(legacy.role, legacy.type)) == Direction::INPUT;
RETURN_IF_ERROR(
convertRange(legacy.audio_profiles, legacy.audio_profiles + legacy.num_audio_profiles,
std::back_inserter(aidl.profiles),
- legacy2aidl_audio_profile_AudioProfile));
+ [isInput](const audio_profile& p) {
+ return legacy2aidl_audio_profile_AudioProfile(p, isInput);
+ }));
if (legacy.num_extra_audio_descriptors > std::size(legacy.extra_audio_descriptors)) {
return unexpected(BAD_VALUE);
@@ -2049,7 +2877,9 @@
RETURN_IF_ERROR(
convertRange(legacy.gains, legacy.gains + legacy.num_gains,
std::back_inserter(aidl.gains),
- legacy2aidl_audio_gain_AudioGain));
+ [isInput](const audio_gain& g) {
+ return legacy2aidl_audio_gain_AudioGain(g, isInput);
+ }));
aidl.activeConfig = VALUE_OR_RETURN(
legacy2aidl_audio_port_config_AudioPortConfig(legacy.active_config));
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 9c307ff..63da4d1 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -229,6 +229,7 @@
"libbinder",
"liblog",
"libshmemcompat",
+ "libstagefright_foundation",
"libutils",
"shared-file-region-aidl-cpp",
"framework-permission-aidl-cpp",
@@ -302,15 +303,20 @@
local_include_dir: "aidl",
srcs: [
"aidl/android/media/AudioAttributesInternal.aidl",
+ "aidl/android/media/AudioChannelLayout.aidl",
"aidl/android/media/AudioClient.aidl",
"aidl/android/media/AudioConfig.aidl",
"aidl/android/media/AudioConfigBase.aidl",
"aidl/android/media/AudioContentType.aidl",
"aidl/android/media/AudioDevice.aidl",
+ "aidl/android/media/AudioDeviceDescription.aidl",
+ "aidl/android/media/AudioDeviceType.aidl",
"aidl/android/media/AudioDualMonoMode.aidl",
"aidl/android/media/AudioEncapsulationMode.aidl",
"aidl/android/media/AudioEncapsulationMetadataType.aidl",
"aidl/android/media/AudioEncapsulationType.aidl",
+ "aidl/android/media/AudioFormatDescription.aidl",
+ "aidl/android/media/AudioFormatType.aidl",
"aidl/android/media/AudioFlag.aidl",
"aidl/android/media/AudioGain.aidl",
"aidl/android/media/AudioGainConfig.aidl",
@@ -350,10 +356,11 @@
"aidl/android/media/AudioVibratorInfo.aidl",
"aidl/android/media/EffectDescriptor.aidl",
"aidl/android/media/ExtraAudioDescriptor.aidl",
+ "aidl/android/media/PcmType.aidl",
"aidl/android/media/TrackSecondaryOutputInfo.aidl",
],
imports: [
- "audio_common-aidl",
+ "android.media.audio.common.types",
"framework-permission-aidl",
],
backend: {
@@ -364,6 +371,9 @@
"com.android.media",
],
},
+ java: {
+ sdk_version: "module_current",
+ },
},
}
aidl_interface {
@@ -391,7 +401,6 @@
"aidl/android/media/SoundTriggerSession.aidl",
],
imports: [
- "audio_common-aidl",
"audioclient-types-aidl",
],
backend: {
@@ -402,6 +411,9 @@
"com.android.media",
],
},
+ java: {
+ sdk_version: "module_current",
+ },
},
}
@@ -431,7 +443,6 @@
"aidl/android/media/IAudioTrackCallback.aidl",
],
imports: [
- "audio_common-aidl",
"audioclient-types-aidl",
"av-types-aidl",
"effect-aidl",
@@ -447,6 +458,9 @@
"com.android.media",
],
},
+ java: {
+ sdk_version: "module_current",
+ },
},
}
@@ -465,7 +479,6 @@
"aidl/android/media/IAudioPolicyServiceClient.aidl",
],
imports: [
- "audio_common-aidl",
"audioclient-types-aidl",
"audiopolicy-types-aidl",
"capture_state_listener-aidl",
@@ -481,5 +494,8 @@
"com.android.media",
],
},
+ java: {
+ sdk_version: "module_current",
+ },
},
}
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index a1d3bdb..a02a373 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -668,6 +668,8 @@
// ---- Explicit Routing ---------------------------------------------------
status_t AudioRecord::setInputDevice(audio_port_handle_t deviceId) {
AutoMutex lock(mLock);
+ ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
+ __func__, mPortId, deviceId, mSelectedDeviceId);
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
if (mStatus == NO_ERROR) {
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 88e752b..2d376b9 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -336,7 +336,7 @@
if (desc == 0) {
*samplingRate = af->sampleRate(ioHandle);
} else {
- *samplingRate = desc->mSamplingRate;
+ *samplingRate = desc->getSamplingRate();
}
if (*samplingRate == 0) {
ALOGE("AudioSystem::getSamplingRate failed for ioHandle %d", ioHandle);
@@ -371,7 +371,7 @@
if (desc == 0) {
*frameCount = af->frameCount(ioHandle);
} else {
- *frameCount = desc->mFrameCount;
+ *frameCount = desc->getFrameCount();
}
if (*frameCount == 0) {
ALOGE("AudioSystem::getFrameCount failed for ioHandle %d", ioHandle);
@@ -406,7 +406,7 @@
if (outputDesc == 0) {
*latency = af->latency(output);
} else {
- *latency = outputDesc->mLatency;
+ *latency = outputDesc->getLatency();
}
ALOGV("getLatency() output %d, latency %d", output, *latency);
@@ -480,6 +480,12 @@
return af->systemReady();
}
+status_t AudioSystem::audioPolicyReady() {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return NO_INIT;
+ return af->audioPolicyReady();
+}
+
status_t AudioSystem::getFrameCountHAL(audio_io_handle_t ioHandle,
size_t* frameCount) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -488,7 +494,7 @@
if (desc == 0) {
*frameCount = af->frameCountHAL(ioHandle);
} else {
- *frameCount = desc->mFrameCountHAL;
+ *frameCount = desc->getFrameCountHAL();
}
if (*frameCount == 0) {
ALOGE("AudioSystem::getFrameCountHAL failed for ioHandle %d", ioHandle);
@@ -529,15 +535,15 @@
Status AudioSystem::AudioFlingerClient::ioConfigChanged(
media::AudioIoConfigEvent _event,
const media::AudioIoDescriptor& _ioDesc) {
- audio_io_config_event event = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioIoConfigEvent_audio_io_config_event(_event));
+ audio_io_config_event_t event = VALUE_OR_RETURN_BINDER_STATUS(
+ aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(_event));
sp<AudioIoDescriptor> ioDesc(
VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(_ioDesc)));
ALOGV("ioConfigChanged() event %d", event);
- if (ioDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) return Status::ok();
+ if (ioDesc->getIoHandle() == AUDIO_IO_HANDLE_NONE) return Status::ok();
audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
std::vector<sp<AudioDeviceCallback>> callbacksToCall;
@@ -550,93 +556,88 @@
case AUDIO_OUTPUT_REGISTERED:
case AUDIO_INPUT_OPENED:
case AUDIO_INPUT_REGISTERED: {
- sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+ sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
if (oldDesc == 0) {
- mIoDescriptors.add(ioDesc->mIoHandle, ioDesc);
+ mIoDescriptors.add(ioDesc->getIoHandle(), ioDesc);
} else {
deviceId = oldDesc->getDeviceId();
- mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+ mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
}
if (ioDesc->getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
deviceId = ioDesc->getDeviceId();
if (event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED) {
- auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+ auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
if (it != mAudioDeviceCallbacks.end()) {
callbacks = it->second;
}
}
}
- ALOGV("ioConfigChanged() new %s %s %d samplingRate %u, format %#x channel mask %#x "
- "frameCount %zu deviceId %d",
+ ALOGV("ioConfigChanged() new %s %s %s",
event == AUDIO_OUTPUT_OPENED || event == AUDIO_OUTPUT_REGISTERED ?
"output" : "input",
event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED ?
"opened" : "registered",
- ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
- ioDesc->mChannelMask,
- ioDesc->mFrameCount, ioDesc->getDeviceId());
+ ioDesc->toDebugString().c_str());
}
break;
case AUDIO_OUTPUT_CLOSED:
case AUDIO_INPUT_CLOSED: {
- if (getIoDescriptor_l(ioDesc->mIoHandle) == 0) {
+ if (getIoDescriptor_l(ioDesc->getIoHandle()) == 0) {
ALOGW("ioConfigChanged() closing unknown %s %d",
- event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+ event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->getIoHandle());
break;
}
ALOGV("ioConfigChanged() %s %d closed",
- event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+ event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->getIoHandle());
- mIoDescriptors.removeItem(ioDesc->mIoHandle);
- mAudioDeviceCallbacks.erase(ioDesc->mIoHandle);
+ mIoDescriptors.removeItem(ioDesc->getIoHandle());
+ mAudioDeviceCallbacks.erase(ioDesc->getIoHandle());
}
break;
case AUDIO_OUTPUT_CONFIG_CHANGED:
case AUDIO_INPUT_CONFIG_CHANGED: {
- sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+ sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
if (oldDesc == 0) {
ALOGW("ioConfigChanged() modifying unknown %s! %d",
event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
- ioDesc->mIoHandle);
+ ioDesc->getIoHandle());
break;
}
deviceId = oldDesc->getDeviceId();
- mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+ mIoDescriptors.replaceValueFor(ioDesc->getIoHandle(), ioDesc);
if (deviceId != ioDesc->getDeviceId()) {
deviceId = ioDesc->getDeviceId();
- auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+ auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
if (it != mAudioDeviceCallbacks.end()) {
callbacks = it->second;
}
}
- ALOGV("ioConfigChanged() new config for %s %d samplingRate %u, format %#x "
- "channel mask %#x frameCount %zu frameCountHAL %zu deviceId %d",
+ ALOGV("ioConfigChanged() new config for %s %s",
event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
- ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
- ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->mFrameCountHAL,
- ioDesc->getDeviceId());
+ ioDesc->toDebugString().c_str());
}
break;
case AUDIO_CLIENT_STARTED: {
- sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->mIoHandle);
+ sp<AudioIoDescriptor> oldDesc = getIoDescriptor_l(ioDesc->getIoHandle());
if (oldDesc == 0) {
- ALOGW("ioConfigChanged() start client on unknown io! %d", ioDesc->mIoHandle);
+ ALOGW("ioConfigChanged() start client on unknown io! %d",
+ ioDesc->getIoHandle());
break;
}
ALOGV("ioConfigChanged() AUDIO_CLIENT_STARTED io %d port %d num callbacks %zu",
- ioDesc->mIoHandle, ioDesc->mPortId, mAudioDeviceCallbacks.size());
- oldDesc->mPatch = ioDesc->mPatch;
- auto it = mAudioDeviceCallbacks.find(ioDesc->mIoHandle);
+ ioDesc->getIoHandle(), ioDesc->getPortId(), mAudioDeviceCallbacks.size());
+ oldDesc->setPatch(ioDesc->getPatch());
+ auto it = mAudioDeviceCallbacks.find(ioDesc->getIoHandle());
if (it != mAudioDeviceCallbacks.end()) {
auto cbks = it->second;
- auto it2 = cbks.find(ioDesc->mPortId);
+ auto it2 = cbks.find(ioDesc->getPortId());
if (it2 != cbks.end()) {
- callbacks.emplace(ioDesc->mPortId, it2->second);
+ callbacks.emplace(ioDesc->getPortId(), it2->second);
deviceId = oldDesc->getDeviceId();
}
}
@@ -655,8 +656,8 @@
// Callbacks must be called without mLock held. May lead to a deadlock when calling, for
// example, getRoutedDevice, which updates the device and tries to acquire mLock.
for (auto cb : callbacksToCall) {
- // If callbacksToCall is not empty, it implies ioDesc->mIoHandle and deviceId are valid
- cb->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
+ // If callbacksToCall is not empty, it implies ioDesc->getIoHandle() and deviceId are valid
+ cb->onAudioDeviceUpdate(ioDesc->getIoHandle(), deviceId);
}
return Status::ok();
@@ -846,7 +847,8 @@
}
media::AudioDevice deviceAidl;
- deviceAidl.type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ deviceAidl.type = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
deviceAidl.address = address;
return statusTFromBinderStatus(
@@ -855,7 +857,8 @@
VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(state)),
name,
- VALUE_OR_RETURN_STATUS(legacy2aidl_audio_format_t_AudioFormat(encodedFormat))));
+ VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_format_t_AudioFormatDescription(encodedFormat))));
}
audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
@@ -865,7 +868,8 @@
auto result = [&]() -> ConversionResult<audio_policy_dev_state_t> {
media::AudioDevice deviceAidl;
- deviceAidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(device));
+ deviceAidl.type = VALUE_OR_RETURN(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
deviceAidl.address = device_address;
media::AudioPolicyDeviceState result;
@@ -895,12 +899,13 @@
}
media::AudioDevice deviceAidl;
- deviceAidl.type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ deviceAidl.type = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
deviceAidl.address = address;
return statusTFromBinderStatus(
aps->handleDeviceConfigChange(deviceAidl, name, VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_format_t_AudioFormat(encodedFormat))));
+ legacy2aidl_audio_format_t_AudioFormatDescription(encodedFormat))));
}
status_t AudioSystem::setPhoneState(audio_mode_t state, uid_t uid) {
@@ -998,7 +1003,7 @@
legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attr));
int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
media::AudioConfig configAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_config_t_AudioConfig(*config));
+ legacy2aidl_audio_config_t_AudioConfig(*config, false /*isInput*/));
int32_t flagsAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
@@ -1092,7 +1097,7 @@
int32_t riidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_unique_id_t_int32_t(riid));
int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
media::AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_config_base_t_AudioConfigBase(*config));
+ legacy2aidl_audio_config_base_t_AudioConfigBase(*config, true /*isInput*/));
int32_t flagsAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_handle_t_int32_t(*selectedDeviceId));
@@ -1165,7 +1170,8 @@
media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
- int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
return statusTFromBinderStatus(
aps->setStreamVolumeIndex(streamAidl, deviceAidl, indexAidl));
}
@@ -1178,7 +1184,8 @@
media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
- int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
int32_t indexAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getStreamVolumeIndex(streamAidl, deviceAidl, &indexAidl)));
@@ -1197,7 +1204,8 @@
media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
- int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
return statusTFromBinderStatus(
aps->setVolumeIndexForAttributes(attrAidl, deviceAidl, indexAidl));
}
@@ -1210,7 +1218,8 @@
media::AudioAttributesInternal attrAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
- int32_t deviceAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_devices_t_int32_t(device));
+ media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
int32_t indexAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getVolumeIndexForAttributes(attrAidl, deviceAidl, &indexAidl)));
@@ -1259,19 +1268,20 @@
return result.value_or(PRODUCT_STRATEGY_NONE);
}
-audio_devices_t AudioSystem::getDevicesForStream(audio_stream_type_t stream) {
+DeviceTypeSet AudioSystem::getDevicesForStream(audio_stream_type_t stream) {
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
- if (aps == 0) return AUDIO_DEVICE_NONE;
+ if (aps == 0) return DeviceTypeSet{};
- auto result = [&]() -> ConversionResult<audio_devices_t> {
+ auto result = [&]() -> ConversionResult<DeviceTypeSet> {
media::AudioStreamType streamAidl = VALUE_OR_RETURN(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
- int32_t resultAidl;
+ std::vector<media::AudioDeviceDescription> resultAidl;
RETURN_IF_ERROR(statusTFromBinderStatus(
aps->getDevicesForStream(streamAidl, &resultAidl)));
- return aidl2legacy_int32_t_audio_devices_t(resultAidl);
+ return convertContainer<DeviceTypeSet>(resultAidl,
+ aidl2legacy_AudioDeviceDescription_audio_devices_t);
}();
- return result.value_or(AUDIO_DEVICE_NONE);
+ return result.value_or(DeviceTypeSet{});
}
status_t AudioSystem::getDevicesForAttributes(const AudioAttributes& aa,
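The getDevicesForStream change above illustrates the error-handling pattern used across AudioSystem.cpp: run the fallible AIDL conversions inside an immediately invoked lambda returning ConversionResult, then collapse any failure to a neutral default. A compressed sketch with an illustrative function name:

    DeviceTypeSet devicesFromAidl(const std::vector<media::AudioDeviceDescription>& resultAidl) {
        auto result = [&]() -> ConversionResult<DeviceTypeSet> {
            return convertContainer<DeviceTypeSet>(
                    resultAidl, aidl2legacy_AudioDeviceDescription_audio_devices_t);
        }();
        return result.value_or(DeviceTypeSet{});
    }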
@@ -1690,7 +1700,8 @@
statusTFromBinderStatus(aps->acquireSoundTriggerSession(&retAidl)));
*session = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_session_t(retAidl.session));
*ioHandle = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(retAidl.ioHandle));
- *device = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_devices_t(retAidl.device));
+ *device = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(retAidl.device));
return OK;
}
@@ -1830,7 +1841,8 @@
media::AudioStreamType streamAidl = VALUE_OR_RETURN(
legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
int32_t indexAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(index));
- int32_t deviceAidl = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(device));
+ media::AudioDeviceDescription deviceAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
float retAidl;
RETURN_IF_ERROR(statusTFromBinderStatus(
aps->getStreamVolumeDB(streamAidl, indexAidl, deviceAidl, &retAidl)));
@@ -1865,7 +1877,7 @@
media::Int numSurroundFormatsAidl;
numSurroundFormatsAidl.value =
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
- std::vector<media::audio::common::AudioFormat> surroundFormatsAidl;
+ std::vector<media::AudioFormatDescription> surroundFormatsAidl;
std::vector<bool> surroundFormatsEnabledAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl,
@@ -1875,7 +1887,7 @@
convertIntegral<unsigned int>(numSurroundFormatsAidl.value));
RETURN_STATUS_IF_ERROR(
convertRange(surroundFormatsAidl.begin(), surroundFormatsAidl.end(), surroundFormats,
- aidl2legacy_AudioFormat_audio_format_t));
+ aidl2legacy_AudioFormatDescription_audio_format_t));
std::copy(surroundFormatsEnabledAidl.begin(), surroundFormatsEnabledAidl.end(),
surroundFormatsEnabled);
return OK;
@@ -1892,7 +1904,7 @@
media::Int numSurroundFormatsAidl;
numSurroundFormatsAidl.value =
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
- std::vector<media::audio::common::AudioFormat> surroundFormatsAidl;
+ std::vector<media::AudioFormatDescription> surroundFormatsAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getReportedSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl)));
@@ -1900,7 +1912,7 @@
convertIntegral<unsigned int>(numSurroundFormatsAidl.value));
RETURN_STATUS_IF_ERROR(
convertRange(surroundFormatsAidl.begin(), surroundFormatsAidl.end(), surroundFormats,
- aidl2legacy_AudioFormat_audio_format_t));
+ aidl2legacy_AudioFormatDescription_audio_format_t));
return OK;
}
@@ -1908,8 +1920,8 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::audio::common::AudioFormat audioFormatAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_format_t_AudioFormat(audioFormat));
+ media::AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_format_t_AudioFormatDescription(audioFormat));
return statusTFromBinderStatus(
aps->setSurroundFormatEnabled(audioFormatAidl, enabled));
}
@@ -1962,12 +1974,13 @@
& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- std::vector<media::audio::common::AudioFormat> formatsAidl;
+ std::vector<media::AudioFormatDescription> formatsAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getHwOffloadEncodingFormatsSupportedForA2DP(&formatsAidl)));
*formats = VALUE_OR_RETURN_STATUS(
- convertContainer<std::vector<audio_format_t>>(formatsAidl,
- aidl2legacy_AudioFormat_audio_format_t));
+ convertContainer<std::vector<audio_format_t>>(
+ formatsAidl,
+ aidl2legacy_AudioFormatDescription_audio_format_t));
return OK;
}
@@ -2418,13 +2431,13 @@
record_client_info_t clientInfoLegacy = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_RecordClientInfo_record_client_info_t(clientInfo));
audio_config_base_t clientConfigLegacy = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfigBase_audio_config_base_t(clientConfig));
+ aidl2legacy_AudioConfigBase_audio_config_base_t(clientConfig, true /*isInput*/));
std::vector<effect_descriptor_t> clientEffectsLegacy = VALUE_OR_RETURN_BINDER_STATUS(
convertContainer<std::vector<effect_descriptor_t>>(
clientEffects,
aidl2legacy_EffectDescriptor_effect_descriptor_t));
audio_config_base_t deviceConfigLegacy = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfigBase_audio_config_base_t(deviceConfig));
+ aidl2legacy_AudioConfigBase_audio_config_base_t(deviceConfig, true /*isInput*/));
std::vector<effect_descriptor_t> effectsLegacy = VALUE_OR_RETURN_BINDER_STATUS(
convertContainer<std::vector<effect_descriptor_t>>(
effects,
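For readers tracking the API shift in the AudioSystem.cpp hunks above: getDevicesForStream() previously collapsed its answer into a single audio_devices_t bitmask and now returns a DeviceTypeSet built from a vector of AudioDeviceDescription parcelables. Below is a minimal standalone sketch of that accumulation pattern; the DeviceType values and the simplified parcelable mirror are illustrative assumptions, not the real libaudioclient definitions.

    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <vector>

    // Illustrative stand-ins for the real framework types.
    enum class DeviceType : uint32_t { kSpeaker = 1, kWiredHeadset = 2, kBtA2dp = 3 };
    using DeviceTypeSet = std::set<DeviceType>;

    struct AudioDeviceDescription {  // simplified mirror of the AIDL parcelable
        DeviceType type;
    };

    // Mirrors the shape of convertContainer<DeviceTypeSet>(resultAidl, ...):
    // each returned parcelable is converted individually and accumulated into
    // a set, replacing the old single-bitmask return value.
    DeviceTypeSet toDeviceTypeSet(const std::vector<AudioDeviceDescription>& aidl) {
        DeviceTypeSet out;
        for (const auto& d : aidl) {
            out.insert(d.type);
        }
        return out;
    }

    int main() {
        const std::vector<AudioDeviceDescription> fromService = {
                {DeviceType::kSpeaker}, {DeviceType::kBtA2dp}};
        std::cout << "devices: " << toDeviceTypeSet(fromService).size() << '\n';  // 2
        return 0;
    }

Callers that used to test bits in the returned mask now iterate or query the set instead.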
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 5f802de..5b7760b 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -171,7 +171,7 @@
auto result = [&]() -> ConversionResult<bool> {
media::AudioConfigBase configAidl = VALUE_OR_RETURN(
- legacy2aidl_audio_config_base_t_AudioConfigBase(config));
+ legacy2aidl_audio_config_base_t_AudioConfigBase(config, false /*isInput*/));
media::AudioAttributesInternal attributesAidl = VALUE_OR_RETURN(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
bool retAidl;
@@ -1275,10 +1275,6 @@
if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
return NO_INIT;
}
- // Reject if timed track or compressed audio.
- if (!audio_is_linear_pcm(mFormat)) {
- return INVALID_OPERATION;
- }
ssize_t originalBufferSize = mProxy->getBufferSizeInFrames();
ssize_t finalBufferSize = mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
@@ -1555,6 +1551,8 @@
status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
AutoMutex lock(mLock);
+ ALOGV("%s(%d): deviceId=%d mSelectedDeviceId=%d",
+ __func__, mPortId, deviceId, mSelectedDeviceId);
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
if (mStatus == NO_ERROR) {
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index cae81f0..5f12f71 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -55,7 +55,9 @@
ConversionResult<media::CreateTrackRequest> IAudioFlinger::CreateTrackInput::toAidl() const {
media::CreateTrackRequest aidl;
aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
- aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(config));
+ // Do not be misled by 'Input'--this is an input to 'createTrack', which creates output tracks.
+ aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(
+ config, false /*isInput*/));
aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
aidl.sharedBuffer = VALUE_OR_RETURN(legacy2aidl_NullableIMemory_SharedFileRegion(sharedBuffer));
aidl.notificationsPerBuffer = VALUE_OR_RETURN(convertIntegral<int32_t>(notificationsPerBuffer));
@@ -74,7 +76,9 @@
IAudioFlinger::CreateTrackInput::fromAidl(const media::CreateTrackRequest& aidl) {
IAudioFlinger::CreateTrackInput legacy;
legacy.attr = VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
- legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.config));
+ // Do not be misled by 'Input'--this is an input to 'createTrack', which creates output tracks.
+ legacy.config = VALUE_OR_RETURN(
+ aidl2legacy_AudioConfig_audio_config_t(aidl.config, false /*isInput*/));
legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
legacy.sharedBuffer = VALUE_OR_RETURN(aidl2legacy_NullableSharedFileRegion_IMemory(aidl.sharedBuffer));
legacy.notificationsPerBuffer = VALUE_OR_RETURN(
@@ -139,7 +143,8 @@
IAudioFlinger::CreateRecordInput::toAidl() const {
media::CreateRecordRequest aidl;
aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
- aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(config));
+ aidl.config = VALUE_OR_RETURN(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(config, true /*isInput*/));
aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(riid));
aidl.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
@@ -159,7 +164,8 @@
IAudioFlinger::CreateRecordInput legacy;
legacy.attr = VALUE_OR_RETURN(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
- legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
+ legacy.config = VALUE_OR_RETURN(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config, true /*isInput*/));
legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
legacy.maxSharedAudioHistoryMs = VALUE_OR_RETURN(
@@ -242,9 +248,9 @@
audio_format_t AudioFlingerClientAdapter::format(audio_io_handle_t output) const {
auto result = [&]() -> ConversionResult<audio_format_t> {
int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
- media::audio::common::AudioFormat aidlRet;
+ media::AudioFormatDescription aidlRet;
RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->format(outputAidl, &aidlRet)));
- return aidl2legacy_AudioFormat_audio_format_t(aidlRet);
+ return aidl2legacy_AudioFormatDescription_audio_format_t(aidlRet);
}();
return result.value_or(AUDIO_FORMAT_INVALID);
}
@@ -410,10 +416,10 @@
audio_channel_mask_t channelMask) const {
auto result = [&]() -> ConversionResult<size_t> {
int32_t sampleRateAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
- media::audio::common::AudioFormat formatAidl = VALUE_OR_RETURN(
- legacy2aidl_audio_format_t_AudioFormat(format));
- int32_t channelMaskAidl = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_int32_t(channelMask));
+ media::AudioFormatDescription formatAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormatDescription(format));
+ media::AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(channelMask, true /*isInput*/));
int64_t aidlRet;
RETURN_IF_ERROR(statusTFromBinderStatus(
mDelegate->getInputBufferSize(sampleRateAidl, formatAidl, channelMaskAidl,
@@ -715,6 +721,10 @@
return statusTFromBinderStatus(mDelegate->systemReady());
}
+status_t AudioFlingerClientAdapter::audioPolicyReady() {
+ return statusTFromBinderStatus(mDelegate->audioPolicyReady());
+}
+
size_t AudioFlingerClientAdapter::frameCountHAL(audio_io_handle_t ioHandle) const {
auto result = [&]() -> ConversionResult<size_t> {
int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
@@ -806,11 +816,11 @@
}
Status AudioFlingerServerAdapter::format(int32_t output,
- media::audio::common::AudioFormat* _aidl_return) {
+ media::AudioFormatDescription* _aidl_return) {
audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_io_handle_t(output));
*_aidl_return = VALUE_OR_RETURN_BINDER(
- legacy2aidl_audio_format_t_AudioFormat(mDelegate->format(outputLegacy)));
+ legacy2aidl_audio_format_t_AudioFormatDescription(mDelegate->format(outputLegacy)));
return Status::ok();
}
@@ -934,13 +944,14 @@
}
Status AudioFlingerServerAdapter::getInputBufferSize(int32_t sampleRate,
- media::audio::common::AudioFormat format,
- int32_t channelMask, int64_t* _aidl_return) {
+ const media::AudioFormatDescription& format,
+ const media::AudioChannelLayout& channelMask,
+ int64_t* _aidl_return) {
uint32_t sampleRateLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(sampleRate));
audio_format_t formatLegacy = VALUE_OR_RETURN_BINDER(
- aidl2legacy_AudioFormat_audio_format_t(format));
+ aidl2legacy_AudioFormatDescription_audio_format_t(format));
audio_channel_mask_t channelMaskLegacy = VALUE_OR_RETURN_BINDER(
- aidl2legacy_int32_t_audio_channel_mask_t(channelMask));
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(channelMask, true /*isInput*/));
size_t size = mDelegate->getInputBufferSize(sampleRateLegacy, formatLegacy, channelMaskLegacy);
*_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int64_t>(size));
return Status::ok();
@@ -1189,6 +1200,11 @@
return Status::fromStatusT(mDelegate->systemReady());
}
+Status AudioFlingerServerAdapter::audioPolicyReady() {
+ mDelegate->audioPolicyReady();
+ return Status::ok();
+}
+
Status AudioFlingerServerAdapter::frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) {
audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
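A note on the boolean arguments threaded through these adapter conversions: the legacy audio_channel_mask_t and audio_config types use separate input and output encodings, while the new AudioChannelLayout/AudioConfig AIDL types are direction-agnostic (see the AudioChannelLayout documentation added later in this patch), so each conversion must now be told which direction it is handling. The sketch below only illustrates the idea; the mask values are placeholders, not the real system/audio.h constants.

    #include <cstdint>
    #include <iostream>
    #include <optional>

    enum class Direction { kInput, kOutput };

    // Placeholder values standing in for the direction-specific legacy masks.
    constexpr uint32_t kLegacyOutStereo = 0x3;
    constexpr uint32_t kLegacyInStereo = 0xC;

    // A direction-agnostic "layout": here just a channel count, for brevity.
    std::optional<uint32_t> layoutToLegacyMask(int channelCount, Direction dir) {
        if (channelCount != 2) {
            return std::nullopt;  // only stereo is handled in this sketch
        }
        // The same layout maps to different legacy values depending on the
        // direction; this is why the real helpers take an isInput flag.
        return dir == Direction::kInput ? kLegacyInStereo : kLegacyOutStereo;
    }

    int main() {
        std::cout << std::hex << *layoutToLegacyMask(2, Direction::kOutput) << '\n';  // 3
        std::cout << std::hex << *layoutToLegacyMask(2, Direction::kInput) << '\n';   // c
        return 0;
    }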
diff --git a/media/libaudioclient/PolicyAidlConversion.cpp b/media/libaudioclient/PolicyAidlConversion.cpp
index 25fdb49..676bb37 100644
--- a/media/libaudioclient/PolicyAidlConversion.cpp
+++ b/media/libaudioclient/PolicyAidlConversion.cpp
@@ -232,10 +232,14 @@
std::back_inserter(legacy.mCriteria),
aidl2legacy_AudioMixMatchCriterion));
legacy.mMixType = VALUE_OR_RETURN(aidl2legacy_AudioMixType_uint32_t(aidl.mixType));
- legacy.mFormat = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.format));
+ // See 'convertAudioMixToNative' in 'android_media_AudioSystem.cpp' -- only
+ // an output mask is expected here.
+ legacy.mFormat = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(
+ aidl.format, false /*isInput*/));
legacy.mRouteFlags = VALUE_OR_RETURN(
aidl2legacy_AudioMixRouteFlag_uint32_t_mask(aidl.routeFlags));
- legacy.mDeviceType = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.device.type));
+ legacy.mDeviceType = VALUE_OR_RETURN(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.device.type));
legacy.mDeviceAddress = VALUE_OR_RETURN(aidl2legacy_string_view_String8(aidl.device.address));
legacy.mCbFlags = VALUE_OR_RETURN(aidl2legacy_AudioMixCallbackFlag_uint32_t_mask(aidl.cbFlags));
legacy.mAllowPrivilegedMediaPlaybackCapture = aidl.allowPrivilegedMediaPlaybackCapture;
@@ -251,10 +255,14 @@
legacy.mCriteria,
legacy2aidl_AudioMixMatchCriterion));
aidl.mixType = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixType(legacy.mMixType));
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(legacy.mFormat));
+ // See 'convertAudioMixToNative' in 'android_media_AudioSystem.cpp' -- only
+ // an output mask is expected here.
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(
+ legacy.mFormat, false /*isInput*/));
aidl.routeFlags = VALUE_OR_RETURN(
legacy2aidl_uint32_t_AudioMixRouteFlag_mask(legacy.mRouteFlags));
- aidl.device.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.mDeviceType));
+ aidl.device.type = VALUE_OR_RETURN(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(legacy.mDeviceType));
aidl.device.address = VALUE_OR_RETURN(legacy2aidl_String8_string(legacy.mDeviceAddress));
aidl.cbFlags = VALUE_OR_RETURN(legacy2aidl_uint32_t_AudioMixCallbackFlag_mask(legacy.mCbFlags));
aidl.allowPrivilegedMediaPlaybackCapture = legacy.mAllowPrivilegedMediaPlaybackCapture;
diff --git a/media/libaudioclient/TEST_MAPPING b/media/libaudioclient/TEST_MAPPING
new file mode 100644
index 0000000..d8c18c0
--- /dev/null
+++ b/media/libaudioclient/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+ "presubmit": [
+ {
+ "name": "audio_aidl_conversion_tests"
+ }
+ ]
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl b/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl
new file mode 100644
index 0000000..fdc8184
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * This structure describes a layout of a multi-channel stream.
+ * There are two possible ways for representing a layout:
+ *
+ * - indexed mask, which tells what channels of an audio frame are used, but
+ * doesn't label them in any way, thus a correspondence between channels in
+ * the same position of frames originating from different streams must be
+ * established externally;
+ *
+ * - layout mask, which gives a label to each channel, thus making it
+ * possible to match channels between streams of different layouts.
+ *
+ * Both representations are agnostic of the direction of audio transfer. Also,
+ * by construction, the number of bits set to '1' in the mask indicates the
+ * number of channels in the audio frame. A channel mask per se only defines the
+ * presence or absence of a channel, not the order. Please see 'INTERLEAVE_*'
+ * constants for the platform convention of order.
+ *
+ * The structure also defines a "voice mask", which is a special case of a
+ * layout mask intended for processing voice audio in telecommunication
+ * use cases.
+ */
+union AudioChannelLayout {
+ /**
+ * This variant is used for representing the "null" ("none") value
+ * for the channel layout. The field value must always be '0'.
+ */
+ int none = 0;
+ /**
+ * This variant is used for indicating an "invalid" layout for use by the
+ * framework only. HAL implementations must not accept or emit
+ * AudioChannelLayout values for this variant. The field value must always
+ * be '0'.
+ */
+ int invalid = 0;
+ /**
+ * This variant is used for representing indexed masks. The value
+ * must be one of the 'INDEX_MASK_*' constants. The 'indexMask' field
+ * must have at least one bit set.
+ */
+ int indexMask;
+ /**
+ * This variant is used for representing layout masks.
+ * It is recommended to use one of the 'LAYOUT_*' values. The 'layoutMask' field
+ * must have at least one bit set.
+ */
+ int layoutMask;
+ /**
+ * This variant is used for processing voice audio input and output.
+ * It is recommended to use one of the 'VOICE_*' values. The 'voiceMask' field
+ * must have at least one bit set.
+ */
+ int voiceMask;
+
+ /**
+ * 'INDEX_MASK_' constants define how many channels are used.
+ */
+ const int INDEX_MASK_1 = (1 << 1) - 1;
+ const int INDEX_MASK_2 = (1 << 2) - 1;
+ const int INDEX_MASK_3 = (1 << 3) - 1;
+ const int INDEX_MASK_4 = (1 << 4) - 1;
+ const int INDEX_MASK_5 = (1 << 5) - 1;
+ const int INDEX_MASK_6 = (1 << 6) - 1;
+ const int INDEX_MASK_7 = (1 << 7) - 1;
+ const int INDEX_MASK_8 = (1 << 8) - 1;
+ const int INDEX_MASK_9 = (1 << 9) - 1;
+ const int INDEX_MASK_10 = (1 << 10) - 1;
+ const int INDEX_MASK_11 = (1 << 11) - 1;
+ const int INDEX_MASK_12 = (1 << 12) - 1;
+ const int INDEX_MASK_13 = (1 << 13) - 1;
+ const int INDEX_MASK_14 = (1 << 14) - 1;
+ const int INDEX_MASK_15 = (1 << 15) - 1;
+ const int INDEX_MASK_16 = (1 << 16) - 1;
+ const int INDEX_MASK_17 = (1 << 17) - 1;
+ const int INDEX_MASK_18 = (1 << 18) - 1;
+ const int INDEX_MASK_19 = (1 << 19) - 1;
+ const int INDEX_MASK_20 = (1 << 20) - 1;
+ const int INDEX_MASK_21 = (1 << 21) - 1;
+ const int INDEX_MASK_22 = (1 << 22) - 1;
+ const int INDEX_MASK_23 = (1 << 23) - 1;
+ const int INDEX_MASK_24 = (1 << 24) - 1;
+
+ /**
+ * 'LAYOUT_*' constants define channel layouts recognized by
+ * the audio system. The order of the channels in the frame is assumed
+ * to be from the LSB to MSB for all the bits set to '1'.
+ */
+ const int LAYOUT_MONO = CHANNEL_FRONT_LEFT;
+ const int LAYOUT_STEREO =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT;
+ const int LAYOUT_2POINT1 =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_LOW_FREQUENCY;
+ const int LAYOUT_TRI =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_FRONT_CENTER;
+ const int LAYOUT_TRI_BACK =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_BACK_CENTER;
+ const int LAYOUT_3POINT1 =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_FRONT_CENTER |
+ CHANNEL_LOW_FREQUENCY;
+ const int LAYOUT_2POINT0POINT2 =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+ CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
+ const int LAYOUT_2POINT1POINT2 =
+ LAYOUT_2POINT0POINT2 | CHANNEL_LOW_FREQUENCY;
+ const int LAYOUT_3POINT0POINT2 =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+ CHANNEL_FRONT_CENTER |
+ CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
+ const int LAYOUT_3POINT1POINT2 =
+ LAYOUT_3POINT0POINT2 | CHANNEL_LOW_FREQUENCY;
+ const int LAYOUT_QUAD =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+ CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT;
+ const int LAYOUT_QUAD_SIDE =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+ CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
+ const int LAYOUT_SURROUND =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+ CHANNEL_FRONT_CENTER | CHANNEL_BACK_CENTER;
+ const int LAYOUT_PENTA = LAYOUT_QUAD | CHANNEL_FRONT_CENTER;
+ const int LAYOUT_5POINT1 =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+ CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
+ CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT;
+ const int LAYOUT_5POINT1_SIDE =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+ CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
+ CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
+ const int LAYOUT_5POINT1POINT2 = LAYOUT_5POINT1 |
+ CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
+ const int LAYOUT_5POINT1POINT4 = LAYOUT_5POINT1 |
+ CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
+ CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT;
+ const int LAYOUT_6POINT1 =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+ CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
+ CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT | CHANNEL_BACK_CENTER;
+ const int LAYOUT_7POINT1 = LAYOUT_5POINT1 |
+ CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
+ const int LAYOUT_7POINT1POINT2 = LAYOUT_7POINT1 |
+ CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
+ const int LAYOUT_7POINT1POINT4 = LAYOUT_7POINT1 |
+ CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
+ CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT;
+ const int LAYOUT_9POINT1POINT4 = LAYOUT_7POINT1POINT4 |
+ CHANNEL_FRONT_WIDE_LEFT | CHANNEL_FRONT_WIDE_RIGHT;
+ const int LAYOUT_9POINT1POINT6 = LAYOUT_9POINT1POINT4 |
+ CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
+ const int LAYOUT_13POINT_360RA =
+ CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
+ CHANNEL_FRONT_CENTER |
+ CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT |
+ CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
+ CHANNEL_TOP_FRONT_CENTER |
+ CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT |
+ CHANNEL_BOTTOM_FRONT_LEFT | CHANNEL_BOTTOM_FRONT_RIGHT |
+ CHANNEL_BOTTOM_FRONT_CENTER;
+ const int LAYOUT_22POINT2 = LAYOUT_7POINT1POINT4 |
+ CHANNEL_FRONT_LEFT_OF_CENTER | CHANNEL_FRONT_RIGHT_OF_CENTER |
+ CHANNEL_BACK_CENTER | CHANNEL_TOP_CENTER |
+ CHANNEL_TOP_FRONT_CENTER | CHANNEL_TOP_BACK_CENTER |
+ CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT |
+ CHANNEL_BOTTOM_FRONT_LEFT | CHANNEL_BOTTOM_FRONT_RIGHT |
+ CHANNEL_BOTTOM_FRONT_CENTER |
+ CHANNEL_LOW_FREQUENCY_2;
+ const int LAYOUT_MONO_HAPTIC_A =
+ LAYOUT_MONO | CHANNEL_HAPTIC_A;
+ const int LAYOUT_STEREO_HAPTIC_A =
+ LAYOUT_STEREO | CHANNEL_HAPTIC_A;
+ const int LAYOUT_HAPTIC_AB =
+ CHANNEL_HAPTIC_A | CHANNEL_HAPTIC_B;
+ const int LAYOUT_MONO_HAPTIC_AB =
+ LAYOUT_MONO | LAYOUT_HAPTIC_AB;
+ const int LAYOUT_STEREO_HAPTIC_AB =
+ LAYOUT_STEREO | LAYOUT_HAPTIC_AB;
+ const int LAYOUT_FRONT_BACK =
+ CHANNEL_FRONT_CENTER | CHANNEL_BACK_CENTER;
+
+ /**
+ * Expresses the convention when stereo audio samples are stored interleaved
+ * in an array. This should improve readability by allowing code to use
+ * symbolic indices instead of hard-coded [0] and [1].
+ *
+ * For multi-channel beyond stereo, the platform convention is that channels
+ * are interleaved in order from least significant channel mask bit to most
+ * significant channel mask bit, with unused bits skipped. Any exceptions
+ * to this convention will be noted at the appropriate API.
+ */
+ const int INTERLEAVE_LEFT = 0;
+ const int INTERLEAVE_RIGHT = 1;
+
+ /**
+ * 'CHANNEL_*' constants are used to build 'LAYOUT_*' masks. Each constant
+ * must have exactly one bit set. The values do not match the
+ * 'android.media.AudioFormat.CHANNEL_OUT_*' constants from the SDK,
+ * for more efficient mask processing.
+ */
+ const int CHANNEL_FRONT_LEFT = 1 << 0;
+ const int CHANNEL_FRONT_RIGHT = 1 << 1;
+ const int CHANNEL_FRONT_CENTER = 1 << 2;
+ const int CHANNEL_LOW_FREQUENCY = 1 << 3;
+ const int CHANNEL_BACK_LEFT = 1 << 4;
+ const int CHANNEL_BACK_RIGHT = 1 << 5;
+ const int CHANNEL_FRONT_LEFT_OF_CENTER = 1 << 6;
+ const int CHANNEL_FRONT_RIGHT_OF_CENTER = 1 << 7;
+ const int CHANNEL_BACK_CENTER = 1 << 8;
+ const int CHANNEL_SIDE_LEFT = 1 << 9;
+ const int CHANNEL_SIDE_RIGHT = 1 << 10;
+ const int CHANNEL_TOP_CENTER = 1 << 11;
+ const int CHANNEL_TOP_FRONT_LEFT = 1 << 12;
+ const int CHANNEL_TOP_FRONT_CENTER = 1 << 13;
+ const int CHANNEL_TOP_FRONT_RIGHT = 1 << 14;
+ const int CHANNEL_TOP_BACK_LEFT = 1 << 15;
+ const int CHANNEL_TOP_BACK_CENTER = 1 << 16;
+ const int CHANNEL_TOP_BACK_RIGHT = 1 << 17;
+ const int CHANNEL_TOP_SIDE_LEFT = 1 << 18;
+ const int CHANNEL_TOP_SIDE_RIGHT = 1 << 19;
+ const int CHANNEL_BOTTOM_FRONT_LEFT = 1 << 20;
+ const int CHANNEL_BOTTOM_FRONT_CENTER = 1 << 21;
+ const int CHANNEL_BOTTOM_FRONT_RIGHT = 1 << 22;
+ const int CHANNEL_LOW_FREQUENCY_2 = 1 << 23;
+ const int CHANNEL_FRONT_WIDE_LEFT = 1 << 24;
+ const int CHANNEL_FRONT_WIDE_RIGHT = 1 << 25;
+ /**
+ * Haptic channels are not part of multichannel standards; however, they
+ * enhance the user experience during playback, so they are packed together
+ * with the channels of the program. To avoid collisions with positional
+ * channels, the values for haptic channels start at the MSB of an integer
+ * (after the sign bit) and move down towards the LSB.
+ */
+ const int CHANNEL_HAPTIC_B = 1 << 29;
+ const int CHANNEL_HAPTIC_A = 1 << 30;
+
+ /**
+ * 'VOICE_*' constants define layouts for voice audio. The order of the
+ * channels in the frame is assumed to be from the LSB to MSB for all the
+ * bits set to '1'.
+ */
+ const int VOICE_UPLINK_MONO = CHANNEL_VOICE_UPLINK;
+ const int VOICE_DNLINK_MONO = CHANNEL_VOICE_DNLINK;
+ const int VOICE_CALL_MONO = CHANNEL_VOICE_UPLINK | CHANNEL_VOICE_DNLINK;
+
+ /**
+ * 'CHANNEL_VOICE_*' constants are used to build 'VOICE_*' masks. Each
+ * constant must have exactly one bit set. Use the same values as
+ * 'android.media.AudioFormat.CHANNEL_IN_VOICE_*' constants from the SDK.
+ */
+ const int CHANNEL_VOICE_UPLINK = 0x4000;
+ const int CHANNEL_VOICE_DNLINK = 0x8000;
+}
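As the union's documentation above states, the number of bits set to '1' in any mask variant equals the number of channels, which client code can use to derive the channel count directly. A minimal standalone check (C++20 for std::popcount; the constant values are copied from the AIDL file above):

    #include <bit>
    #include <cstdint>
    #include <iostream>

    // Values copied from AudioChannelLayout.aidl.
    constexpr uint32_t CHANNEL_FRONT_LEFT = 1u << 0;
    constexpr uint32_t CHANNEL_FRONT_RIGHT = 1u << 1;
    constexpr uint32_t LAYOUT_STEREO = CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT;
    constexpr uint32_t INDEX_MASK_2 = (1u << 2) - 1;

    int main() {
        // By construction, the number of bits set to '1' equals the number of
        // channels, for layout masks and indexed masks alike.
        std::cout << std::popcount(LAYOUT_STEREO) << '\n';  // 2
        std::cout << std::popcount(INDEX_MASK_2) << '\n';   // 2
        return 0;
    }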
diff --git a/media/libaudioclient/aidl/android/media/AudioConfig.aidl b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
index 8dc97d3..6996d42 100644
--- a/media/libaudioclient/aidl/android/media/AudioConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
@@ -16,20 +16,17 @@
package android.media;
+import android.media.AudioChannelLayout;
+import android.media.AudioFormatDescription;
import android.media.AudioOffloadInfo;
-import android.media.audio.common.AudioFormat;
/**
* {@hide}
*/
parcelable AudioConfig {
int sampleRate;
- /**
- * Interpreted as audio_channel_mask_t.
- * TODO(ytai): Create a designated type.
- */
- int channelMask;
- AudioFormat format;
+ AudioChannelLayout channelMask;
+ AudioFormatDescription format;
AudioOffloadInfo offloadInfo;
long frameCount;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
index 8353c0d..e84161b 100644
--- a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
@@ -16,14 +16,14 @@
package android.media;
-import android.media.audio.common.AudioFormat;
+import android.media.AudioChannelLayout;
+import android.media.AudioFormatDescription;
/**
* {@hide}
*/
parcelable AudioConfigBase {
int sampleRate;
- /** Interpreted as audio_channel_mask_t. */
- int channelMask;
- AudioFormat format;
+ AudioChannelLayout channelMask;
+ AudioFormatDescription format;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioDevice.aidl b/media/libaudioclient/aidl/android/media/AudioDevice.aidl
index b200697..a815874 100644
--- a/media/libaudioclient/aidl/android/media/AudioDevice.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioDevice.aidl
@@ -16,11 +16,12 @@
package android.media;
+import android.media.AudioDeviceDescription;
+
/**
* {@hide}
*/
parcelable AudioDevice {
- /** Interpreted as audio_devices_t. */
- int type;
+ AudioDeviceDescription type;
@utf8InCpp String address;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioDeviceDescription.aidl b/media/libaudioclient/aidl/android/media/AudioDeviceDescription.aidl
new file mode 100644
index 0000000..f7548b9
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioDeviceDescription.aidl
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.AudioDeviceType;
+
+parcelable AudioDeviceDescription {
+ /**
+ * Type and directionality of the device. For bidirectional audio devices
+ * two descriptions need to be created, having the same value for
+ * the 'connection' field.
+ *
+ * See 'AudioDeviceType' for the list of supported values.
+ */
+ AudioDeviceType type = AudioDeviceType.NONE;
+ /**
+ * Specifies how the device is connected to the audio system. Usually this
+ * is a communication protocol, e.g. Bluetooth SCO or USB. The connection
+ * types recognized by the framework are defined by the 'CONNECTION_*'
+ * constants. Vendors can add their own connection types with the
+ * "vx.<vendor>." prefix.
+ *
+ * When the 'connection' field is left empty and 'type != NONE | DEFAULT',
+ * it is assumed that the device is permanently attached to the audio
+ * system, e.g. a built-in speaker or microphone.
+ *
+ * The 'connection' field must be left empty if 'type' is 'NONE' or
+ * '{IN|OUT}_DEFAULT'.
+ */
+ @utf8InCpp String connection;
+ /**
+ * Analog connection, for example, via 3.5 mm analog jack.
+ */
+ const @utf8InCpp String CONNECTION_ANALOG = "analog";
+ /**
+ * Low-End (Analog) Desk Dock.
+ */
+ const @utf8InCpp String CONNECTION_ANALOG_DOCK = "analog-dock";
+ /**
+ * Bluetooth A2DP connection.
+ */
+ const @utf8InCpp String CONNECTION_BT_A2DP = "bt-a2dp";
+ /**
+ * Bluetooth Low Energy (LE) connection.
+ */
+ const @utf8InCpp String CONNECTION_BT_LE = "bt-le";
+ /**
+ * Bluetooth SCO connection.
+ */
+ const @utf8InCpp String CONNECTION_BT_SCO = "bt-sco";
+ /**
+ * Bus connection. Mostly used in automotive scenarios.
+ */
+ const @utf8InCpp String CONNECTION_BUS = "bus";
+ /**
+ * High-End (Digital) Desk Dock.
+ */
+ const @utf8InCpp String CONNECTION_DIGITAL_DOCK = "digital-dock";
+ /**
+ * HDMI connection.
+ */
+ const @utf8InCpp String CONNECTION_HDMI = "hdmi";
+ /**
+ * HDMI ARC connection.
+ */
+ const @utf8InCpp String CONNECTION_HDMI_ARC = "hdmi-arc";
+ /**
+ * HDMI eARC connection.
+ */
+ const @utf8InCpp String CONNECTION_HDMI_EARC = "hdmi-earc";
+ /**
+ * IP v4 connection.
+ */
+ const @utf8InCpp String CONNECTION_IP_V4 = "ip-v4";
+ /**
+ * SPDIF connection.
+ */
+ const @utf8InCpp String CONNECTION_SPDIF = "spdif";
+ /**
+ * A wireless connection when the actual protocol is unspecified.
+ */
+ const @utf8InCpp String CONNECTION_WIRELESS = "wireless";
+ /**
+ * USB connection.
+ */
+ const @utf8InCpp String CONNECTION_USB = "usb";
+}
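The 'type' and 'connection' documentation above implies a simple modeling rule: a bidirectional device such as a Bluetooth SCO headset becomes two descriptions, one input and one output, sharing the same connection string. A small illustrative mirror of the parcelable follows (the real type is the generated AIDL class; the AudioDeviceType values are taken from the next file in this patch):

    #include <iostream>
    #include <string>

    // Simplified mirrors of the AIDL types, for illustration only.
    enum class AudioDeviceType { IN_HEADSET = 7, OUT_HEADSET = 137 };

    struct AudioDeviceDescription {
        AudioDeviceType type;
        std::string connection;  // empty means permanently attached
    };

    int main() {
        // A BT SCO headset is modeled as an input/output pair that shares the
        // CONNECTION_BT_SCO value ("bt-sco").
        const AudioDeviceDescription mic = {AudioDeviceType::IN_HEADSET, "bt-sco"};
        const AudioDeviceDescription out = {AudioDeviceType::OUT_HEADSET, "bt-sco"};
        std::cout << (mic.connection == out.connection) << '\n';  // 1
        return 0;
    }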
diff --git a/media/libaudioclient/aidl/android/media/AudioDeviceType.aidl b/media/libaudioclient/aidl/android/media/AudioDeviceType.aidl
new file mode 100644
index 0000000..4da9fd6
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioDeviceType.aidl
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The type of the audio device. Only used as part of 'AudioDeviceDescription'
+ * structure.
+ *
+ * Types are divided into "input" and "output" categories. Audio devices that
+ * have both audio input and output, for example, headsets, are represented by a
+ * pair of input and output device types.
+ *
+ * The 'AudioDeviceType' intentionally binds together directionality and 'kind'
+ * of the device to avoid making them fully orthogonal. This is because not all
+ * types of devices are bidirectional, for example, speakers can only be used
+ * for output and microphones can only be used for input (at least, in the
+ * context of the audio framework).
+ */
+@Backing(type="int")
+enum AudioDeviceType {
+ /**
+ * "None" type is a "null" value. All fields of 'AudioDeviceDescription'
+ * must have default / empty / null values.
+ */
+ NONE = 0,
+ /**
+ * The "default" device is used when the client does not have any
+ * preference for a particular device.
+ */
+ IN_DEFAULT = 1,
+ /**
+ * A device implementing Android Open Accessory protocol.
+ */
+ IN_ACCESSORY = 2,
+ /**
+ * Input from a DSP front-end proxy device.
+ */
+ IN_AFE_PROXY = 3,
+ /**
+ * Used when only the connection protocol is known, e.g. an "HDMI device".
+ */
+ IN_DEVICE = 4,
+ /**
+ * A device providing reference input for echo canceller.
+ */
+ IN_ECHO_REFERENCE = 5,
+ /**
+ * FM Tuner input.
+ */
+ IN_FM_TUNER = 6,
+ /**
+ * A microphone of a headset.
+ */
+ IN_HEADSET = 7,
+ /**
+ * Loopback input.
+ */
+ IN_LOOPBACK = 8,
+ /**
+ * The main microphone (the frontal mic on mobile devices).
+ */
+ IN_MICROPHONE = 9,
+ /**
+ * The secondary microphone (the back mic on mobile devices).
+ */
+ IN_MICROPHONE_BACK = 10,
+ /**
+ * Input from a submix of other streams.
+ */
+ IN_SUBMIX = 11,
+ /**
+ * Audio received via the telephone line.
+ */
+ IN_TELEPHONY_RX = 12,
+ /**
+ * TV Tuner audio input.
+ */
+ IN_TV_TUNER = 13,
+ /**
+ * The "default" device is used when the client does not have any
+ * preference for a particular device.
+ */
+ OUT_DEFAULT = 129,
+ /**
+ * A device implementing Android Open Accessory protocol.
+ */
+ OUT_ACCESSORY = 130,
+ /**
+ * Output from a DSP front-end proxy device.
+ */
+ OUT_AFE_PROXY = 131,
+ /**
+ * Car audio system.
+ */
+ OUT_CARKIT = 132,
+ /**
+ * Used when only the connection protocol is known, e.g. an "HDMI device".
+ */
+ OUT_DEVICE = 133,
+ /**
+ * The echo canceller device.
+ */
+ OUT_ECHO_CANCELLER = 134,
+ /**
+ * The FM Tuner device.
+ */
+ OUT_FM = 135,
+ /**
+ * Headphones.
+ */
+ OUT_HEADPHONE = 136,
+ /**
+ * Headphones of a headset.
+ */
+ OUT_HEADSET = 137,
+ /**
+ * Hearing aid.
+ */
+ OUT_HEARING_AID = 138,
+ /**
+ * Secondary line level output.
+ */
+ OUT_LINE_AUX = 139,
+ /**
+ * The main speaker.
+ */
+ OUT_SPEAKER = 140,
+ /**
+ * The speaker of a mobile device, used when the device is held close to the ear.
+ */
+ OUT_SPEAKER_EARPIECE = 141,
+ /**
+ * The main speaker with overload / overheating protection.
+ */
+ OUT_SPEAKER_SAFE = 142,
+ /**
+ * Output into a submix.
+ */
+ OUT_SUBMIX = 143,
+ /**
+ * Output into a telephone line.
+ */
+ OUT_TELEPHONY_TX = 144,
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl b/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl
new file mode 100644
index 0000000..a656348
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.AudioFormatType;
+import android.media.PcmType;
+
+/**
+ * An extensible type for specifying audio formats. All formats are largely
+ * divided into two classes: PCM and non-PCM (bitstreams). Bitstreams can
+ * be encapsulated into PCM streams.
+ *
+ * The type is defined in a way that makes each format uniquely identifiable,
+ * so that if the framework and the HAL construct a value for the same type
+ * (e.g. PCM 16 bit), they will produce identical parcelables with identical
+ * hashes. This makes it possible for the framework to deduplicate type
+ * descriptions received from different HAL modules without relying on a
+ * centralized registry of enumeration values.
+ *
+ * {@hide}
+ */
+parcelable AudioFormatDescription {
+ /**
+ * The type of the audio format. See the 'AudioFormatType' for the
+ * list of supported values.
+ */
+ AudioFormatType type = AudioFormatType.DEFAULT;
+ /**
+ * The type of the PCM stream or the transport stream for PCM
+ * encapsulations. See 'PcmType' for the list of supported values.
+ */
+ PcmType pcm = PcmType.DEFAULT;
+ /**
+ * Optional encoding specification. Must be left empty when:
+ *
+ * - 'type == DEFAULT && pcm == DEFAULT' -- that means "default" type;
+ * - 'type == PCM' -- that means a regular PCM stream (not an encapsulation
+ * of an encoded bitstream).
+ *
+ * For PCM encapsulations of encoded bitstreams (e.g. an encapsulation
+ * according to the IEC-61937 standard), the value of the 'pcm' field must
+ * be set accordingly; for example, 'PcmType.INT_16_BIT' must be used for
+ * IEC-61937. Note that 'type == NON_PCM' in this case.
+ *
+ * Encoding names mostly follow the IANA standards for media types (MIME)
+ * and frameworks/av/media/libstagefright/foundation/MediaDefs.cpp, with the
+ * latter having priority. Since there are still many audio types not found
+ * in either of these lists, the following rules are applied:
+ *
+ * - If there is a direct MIME type for the encoding, the MIME type name
+ * is used as is, e.g. "audio/eac3" for the EAC-3 format.
+ * - If the encoding is a "subformat" of a MIME-registered format,
+ * the latter is augmented with a suffix, e.g. "audio/eac3-joc" for the
+ * JOC extension of EAC-3.
+ * - If it's a proprietary format, a "vnd." prefix is added, similar to
+ * IANA rules, e.g. "audio/vnd.dolby.truehd".
+ * - Otherwise, "x-" prefix is added, e.g. "audio/x-iec61937".
+ * - All MIME types not found in the IANA formats list have an associated
+ * comment.
+ *
+ * For PCM encapsulations with a known bitstream format, the latter
+ * is added to the encapsulation encoding as a suffix, after a "+" char.
+ * For example, an IEC61937 encapsulation of AC3 has the following
+ * representation:
+ * type = NON_PCM,
+ * pcm = PcmType.INT_16_BIT,
+ * encoding = "audio/x-iec61937+audio/ac3"
+ */
+ @utf8InCpp String encoding;
+}
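The 'encoding' rules above are easiest to see with two concrete values: plain 16-bit PCM versus an IEC61937 encapsulation of AC3 (the latter spelled exactly as in the comment). Below is a simplified standalone mirror of the parcelable and its companion enums; the real classes are generated from the AIDL files in this patch.

    #include <iostream>
    #include <string>

    // Simplified mirrors of AudioFormatType, PcmType and the parcelable.
    enum class AudioFormatType { DEFAULT = 0, NON_PCM = DEFAULT, PCM = 1 };
    enum class PcmType { DEFAULT = 0, INT_16_BIT = 1 };

    struct AudioFormatDescription {
        AudioFormatType type = AudioFormatType::DEFAULT;
        PcmType pcm = PcmType::DEFAULT;
        std::string encoding;  // empty for "default" and plain PCM streams
    };

    int main() {
        // A regular 16-bit PCM stream: no encoding string.
        const AudioFormatDescription pcm16 = {AudioFormatType::PCM, PcmType::INT_16_BIT, {}};
        // IEC61937-encapsulated AC3: NON_PCM over a 16-bit PCM transport, with
        // both layers named in the encoding, as documented above.
        const AudioFormatDescription iecAc3 = {
                AudioFormatType::NON_PCM, PcmType::INT_16_BIT, "audio/x-iec61937+audio/ac3"};
        std::cout << pcm16.encoding.empty() << ' ' << iecAc3.encoding << '\n';
        return 0;
    }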
diff --git a/media/libaudioclient/aidl/android/media/AudioFormatType.aidl b/media/libaudioclient/aidl/android/media/AudioFormatType.aidl
new file mode 100644
index 0000000..31ed2be
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioFormatType.aidl
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The type of the audio format. Only used as part of 'AudioFormatDescription'
+ * structure.
+ */
+@Backing(type="byte")
+enum AudioFormatType {
+ /**
+ * "Default" type is used when the client does not care about the actual
+ * format. All fields of 'AudioFormatDescription' must have default / empty
+ * / null values.
+ */
+ DEFAULT = 0,
+ /**
+ * When the 'encoding' field of 'AudioFormatDescription' is not empty, it
+ * specifies the codec used for bitstream (non-PCM) data. It is also used
+ * in the case when the bitstream data is encapsulated into a PCM stream,
+ * see the documentation for 'AudioFormatDescription'.
+ */
+ NON_PCM = DEFAULT,
+ /**
+ * PCM type. The 'pcm' field of 'AudioFormatDescription' is used to specify
+ * the actual sample size and representation.
+ */
+ PCM = 1,
+ /**
+ * Value reserved for system use only. HALs must never return this value to
+ * the system or accept it from the system.
+ */
+ SYS_RESERVED_INVALID = -1,
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioGain.aidl b/media/libaudioclient/aidl/android/media/AudioGain.aidl
index 048b295..4cfa96e 100644
--- a/media/libaudioclient/aidl/android/media/AudioGain.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGain.aidl
@@ -16,17 +16,18 @@
package android.media;
+import android.media.AudioChannelLayout;
+
/**
* {@hide}
*/
parcelable AudioGain {
int index;
- boolean useInChannelMask;
+ boolean isInput;
boolean useForVolume;
/** Bitmask, indexed by AudioGainMode. */
int mode;
- /** Interpreted as audio_channel_mask_t. */
- int channelMask;
+ AudioChannelLayout channelMask;
int minValue;
int maxValue;
int defaultValue;
diff --git a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
index b93c2dc..afa3aca 100644
--- a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
@@ -16,6 +16,8 @@
package android.media;
+import android.media.AudioChannelLayout;
+
/**
* {@hide}
*/
@@ -28,9 +30,8 @@
/**
* Channels which gain value follows. N/A in joint mode.
- * Interpreted as audio_channel_mask_t.
*/
- int channelMask;
+ AudioChannelLayout channelMask;
/**
* Gain values in millibels.
diff --git a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
index 876ef9b..efdf99b 100644
--- a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
@@ -16,8 +16,9 @@
package android.media;
+import android.media.AudioChannelLayout;
+import android.media.AudioFormatDescription;
import android.media.AudioPatch;
-import android.media.audio.common.AudioFormat;
/**
* {@hide}
@@ -26,10 +27,10 @@
/** Interpreted as audio_io_handle_t. */
int ioHandle;
AudioPatch patch;
+ boolean isInput;
int samplingRate;
- AudioFormat format;
- /** Interpreted as audio_channel_mask_t. */
- int channelMask;
+ AudioFormatDescription format;
+ AudioChannelLayout channelMask;
long frameCount;
long frameCountHAL;
/** Only valid for output. */
diff --git a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl b/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
index c86b3f0..f326305 100644
--- a/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioOffloadInfo.aidl
@@ -20,7 +20,6 @@
import android.media.AudioEncapsulationMode;
import android.media.AudioStreamType;
import android.media.AudioUsage;
-import android.media.audio.common.AudioFormat;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
index 2dd30a4..be32a69 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
@@ -16,13 +16,14 @@
package android.media;
+import android.media.AudioChannelLayout;
import android.media.AudioGainConfig;
import android.media.AudioIoFlags;
import android.media.AudioPortConfigExt;
import android.media.AudioPortConfigType;
import android.media.AudioPortRole;
import android.media.AudioPortType;
-import android.media.audio.common.AudioFormat;
+import android.media.AudioFormatDescription;
/**
* {@hide}
@@ -43,14 +44,12 @@
int sampleRate;
/**
* Channel mask, if applicable.
- * Interpreted as audio_channel_mask_t.
- * TODO: bitmask?
*/
- int channelMask;
+ AudioChannelLayout channelMask;
/**
* Format, if applicable.
*/
- AudioFormat format;
+ AudioFormatDescription format;
/** Gain to apply, if applicable. */
AudioGainConfig gain;
/** Framework only: HW_AV_SYNC, DIRECT, ... */
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
index a99aa9b..31e5330 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfigDeviceExt.aidl
@@ -16,6 +16,8 @@
package android.media;
+import android.media.AudioDeviceDescription;
+
/**
* {@hide}
*/
@@ -26,11 +28,9 @@
*/
int hwModule;
/**
- * Device type (e.g AUDIO_DEVICE_OUT_SPEAKER).
- * Interpreted as audio_devices_t.
- * TODO: Convert to a standalone AIDL representation.
+ * Device type.
*/
- int type;
+ AudioDeviceDescription type;
/** Device address. "" if N/A. */
@utf8InCpp String address;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioProfile.aidl b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
index afb288f..9fb8d49 100644
--- a/media/libaudioclient/aidl/android/media/AudioProfile.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
@@ -16,8 +16,9 @@
package android.media;
+import android.media.AudioChannelLayout;
import android.media.AudioEncapsulationType;
-import android.media.audio.common.AudioFormat;
+import android.media.AudioFormatDescription;
/**
* {@hide}
@@ -25,9 +26,8 @@
parcelable AudioProfile {
@utf8InCpp String name;
/** The format for an audio profile should only be set when initialized. */
- AudioFormat format;
- /** Interpreted as audio_channel_mask_t. */
- int[] channelMasks;
+ AudioFormatDescription format;
+ AudioChannelLayout[] channelMasks;
int[] samplingRates;
boolean isDynamicFormat;
boolean isDynamicChannels;
diff --git a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
index f88fc3c..8538d8a 100644
--- a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
@@ -24,4 +24,5 @@
int id;
float resonantFrequency;
float qFactor;
+ float maxAmplitude;
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index d2cae6d..16f70c1 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -16,6 +16,7 @@
package android.media;
+import android.media.AudioChannelLayout;
import android.media.AudioMode;
import android.media.AudioPatch;
import android.media.AudioPort;
@@ -41,7 +42,7 @@
import android.media.MicrophoneInfoData;
import android.media.RenderPosition;
import android.media.TrackSecondaryOutputInfo;
-import android.media.audio.common.AudioFormat;
+import android.media.AudioFormatDescription;
/**
* {@hide}
@@ -62,7 +63,7 @@
*/
int sampleRate(int /* audio_io_handle_t */ ioHandle);
- AudioFormat format(int /* audio_io_handle_t */ output);
+ AudioFormatDescription format(int /* audio_io_handle_t */ output);
long frameCount(int /* audio_io_handle_t */ ioHandle);
@@ -115,8 +116,8 @@
// Retrieve the audio recording buffer size in bytes.
// FIXME This API assumes a route, and so should be deprecated.
long getInputBufferSize(int sampleRate,
- AudioFormat format,
- int /* audio_channel_mask_t */ channelMask);
+ in AudioFormatDescription format,
+ in AudioChannelLayout channelMask);
OpenOutputResponse openOutput(in OpenOutputRequest request);
int /* audio_io_handle_t */ openDuplicateOutput(int /* audio_io_handle_t */ output1,
@@ -197,6 +198,9 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
oneway void systemReady();
+ /* Indicate audio policy service is ready */
+ oneway void audioPolicyReady();
+
// Returns the number of frames per audio HAL buffer.
long frameCountHAL(int /* audio_io_handle_t */ ioHandle);
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 65bcd82..d54bd5e 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -16,15 +16,15 @@
package android.media;
+import android.media.AudioFormatDescription;
import android.content.AttributionSourceState;
-import android.media.audio.common.AudioFormat;
-
import android.media.AudioAttributesEx;
import android.media.AudioAttributesInternal;
import android.media.AudioConfig;
import android.media.AudioConfigBase;
import android.media.AudioDevice;
+import android.media.AudioDeviceDescription;
import android.media.AudioMix;
import android.media.AudioMode;
import android.media.AudioOffloadInfo;
@@ -63,13 +63,13 @@
void setDeviceConnectionState(in AudioDevice device,
in AudioPolicyDeviceState state,
@utf8InCpp String deviceName,
- in AudioFormat encodedFormat);
+ in AudioFormatDescription encodedFormat);
AudioPolicyDeviceState getDeviceConnectionState(in AudioDevice device);
void handleDeviceConfigChange(in AudioDevice device,
@utf8InCpp String deviceName,
- in AudioFormat encodedFormat);
+ in AudioFormatDescription encodedFormat);
void setPhoneState(AudioMode state, int /* uid_t */ uid);
@@ -114,18 +114,18 @@
int indexMax);
void setStreamVolumeIndex(AudioStreamType stream,
- int /* audio_devices_t */ device,
+ in AudioDeviceDescription device,
int index);
int getStreamVolumeIndex(AudioStreamType stream,
- int /* audio_devices_t */ device);
+ in AudioDeviceDescription device);
void setVolumeIndexForAttributes(in AudioAttributesInternal attr,
- int /* audio_devices_t */ device,
+ in AudioDeviceDescription device,
int index);
int getVolumeIndexForAttributes(in AudioAttributesInternal attr,
- int /* audio_devices_t */ device);
+ in AudioDeviceDescription device);
int getMaxVolumeIndexForAttributes(in AudioAttributesInternal attr);
@@ -133,7 +133,7 @@
int /* product_strategy_t */ getStrategyForStream(AudioStreamType stream);
- int /* bitmask of audio_devices_t */ getDevicesForStream(AudioStreamType stream);
+ AudioDeviceDescription[] getDevicesForStream(AudioStreamType stream);
AudioDevice[] getDevicesForAttributes(in AudioAttributesEx attr);
@@ -268,7 +268,7 @@
boolean getMasterMono();
- float getStreamVolumeDB(AudioStreamType stream, int index, int /* audio_devices_t */ device);
+ float getStreamVolumeDB(AudioStreamType stream, int index, in AudioDeviceDescription device);
/**
* Populates supported surround formats and their enabled state in formats and formatsEnabled.
@@ -279,7 +279,7 @@
* number of elements without actually retrieving them.
*/
void getSurroundFormats(inout Int count,
- out AudioFormat[] formats,
+ out AudioFormatDescription[] formats,
out boolean[] formatsEnabled);
/**
@@ -291,11 +291,11 @@
* number of elements without actually retrieving them.
*/
void getReportedSurroundFormats(inout Int count,
- out AudioFormat[] formats);
+ out AudioFormatDescription[] formats);
- AudioFormat[] getHwOffloadEncodingFormatsSupportedForA2DP();
+ AudioFormatDescription[] getHwOffloadEncodingFormatsSupportedForA2DP();
- void setSurroundFormatEnabled(AudioFormat audioFormat, boolean enabled);
+ void setSurroundFormatEnabled(in AudioFormatDescription audioFormat, boolean enabled);
void setAssistantUid(int /* uid_t */ uid);
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
index 06b12e9..1541948 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
@@ -17,6 +17,7 @@
package android.media;
import android.media.AudioConfig;
+import android.media.AudioConfigBase;
import android.media.AudioPort;
/**
@@ -25,7 +26,8 @@
parcelable OpenOutputRequest {
/** Interpreted as audio_module_handle_t. */
int module;
- AudioConfig config;
+ AudioConfig halConfig;
+ AudioConfigBase mixerConfig;
/** Type must be DEVICE. */
AudioPort device;
/** Bitmask, indexed by AudioOutputFlag. */
diff --git a/media/libaudioclient/aidl/android/media/PcmType.aidl b/media/libaudioclient/aidl/android/media/PcmType.aidl
new file mode 100644
index 0000000..c9e327c
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/PcmType.aidl
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The type of the encoding used for representing PCM samples. Only used as
+ * part of 'AudioFormatDescription' structure.
+ */
+@Backing(type="byte")
+enum PcmType {
+ /**
+ * "Default" value used when the type 'AudioFormatDescription' is "default".
+ */
+ DEFAULT = 0,
+ /**
+ * Unsigned 8-bit integer.
+ */
+ UINT_8_BIT = DEFAULT,
+ /**
+ * Signed 16-bit integer.
+ */
+ INT_16_BIT = 1,
+ /**
+ * Signed 32-bit integer.
+ */
+ INT_32_BIT = 2,
+ /**
+ * Q8.24 fixed point format.
+ */
+ FIXED_Q_8_24 = 3,
+ /**
+ * IEEE 754 32-bit floating point format.
+ */
+ FLOAT_32_BIT = 4,
+ /**
+ * Signed 24-bit integer.
+ */
+ INT_24_BIT = 5,
+}
diff --git a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
index a829e59..b9708b2 100644
--- a/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
+++ b/media/libaudioclient/aidl/android/media/SoundTriggerSession.aidl
@@ -16,6 +16,8 @@
package android.media;
+import android.media.AudioDeviceDescription;
+
/**
* {@hide}
*/
@@ -24,6 +26,6 @@
int session;
/** Interpreted as audio_io_handle_t. */
int ioHandle;
- /** Interpreted as audio_devices_t. */
- int device;
+ /** Device type. */
+ AudioDeviceDescription device;
}
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index d03c6fa..bdd72dd 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -597,7 +597,8 @@
media::OpenInputRequest request{};
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
request.input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ request.config = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(config, true /*isInput*/));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
@@ -648,11 +649,16 @@
sp<DeviceDescriptorBase> device = new DeviceDescriptorBase(getValue(&mFdp, kDevices));
audio_output_flags_t flags = getValue(&mFdp, kOutputFlags);
+ audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+
media::OpenOutputRequest request{};
media::OpenOutputResponse response{};
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ request.halConfig = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(config, false /*isInput*/));
+ request.mixerConfig = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(mixerConfig, false /*isInput*/));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 4ec69c7..2cf127c 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -22,14 +22,17 @@
#include <system/audio.h>
#include <android/media/AudioAttributesInternal.h>
+#include <android/media/AudioChannelLayout.h>
#include <android/media/AudioClient.h>
#include <android/media/AudioConfig.h>
#include <android/media/AudioConfigBase.h>
+#include <android/media/AudioDeviceDescription.h>
#include <android/media/AudioDualMonoMode.h>
#include <android/media/AudioEncapsulationMode.h>
#include <android/media/AudioEncapsulationMetadataType.h>
#include <android/media/AudioEncapsulationType.h>
#include <android/media/AudioFlag.h>
+#include <android/media/AudioFormatDescription.h>
#include <android/media/AudioGain.h>
#include <android/media/AudioGainMode.h>
#include <android/media/AudioInputFlags.h>
@@ -96,9 +99,6 @@
ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy);
-ConversionResult<audio_channel_mask_t> aidl2legacy_int32_t_audio_channel_mask_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_channel_mask_t_int32_t(audio_channel_mask_t legacy);
-
ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_pid_t_int32_t(pid_t legacy);
@@ -116,10 +116,10 @@
ConversionResult<std::optional<std::string_view>>
legacy2aidl_optional_String16_optional_string(std::optional<String16> legacy);
-ConversionResult<audio_io_config_event> aidl2legacy_AudioIoConfigEvent_audio_io_config_event(
+ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
media::AudioIoConfigEvent aidl);
-ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
- audio_io_config_event legacy);
+ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
+ audio_io_config_event_t legacy);
ConversionResult<audio_port_role_t> aidl2legacy_AudioPortRole_audio_port_role_t(
media::AudioPortRole aidl);
@@ -131,9 +131,19 @@
ConversionResult<media::AudioPortType> legacy2aidl_audio_port_type_t_AudioPortType(
audio_port_type_t legacy);
-ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
- media::audio::common::AudioFormat aidl);
-ConversionResult<media::audio::common::AudioFormat> legacy2aidl_audio_format_t_AudioFormat(
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ const media::AudioChannelLayout& aidl, bool isInput);
+ConversionResult<media::AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ audio_channel_mask_t legacy, bool isInput);
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+ const media::AudioDeviceDescription& aidl);
+ConversionResult<media::AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
+ audio_devices_t legacy);
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+ const media::AudioFormatDescription& aidl);
+ConversionResult<media::AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
audio_format_t legacy);
ConversionResult<audio_gain_mode_t>
@@ -144,9 +154,6 @@
ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy);
-ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy);
-
ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
const media::AudioGainConfig& aidl, media::AudioPortRole role, media::AudioPortType type);
ConversionResult<media::AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
@@ -221,7 +228,6 @@
ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
const media::AudioIoDescriptor& aidl);
-
ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
const sp<AudioIoDescriptor>& legacy);
@@ -266,14 +272,14 @@
legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy);
ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl);
+aidl2legacy_AudioConfig_audio_config_t(const media::AudioConfig& aidl, bool isInput);
ConversionResult<media::AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy);
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput);
ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl);
+aidl2legacy_AudioConfigBase_audio_config_base_t(const media::AudioConfigBase& aidl, bool isInput);
ConversionResult<media::AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy);
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput);
ConversionResult<sp<IMemory>>
aidl2legacy_SharedFileRegion_IMemory(const media::SharedFileRegion& aidl);
@@ -340,14 +346,15 @@
legacy2aidl_audio_port_session_ext_AudioPortSessionExt(const audio_port_session_ext& legacy);
ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl);
+aidl2legacy_AudioProfile_audio_profile(const media::AudioProfile& aidl, bool isInput);
ConversionResult<media::AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy);
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput);
ConversionResult<audio_gain>
aidl2legacy_AudioGain_audio_gain(const media::AudioGain& aidl);
+// The AIDL structure provides an explicit direction (input/output) flag, while the legacy type doesn't.
ConversionResult<media::AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy);
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput);
ConversionResult<audio_port_v7>
aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl);
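
A minimal sketch of calling one of the direction-aware conversions declared above; the wrapper function is illustrative only, and the expected result is inferred from the round-trip tests added later in this change:

    #include <media/AidlConversion.h>

    void convertStereoMask() {
        // Expected to yield Tag::layoutMask with LAYOUT_STEREO (cf. make_ACL_Stereo in the tests).
        auto layout = android::legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
                AUDIO_CHANNEL_OUT_STEREO, false /*isInput*/);
        if (layout.ok()) {
            // layout.value() is the media::AudioChannelLayout counterpart of the legacy mask.
        }
    }
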
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index 5dfe5fc..bd341e3 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -17,9 +17,72 @@
#pragma once
+#include <functional>
+
+#include <android/media/AudioChannelLayout.h>
+#include <android/media/AudioDeviceDescription.h>
+#include <android/media/AudioFormatDescription.h>
+#include <binder/Parcelable.h>
#include <system/audio.h>
#include <system/audio_policy.h>
-#include <binder/Parcelable.h>
+
+namespace {
+// see boost::hash_combine
+#if defined(__clang__)
+__attribute__((no_sanitize("unsigned-integer-overflow")))
+#endif
+static size_t hash_combine(size_t seed, size_t v) {
+ return std::hash<size_t>{}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+}
+}
+
+namespace std {
+
+// Note: when extending the hash specializations below we need to account for the
+// possibility of processing instances that belong to different versions of the same
+// type, e.g. a HAL may be using a previous version of the AIDL interface.
+
+template<> struct hash<android::media::AudioChannelLayout>
+{
+ std::size_t operator()(const android::media::AudioChannelLayout& acl) const noexcept {
+ using Tag = android::media::AudioChannelLayout::Tag;
+ const size_t seed = std::hash<Tag>{}(acl.getTag());
+ switch (acl.getTag()) {
+ case Tag::none:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::none>()));
+ case Tag::invalid:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::invalid>()));
+ case Tag::indexMask:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::indexMask>()));
+ case Tag::layoutMask:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::layoutMask>()));
+ case Tag::voiceMask:
+ return hash_combine(seed, std::hash<int32_t>{}(acl.get<Tag::voiceMask>()));
+ }
+ return seed;
+ }
+};
+
+template<> struct hash<android::media::AudioDeviceDescription>
+{
+ std::size_t operator()(const android::media::AudioDeviceDescription& add) const noexcept {
+ return hash_combine(
+ std::hash<android::media::AudioDeviceType>{}(add.type),
+ std::hash<std::string>{}(add.connection));
+ }
+};
+
+template<> struct hash<android::media::AudioFormatDescription>
+{
+ std::size_t operator()(const android::media::AudioFormatDescription& afd) const noexcept {
+ return hash_combine(
+ std::hash<android::media::AudioFormatType>{}(afd.type),
+ hash_combine(
+ std::hash<android::media::PcmType>{}(afd.pcm),
+ std::hash<std::string>{}(afd.encoding)));
+ }
+};
+} // namespace std
namespace android {
@@ -41,8 +104,43 @@
return !(lhs==rhs);
}
+constexpr bool operator==(const audio_offload_info_t &lhs, const audio_offload_info_t &rhs)
+{
+ return lhs.version == rhs.version && lhs.size == rhs.size &&
+ lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+ lhs.format == rhs.format && lhs.stream_type == rhs.stream_type &&
+ lhs.bit_rate == rhs.bit_rate && lhs.duration_us == rhs.duration_us &&
+ lhs.has_video == rhs.has_video && lhs.is_streaming == rhs.is_streaming &&
+ lhs.bit_width == rhs.bit_width && lhs.offload_buffer_size == rhs.offload_buffer_size &&
+ lhs.usage == rhs.usage && lhs.encapsulation_mode == rhs.encapsulation_mode &&
+ lhs.content_id == rhs.content_id && lhs.sync_id == rhs.sync_id;
+}
+constexpr bool operator!=(const audio_offload_info_t &lhs, const audio_offload_info_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
+constexpr bool operator==(const audio_config_t &lhs, const audio_config_t &rhs)
+{
+ return lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+ lhs.format == rhs.format && lhs.offload_info == rhs.offload_info;
+}
+constexpr bool operator!=(const audio_config_t &lhs, const audio_config_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
+constexpr bool operator==(const audio_config_base_t &lhs, const audio_config_base_t &rhs)
+{
+ return lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+ lhs.format == rhs.format;
+}
+constexpr bool operator!=(const audio_config_base_t &lhs, const audio_config_base_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
enum volume_group_t : uint32_t;
static const volume_group_t VOLUME_GROUP_NONE = static_cast<volume_group_t>(-1);
} // namespace android
-
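
The std::hash specializations above make these AIDL types usable as keys in unordered containers; a minimal sketch, which additionally assumes the AIDL-generated operator== (the same operator the round-trip tests below rely on via EXPECT_EQ):

    #include <unordered_set>

    #include <media/AudioCommonTypes.h>

    bool isKnownFormat(const android::media::AudioFormatDescription& format) {
        // Hashing goes through the specialization defined in AudioCommonTypes.h.
        static const std::unordered_set<android::media::AudioFormatDescription> kKnown = {
                android::media::AudioFormatDescription{},  // the "default" format
        };
        return kKnown.count(format) > 0;
    }
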
diff --git a/media/libaudioclient/include/media/AudioIoDescriptor.h b/media/libaudioclient/include/media/AudioIoDescriptor.h
index 981d33a..ef729ed 100644
--- a/media/libaudioclient/include/media/AudioIoDescriptor.h
+++ b/media/libaudioclient/include/media/AudioIoDescriptor.h
@@ -17,9 +17,15 @@
#ifndef ANDROID_AUDIO_IO_DESCRIPTOR_H
#define ANDROID_AUDIO_IO_DESCRIPTOR_H
+#include <sstream>
+#include <string>
+
+#include <system/audio.h>
+#include <utils/RefBase.h>
+
namespace android {
-enum audio_io_config_event {
+enum audio_io_config_event_t {
AUDIO_OUTPUT_REGISTERED,
AUDIO_OUTPUT_OPENED,
AUDIO_OUTPUT_CLOSED,
@@ -35,39 +41,68 @@
// frequent calls through IAudioFlinger
class AudioIoDescriptor : public RefBase {
public:
- AudioIoDescriptor() :
- mIoHandle(AUDIO_IO_HANDLE_NONE),
- mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(AUDIO_CHANNEL_NONE),
- mFrameCount(0), mFrameCountHAL(0), mLatency(0), mPortId(AUDIO_PORT_HANDLE_NONE)
- {
- memset(&mPatch, 0, sizeof(struct audio_patch));
- }
+ AudioIoDescriptor() = default;
+ // For AUDIO_{INPUT|OUTPUT}_CLOSED events.
+ AudioIoDescriptor(audio_io_handle_t ioHandle) : mIoHandle(ioHandle) {}
+ // For AUDIO_CLIENT_STARTED events.
+ AudioIoDescriptor(
+ audio_io_handle_t ioHandle, const audio_patch& patch, audio_port_handle_t portId) :
+ mIoHandle(ioHandle), mPatch(patch), mPortId(portId) {}
+ // For everything else.
+ AudioIoDescriptor(
+ audio_io_handle_t ioHandle, const audio_patch& patch, bool isInput,
+ uint32_t samplingRate, audio_format_t format, audio_channel_mask_t channelMask,
+ size_t frameCount, size_t frameCountHal, uint32_t latency = 0,
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE) :
+ mIoHandle(ioHandle), mPatch(patch), mIsInput(isInput),
+ mSamplingRate(samplingRate), mFormat(format), mChannelMask(channelMask),
+ mFrameCount(frameCount), mFrameCountHAL(frameCountHal), mLatency(latency),
+ mPortId(portId) {}
- virtual ~AudioIoDescriptor() {}
-
- audio_port_handle_t getDeviceId() {
+ audio_io_handle_t getIoHandle() const { return mIoHandle; }
+ const audio_patch& getPatch() const { return mPatch; }
+ bool getIsInput() const { return mIsInput; }
+ uint32_t getSamplingRate() const { return mSamplingRate; }
+ audio_format_t getFormat() const { return mFormat; }
+ audio_channel_mask_t getChannelMask() const { return mChannelMask; }
+ size_t getFrameCount() const { return mFrameCount; }
+ size_t getFrameCountHAL() const { return mFrameCountHAL; }
+ uint32_t getLatency() const { return mLatency; }
+ audio_port_handle_t getPortId() const { return mPortId; }
+ audio_port_handle_t getDeviceId() const {
if (mPatch.num_sources != 0 && mPatch.num_sinks != 0) {
- if (mPatch.sources[0].type == AUDIO_PORT_TYPE_MIX) {
- // this is an output mix
- // FIXME: the API only returns the first device in case of multiple device selection
- return mPatch.sinks[0].id;
- } else {
- // this is an input mix
- return mPatch.sources[0].id;
- }
+ // FIXME: the API only returns the first device in case of multiple device selection
+ return mIsInput ? mPatch.sources[0].id : mPatch.sinks[0].id;
}
return AUDIO_PORT_HANDLE_NONE;
}
+ void setPatch(const audio_patch& patch) { mPatch = patch; }
- audio_io_handle_t mIoHandle;
- struct audio_patch mPatch;
- uint32_t mSamplingRate;
- audio_format_t mFormat;
- audio_channel_mask_t mChannelMask;
- size_t mFrameCount;
- size_t mFrameCountHAL;
- uint32_t mLatency; // only valid for output
- audio_port_handle_t mPortId; // valid for event AUDIO_CLIENT_STARTED
+ std::string toDebugString() const {
+ std::ostringstream ss;
+ ss << mIoHandle << ", samplingRate " << mSamplingRate << ", "
+ << audio_format_to_string(mFormat) << ", "
+ << (audio_channel_mask_get_representation(mChannelMask) ==
+ AUDIO_CHANNEL_REPRESENTATION_INDEX ?
+ audio_channel_index_mask_to_string(mChannelMask) :
+ (mIsInput ? audio_channel_in_mask_to_string(mChannelMask) :
+ audio_channel_out_mask_to_string(mChannelMask)))
+ << ", frameCount " << mFrameCount << ", frameCountHAL " << mFrameCountHAL
+ << ", deviceId " << getDeviceId();
+ return ss.str();
+ }
+
+ private:
+ const audio_io_handle_t mIoHandle = AUDIO_IO_HANDLE_NONE;
+ struct audio_patch mPatch = {};
+ const bool mIsInput = false;
+ const uint32_t mSamplingRate = 0;
+ const audio_format_t mFormat = AUDIO_FORMAT_DEFAULT;
+ const audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+ const size_t mFrameCount = 0;
+ const size_t mFrameCountHAL = 0;
+ const uint32_t mLatency = 0;
+ const audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
};
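
A minimal sketch of the reworked descriptor in use, going through the "everything else" constructor and the new toDebugString() helper; the literal values are placeholders, not taken from the change:

    #include <media/AudioIoDescriptor.h>

    std::string describeOutput() {
        audio_patch patch = {};
        android::sp<android::AudioIoDescriptor> desc = new android::AudioIoDescriptor(
                1 /*ioHandle*/, patch, false /*isInput*/, 48000 /*samplingRate*/,
                AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
                960 /*frameCount*/, 192 /*frameCountHal*/);
        return desc->toDebugString();  // e.g. "1, samplingRate 48000, ..."
    }
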
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 326919a..f17ee3a 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -264,6 +264,7 @@
size_t frameCount() const { return mFrameCount; }
size_t frameSize() const { return mFrameSize; }
audio_source_t inputSource() const { return mAttributes.source; }
+ audio_channel_mask_t channelMask() const { return mChannelMask; }
/*
* Return the period of the notification callback in frames.
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index a9109c8..c92b924 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -24,6 +24,7 @@
#include <android/media/BnAudioPolicyServiceClient.h>
#include <android/content/AttributionSourceState.h>
#include <media/AidlConversionUtil.h>
+#include <media/AudioContainers.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioPolicy.h>
#include <media/AudioProductStrategy.h>
@@ -225,6 +226,9 @@
// Indicate JAVA services are ready (scheduling, power management ...)
static status_t systemReady();
+ // Indicate audio policy service is ready
+ static status_t audioPolicyReady();
+
// Returns the number of frames per audio HAL buffer.
// Corresponds to audio_stream->get_buffer_size()/audio_stream_in_frame_size() for input.
// See also getFrameCount().
@@ -318,7 +322,7 @@
static status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
static product_strategy_t getStrategyForStream(audio_stream_type_t stream);
- static audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+ static DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
static status_t getDevicesForAttributes(const AudioAttributes &aa,
AudioDeviceTypeAddrVector *devices);
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index cb00990..a2cd12f 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -401,6 +401,7 @@
uint32_t channelCount() const { return mChannelCount; }
size_t frameCount() const { return mFrameCount; }
+ audio_channel_mask_t channelMask() const { return mChannelMask; }
/*
* Return the period of the notification callback in frames.
@@ -427,8 +428,7 @@
* less than or equal to the getBufferCapacityInFrames().
* It may also be adjusted slightly for internal reasons.
*
- * Return the final size or a negative error if the track is unitialized
- * or does not support variable sizes.
+ * Return the final size or a negative value (NO_INIT) if the track is uninitialized.
*/
ssize_t setBufferSizeInFrames(size_t size);
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 0e059f7..8632abb 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -329,6 +329,9 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
virtual status_t systemReady() = 0;
+ // Indicate audio policy service is ready
+ virtual status_t audioPolicyReady() = 0;
+
// Returns the number of frames per audio HAL buffer.
virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const = 0;
@@ -432,6 +435,8 @@
status_t setAudioPortConfig(const struct audio_port_config* config) override;
audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) override;
status_t systemReady() override;
+ status_t audioPolicyReady() override;
+
size_t frameCountHAL(audio_io_handle_t ioHandle) const override;
status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
@@ -514,6 +519,7 @@
SET_AUDIO_PORT_CONFIG = media::BnAudioFlingerService::TRANSACTION_setAudioPortConfig,
GET_AUDIO_HW_SYNC_FOR_SESSION = media::BnAudioFlingerService::TRANSACTION_getAudioHwSyncForSession,
SYSTEM_READY = media::BnAudioFlingerService::TRANSACTION_systemReady,
+ AUDIO_POLICY_READY = media::BnAudioFlingerService::TRANSACTION_audioPolicyReady,
FRAME_COUNT_HAL = media::BnAudioFlingerService::TRANSACTION_frameCountHAL,
GET_MICROPHONES = media::BnAudioFlingerService::TRANSACTION_getMicrophones,
SET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_setMasterBalance,
@@ -563,7 +569,7 @@
Status createRecord(const media::CreateRecordRequest& request,
media::CreateRecordResponse* _aidl_return) override;
Status sampleRate(int32_t ioHandle, int32_t* _aidl_return) override;
- Status format(int32_t output, media::audio::common::AudioFormat* _aidl_return) override;
+ Status format(int32_t output, media::AudioFormatDescription* _aidl_return) override;
Status frameCount(int32_t ioHandle, int64_t* _aidl_return) override;
Status latency(int32_t output, int32_t* _aidl_return) override;
Status setMasterVolume(float value) override;
@@ -585,8 +591,9 @@
Status
getParameters(int32_t ioHandle, const std::string& keys, std::string* _aidl_return) override;
Status registerClient(const sp<media::IAudioFlingerClient>& client) override;
- Status getInputBufferSize(int32_t sampleRate, media::audio::common::AudioFormat format,
- int32_t channelMask, int64_t* _aidl_return) override;
+ Status getInputBufferSize(int32_t sampleRate, const media::AudioFormatDescription& format,
+ const media::AudioChannelLayout& channelMask,
+ int64_t* _aidl_return) override;
Status openOutput(const media::OpenOutputRequest& request,
media::OpenOutputResponse* _aidl_return) override;
Status openDuplicateOutput(int32_t output1, int32_t output2, int32_t* _aidl_return) override;
@@ -624,6 +631,7 @@
Status setAudioPortConfig(const media::AudioPortConfig& config) override;
Status getAudioHwSyncForSession(int32_t sessionId, int32_t* _aidl_return) override;
Status systemReady() override;
+ Status audioPolicyReady() override;
Status frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) override;
Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
Status setAudioHalPids(const std::vector<int32_t>& pids) override;
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index def7ca6..2279244 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -9,10 +9,34 @@
cc_defaults {
name: "libaudioclient_tests_defaults",
+ test_suites: ["device-tests"],
cflags: [
"-Wall",
"-Werror",
],
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ },
+}
+
+cc_test {
+ name: "audio_aidl_conversion_tests",
+ defaults: ["libaudioclient_tests_defaults"],
+ srcs: ["audio_aidl_legacy_conversion_tests.cpp"],
+ shared_libs: [
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libutils",
+ ],
+ static_libs: [
+ "audioclient-types-aidl-cpp",
+ "libaudioclient_aidl_conversion",
+ "libstagefright_foundation",
+ ],
}
cc_test {
@@ -30,8 +54,10 @@
cc_test {
name: "test_create_audiotrack",
defaults: ["libaudioclient_tests_defaults"],
- srcs: ["test_create_audiotrack.cpp",
- "test_create_utils.cpp"],
+ srcs: [
+ "test_create_audiotrack.cpp",
+ "test_create_utils.cpp",
+ ],
header_libs: [
"libmedia_headers",
"libmediametrics_headers",
@@ -49,8 +75,10 @@
cc_test {
name: "test_create_audiorecord",
defaults: ["libaudioclient_tests_defaults"],
- srcs: ["test_create_audiorecord.cpp",
- "test_create_utils.cpp"],
+ srcs: [
+ "test_create_audiorecord.cpp",
+ "test_create_utils.cpp",
+ ],
header_libs: [
"libmedia_headers",
"libmediametrics_headers",
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
new file mode 100644
index 0000000..7f8af53
--- /dev/null
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include <media/AudioCommonTypes.h>
+#include <media/AidlConversion.h>
+
+using namespace android;
+using namespace android::aidl_utils;
+
+namespace {
+
+template<typename T> size_t hash(const T& t) {
+ return std::hash<T>{}(t);
+}
+
+media::AudioChannelLayout make_ACL_None() {
+ return media::AudioChannelLayout{};
+}
+
+media::AudioChannelLayout make_ACL_Invalid() {
+ return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::invalid>(0);
+}
+
+media::AudioChannelLayout make_ACL_Stereo() {
+ return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>(
+ media::AudioChannelLayout::LAYOUT_STEREO);
+}
+
+media::AudioChannelLayout make_ACL_ChannelIndex2() {
+ return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::indexMask>(
+ media::AudioChannelLayout::INDEX_MASK_2);
+}
+
+media::AudioChannelLayout make_ACL_VoiceCall() {
+ return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::voiceMask>(
+ media::AudioChannelLayout::VOICE_CALL_MONO);
+}
+
+media::AudioDeviceDescription make_AudioDeviceDescription(media::AudioDeviceType type,
+ const std::string& connection = "") {
+ media::AudioDeviceDescription result;
+ result.type = type;
+ result.connection = connection;
+ return result;
+}
+
+media::AudioDeviceDescription make_ADD_None() {
+ return media::AudioDeviceDescription{};
+}
+
+media::AudioDeviceDescription make_ADD_DefaultIn() {
+ return make_AudioDeviceDescription(media::AudioDeviceType::IN_DEFAULT);
+}
+
+media::AudioDeviceDescription make_ADD_DefaultOut() {
+ return make_AudioDeviceDescription(media::AudioDeviceType::OUT_DEFAULT);
+}
+
+media::AudioDeviceDescription make_ADD_WiredHeadset() {
+ return make_AudioDeviceDescription(media::AudioDeviceType::OUT_HEADSET,
+ media::AudioDeviceDescription::CONNECTION_ANALOG());
+}
+
+media::AudioDeviceDescription make_ADD_BtScoHeadset() {
+ return make_AudioDeviceDescription(media::AudioDeviceType::OUT_HEADSET,
+ media::AudioDeviceDescription::CONNECTION_BT_SCO());
+}
+
+media::AudioFormatDescription make_AudioFormatDescription(media::AudioFormatType type) {
+ media::AudioFormatDescription result;
+ result.type = type;
+ return result;
+}
+
+media::AudioFormatDescription make_AudioFormatDescription(media::PcmType pcm) {
+ auto result = make_AudioFormatDescription(media::AudioFormatType::PCM);
+ result.pcm = pcm;
+ return result;
+}
+
+media::AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+ media::AudioFormatDescription result;
+ result.encoding = encoding;
+ return result;
+}
+
+media::AudioFormatDescription make_AudioFormatDescription(media::PcmType transport,
+ const std::string& encoding) {
+ auto result = make_AudioFormatDescription(encoding);
+ result.pcm = transport;
+ return result;
+}
+
+media::AudioFormatDescription make_AFD_Default() {
+ return media::AudioFormatDescription{};
+}
+
+media::AudioFormatDescription make_AFD_Invalid() {
+ return make_AudioFormatDescription(media::AudioFormatType::SYS_RESERVED_INVALID);
+}
+
+media::AudioFormatDescription make_AFD_Pcm16Bit() {
+ return make_AudioFormatDescription(media::PcmType::INT_16_BIT);
+}
+
+media::AudioFormatDescription make_AFD_Bitstream() {
+ return make_AudioFormatDescription("example");
+}
+
+media::AudioFormatDescription make_AFD_Encap() {
+ return make_AudioFormatDescription(media::PcmType::INT_16_BIT, "example.encap");
+}
+
+media::AudioFormatDescription make_AFD_Encap_with_Enc() {
+ auto afd = make_AFD_Encap();
+ afd.encoding += "+example";
+ return afd;
+}
+
+} // namespace
+
+// Verify that two independently constructed ADDs/AFDs have the same hash.
+// This ensures that, regardless of where an ADD/AFD instance originates from,
+// it can be correctly compared to other ADD/AFD instances. Thus, for example,
+// a 16-bit integer format description provided by the HAL is identical to the
+// same format description constructed by the framework.
+class HashIdentityTest : public ::testing::Test {
+ public:
+ template<typename T> void verifyHashIdentity(const std::vector<std::function<T()>>& valueGens) {
+ for (size_t i = 0; i < valueGens.size(); ++i) {
+ for (size_t j = 0; j < valueGens.size(); ++j) {
+ if (i == j) {
+ EXPECT_EQ(hash(valueGens[i]()), hash(valueGens[i]())) << i;
+ } else {
+ EXPECT_NE(hash(valueGens[i]()), hash(valueGens[j]())) << i << ", " << j;
+ }
+ }
+ }
+ }
+};
+
+TEST_F(HashIdentityTest, AudioChannelLayoutHashIdentity) {
+ verifyHashIdentity<media::AudioChannelLayout>({
+ make_ACL_None, make_ACL_Invalid, make_ACL_Stereo, make_ACL_ChannelIndex2,
+ make_ACL_VoiceCall});
+}
+
+TEST_F(HashIdentityTest, AudioDeviceDescriptionHashIdentity) {
+ verifyHashIdentity<media::AudioDeviceDescription>({
+ make_ADD_None, make_ADD_DefaultIn, make_ADD_DefaultOut, make_ADD_WiredHeadset,
+ make_ADD_BtScoHeadset});
+}
+
+TEST_F(HashIdentityTest, AudioFormatDescriptionHashIdentity) {
+ verifyHashIdentity<media::AudioFormatDescription>({
+ make_AFD_Default, make_AFD_Invalid, make_AFD_Pcm16Bit, make_AFD_Bitstream,
+ make_AFD_Encap, make_AFD_Encap_with_Enc});
+}
+
+using ChannelLayoutParam = std::tuple<media::AudioChannelLayout, bool /*isInput*/>;
+class AudioChannelLayoutRoundTripTest :
+ public testing::TestWithParam<ChannelLayoutParam> {};
+TEST_P(AudioChannelLayoutRoundTripTest, Aidl2Legacy2Aidl) {
+ const auto initial = std::get<0>(GetParam());
+ const bool isInput = std::get<1>(GetParam());
+ auto conv = aidl2legacy_AudioChannelLayout_audio_channel_mask_t(initial, isInput);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(conv.value(), isInput);
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutRoundTrip,
+ AudioChannelLayoutRoundTripTest,
+ testing::Combine(
+ testing::Values(media::AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
+ make_ACL_ChannelIndex2()),
+ testing::Values(false, true)));
+INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip,
+ AudioChannelLayoutRoundTripTest,
+ // In legacy constants the voice call is only defined for input.
+ testing::Combine(testing::Values(make_ACL_VoiceCall()), testing::Values(true)));
+
+class AudioDeviceDescriptionRoundTripTest :
+ public testing::TestWithParam<media::AudioDeviceDescription> {};
+TEST_P(AudioDeviceDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
+ const auto initial = GetParam();
+ auto conv = aidl2legacy_AudioDeviceDescription_audio_devices_t(initial);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_audio_devices_t_AudioDeviceDescription(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioDeviceDescriptionRoundTrip,
+ AudioDeviceDescriptionRoundTripTest,
+ testing::Values(media::AudioDeviceDescription{}, make_ADD_DefaultIn(),
+ make_ADD_DefaultOut(), make_ADD_WiredHeadset(), make_ADD_BtScoHeadset()));
+
+class AudioFormatDescriptionRoundTripTest :
+ public testing::TestWithParam<media::AudioFormatDescription> {};
+TEST_P(AudioFormatDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
+ const auto initial = GetParam();
+ auto conv = aidl2legacy_AudioFormatDescription_audio_format_t(initial);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_audio_format_t_AudioFormatDescription(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+}
+INSTANTIATE_TEST_SUITE_P(AudioFormatDescriptionRoundTrip,
+ AudioFormatDescriptionRoundTripTest,
+ testing::Values(make_AFD_Invalid(), media::AudioFormatDescription{}, make_AFD_Pcm16Bit()));
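
Outside of gtest, the same conversions apply wherever a device or format description crosses the AIDL/legacy boundary; a minimal sketch building the BT SCO headset description by hand (the wrapper function is illustrative only):

    #include <media/AidlConversion.h>

    android::ConversionResult<audio_devices_t> btScoHeadsetAsLegacy() {
        android::media::AudioDeviceDescription headset;
        headset.type = android::media::AudioDeviceType::OUT_HEADSET;
        headset.connection = android::media::AudioDeviceDescription::CONNECTION_BT_SCO();
        return android::aidl2legacy_AudioDeviceDescription_audio_devices_t(headset);
    }
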
diff --git a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
index c5d7da8..7f54474 100644
--- a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
+++ b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
@@ -158,14 +158,15 @@
ConversionResult<AudioDeviceTypeAddr>
aidl2legacy_AudioDeviceTypeAddress(const media::AudioDevice& aidl) {
- audio_devices_t type = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_devices_t(aidl.type));
+ audio_devices_t type = VALUE_OR_RETURN(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
return AudioDeviceTypeAddr(type, aidl.address);
}
ConversionResult<media::AudioDevice>
legacy2aidl_AudioDeviceTypeAddress(const AudioDeviceTypeAddr& legacy) {
media::AudioDevice aidl;
- aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_int32_t(legacy.mType));
+ aidl.type = VALUE_OR_RETURN(legacy2aidl_audio_devices_t_AudioDeviceDescription(legacy.mType));
aidl.address = legacy.getAddress();
return aidl;
}
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index 1dee938..ea0258a 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -34,10 +34,10 @@
namespace android {
-AudioGain::AudioGain(int index, bool useInChannelMask)
+AudioGain::AudioGain(int index, bool isInput)
{
mIndex = index;
- mUseInChannelMask = useInChannelMask;
+ mIsInput = isInput;
memset(&mGain, 0, sizeof(struct audio_gain));
}
@@ -49,12 +49,9 @@
if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
config->values[0] = mGain.default_value;
} else {
- uint32_t numValues;
- if (mUseInChannelMask) {
- numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
- } else {
- numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
- }
+ const uint32_t numValues = mIsInput ?
+ audio_channel_count_from_in_mask(mGain.channel_mask) :
+ audio_channel_count_from_out_mask(mGain.channel_mask);
for (size_t i = 0; i < numValues; i++) {
config->values[i] = mGain.default_value;
}
@@ -78,12 +75,9 @@
if ((config->channel_mask & ~mGain.channel_mask) != 0) {
return BAD_VALUE;
}
- uint32_t numValues;
- if (mUseInChannelMask) {
- numValues = audio_channel_count_from_in_mask(config->channel_mask);
- } else {
- numValues = audio_channel_count_from_out_mask(config->channel_mask);
- }
+ const uint32_t numValues = mIsInput ?
+ audio_channel_count_from_in_mask(config->channel_mask) :
+ audio_channel_count_from_out_mask(config->channel_mask);
for (size_t i = 0; i < numValues; i++) {
if ((config->values[i] < mGain.min_value) ||
(config->values[i] > mGain.max_value)) {
@@ -116,7 +110,7 @@
bool AudioGain::equals(const sp<AudioGain>& other) const
{
return other != nullptr &&
- mUseInChannelMask == other->mUseInChannelMask &&
+ mIsInput == other->mIsInput &&
mUseForVolume == other->mUseForVolume &&
// Compare audio gain
mGain.mode == other->mGain.mode &&
@@ -137,12 +131,13 @@
status_t AudioGain::writeToParcelable(media::AudioGain* parcelable) const {
parcelable->index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mIndex));
- parcelable->useInChannelMask = mUseInChannelMask;
+ parcelable->isInput = mIsInput;
parcelable->useForVolume = mUseForVolume;
parcelable->mode = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
parcelable->channelMask = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ mGain.channel_mask, mIsInput));
parcelable->minValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_value));
parcelable->maxValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.max_value));
parcelable->defaultValue = VALUE_OR_RETURN_STATUS(
@@ -161,12 +156,13 @@
status_t AudioGain::readFromParcelable(const media::AudioGain& parcelable) {
mIndex = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.index));
- mUseInChannelMask = parcelable.useInChannelMask;
+ mIsInput = parcelable.isInput;
mUseForVolume = parcelable.useForVolume;
mGain.mode = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.mode));
mGain.channel_mask = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_audio_channel_mask_t(parcelable.channelMask));
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ parcelable.channelMask, parcelable.isInput));
mGain.min_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.minValue));
mGain.max_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.maxValue));
mGain.default_value = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.defaultValue));
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index fafabd9..c70a6c2 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -210,7 +210,8 @@
parcelable->name = mName;
parcelable->type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_type_t_AudioPortType(mType));
parcelable->role = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_role_t_AudioPortRole(mRole));
- parcelable->profiles = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioProfileVector(mProfiles));
+ parcelable->profiles = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_AudioProfileVector(mProfiles, useInputChannelMask()));
parcelable->extraAudioDescriptors = mExtraAudioDescriptors;
parcelable->gains = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioGains(mGains));
return OK;
@@ -226,7 +227,8 @@
mName = parcelable.name;
mType = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortType_audio_port_type_t(parcelable.type));
mRole = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortRole_audio_port_role_t(parcelable.role));
- mProfiles = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioProfileVector(parcelable.profiles));
+ mProfiles = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioProfileVector(parcelable.profiles, useInputChannelMask()));
mExtraAudioDescriptors = parcelable.extraAudioDescriptors;
mGains = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioGains(parcelable.gains));
return OK;
@@ -330,23 +332,19 @@
mGain.ramp_duration_ms == other->mGain.ramp_duration_ms;
}
-status_t AudioPortConfig::writeToParcel(Parcel *parcel) const {
- media::AudioPortConfig parcelable;
- return writeToParcelable(&parcelable)
- ?: parcelable.writeToParcel(parcel);
-}
-
-status_t AudioPortConfig::writeToParcelable(media::AudioPortConfig* parcelable) const {
+status_t AudioPortConfig::writeToParcelable(
+ media::AudioPortConfig* parcelable, bool isInput) const {
parcelable->sampleRate = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mSamplingRate));
- parcelable->format = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_format_t_AudioFormat(mFormat));
+ parcelable->format = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
parcelable->channelMask = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_channel_mask_t_int32_t(mChannelMask));
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(mChannelMask, isInput));
parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
parcelable->gain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.index));
parcelable->gain.mode = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
parcelable->gain.channelMask = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(mGain.channel_mask, isInput));
parcelable->gain.rampDurationMs = VALUE_OR_RETURN_STATUS(
convertIntegral<int32_t>(mGain.ramp_duration_ms));
parcelable->gain.values = VALUE_OR_RETURN_STATUS(convertContainer<std::vector<int32_t>>(
@@ -354,23 +352,20 @@
return OK;
}
-status_t AudioPortConfig::readFromParcel(const Parcel *parcel) {
- media::AudioPortConfig parcelable;
- return parcelable.readFromParcel(parcel)
- ?: readFromParcelable(parcelable);
-}
-
-status_t AudioPortConfig::readFromParcelable(const media::AudioPortConfig& parcelable) {
+status_t AudioPortConfig::readFromParcelable(
+ const media::AudioPortConfig& parcelable, bool isInput) {
mSamplingRate = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(parcelable.sampleRate));
- mFormat = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioFormat_audio_format_t(parcelable.format));
+ mFormat = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format));
mChannelMask = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_audio_channel_mask_t(parcelable.channelMask));
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(parcelable.channelMask, isInput));
mId = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(parcelable.id));
mGain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int>(parcelable.gain.index));
mGain.mode = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_gain_mode_t_mask(parcelable.gain.mode));
mGain.channel_mask = VALUE_OR_RETURN_STATUS(
- aidl2legacy_int32_t_audio_channel_mask_t(parcelable.gain.channelMask));
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ parcelable.gain.channelMask, isInput));
mGain.ramp_duration_ms = VALUE_OR_RETURN_STATUS(
convertIntegral<unsigned int>(parcelable.gain.rampDurationMs));
if (parcelable.gain.values.size() > std::size(mGain.values)) {
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index 8ac3f73..47b2d54 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -154,19 +154,17 @@
return *this;
}
-status_t AudioProfile::writeToParcel(Parcel *parcel) const {
- media::AudioProfile parcelable = VALUE_OR_RETURN_STATUS(toParcelable());
- return parcelable.writeToParcel(parcel);
- }
-
ConversionResult<media::AudioProfile>
-AudioProfile::toParcelable() const {
+AudioProfile::toParcelable(bool isInput) const {
media::AudioProfile parcelable;
parcelable.name = mName;
- parcelable.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormat(mFormat));
+ parcelable.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
parcelable.channelMasks = VALUE_OR_RETURN(
- convertContainer<std::vector<int32_t>>(mChannelMasks,
- legacy2aidl_audio_channel_mask_t_int32_t));
+ convertContainer<std::vector<media::AudioChannelLayout>>(
+ mChannelMasks,
+ [isInput](audio_channel_mask_t m) {
+ return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
+ }));
parcelable.samplingRates = VALUE_OR_RETURN(
convertContainer<std::vector<int32_t>>(mSamplingRates,
convertIntegral<int32_t, uint32_t>));
@@ -178,23 +176,17 @@
return parcelable;
}
-status_t AudioProfile::readFromParcel(const Parcel *parcel) {
- media::AudioProfile parcelable;
- if (status_t status = parcelable.readFromParcel(parcel); status != OK) {
- return status;
- }
- *this = *VALUE_OR_RETURN_STATUS(fromParcelable(parcelable));
- return OK;
-}
-
ConversionResult<sp<AudioProfile>>
-AudioProfile::fromParcelable(const media::AudioProfile& parcelable) {
+AudioProfile::fromParcelable(const media::AudioProfile& parcelable, bool isInput) {
sp<AudioProfile> legacy = new AudioProfile();
legacy->mName = parcelable.name;
- legacy->mFormat = VALUE_OR_RETURN(aidl2legacy_AudioFormat_audio_format_t(parcelable.format));
+ legacy->mFormat = VALUE_OR_RETURN(
+ aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format));
legacy->mChannelMasks = VALUE_OR_RETURN(
convertContainer<ChannelMaskSet>(parcelable.channelMasks,
- aidl2legacy_int32_t_audio_channel_mask_t));
+ [isInput](const media::AudioChannelLayout& l) {
+ return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
+ }));
legacy->mSamplingRates = VALUE_OR_RETURN(
convertContainer<SampleRateSet>(parcelable.samplingRates,
convertIntegral<uint32_t, int32_t>));
@@ -208,13 +200,13 @@
}
ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl) {
- return AudioProfile::fromParcelable(aidl);
+aidl2legacy_AudioProfile(const media::AudioProfile& aidl, bool isInput) {
+ return AudioProfile::fromParcelable(aidl, isInput);
}
ConversionResult<media::AudioProfile>
-legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy) {
- return legacy->toParcelable();
+legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput) {
+ return legacy->toParcelable(isInput);
}
ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
@@ -328,33 +320,6 @@
}
}
-status_t AudioProfileVector::writeToParcel(Parcel *parcel) const
-{
- status_t status = NO_ERROR;
- if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
- for (const auto &audioProfile : *this) {
- if ((status = parcel->writeParcelable(*audioProfile)) != NO_ERROR) {
- break;
- }
- }
- return status;
-}
-
-status_t AudioProfileVector::readFromParcel(const Parcel *parcel)
-{
- status_t status = NO_ERROR;
- this->clear();
- if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
- for (size_t i = 0; i < this->size(); ++i) {
- this->at(i) = new AudioProfile(AUDIO_FORMAT_DEFAULT, AUDIO_CHANNEL_NONE, 0 /*sampleRate*/);
- if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
- this->clear();
- break;
- }
- }
- return status;
-}
-
bool AudioProfileVector::equals(const AudioProfileVector& other) const
{
return std::equal(begin(), end(), other.begin(), other.end(),
@@ -364,13 +329,19 @@
}
ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl) {
- return convertContainer<AudioProfileVector>(aidl, aidl2legacy_AudioProfile);
+aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl, bool isInput) {
+ return convertContainer<AudioProfileVector>(aidl,
+ [isInput](const media::AudioProfile& p) {
+ return aidl2legacy_AudioProfile(p, isInput);
+ });
}
ConversionResult<std::vector<media::AudioProfile>>
-legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy) {
- return convertContainer<std::vector<media::AudioProfile>>(legacy, legacy2aidl_AudioProfile);
+legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput) {
+ return convertContainer<std::vector<media::AudioProfile>>(legacy,
+ [isInput](const sp<AudioProfile>& p) {
+ return legacy2aidl_AudioProfile(p, isInput);
+ });
}
AudioProfileVector intersectAudioProfiles(const AudioProfileVector& profiles1,
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 5cfea81..3cce722 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -166,7 +166,7 @@
status_t DeviceDescriptorBase::writeToParcelable(media::AudioPort* parcelable) const {
AudioPort::writeToParcelable(parcelable);
- AudioPortConfig::writeToParcelable(&parcelable->activeConfig);
+ AudioPortConfig::writeToParcelable(&parcelable->activeConfig, useInputChannelMask());
parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
media::AudioPortDeviceExt ext;
@@ -190,7 +190,7 @@
return BAD_VALUE;
}
status_t status = AudioPort::readFromParcelable(parcelable)
- ?: AudioPortConfig::readFromParcelable(parcelable.activeConfig);
+ ?: AudioPortConfig::readFromParcelable(parcelable.activeConfig, useInputChannelMask());
if (status != OK) {
return status;
}
diff --git a/media/libaudiofoundation/include/media/AudioGain.h b/media/libaudiofoundation/include/media/AudioGain.h
index a06b686..28769d2 100644
--- a/media/libaudiofoundation/include/media/AudioGain.h
+++ b/media/libaudiofoundation/include/media/AudioGain.h
@@ -31,7 +31,7 @@
class AudioGain: public RefBase, public Parcelable
{
public:
- AudioGain(int index, bool useInChannelMask);
+ AudioGain(int index, bool isInput);
virtual ~AudioGain() {}
void setMode(audio_gain_mode_t mode) { mGain.mode = mode; }
@@ -80,7 +80,7 @@
private:
int mIndex;
struct audio_gain mGain;
- bool mUseInChannelMask;
+ bool mIsInput;
bool mUseForVolume = false;
};
diff --git a/media/libaudiofoundation/include/media/AudioPort.h b/media/libaudiofoundation/include/media/AudioPort.h
index 1cee1c9..6e1d032 100644
--- a/media/libaudiofoundation/include/media/AudioPort.h
+++ b/media/libaudiofoundation/include/media/AudioPort.h
@@ -130,7 +130,7 @@
};
-class AudioPortConfig : public virtual RefBase, public virtual Parcelable
+class AudioPortConfig : public virtual RefBase
{
public:
virtual ~AudioPortConfig() = default;
@@ -152,10 +152,8 @@
bool equals(const sp<AudioPortConfig>& other) const;
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
- status_t writeToParcelable(media::AudioPortConfig* parcelable) const;
- status_t readFromParcelable(const media::AudioPortConfig& parcelable);
+ status_t writeToParcelable(media::AudioPortConfig* parcelable, bool isInput) const;
+ status_t readFromParcelable(const media::AudioPortConfig& parcelable, bool isInput);
protected:
unsigned int mSamplingRate = 0u;
diff --git a/media/libaudiofoundation/include/media/AudioProfile.h b/media/libaudiofoundation/include/media/AudioProfile.h
index 6a36e78..e34a49f 100644
--- a/media/libaudiofoundation/include/media/AudioProfile.h
+++ b/media/libaudiofoundation/include/media/AudioProfile.h
@@ -29,7 +29,7 @@
namespace android {
-class AudioProfile final : public RefBase, public Parcelable
+class AudioProfile final : public RefBase
{
public:
static sp<AudioProfile> createFullDynamic(audio_format_t dynamicFormat = AUDIO_FORMAT_DEFAULT);
@@ -81,11 +81,9 @@
bool equals(const sp<AudioProfile>& other) const;
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
-
- ConversionResult<media::AudioProfile> toParcelable() const;
- static ConversionResult<sp<AudioProfile>> fromParcelable(const media::AudioProfile& parcelable);
+ ConversionResult<media::AudioProfile> toParcelable(bool isInput) const;
+ static ConversionResult<sp<AudioProfile>> fromParcelable(
+ const media::AudioProfile& parcelable, bool isInput);
private:
@@ -106,11 +104,11 @@
// Conversion routines, according to AidlConversion.h conventions.
ConversionResult<sp<AudioProfile>>
-aidl2legacy_AudioProfile(const media::AudioProfile& aidl);
+aidl2legacy_AudioProfile(const media::AudioProfile& aidl, bool isInput);
ConversionResult<media::AudioProfile>
-legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy);
+legacy2aidl_AudioProfile(const sp<AudioProfile>& legacy, bool isInput);
-class AudioProfileVector : public std::vector<sp<AudioProfile>>, public Parcelable
+class AudioProfileVector : public std::vector<sp<AudioProfile>>
{
public:
virtual ~AudioProfileVector() = default;
@@ -136,18 +134,15 @@
virtual void dump(std::string *dst, int spaces) const;
bool equals(const AudioProfileVector& other) const;
-
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
};
bool operator == (const AudioProfile &left, const AudioProfile &right);
// Conversion routines, according to AidlConversion.h conventions.
ConversionResult<AudioProfileVector>
-aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl);
+aidl2legacy_AudioProfileVector(const std::vector<media::AudioProfile>& aidl, bool isInput);
ConversionResult<std::vector<media::AudioProfile>>
-legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy);
+legacy2aidl_AudioProfileVector(const AudioProfileVector& legacy, bool isInput);
AudioProfileVector intersectAudioProfiles(const AudioProfileVector& profiles1,
const AudioProfileVector& profiles2);
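
Because the profile conversions now need the port direction, callers are expected to thread it through from the owning port; a minimal sketch for an input (capture) port, with an illustrative wrapper name:

    #include <media/AudioProfile.h>

    android::ConversionResult<std::vector<android::media::AudioProfile>>
    inputProfilesToAidl(const android::AudioProfileVector& profiles) {
        return android::legacy2aidl_AudioProfileVector(profiles, true /*isInput*/);
    }
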
diff --git a/media/libaudiofoundation/tests/Android.bp b/media/libaudiofoundation/tests/Android.bp
index bb9a5f2..f3cd446 100644
--- a/media/libaudiofoundation/tests/Android.bp
+++ b/media/libaudiofoundation/tests/Android.bp
@@ -11,12 +11,19 @@
name: "audiofoundation_parcelable_test",
shared_libs: [
- "libaudiofoundation",
+ "libbase",
"libbinder",
"liblog",
"libutils",
],
+ static_libs: [
+ "audioclient-types-aidl-cpp",
+ "libaudioclient_aidl_conversion",
+ "libaudiofoundation",
+ "libstagefright_foundation",
+ ],
+
header_libs: [
"libaudio_system_headers",
],
diff --git a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
index 068b5d8..1696980 100644
--- a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
+++ b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
@@ -53,7 +53,7 @@
AudioGains getAudioGainsForTest() {
AudioGains audioGains;
- sp<AudioGain> audioGain = new AudioGain(0 /*index*/, false /*useInChannelMask*/);
+ sp<AudioGain> audioGain = new AudioGain(0 /*index*/, false /*isInput*/);
audioGain->setMode(AUDIO_GAIN_MODE_JOINT);
audioGain->setChannelMask(AUDIO_CHANNEL_OUT_STEREO);
audioGain->setMinValueInMb(-3200);
@@ -86,17 +86,6 @@
ASSERT_TRUE(audioGainsFromParcel.equals(audioGains));
}
-TEST(AudioFoundationParcelableTest, ParcelingAudioProfileVector) {
- Parcel data;
- AudioProfileVector audioProfiles = getAudioProfileVectorForTest();
-
- ASSERT_EQ(data.writeParcelable(audioProfiles), NO_ERROR);
- data.setDataPosition(0);
- AudioProfileVector audioProfilesFromParcel;
- ASSERT_EQ(data.readParcelable(&audioProfilesFromParcel), NO_ERROR);
- ASSERT_TRUE(audioProfilesFromParcel.equals(audioProfiles));
-}
-
TEST(AudioFoundationParcelableTest, ParcelingAudioPort) {
Parcel data;
sp<AudioPort> audioPort = new AudioPort(
@@ -116,11 +105,15 @@
Parcel data;
sp<AudioPortConfig> audioPortConfig = new AudioPortConfigTestStub();
audioPortConfig->applyAudioPortConfig(&TEST_AUDIO_PORT_CONFIG);
-
- ASSERT_EQ(data.writeParcelable(*audioPortConfig), NO_ERROR);
+ media::AudioPortConfig parcelable{};
+ ASSERT_EQ(NO_ERROR, audioPortConfig->writeToParcelable(&parcelable, false /*isInput*/));
+ ASSERT_EQ(NO_ERROR, data.writeParcelable(parcelable));
data.setDataPosition(0);
+ media::AudioPortConfig parcelableFromParcel{};
+ ASSERT_EQ(NO_ERROR, data.readParcelable(&parcelableFromParcel));
sp<AudioPortConfig> audioPortConfigFromParcel = new AudioPortConfigTestStub();
- ASSERT_EQ(data.readParcelable(audioPortConfigFromParcel.get()), NO_ERROR);
+ ASSERT_EQ(NO_ERROR, audioPortConfigFromParcel->readFromParcelable(
+ parcelableFromParcel, false /*isInput*/));
ASSERT_TRUE(audioPortConfigFromParcel->equals(audioPortConfig));
}
diff --git a/media/libaudiohal/FactoryHalHidl.cpp b/media/libaudiohal/FactoryHalHidl.cpp
index e420d07..c19d2c2 100644
--- a/media/libaudiohal/FactoryHalHidl.cpp
+++ b/media/libaudiohal/FactoryHalHidl.cpp
@@ -94,7 +94,7 @@
} // namespace
void* createPreferredImpl(const std::string& package, const std::string& interface) {
- for (auto version = detail::sAudioHALVersions; version != nullptr; ++version) {
+ for (auto version = detail::sAudioHALVersions; *version != nullptr; ++version) {
void* rawInterface = nullptr;
if (hasHalService(package, *version, interface)
&& createHalService(*version, interface, &rawInterface)) {
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
index 32eaa31..0503698 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -105,6 +105,15 @@
}
// static
+void ConversionHelperHidl::argsFromHal(
+ const Vector<String16>& args, hidl_vec<hidl_string> *hidlArgs) {
+ hidlArgs->resize(args.size());
+ for (size_t i = 0; i < args.size(); ++i) {
+ (*hidlArgs)[i] = String8(args[i]).c_str();
+ }
+}
+
+// static
status_t ConversionHelperHidl::analyzeResult(const Result& result) {
switch (result) {
case Result::OK: return OK;
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.h b/media/libaudiohal/impl/ConversionHelperHidl.h
index 59122c7..2909013 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.h
+++ b/media/libaudiohal/impl/ConversionHelperHidl.h
@@ -21,6 +21,8 @@
#include <hidl/HidlSupport.h>
#include <system/audio.h>
#include <utils/String8.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
using ::android::hardware::audio::CPP_VERSION::ParameterValue;
using CoreResult = ::android::hardware::audio::CPP_VERSION::Result;
@@ -37,6 +39,7 @@
static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
+ static void argsFromHal(const Vector<String16>& args, hidl_vec<hidl_string> *hidlArgs);
ConversionHelperHidl(const char* className);
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index ca4f663..f86df1e 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -457,11 +457,13 @@
}
#endif
-status_t DeviceHalHidl::dump(int fd) {
+status_t DeviceHalHidl::dump(int fd, const Vector<String16>& args) {
if (mDevice == 0) return NO_INIT;
native_handle_t* hidlHandle = native_handle_create(1, 0);
hidlHandle->data[0] = fd;
- Return<void> ret = mDevice->debug(hidlHandle, {} /* options */);
+ hidl_vec<hidl_string> hidlArgs;
+ argsFromHal(args, &hidlArgs);
+ Return<void> ret = mDevice->debug(hidlHandle, hidlArgs);
native_handle_delete(hidlHandle);
return processReturn("dump", ret);
}
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index 2c847cf..2694ab3 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -119,7 +119,7 @@
status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
- virtual status_t dump(int fd);
+ status_t dump(int fd, const Vector<String16>& args) override;
private:
friend class DevicesFactoryHalHidl;
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index af7dc1a..e0304af 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -233,7 +233,7 @@
return INVALID_OPERATION;
}
-status_t DeviceHalLocal::dump(int fd) {
+status_t DeviceHalLocal::dump(int fd, const Vector<String16>& /* args */) {
return mDev->dump(mDev, fd);
}
diff --git a/media/libaudiohal/impl/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
index 46b510b..2fde936 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -112,7 +112,7 @@
status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
- virtual status_t dump(int fd);
+ status_t dump(int fd, const Vector<String16>& args) override;
void closeOutputStream(struct audio_stream_out *stream_out);
void closeInputStream(struct audio_stream_in *stream_in);
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index 9c4363c..f75bbf3 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -73,7 +73,9 @@
uint32_t index, effect_descriptor_t *pDescriptor) {
// TODO: We need somehow to track the changes on the server side
// or figure out how to convert everybody to query all the descriptors at once.
- // TODO: check for nullptr
+ if (pDescriptor == nullptr) {
+ return BAD_VALUE;
+ }
if (mLastDescriptors.size() == 0) {
status_t queryResult = queryAllDescriptors();
if (queryResult != OK) return queryResult;
@@ -85,7 +87,9 @@
status_t EffectsFactoryHalHidl::getDescriptor(
const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
- // TODO: check for nullptr
+ if (pDescriptor == nullptr || pEffectUuid == nullptr) {
+ return BAD_VALUE;
+ }
if (mEffectsFactory == 0) return NO_INIT;
Uuid hidlUuid;
UuidUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
@@ -105,6 +109,33 @@
return processReturn(__FUNCTION__, ret);
}
+status_t EffectsFactoryHalHidl::getDescriptors(const effect_uuid_t *pEffectType,
+ std::vector<effect_descriptor_t> *descriptors) {
+ if (pEffectType == nullptr || descriptors == nullptr) {
+ return BAD_VALUE;
+ }
+
+ uint32_t numEffects = 0;
+ status_t status = queryNumberEffects(&numEffects);
+ if (status != NO_ERROR) {
+ ALOGW("%s error %d from FactoryHal queryNumberEffects", __func__, status);
+ return status;
+ }
+
+ for (uint32_t i = 0; i < numEffects; i++) {
+ effect_descriptor_t descriptor;
+ status = getDescriptor(i, &descriptor);
+ if (status != NO_ERROR) {
+ ALOGW("%s error %d from FactoryHal getDescriptor", __func__, status);
+ continue;
+ }
+ if (memcmp(&descriptor.type, pEffectType, sizeof(effect_uuid_t)) == 0) {
+ descriptors->push_back(descriptor);
+ }
+ }
+ return descriptors->empty() ? NAME_NOT_FOUND : NO_ERROR;
+}
+
status_t EffectsFactoryHalHidl::createEffect(
const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
int32_t deviceId __unused, sp<EffectHalInterface> *effect) {
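getDescriptors() replaces ad-hoc filtering at call sites: it walks every descriptor reported by the factory and keeps those whose type UUID matches, returning NAME_NOT_FOUND when nothing matches. A stand-alone analogue of that filter, with PlainUuid/PlainDescriptor as illustrative stand-ins for effect_uuid_t/effect_descriptor_t:

#include <cstring>
#include <vector>

struct PlainUuid { unsigned char bytes[16]; };
struct PlainDescriptor { PlainUuid type; PlainUuid uuid; };

// Keep only the descriptors whose type field matches the requested UUID.
std::vector<PlainDescriptor> filterByType(const std::vector<PlainDescriptor>& all,
                                          const PlainUuid& wantedType) {
    std::vector<PlainDescriptor> matches;
    for (const auto& d : all) {
        if (std::memcmp(&d.type, &wantedType, sizeof(PlainUuid)) == 0) {
            matches.push_back(d);
        }
    }
    return matches;  // the real method maps an empty result to NAME_NOT_FOUND
}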
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 5fa85e7..ff26d9f 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -45,6 +45,9 @@
virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
effect_descriptor_t *pDescriptor);
+ virtual status_t getDescriptors(const effect_uuid_t *pEffectType,
+ std::vector<effect_descriptor_t> *descriptors);
+
// Creates an effect engine of the specified type.
// To release the effect engine, it is necessary to release references
// to the returned effect object.
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 539a149..452f84d 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -152,11 +152,13 @@
return processReturn("standby", mStream->standby());
}
-status_t StreamHalHidl::dump(int fd) {
+status_t StreamHalHidl::dump(int fd, const Vector<String16>& args) {
if (!mStream) return NO_INIT;
native_handle_t* hidlHandle = native_handle_create(1, 0);
hidlHandle->data[0] = fd;
- Return<void> ret = mStream->debug(hidlHandle, {} /* options */);
+ hidl_vec<hidl_string> hidlArgs;
+ argsFromHal(args, &hidlArgs);
+ Return<void> ret = mStream->debug(hidlHandle, hidlArgs);
native_handle_delete(hidlHandle);
mStreamPowerLog.dump(fd);
return processReturn("dump", ret);
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 970903b..6f5dd04 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -71,7 +71,7 @@
// Put the audio hardware input/output into standby mode.
virtual status_t standby();
- virtual status_t dump(int fd);
+ virtual status_t dump(int fd, const Vector<String16>& args) override;
// Start a stream operating in mmap mode.
virtual status_t start();
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index 34bd5df..11fac61 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -87,7 +87,8 @@
return mStream->standby(mStream);
}
-status_t StreamHalLocal::dump(int fd) {
+status_t StreamHalLocal::dump(int fd, const Vector<String16>& args) {
+ (void) args;
status_t status = mStream->dump(mStream, fd);
mStreamPowerLog.dump(fd);
return status;
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index b260495..493c521 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -50,7 +50,7 @@
// Put the audio hardware input/output into standby mode.
virtual status_t standby();
- virtual status_t dump(int fd);
+ virtual status_t dump(int fd, const Vector<String16>& args) override;
// Start a stream operating in mmap mode.
virtual status_t start() = 0;
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index 29ef011..69cbcec 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -120,7 +120,7 @@
virtual status_t removeDeviceEffect(
audio_port_handle_t device, sp<EffectHalInterface> effect) = 0;
- virtual status_t dump(int fd) = 0;
+ virtual status_t dump(int fd, const Vector<String16>& args) = 0;
protected:
// Subclasses can not be constructed directly by clients.
diff --git a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
index 9fb56ae..3e505bd 100644
--- a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
@@ -37,6 +37,9 @@
virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
effect_descriptor_t *pDescriptor) = 0;
+ virtual status_t getDescriptors(const effect_uuid_t *pEffectType,
+ std::vector<effect_descriptor_t> *descriptors) = 0;
+
// Creates an effect engine of the specified type.
// To release the effect engine, it is necessary to release references
// to the returned effect object.
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 2be12fb..2b5b2db 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -25,6 +25,7 @@
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <utils/String8.h>
+#include <utils/Vector.h>
namespace android {
@@ -69,7 +70,7 @@
// Put the audio hardware input/output into standby mode.
virtual status_t standby() = 0;
- virtual status_t dump(int fd) = 0;
+ virtual status_t dump(int fd, const Vector<String16>& args = {}) = 0;
// Start a stream operating in mmap mode.
virtual status_t start() = 0;
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index d85e2e9..e68c002 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -434,6 +434,12 @@
track->mHapticIntensity = hapticIntensity;
}
} break;
+ case HAPTIC_MAX_AMPLITUDE: {
+ const float hapticMaxAmplitude = *reinterpret_cast<float*>(value);
+ if (track->mHapticMaxAmplitude != hapticMaxAmplitude) {
+ track->mHapticMaxAmplitude = hapticMaxAmplitude;
+ }
+ } break;
default:
LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
}
@@ -553,6 +559,7 @@
// haptic
t->mHapticPlaybackEnabled = false;
t->mHapticIntensity = os::HapticScale::NONE;
+ t->mHapticMaxAmplitude = NAN;
t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
t->mMixerHapticChannelCount = 0;
t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
@@ -602,7 +609,8 @@
switch (t->mMixerFormat) {
// Mixer format should be AUDIO_FORMAT_PCM_FLOAT.
case AUDIO_FORMAT_PCM_FLOAT: {
- os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity);
+ os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity,
+ t->mHapticMaxAmplitude);
} break;
default:
LOG_ALWAYS_FATAL("bad mMixerFormat: %#x", t->mMixerFormat);
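HAPTIC_MAX_AMPLITUDE is initialized to NAN and forwarded to os::scaleHapticData() so haptic samples can be limited to the device's safe amplitude. The sketch below only illustrates that limiting concept; it is not the libaudioutils implementation:

#include <algorithm>
#include <cmath>
#include <cstddef>

// Illustration only: clamp haptic samples to +/- maxAmplitude. A NaN maximum
// (the default initialized above) means "no limiting".
void limitHapticAmplitude(float* buffer, size_t count, float maxAmplitude) {
    if (std::isnan(maxAmplitude)) return;   // default: leave samples untouched
    for (size_t i = 0; i < count; ++i) {
        buffer[i] = std::clamp(buffer[i], -maxAmplitude, maxAmplitude);
    }
}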
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index cd47dc6..2988c67 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIO_MIXER_OPS_H
#define ANDROID_AUDIO_MIXER_OPS_H
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
#include <system/audio.h>
namespace android {
@@ -229,15 +231,26 @@
* complexity of working on interleaved streams is now getting
* too high, and likely limits compiler optimization.
*/
-template <int MIXTYPE, int NCHAN,
+
+// compile-time function.
+constexpr inline bool usesCenterChannel(audio_channel_mask_t mask) {
+ using namespace audio_utils::channels;
+ for (size_t i = 0; i < std::size(kSideFromChannelIdx); ++i) {
+ if ((mask & (1 << i)) != 0 && kSideFromChannelIdx[i] == AUDIO_GEOMETRY_SIDE_CENTER) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Applies stereo volume to the audio data based on proper left right channel affinity
+ * (templated channel MASK parameter).
+ */
+template <int MIXTYPE, audio_channel_mask_t MASK,
typename TO, typename TI, typename TV,
typename F>
-void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
- static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
- static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
- || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
- || MIXTYPE == MIXTYPE_STEREOEXPAND
- || MIXTYPE == MIXTYPE_MONOEXPAND);
+void stereoVolumeHelperWithChannelMask(TO*& out, const TI*& in, const TV *vol, F f) {
auto proc = [](auto& a, const auto& b) {
if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
|| MIXTYPE == MIXTYPE_STEREOEXPAND
@@ -250,59 +263,109 @@
auto inp = [&in]() -> const TI& {
if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND
|| MIXTYPE == MIXTYPE_MONOEXPAND) {
- return *in;
+ return *in; // note STEREOEXPAND assumes replicated L/R channels (see doc below).
} else {
return *in++;
}
};
- // HALs should only expose the canonical channel masks.
- proc(*out++, f(inp(), vol[0])); // front left
- if constexpr (NCHAN == 1) return;
- proc(*out++, f(inp(), vol[1])); // front right
- if constexpr (NCHAN == 2) return;
- if constexpr (NCHAN == 4) {
- proc(*out++, f(inp(), vol[0])); // back left
- proc(*out++, f(inp(), vol[1])); // back right
- return;
- }
-
- // TODO: Precompute center volume if not ramping.
std::decay_t<TV> center;
- if constexpr (std::is_floating_point_v<TV>) {
- center = (vol[0] + vol[1]) * 0.5; // do not use divide
- } else {
- center = (vol[0] >> 1) + (vol[1] >> 1); // rounds to 0.
- }
- proc(*out++, f(inp(), center)); // center (or 2.1 LFE)
- if constexpr (NCHAN == 3) return;
- if constexpr (NCHAN == 5) {
- proc(*out++, f(inp(), vol[0])); // back left
- proc(*out++, f(inp(), vol[1])); // back right
- return;
- }
-
- proc(*out++, f(inp(), center)); // lfe
- proc(*out++, f(inp(), vol[0])); // back left
- proc(*out++, f(inp(), vol[1])); // back right
- if constexpr (NCHAN == 6) return;
- if constexpr (NCHAN == 7) {
- proc(*out++, f(inp(), center)); // back center
- return;
- }
- // NCHAN == 8
- proc(*out++, f(inp(), vol[0])); // side left
- proc(*out++, f(inp(), vol[1])); // side right
- if constexpr (NCHAN > FCC_8) {
- // Mutes to zero extended surround channels.
- // 7.1.4 has the correct behavior.
- // 22.2 has the behavior that FLC and FRC will be mixed instead
- // of SL and SR and LFE will be center, not left.
- for (int i = 8; i < NCHAN; ++i) {
- // TODO: Consider using android::audio_utils::channels::kSideFromChannelIdx
- proc(*out++, f(inp(), 0.f));
+ constexpr bool USES_CENTER_CHANNEL = usesCenterChannel(MASK);
+ if constexpr (USES_CENTER_CHANNEL) {
+ if constexpr (std::is_floating_point_v<TV>) {
+ center = (vol[0] + vol[1]) * 0.5; // do not use divide
+ } else {
+ center = (vol[0] >> 1) + (vol[1] >> 1); // rounds to 0.
}
}
+
+ using namespace audio_utils::channels;
+
+ // if LFE and LFE2 are both present, they take left and right volume respectively.
+ constexpr unsigned LFE_LFE2 = \
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY | AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2;
+ constexpr bool has_LFE_LFE2 = (MASK & LFE_LFE2) == LFE_LFE2;
+
+#pragma push_macro("DO_CHANNEL_POSITION")
+#undef DO_CHANNEL_POSITION
+#define DO_CHANNEL_POSITION(BIT_INDEX) \
+ if constexpr ((MASK & (1 << BIT_INDEX)) != 0) { \
+ constexpr auto side = kSideFromChannelIdx[BIT_INDEX]; \
+ if constexpr (side == AUDIO_GEOMETRY_SIDE_LEFT || \
+ has_LFE_LFE2 && (1 << BIT_INDEX) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) { \
+ proc(*out++, f(inp(), vol[0])); \
+ } else if constexpr (side == AUDIO_GEOMETRY_SIDE_RIGHT || \
+ has_LFE_LFE2 && (1 << BIT_INDEX) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) { \
+ proc(*out++, f(inp(), vol[1])); \
+ } else /* constexpr */ { \
+ proc(*out++, f(inp(), center)); \
+ } \
+ }
+
+ DO_CHANNEL_POSITION(0);
+ DO_CHANNEL_POSITION(1);
+ DO_CHANNEL_POSITION(2);
+ DO_CHANNEL_POSITION(3);
+ DO_CHANNEL_POSITION(4);
+ DO_CHANNEL_POSITION(5);
+ DO_CHANNEL_POSITION(6);
+ DO_CHANNEL_POSITION(7);
+
+ DO_CHANNEL_POSITION(8);
+ DO_CHANNEL_POSITION(9);
+ DO_CHANNEL_POSITION(10);
+ DO_CHANNEL_POSITION(11);
+ DO_CHANNEL_POSITION(12);
+ DO_CHANNEL_POSITION(13);
+ DO_CHANNEL_POSITION(14);
+ DO_CHANNEL_POSITION(15);
+
+ DO_CHANNEL_POSITION(16);
+ DO_CHANNEL_POSITION(17);
+ DO_CHANNEL_POSITION(18);
+ DO_CHANNEL_POSITION(19);
+ DO_CHANNEL_POSITION(20);
+ DO_CHANNEL_POSITION(21);
+ DO_CHANNEL_POSITION(22);
+ DO_CHANNEL_POSITION(23);
+ static_assert(FCC_LIMIT <= FCC_24); // Note: this may need to change.
+#pragma pop_macro("DO_CHANNEL_POSITION")
+}
+
+// These are the channel position masks we expect from the HAL.
+// See audio_channel_out_mask_from_count() but this is constexpr
+constexpr inline audio_channel_mask_t canonicalChannelMaskFromCount(size_t channelCount) {
+ constexpr audio_channel_mask_t canonical[] = {
+ [0] = AUDIO_CHANNEL_NONE,
+ [1] = AUDIO_CHANNEL_OUT_MONO,
+ [2] = AUDIO_CHANNEL_OUT_STEREO,
+ [3] = AUDIO_CHANNEL_OUT_2POINT1,
+ [4] = AUDIO_CHANNEL_OUT_QUAD,
+ [5] = AUDIO_CHANNEL_OUT_PENTA,
+ [6] = AUDIO_CHANNEL_OUT_5POINT1,
+ [7] = AUDIO_CHANNEL_OUT_6POINT1,
+ [8] = AUDIO_CHANNEL_OUT_7POINT1,
+ [12] = AUDIO_CHANNEL_OUT_7POINT1POINT4,
+ [24] = AUDIO_CHANNEL_OUT_22POINT2,
+ };
+ return channelCount < std::size(canonical) ? canonical[channelCount] : AUDIO_CHANNEL_NONE;
+}
+
+template <int MIXTYPE, int NCHAN,
+ typename TO, typename TI, typename TV,
+ typename F>
+void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
+ static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
+ static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
+ || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+ || MIXTYPE == MIXTYPE_STEREOEXPAND
+ || MIXTYPE == MIXTYPE_MONOEXPAND);
+ constexpr audio_channel_mask_t MASK{canonicalChannelMaskFromCount(NCHAN)};
+ if constexpr (MASK == AUDIO_CHANNEL_NONE) {
+ ALOGE("%s: Invalid position count %d", __func__, NCHAN);
+ return; // not a valid system mask, ignore.
+ }
+ stereoVolumeHelperWithChannelMask<MIXTYPE, MASK, TO, TI, TV, F>(out, in, vol, f);
}
/*
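The rewrite replaces the per-count switch of channel positions with a compile-time channel mask: canonicalChannelMaskFromCount() maps the count to a canonical position mask, and stereoVolumeHelperWithChannelMask() assigns left, right, or center volume per position. A stand-alone illustration of that dispatch pattern, with made-up mask values rather than the audio.h definitions:

#include <cstdio>

constexpr unsigned kNoMask = 0;

// Illustrative mapping from channel count to a canonical position mask.
constexpr unsigned canonicalMaskFromCount(int n) {
    switch (n) {
        case 1: return 0x1;     // mono
        case 2: return 0x3;     // stereo
        case 6: return 0x3f;    // 5.1
        default: return kNoMask;
    }
}

template <int NCHAN>
void applyVolume() {
    constexpr unsigned MASK = canonicalMaskFromCount(NCHAN);
    if constexpr (MASK == kNoMask) {
        // A count without a canonical mask degrades to a safe no-op
        // instead of guessing channel positions.
        printf("channel count %d has no canonical mask, skipping\n", NCHAN);
        return;
    }
    printf("processing %d channels with mask %#x\n", NCHAN, MASK);
}

int main() {
    applyVolume<2>();   // resolved to the stereo mask at compile time
    applyVolume<7>();   // falls into the no-op branch
}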
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
index 70eafe3..5a9fa07 100644
--- a/media/libaudioprocessing/include/media/AudioMixer.h
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -50,6 +50,7 @@
// for haptic
HAPTIC_ENABLED = 0x4007, // Set haptic data from this track should be played or not.
HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
+ HAPTIC_MAX_AMPLITUDE = 0x4009, // Set the max amplitude allowed for haptic data.
// for target TIMESTRETCH
PLAYBACK_RATE = 0x4300, // Configure timestretch on this track name;
// parameter 'value' is a pointer to the new playback rate.
@@ -145,6 +146,7 @@
// Haptic
bool mHapticPlaybackEnabled;
os::HapticScale mHapticIntensity;
+ float mHapticMaxAmplitude;
audio_channel_mask_t mHapticChannelMask;
uint32_t mHapticChannelCount;
audio_channel_mask_t mMixerHapticChannelMask;
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
index 3856817..ad402db 100644
--- a/media/libaudioprocessing/tests/Android.bp
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -76,6 +76,7 @@
//
cc_binary {
name: "mixerops_objdump",
+ header_libs: ["libaudioutils_headers"],
srcs: ["mixerops_objdump.cpp"],
}
@@ -84,6 +85,16 @@
//
cc_benchmark {
name: "mixerops_benchmark",
+ header_libs: ["libaudioutils_headers"],
srcs: ["mixerops_benchmark.cpp"],
static_libs: ["libgoogle-benchmark"],
}
+
+//
+// mixerops unit test
+//
+cc_test {
+ name: "mixerops_tests",
+ defaults: ["libaudioprocessing_test_defaults"],
+ srcs: ["mixerops_tests.cpp"],
+}
diff --git a/media/libaudioprocessing/tests/mixerops_benchmark.cpp b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
index 7a4c5c7..f866b1a 100644
--- a/media/libaudioprocessing/tests/mixerops_benchmark.cpp
+++ b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
@@ -16,11 +16,9 @@
#include <inttypes.h>
#include <type_traits>
-#include "../../../../system/media/audio_utils/include/audio_utils/primitives.h"
#define LOG_ALWAYS_FATAL(...)
#include <../AudioMixerOps.h>
-
#include <benchmark/benchmark.h>
using namespace android;
diff --git a/media/libaudioprocessing/tests/mixerops_tests.cpp b/media/libaudioprocessing/tests/mixerops_tests.cpp
new file mode 100644
index 0000000..2500ba9
--- /dev/null
+++ b/media/libaudioprocessing/tests/mixerops_tests.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "mixerop_tests"
+#include <log/log.h>
+
+#include <inttypes.h>
+#include <type_traits>
+
+#include <../AudioMixerOps.h>
+#include <gtest/gtest.h>
+
+using namespace android;
+
+// Note: gtest templated tests require typenames, not integers.
+template <int MIXTYPE, int NCHAN>
+class MixerOpsBasicTest {
+public:
+ static void testStereoVolume() {
+ using namespace android::audio_utils::channels;
+
+ constexpr size_t FRAME_COUNT = 1000;
+ constexpr size_t SAMPLE_COUNT = FRAME_COUNT * NCHAN;
+
+ const float in[SAMPLE_COUNT] = {[0 ... (SAMPLE_COUNT - 1)] = 1.f};
+
+ AUDIO_GEOMETRY_SIDE sides[NCHAN];
+ size_t i = 0;
+ unsigned channel = canonicalChannelMaskFromCount(NCHAN);
+ constexpr unsigned LFE_LFE2 =
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY | AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2;
+ bool has_LFE_LFE2 = (channel & LFE_LFE2) == LFE_LFE2;
+ while (channel != 0) {
+ const int index = __builtin_ctz(channel);
+ if (has_LFE_LFE2 && (1 << index) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) {
+ sides[i++] = AUDIO_GEOMETRY_SIDE_LEFT; // special case
+ } else if (has_LFE_LFE2 && (1 << index) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ sides[i++] = AUDIO_GEOMETRY_SIDE_RIGHT; // special case
+ } else {
+ sides[i++] = sideFromChannelIdx(index);
+ }
+ channel &= ~(1 << index);
+ }
+
+ float vola[2] = {1.f, 0.f}; // left volume at max.
+ float out[SAMPLE_COUNT]{};
+ float aux[FRAME_COUNT]{};
+ float volaux = 0.5;
+ {
+ volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, vola, volaux);
+ const float *outp = out;
+ const float *auxp = aux;
+ const float left = vola[0];
+ const float center = (vola[0] + vola[1]) * 0.5;
+ const float right = vola[1];
+ for (size_t i = 0; i < FRAME_COUNT; ++i) {
+ for (size_t j = 0; j < NCHAN; ++j) {
+ const float audio = *outp++;
+ if (sides[j] == AUDIO_GEOMETRY_SIDE_LEFT) {
+ EXPECT_EQ(left, audio);
+ } else if (sides[j] == AUDIO_GEOMETRY_SIDE_CENTER) {
+ EXPECT_EQ(center, audio);
+ } else {
+ EXPECT_EQ(right, audio);
+ }
+ }
+ EXPECT_EQ(volaux, *auxp++); // works if all channels contain 1.f
+ }
+ }
+ float volb[2] = {0.f, 0.5f}; // right volume at half max.
+ {
+ // this accumulates into out, aux.
+ // float out[SAMPLE_COUNT]{};
+ // float aux[FRAME_COUNT]{};
+ volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, volb, volaux);
+ const float *outp = out;
+ const float *auxp = aux;
+ const float left = vola[0] + volb[0];
+ const float center = (vola[0] + vola[1] + volb[0] + volb[1]) * 0.5;
+ const float right = vola[1] + volb[1];
+ for (size_t i = 0; i < FRAME_COUNT; ++i) {
+ for (size_t j = 0; j < NCHAN; ++j) {
+ const float audio = *outp++;
+ if (sides[j] == AUDIO_GEOMETRY_SIDE_LEFT) {
+ EXPECT_EQ(left, audio);
+ } else if (sides[j] == AUDIO_GEOMETRY_SIDE_CENTER) {
+ EXPECT_EQ(center, audio);
+ } else {
+ EXPECT_EQ(right, audio);
+ }
+ }
+ // aux is accumulated so 2x the amplitude
+ EXPECT_EQ(volaux * 2.f, *auxp++); // works if all channels contain 1.f
+ }
+ }
+
+ { // test aux as derived from out.
+ // AUX channel is the weighted sum of all of the output channels prior to volume
+ // adjustment. We must set L and R to the same volume to allow computation
+ // of AUX from the output values.
+ const float volmono = 0.25f;
+ const float vollr[2] = {volmono, volmono}; // all the same.
+ float out[SAMPLE_COUNT]{};
+ float aux[FRAME_COUNT]{};
+ volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, vollr, volaux);
+ const float *outp = out;
+ const float *auxp = aux;
+ for (size_t i = 0; i < FRAME_COUNT; ++i) {
+ float accum = 0.f;
+ for (size_t j = 0; j < NCHAN; ++j) {
+ accum += *outp++;
+ }
+ EXPECT_EQ(accum / NCHAN * volaux / volmono, *auxp++);
+ }
+ }
+ }
+};
+
+TEST(mixerops, stereovolume_1) { // Note: mono not used for output sinks yet.
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 1>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_2) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 2>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_3) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 3>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_4) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 4>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_5) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 5>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_6) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 6>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_7) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 7>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_8) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 8>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_12) {
+ if constexpr (FCC_LIMIT >= 12) { // NOTE: FCC_LIMIT is an enum, so can't #if
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 12>::testStereoVolume();
+ }
+}
+TEST(mixerops, stereovolume_24) {
+ if constexpr (FCC_LIMIT >= 24) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 24>::testStereoVolume();
+ }
+}
+TEST(mixerops, channel_equivalence) {
+ // we must match the constexpr function with the system determined channel mask from count.
+ for (size_t i = 0; i < FCC_LIMIT; ++i) {
+ const audio_channel_mask_t actual = canonicalChannelMaskFromCount(i);
+ const audio_channel_mask_t system = audio_channel_out_mask_from_count(i);
+ if (system == AUDIO_CHANNEL_INVALID) continue;
+ EXPECT_EQ(system, actual);
+ }
+}
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
index a660957..03ce329 100644
--- a/media/libeffects/hapticgenerator/Android.bp
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -45,6 +45,7 @@
shared_libs: [
"libaudioutils",
+ "libbase",
"libbinder",
"liblog",
"libutils",
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index 65a20a7..3137e13 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -22,12 +22,15 @@
#include <algorithm>
#include <memory>
+#include <string>
#include <utility>
#include <errno.h>
#include <inttypes.h>
#include <math.h>
+#include <android-base/parsedouble.h>
+#include <android-base/properties.h>
#include <audio_effects/effect_hapticgenerator.h>
#include <audio_utils/format.h>
#include <system/audio.h>
@@ -35,6 +38,7 @@
static constexpr float DEFAULT_RESONANT_FREQUENCY = 150.0f;
static constexpr float DEFAULT_BSF_ZERO_Q = 8.0f;
static constexpr float DEFAULT_BSF_POLE_Q = 4.0f;
+static constexpr float DEFAULT_DISTORTION_OUTPUT_GAIN = 1.5f;
// This is the only symbol that needs to be exported
__attribute__ ((visibility ("default")))
@@ -81,6 +85,15 @@
namespace {
+float getFloatProperty(const std::string& key, float defaultValue) {
+ float result;
+ std::string value = android::base::GetProperty(key, "");
+ if (!value.empty() && android::base::ParseFloat(value, &result)) {
+ return result;
+ }
+ return defaultValue;
+}
+
int HapticGenerator_Init(struct HapticGeneratorContext *context) {
context->itfe = &gHapticGeneratorInterface;
@@ -114,7 +127,9 @@
context->param.distortionCornerFrequency = 300.0f;
context->param.distortionInputGain = 0.3f;
context->param.distortionCubeThreshold = 0.1f;
- context->param.distortionOutputGain = 1.5f;
+ context->param.distortionOutputGain = getFloatProperty(
+ "vendor.audio.hapticgenerator.distortion.output.gain", DEFAULT_DISTORTION_OUTPUT_GAIN);
+ ALOGD("Using distortion output gain as %f", context->param.distortionOutputGain);
context->state = HAPTICGENERATOR_STATE_INITIALIZED;
return 0;
@@ -287,15 +302,17 @@
break;
}
case HG_PARAM_VIBRATOR_INFO: {
- if (value == nullptr || size != 2 * sizeof(float)) {
+ if (value == nullptr || size != 3 * sizeof(float)) {
return -EINVAL;
}
const float resonantFrequency = *(float*) value;
const float qFactor = *((float *) value + 1);
+ const float maxAmplitude = *((float *) value + 2);
context->param.resonantFrequency =
isnan(resonantFrequency) ? DEFAULT_RESONANT_FREQUENCY : resonantFrequency;
context->param.bsfZeroQ = isnan(qFactor) ? DEFAULT_BSF_POLE_Q : qFactor;
context->param.bsfPoleQ = context->param.bsfZeroQ / 2.0f;
+ context->param.maxHapticAmplitude = maxAmplitude;
if (context->processorsRecord.bpf != nullptr) {
context->processorsRecord.bpf->setCoefficients(
@@ -448,7 +465,8 @@
float* hapticOutBuffer = HapticGenerator_runProcessingChain(
context->processingChain, context->inputBuffer.data(),
context->outputBuffer.data(), inBuffer->frameCount);
- os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity);
+ os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity,
+ context->param.maxHapticAmplitude);
// For haptic data, the haptic playback thread will copy the data from effect input buffer,
// which contains haptic data at the end of the buffer, directly to sink buffer.
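The distortion output gain can now be overridden through the vendor.audio.hapticgenerator.distortion.output.gain property, parsed by getFloatProperty() with a fallback to the built-in default. A stand-alone analogue of that parse-with-fallback, using getenv in place of android::base::GetProperty:

#include <cstdlib>

// Read an optional string setting, parse it as a float, and fall back to the
// default when it is absent or malformed.
float floatFromEnv(const char* key, float defaultValue) {
    const char* value = std::getenv(key);
    if (value == nullptr || *value == '\0') return defaultValue;
    char* end = nullptr;
    const float result = std::strtof(value, &end);
    return (end != nullptr && *end == '\0') ? result : defaultValue;
}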
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
index 96b744a..85e961f 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.h
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -51,6 +51,7 @@
// A map from track id to haptic intensity.
std::map<int, os::HapticScale> id2Intensity;
os::HapticScale maxHapticIntensity; // max intensity will be used to scale haptic data.
+ float maxHapticAmplitude; // max amplitude will be used to limit haptic data absolute values.
float resonantFrequency;
float bpfQ;
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index c89c023..c9f361e 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -17,7 +17,6 @@
#include <arpa/inet.h>
#include <stdint.h>
-#include <sys/types.h>
#include <android/IDataSource.h>
#include <binder/IPCThreadState.h>
diff --git a/media/libmedia/tests/codeclist/Android.bp b/media/libmedia/tests/codeclist/Android.bp
index 7dd0caa..57af9a9 100644
--- a/media/libmedia/tests/codeclist/Android.bp
+++ b/media/libmedia/tests/codeclist/Android.bp
@@ -25,7 +25,7 @@
cc_test {
name: "CodecListTest",
- test_suites: ["device-tests"],
+ test_suites: ["device-tests", "mts"],
gtest: true,
srcs: [
@@ -41,7 +41,7 @@
"libstagefright_xmlparser",
"libutils",
],
-
+ compile_multilib: "first",
cflags: [
"-Werror",
"-Wall",
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index efd4070..94a0424 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -262,13 +262,10 @@
}
bool isHDR(const sp<AMessage> &format) {
- uint32_t standard, range, transfer;
+ uint32_t standard, transfer;
if (!format->findInt32("color-standard", (int32_t*)&standard)) {
standard = 0;
}
- if (!format->findInt32("color-range", (int32_t*)&range)) {
- range = 0;
- }
if (!format->findInt32("color-transfer", (int32_t*)&transfer)) {
transfer = 0;
}
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c03236a..a366506 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -4775,8 +4775,8 @@
}
const CryptoPlugin::SubSample *subSamples;
size_t numSubSamples;
- const uint8_t *key;
- const uint8_t *iv;
+ const uint8_t *key = NULL;
+ const uint8_t *iv = NULL;
CryptoPlugin::Mode mode = CryptoPlugin::kMode_Unencrypted;
// We allow the simpler queueInputBuffer API to be used even in
@@ -4791,8 +4791,6 @@
subSamples = &ss;
numSubSamples = 1;
- key = NULL;
- iv = NULL;
pattern.mEncryptBlocks = 0;
pattern.mSkipBlocks = 0;
}
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index ada5d81..0ec5ad5 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -65,7 +65,17 @@
const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
const char *MEDIA_MIMETYPE_AUDIO_MS_ADPCM = "audio/x-adpcm-ms";
const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM = "audio/x-adpcm-dvi-ima";
-
+const char *MEDIA_MIMETYPE_AUDIO_DTS = "audio/vnd.dts";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_HD = "audio/vnd.dts.hd";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD = "audio/vnd.dts.uhd";
+const char *MEDIA_MIMETYPE_AUDIO_EVRC = "audio/evrc";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCB = "audio/evrcb";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCWB = "audio/evrcwb";
+const char *MEDIA_MIMETYPE_AUDIO_EVRCNW = "audio/evrcnw";
+const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS = "audio/amr-wb+";
+const char *MEDIA_MIMETYPE_AUDIO_APTX = "audio/aptx";
+const char *MEDIA_MIMETYPE_AUDIO_DRA = "audio/vnd.dra";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_FORMAT = "audio/aac";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index f5cecef..afa0c6d 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -67,7 +67,17 @@
extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
extern const char *MEDIA_MIMETYPE_AUDIO_MS_ADPCM;
extern const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM;
-
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_HD;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRC;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCB;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCWB;
+extern const char *MEDIA_MIMETYPE_AUDIO_EVRCNW;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_APTX;
+extern const char *MEDIA_MIMETYPE_AUDIO_DRA;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_FORMAT;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
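A hedged usage sketch of the newly exported MIME constants, assuming the AOSP build environment; it only shows how call sites can compare track MIME strings against the DTS family without scattering string literals:

#include <cstring>

#include <media/stagefright/foundation/MediaDefs.h>

// Returns true for any of the DTS MIME types declared above.
bool isDtsFamily(const char* mime) {
    return strcmp(mime, android::MEDIA_MIMETYPE_AUDIO_DTS) == 0
        || strcmp(mime, android::MEDIA_MIMETYPE_AUDIO_DTS_HD) == 0
        || strcmp(mime, android::MEDIA_MIMETYPE_AUDIO_DTS_UHD) == 0;
}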
diff --git a/media/libstagefright/tests/fuzzers/Android.bp b/media/libstagefright/tests/fuzzers/Android.bp
index 0097830..ea17a4d 100644
--- a/media/libstagefright/tests/fuzzers/Android.bp
+++ b/media/libstagefright/tests/fuzzers/Android.bp
@@ -86,9 +86,6 @@
dictionary: "dictionaries/formats.dict",
defaults: ["libstagefright_fuzzer_defaults"],
static_libs: [
- "libstagefright_webm",
"libdatasource",
- "libstagefright_esds",
- "libogg",
],
}
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index c1793ce..b035e5a 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -351,6 +351,11 @@
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
EXPORT const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA = "mpeg-user-data";
EXPORT const char* AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER = "mpeg2-stream-header";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS = "mpegh-compatible-sets";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION =
+ "mpegh-profile-level-indication";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT =
+ "mpegh-reference-channel-layout";
EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index fbd855d..2d2fcc0 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -320,6 +320,34 @@
extern const char* AMEDIAFORMAT_VIDEO_QP_P_MAX __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_VIDEO_QP_P_MIN __INTRODUCED_IN(31);
+/**
+ * MPEG-H audio profile and level compatibility.
+ *
+ * See FDAmd_2 of ISO_IEC_23008-3;2019 MHAProfileAndLevelCompatibilitySetBox.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS __INTRODUCED_IN(32);
+
+/**
+ * MPEG-H audio profile level indication.
+ *
+ * See ISO_IEC_23008-3;2019 MHADecoderConfigurationRecord mpegh3daProfileLevelIndication.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION __INTRODUCED_IN(32);
+
+/**
+ * MPEG-H audio reference channel layout.
+ *
+ * See ISO_IEC_23008-3;2019 MHADecoderConfigurationRecord referenceChannelLayout
+ * and ISO_IEC_23001‐8 ChannelConfiguration value.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT __INTRODUCED_IN(32);
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
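A hedged usage sketch for the new MPEG-H keys, assuming API level 32 and that the two keys below are integer-valued (AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS is expected to carry a buffer and is not queried here):

#include <cstdio>

#include <media/NdkMediaFormat.h>

// Log the MPEG-H profile/level and reference channel layout, if present.
void logMpeghInfo(AMediaFormat* format) {
    int32_t profileLevel = 0;
    if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION,
                              &profileLevel)) {
        printf("mpegh3daProfileLevelIndication: %d\n", profileLevel);
    }
    int32_t referenceLayout = 0;
    if (AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT,
                              &referenceLayout)) {
        printf("referenceChannelLayout: %d\n", referenceLayout);
    }
}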
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 7e9e57e..6f275c7 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -126,6 +126,9 @@
AMEDIAFORMAT_KEY_MIME; # var introduced=21
AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER; # var introduced=29
+ AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS; # var introduced=32
+ AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION; # var introduced=32
+ AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT; # var introduced=32
AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN; # var introduced=29
AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
diff --git a/media/tests/benchmark/MediaBenchmarkTest/Android.bp b/media/tests/benchmark/MediaBenchmarkTest/Android.bp
index 2e06da5..4b44dcf 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/Android.bp
+++ b/media/tests/benchmark/MediaBenchmarkTest/Android.bp
@@ -69,7 +69,6 @@
java_defaults {
name: "MediaBenchmark-defaults",
- sdk_version: "system_current",
min_sdk_version: "28",
- target_sdk_version: "29",
+ target_sdk_version: "30",
}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/build.gradle b/media/tests/benchmark/MediaBenchmarkTest/build.gradle
index b2aee1a..b222d47 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/build.gradle
+++ b/media/tests/benchmark/MediaBenchmarkTest/build.gradle
@@ -17,21 +17,21 @@
buildscript {
repositories {
google()
- jcenter()
+ mavenCentral()
}
dependencies {
- classpath 'com.android.tools.build:gradle:3.5.0'
+ classpath 'com.android.tools.build:gradle:4.2.1'
}
}
apply plugin: 'com.android.application'
android {
- compileSdkVersion 29
+ compileSdkVersion 30
defaultConfig {
applicationId "com.android.media.benchmark"
minSdkVersion 28
- targetSdkVersion 29
+ targetSdkVersion 30
versionCode 1
versionName "1.0"
testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
@@ -57,20 +57,20 @@
externalNativeBuild {
cmake {
path "src/main/cpp/CMakeLists.txt"
- version "3.10.2"
+ version "3.18.1"
}
}
}
repositories {
google()
- jcenter()
+ mavenCentral()
}
dependencies {
implementation fileTree(dir: 'libs', include: ['*.jar'])
- implementation 'androidx.appcompat:appcompat:1.1.0'
- testImplementation 'junit:junit:4.12'
- androidTestImplementation 'androidx.test:runner:1.2.0'
- androidTestImplementation 'androidx.test.ext:junit:1.1.1'
+ implementation 'androidx.appcompat:appcompat:1.3.0'
+ testImplementation 'junit:junit:4.13.2'
+ androidTestImplementation 'androidx.test:runner:1.3.0'
+ androidTestImplementation 'androidx.test.ext:junit:1.1.2'
}
\ No newline at end of file
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp
index af92424..0192d68 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/Android.bp
@@ -9,7 +9,6 @@
cc_test_library {
name: "libmediabenchmark_jni",
- sdk_version: "current",
defaults: [
"libmediabenchmark_common-defaults",
diff --git a/media/tests/benchmark/src/native/common/Android.bp b/media/tests/benchmark/src/native/common/Android.bp
index 6b54c6a..718d217 100644
--- a/media/tests/benchmark/src/native/common/Android.bp
+++ b/media/tests/benchmark/src/native/common/Android.bp
@@ -55,7 +55,6 @@
cc_defaults {
name: "libmediabenchmark-defaults",
- sdk_version: "current",
stl: "c++_shared",
shared_libs: [
diff --git a/media/tests/benchmark/src/native/extractor/Extractor.cpp b/media/tests/benchmark/src/native/extractor/Extractor.cpp
index f0bb3b9..3bdfbad 100644
--- a/media/tests/benchmark/src/native/extractor/Extractor.cpp
+++ b/media/tests/benchmark/src/native/extractor/Extractor.cpp
@@ -124,9 +124,7 @@
int64_t sTime = mStats->getCurTime();
if (mExtractor) {
- // TODO: (b/140128505) Multiple calls result in DoS.
- // Uncomment call to AMediaExtractor_delete() once this is resolved
- // AMediaExtractor_delete(mExtractor);
+ AMediaExtractor_delete(mExtractor);
mExtractor = nullptr;
}
int64_t eTime = mStats->getCurTime();
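With the delete call restored, every AMediaExtractor_new() in the benchmark is again balanced by exactly one AMediaExtractor_delete(); the earlier TODO (b/140128505) had this call commented out. A hedged sketch of that pairing using the public NDK extractor API:

#include <media/NdkMediaExtractor.h>

// Create an extractor, probe the file, and release it exactly once.
bool probeFile(int fd, off64_t length) {
    AMediaExtractor* extractor = AMediaExtractor_new();
    if (extractor == nullptr) return false;
    const media_status_t status = AMediaExtractor_setDataSourceFd(extractor, fd, 0, length);
    const size_t trackCount =
            (status == AMEDIA_OK) ? AMediaExtractor_getTrackCount(extractor) : 0;
    AMediaExtractor_delete(extractor);  // single owner, pointer not reused afterwards
    return trackCount > 0;
}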
diff --git a/media/tests/benchmark/tests/Android.bp b/media/tests/benchmark/tests/Android.bp
index 0fbd20d..9a8caa3 100644
--- a/media/tests/benchmark/tests/Android.bp
+++ b/media/tests/benchmark/tests/Android.bp
@@ -33,7 +33,12 @@
srcs: ["ExtractorTest.cpp"],
- static_libs: ["libmediabenchmark_extractor"]
+ static_libs: ["libmediabenchmark_extractor"],
+
+ shared_libs: [
+ "libbase",
+ "libbinder_ndk",
+ ],
}
cc_test {
@@ -50,6 +55,11 @@
"libmediabenchmark_extractor",
"libmediabenchmark_decoder",
],
+
+ shared_libs: [
+ "libbase",
+ "libbinder_ndk",
+ ],
}
cc_test {
diff --git a/media/tests/benchmark/tests/DecoderTest.cpp b/media/tests/benchmark/tests/DecoderTest.cpp
index 81ef02a..3666724 100644
--- a/media/tests/benchmark/tests/DecoderTest.cpp
+++ b/media/tests/benchmark/tests/DecoderTest.cpp
@@ -21,6 +21,8 @@
#include <iostream>
#include <limits>
+#include <android/binder_process.h>
+
#include "BenchmarkTestEnvironment.h"
#include "Decoder.h"
@@ -175,6 +177,7 @@
"c2.android.hevc.decoder", true)));
int main(int argc, char **argv) {
+ ABinderProcess_startThreadPool();
gEnv = new BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/tests/benchmark/tests/ExtractorTest.cpp b/media/tests/benchmark/tests/ExtractorTest.cpp
index d14d15b..27ee9ba 100644
--- a/media/tests/benchmark/tests/ExtractorTest.cpp
+++ b/media/tests/benchmark/tests/ExtractorTest.cpp
@@ -19,6 +19,8 @@
#include <gtest/gtest.h>
+#include <android/binder_process.h>
+
#include "BenchmarkTestEnvironment.h"
#include "Extractor.h"
@@ -73,6 +75,7 @@
0)));
int main(int argc, char **argv) {
+ ABinderProcess_startThreadPool();
gEnv = new BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index bfe73d5..73c4e3b 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -81,6 +81,36 @@
export_include_dirs: ["include"],
}
+cc_library {
+ name: "libmediautils_vendor",
+ vendor_available: true, // required for platform/hardware/interfaces
+ srcs: [
+ "MemoryLeakTrackUtil.cpp",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ ],
+ shared_libs: [
+ "liblog",
+ "libutils",
+ ],
+
+ static_libs: [
+ "libc_malloc_debug_backtrace",
+ ],
+
+ header_libs: [
+ "bionic_libc_platform_headers",
+ ],
+
+ local_include_dirs: ["include"],
+ export_include_dirs: ["include"],
+}
+
+
cc_library_headers {
name: "libmediautils_headers",
vendor_available: true, // required for platform/hardware/interfaces
diff --git a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
index 6e52512..51e8d7a 100644
--- a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
+++ b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
@@ -17,7 +17,7 @@
#include <fcntl.h>
#include <functional>
-#include <type_traits>
+#include <type_traits>
#include <android/content/AttributionSourceState.h>
#include "fuzzer/FuzzedDataProvider.h"
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 65a163f..a1fb304 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -336,11 +336,11 @@
}
// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
-const media::AudioVibratorInfo* AudioFlinger::getDefaultVibratorInfo_l() {
+std::optional<media::AudioVibratorInfo> AudioFlinger::getDefaultVibratorInfo_l() {
if (mAudioVibratorInfos.empty()) {
- return nullptr;
+ return {};
}
- return &mAudioVibratorInfos.front();
+ return mAudioVibratorInfos.front();
}
AudioFlinger::~AudioFlinger()
@@ -695,7 +695,7 @@
// dump all hardware devs
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
- dev->dump(fd);
+ dev->dump(fd, args);
}
mPatchPanel.dump(fd);
@@ -1873,13 +1873,13 @@
}
}
-void AudioFlinger::ioConfigChanged(audio_io_config_event event,
+void AudioFlinger::ioConfigChanged(audio_io_config_event_t event,
const sp<AudioIoDescriptor>& ioDesc,
pid_t pid) {
+ media::AudioIoConfigEvent eventAidl = VALUE_OR_FATAL(
+ legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(event));
media::AudioIoDescriptor descAidl = VALUE_OR_FATAL(
legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(ioDesc));
- media::AudioIoConfigEvent eventAidl = VALUE_OR_FATAL(
- legacy2aidl_audio_io_config_event_AudioIoConfigEvent(event));
Mutex::Autolock _l(mClientLock);
size_t size = mNotificationClients.size();
@@ -2455,6 +2455,10 @@
ThreadBase *thread = (ThreadBase *)mRecordThreads.valueAt(i).get();
thread->systemReady();
}
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ ThreadBase *thread = (ThreadBase *)mMmapThreads.valueAt(i).get();
+ thread->systemReady();
+ }
return NO_ERROR;
}
@@ -2501,7 +2505,8 @@
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig __unused,
audio_devices_t deviceType,
const String8& address,
audio_output_flags_t flags)
@@ -2529,16 +2534,16 @@
// Check only for Normal Mixing mode
if (kEnableExtendedPrecision) {
// Specify format (uncomment one below to choose)
- //config->format = AUDIO_FORMAT_PCM_FLOAT;
- //config->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
- //config->format = AUDIO_FORMAT_PCM_32_BIT;
- //config->format = AUDIO_FORMAT_PCM_8_24_BIT;
- // ALOGV("openOutput_l() upgrading format to %#08x", config->format);
+ //halConfig->format = AUDIO_FORMAT_PCM_FLOAT;
+ //halConfig->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ //halConfig->format = AUDIO_FORMAT_PCM_32_BIT;
+ //halConfig->format = AUDIO_FORMAT_PCM_8_24_BIT;
+ // ALOGV("openOutput_l() upgrading format to %#08x", halConfig->format);
}
if (kEnableExtendedChannels) {
// Specify channel mask (uncomment one below to choose)
- //config->channel_mask = audio_channel_out_mask_from_count(4); // for USB 4ch
- //config->channel_mask = audio_channel_mask_from_representation_and_bits(
+ //halConfig->channel_mask = audio_channel_out_mask_from_count(4); // for USB 4ch
+ //halConfig->channel_mask = audio_channel_mask_from_representation_and_bits(
// AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1); // another 4ch example
}
}
@@ -2549,7 +2554,7 @@
*output,
deviceType,
flags,
- config,
+ halConfig,
address.string());
mHardwareStatus = AUDIO_HW_IDLE;
@@ -2564,13 +2569,20 @@
return thread;
} else {
sp<PlaybackThread> thread;
- if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+ //TODO: b/193496180 use virtualizer stage flag at audio HAL when available
+ if (flags == (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_FAST
+ | AUDIO_OUTPUT_FLAG_DEEP_BUFFER)) {
+ thread = new VirtualizerStageThread(this, outputStream, *output,
+ mSystemReady, mixerConfig);
+ ALOGD("openOutput_l() created virtualizer output: ID %d thread %p",
+ *output, thread.get());
+ } else if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
thread = new OffloadThread(this, outputStream, *output, mSystemReady);
ALOGV("openOutput_l() created offload output: ID %d thread %p",
*output, thread.get());
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
- || !isValidPcmSinkFormat(config->format)
- || !isValidPcmSinkChannelMask(config->channel_mask)) {
+ || !isValidPcmSinkFormat(halConfig->format)
+ || !isValidPcmSinkChannelMask(halConfig->channel_mask)) {
thread = new DirectOutputThread(this, outputStream, *output, mSystemReady);
ALOGV("openOutput_l() created direct output: ID %d thread %p",
*output, thread.get());
@@ -2597,8 +2609,10 @@
{
audio_module_handle_t module = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_module_handle_t(request.module));
- audio_config_t config = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioConfig_audio_config_t(request.config));
+ audio_config_t halConfig = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioConfig_audio_config_t(request.halConfig, false /*isInput*/));
+ audio_config_base_t mixerConfig = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(request.mixerConfig, false/*isInput*/));
sp<DeviceDescriptorBase> device = VALUE_OR_RETURN_STATUS(
aidl2legacy_DeviceDescriptorBase(request.device));
audio_output_flags_t flags = VALUE_OR_RETURN_STATUS(
@@ -2611,9 +2625,9 @@
"Channels %#x, flags %#x",
this, module,
device->toString().c_str(),
- config.sample_rate,
- config.format,
- config.channel_mask,
+ halConfig.sample_rate,
+ halConfig.format,
+ halConfig.channel_mask,
flags);
audio_devices_t deviceType = device->type();
@@ -2625,7 +2639,8 @@
Mutex::Autolock _l(mLock);
- sp<ThreadBase> thread = openOutput_l(module, &output, &config, deviceType, address, flags);
+ sp<ThreadBase> thread = openOutput_l(module, &output, &halConfig,
+ &mixerConfig, deviceType, address, flags);
if (thread != 0) {
if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
@@ -2650,7 +2665,8 @@
mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
}
response->output = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
- response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ response->config = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(halConfig, false /*isInput*/));
response->latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(latencyMs));
response->flags = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
@@ -2736,9 +2752,7 @@
mMmapThreads.removeItem(output);
ALOGD("closing mmapThread %p", mmapThread.get());
}
- const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
- ioDesc->mIoHandle = output;
- ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
+ ioConfigChanged(AUDIO_OUTPUT_CLOSED, sp<AudioIoDescriptor>::make(output));
mPatchPanel.notifyStreamClosed(output);
}
// The thread entity (active unit of execution) is no longer running here,
@@ -2811,16 +2825,16 @@
{
Mutex::Autolock _l(mLock);
- if (request.device.type == AUDIO_DEVICE_NONE) {
+ AudioDeviceTypeAddr device = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceTypeAddress(request.device));
+ if (device.mType == AUDIO_DEVICE_NONE) {
return BAD_VALUE;
}
audio_io_handle_t input = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_io_handle_t(request.input));
audio_config_t config = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioConfig_audio_config_t(request.config));
- AudioDeviceTypeAddr device = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioDeviceTypeAddress(request.device));
+ aidl2legacy_AudioConfig_audio_config_t(request.config, true /*isInput*/));
sp<ThreadBase> thread = openInput_l(
VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_module_handle_t(request.module)),
@@ -2834,7 +2848,8 @@
String8{});
response->input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
- response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ response->config = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(config, true /*isInput*/));
response->device = request.device;
if (thread != 0) {
@@ -2996,9 +3011,7 @@
dumpToThreadLog_l(mmapThread);
mMmapThreads.removeItem(input);
}
- const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
- ioDesc->mIoHandle = input;
- ioConfigChanged(AUDIO_INPUT_CLOSED, ioDesc);
+ ioConfigChanged(AUDIO_INPUT_CLOSED, sp<AudioIoDescriptor>::make(input));
}
// FIXME: calling thread->exit() without mLock held should not be needed anymore now that
// we have a different lock for notification client
@@ -3797,7 +3810,8 @@
io = mPlaybackThreads.keyAt(0);
}
ALOGV("createEffect() got io %d for effect %s", io, descOut.name);
- } else if (checkPlaybackThread_l(io) != nullptr) {
+ } else if (checkPlaybackThread_l(io) != nullptr
+ && sessionId != AUDIO_SESSION_OUTPUT_STAGE) {
// allow only one effect chain per sessionId on mPlaybackThreads.
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
const audio_io_handle_t checkIo = mPlaybackThreads.keyAt(i);
@@ -4178,6 +4192,7 @@
case TransactionCode::LIST_AUDIO_PATCHES:
case TransactionCode::SET_AUDIO_PORT_CONFIG:
case TransactionCode::SET_RECORD_SILENCED:
+ case TransactionCode::AUDIO_POLICY_READY:
ALOGW("%s: transaction %d received from PID %d",
__func__, code, IPCThreadState::self()->getCallingPid());
// return status only for non void methods
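Returning std::optional<media::AudioVibratorInfo> by value removes the raw pointer into mAudioVibratorInfos, so callers test the optional instead of a pointer and cannot hold a dangling reference. A stand-alone analogue of the new contract:

#include <cstdio>
#include <optional>
#include <vector>

struct VibratorInfo { float resonantFrequency; float qFactor; float maxAmplitude; };

// Return the first entry by value, or an empty optional when there is none.
std::optional<VibratorInfo> defaultVibratorInfo(const std::vector<VibratorInfo>& infos) {
    if (infos.empty()) return {};
    return infos.front();
}

int main() {
    const std::vector<VibratorInfo> infos{{150.f, 8.f, 1.f}};
    if (const auto info = defaultVibratorInfo(infos); info.has_value()) {
        printf("resonant frequency %.1f Hz\n", info->resonantFrequency);
    }
}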
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index fff61f8..d6bf0ae 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -269,6 +269,9 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
virtual status_t systemReady();
+ virtual status_t audioPolicyReady() { mAudioPolicyReady.store(true); return NO_ERROR; }
+ bool isAudioPolicyReady() const { return mAudioPolicyReady.load(); }
+
virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
@@ -309,7 +312,7 @@
void updateDownStreamPatches_l(const struct audio_patch *patch,
const std::set<audio_io_handle_t> streams);
- const media::AudioVibratorInfo* getDefaultVibratorInfo_l();
+ std::optional<media::AudioVibratorInfo> getDefaultVibratorInfo_l();
private:
// FIXME The 400 is temporarily too high until a leak of writers in media.log is fixed.
@@ -735,7 +738,8 @@
const String8& outputDeviceAddress);
sp<ThreadBase> openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
audio_devices_t deviceType,
const String8& address,
audio_output_flags_t flags);
@@ -746,7 +750,7 @@
// no range check, AudioFlinger::mLock held
bool streamMute_l(audio_stream_type_t stream) const
{ return mStreamTypes[stream].mute; }
- void ioConfigChanged(audio_io_config_event event,
+ void ioConfigChanged(audio_io_config_event_t event,
const sp<AudioIoDescriptor>& ioDesc,
pid_t pid = 0);
@@ -986,6 +990,7 @@
DeviceEffectManager mDeviceEffectManager;
bool mSystemReady;
+ std::atomic_bool mAudioPolicyReady{};
mediautils::UidInfo mUidInfo;
diff --git a/services/audioflinger/DeviceEffectManager.h b/services/audioflinger/DeviceEffectManager.h
index a05f5fe..c222de8 100644
--- a/services/audioflinger/DeviceEffectManager.h
+++ b/services/audioflinger/DeviceEffectManager.h
@@ -163,8 +163,13 @@
bool isOffloadOrMmap() const override { return false; }
uint32_t sampleRate() const override { return 0; }
- audio_channel_mask_t channelMask() const override { return AUDIO_CHANNEL_NONE; }
- uint32_t channelCount() const override { return 0; }
+ audio_channel_mask_t inChannelMask(int id __unused) const override {
+ return AUDIO_CHANNEL_NONE;
+ }
+ uint32_t inChannelCount(int id __unused) const override { return 0; }
+ audio_channel_mask_t outChannelMask() const override { return AUDIO_CHANNEL_NONE; }
+ uint32_t outChannelCount() const override { return 0; }
+
audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
size_t frameCount() const override { return 0; }
uint32_t latency() const override { return 0; }
@@ -190,6 +195,10 @@
wp<EffectChain> chain() const override { return nullptr; }
+ bool isAudioPolicyReady() const override {
+ return mManager.audioFlinger().isAudioPolicyReady();
+ }
+
int newEffectId() { return mManager.audioFlinger().nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT); }
status_t addEffectToHal(audio_port_handle_t deviceId,
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index b267d88..bd661f9 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -242,6 +242,12 @@
{
Mutex::Autolock _l(mLock);
+
+ if ((isInternal_l() && !mPolicyRegistered)
+ || !getCallback()->isAudioPolicyReady()) {
+ return NO_ERROR;
+ }
+
// register effect when first handle is attached and unregister when last handle is removed
if (mPolicyRegistered != mHandles.size() > 0) {
doRegister = true;
@@ -875,9 +881,9 @@
// similar to output EFFECT_FLAG_TYPE_INSERT/REPLACE,
// in which case input channel masks should be used here.
callback = getCallback();
- channelMask = callback->channelMask();
+ channelMask = callback->inChannelMask(mId);
mConfig.inputCfg.channels = channelMask;
- mConfig.outputCfg.channels = channelMask;
+ mConfig.outputCfg.channels = callback->outChannelMask();
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
if (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_MONO) {
@@ -1600,7 +1606,7 @@
return status;
}
-status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo)
+status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo)
{
if (mStatus != NO_ERROR) {
return mStatus;
@@ -1610,15 +1616,17 @@
return INVALID_OPERATION;
}
+ const size_t paramCount = 3;
std::vector<uint8_t> request(
- sizeof(effect_param_t) + sizeof(int32_t) + 2 * sizeof(float));
+ sizeof(effect_param_t) + sizeof(int32_t) + paramCount * sizeof(float));
effect_param_t *param = (effect_param_t*) request.data();
param->psize = sizeof(int32_t);
- param->vsize = 2 * sizeof(float);
+ param->vsize = paramCount * sizeof(float);
*(int32_t*)param->data = HG_PARAM_VIBRATOR_INFO;
float* vibratorInfoPtr = reinterpret_cast<float*>(param->data + sizeof(int32_t));
- vibratorInfoPtr[0] = vibratorInfo->resonantFrequency;
- vibratorInfoPtr[1] = vibratorInfo->qFactor;
+ vibratorInfoPtr[0] = vibratorInfo.resonantFrequency;
+ vibratorInfoPtr[1] = vibratorInfo.qFactor;
+ vibratorInfoPtr[2] = vibratorInfo.maxAmplitude;
std::vector<uint8_t> response;
status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
if (status == NO_ERROR) {
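
setVibratorInfo() above packs the vibrator description into an effect_param_t request: a psize/vsize header, a 4-byte parameter key (HG_PARAM_VIBRATOR_INFO), then three floats for resonant frequency, Q factor and the newly added max amplitude. A minimal standalone sketch of that byte layout, using a simplified header struct rather than the real system/audio_effect.h definition:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Simplified stand-in for the psize/vsize header of effect_param_t (illustration only).
    struct ParamHeader {
        uint32_t psize;  // size of the parameter key, in bytes
        uint32_t vsize;  // size of the value payload, in bytes
    };

    std::vector<uint8_t> packVibratorInfo(int32_t key, float resonantFrequency,
                                          float qFactor, float maxAmplitude) {
        const float values[] = { resonantFrequency, qFactor, maxAmplitude };
        const ParamHeader header = { sizeof(int32_t), sizeof(values) };
        std::vector<uint8_t> request(sizeof(header) + sizeof(key) + sizeof(values));

        uint8_t* p = request.data();
        std::memcpy(p, &header, sizeof(header));  p += sizeof(header);
        std::memcpy(p, &key, sizeof(key));        p += sizeof(key);
        std::memcpy(p, values, sizeof(values));   // paramCount = 3 floats
        return request;
    }
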
@@ -2048,11 +2056,11 @@
mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX),
mEffectCallback(new EffectCallback(wp<EffectChain>(this), thread))
{
- mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
sp<ThreadBase> p = thread.promote();
if (p == nullptr) {
return;
}
+ mStrategy = p->getStrategyForStream(AUDIO_STREAM_MUSIC);
mMaxTailBuffers = ((kProcessTailDurationMs * p->sampleRate()) / 1000) /
p->frameCount();
}
@@ -2125,8 +2133,8 @@
if (mInBuffer == NULL) {
return;
}
- const size_t frameSize =
- audio_bytes_per_sample(EFFECT_BUFFER_FORMAT) * mEffectCallback->channelCount();
+ const size_t frameSize = audio_bytes_per_sample(EFFECT_BUFFER_FORMAT)
+ * mEffectCallback->inChannelCount(mEffects[0]->id());
memset(mInBuffer->audioBuffer()->raw, 0, mEffectCallback->frameCount() * frameSize);
mInBuffer->commit();
@@ -2236,6 +2244,9 @@
numSamples * sizeof(int32_t), &halBuffer);
#endif
if (result != OK) return result;
+
+ effect->configure();
+
effect->setInBuffer(halBuffer);
// auxiliary effects output samples to chain input buffer for further processing
// by insert effects
@@ -2303,6 +2314,10 @@
}
}
+ mEffects.insertAt(effect, idx_insert);
+
+ effect->configure();
+
// always read samples from chain input buffer
effect->setInBuffer(mInBuffer);
@@ -2310,14 +2325,13 @@
// output buffer, otherwise to chain input buffer
if (idx_insert == size) {
if (idx_insert != 0) {
- mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
mEffects[idx_insert-1]->configure();
+ mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
}
effect->setOutBuffer(mOutBuffer);
} else {
effect->setOutBuffer(mInBuffer);
}
- mEffects.insertAt(effect, idx_insert);
ALOGV("addEffect_l() effect %p, added in chain %p at rank %zu", effect.get(), this,
idx_insert);
@@ -2350,14 +2364,21 @@
if (type != EFFECT_FLAG_TYPE_AUXILIARY) {
if (i == size - 1 && i != 0) {
- mEffects[i - 1]->setOutBuffer(mOutBuffer);
mEffects[i - 1]->configure();
+ mEffects[i - 1]->setOutBuffer(mOutBuffer);
}
}
mEffects.removeAt(i);
+
+ // make sure the input buffer configuration for the new first effect in the chain
+ // is updated if needed (can switch from HAL channel mask to mixer channel mask)
+ if (i == 0 && size > 1) {
+ mEffects[0]->configure();
+ mEffects[0]->setInBuffer(mInBuffer);
+ }
+
ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
this, i);
-
break;
}
}
@@ -2932,7 +2953,43 @@
return t->sampleRate();
}
-audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::channelMask() const {
+audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::inChannelMask(int id) const {
+ sp<ThreadBase> t = thread().promote();
+ if (t == nullptr) {
+ return AUDIO_CHANNEL_NONE;
+ }
+ sp<EffectChain> c = chain().promote();
+ if (c == nullptr) {
+ return AUDIO_CHANNEL_NONE;
+ }
+
+ if (c->sessionId() != AUDIO_SESSION_OUTPUT_STAGE
+ || c->isFirstEffect(id)) {
+ return t->mixerChannelMask();
+ } else {
+ return t->channelMask();
+ }
+}
+
+uint32_t AudioFlinger::EffectChain::EffectCallback::inChannelCount(int id) const {
+ sp<ThreadBase> t = thread().promote();
+ if (t == nullptr) {
+ return 0;
+ }
+ sp<EffectChain> c = chain().promote();
+ if (c == nullptr) {
+ return 0;
+ }
+
+ if (c->sessionId() != AUDIO_SESSION_OUTPUT_STAGE
+ || c->isFirstEffect(id)) {
+ return audio_channel_count_from_out_mask(t->mixerChannelMask());
+ } else {
+ return t->channelCount();
+ }
+}
+
+audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::outChannelMask() const {
sp<ThreadBase> t = thread().promote();
if (t == nullptr) {
return AUDIO_CHANNEL_NONE;
@@ -2940,7 +2997,7 @@
return t->channelMask();
}
-uint32_t AudioFlinger::EffectChain::EffectCallback::channelCount() const {
+uint32_t AudioFlinger::EffectChain::EffectCallback::outChannelCount() const {
sp<ThreadBase> t = thread().promote();
if (t == nullptr) {
return 0;
@@ -3364,7 +3421,8 @@
return proxy->sampleRate();
}
-audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::channelMask() const {
+audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::inChannelMask(
+ int id __unused) const {
sp<DeviceEffectProxy> proxy = mProxy.promote();
if (proxy == nullptr) {
return AUDIO_CHANNEL_OUT_STEREO;
@@ -3372,7 +3430,23 @@
return proxy->channelMask();
}
-uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::channelCount() const {
+uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::inChannelCount(int id __unused) const {
+ sp<DeviceEffectProxy> proxy = mProxy.promote();
+ if (proxy == nullptr) {
+ return 2;
+ }
+ return proxy->channelCount();
+}
+
+audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::outChannelMask() const {
+ sp<DeviceEffectProxy> proxy = mProxy.promote();
+ if (proxy == nullptr) {
+ return AUDIO_CHANNEL_OUT_STEREO;
+ }
+ return proxy->channelMask();
+}
+
+uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::outChannelCount() const {
sp<DeviceEffectProxy> proxy = mProxy.promote();
if (proxy == nullptr) {
return 2;
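
The Effects.cpp changes above split the old channelMask()/channelCount() callback into input and output variants. For a chain on AUDIO_SESSION_OUTPUT_STAGE only the first effect reads the (possibly wider) mixer channel mask; every later effect, and the output side of all effects, uses the HAL channel mask, so the chain itself performs the downmix. A self-contained sketch of that selection rule, with hypothetical types standing in for the thread and chain:

    #include <cstdint>

    // Hypothetical stand-ins for illustration; not the AudioFlinger types.
    enum Session { SESSION_OUTPUT_MIX, SESSION_OUTPUT_STAGE };

    struct ChannelInfo {
        uint32_t mixerMask;  // mask the mixer renders into (may be wider, e.g. 5.1)
        uint32_t halMask;    // mask the HAL stream was opened with (e.g. stereo)
    };

    // Input mask seen by an effect in a chain attached to 'session'.
    uint32_t inChannelMask(Session session, bool isFirstEffect, const ChannelInfo& info) {
        // Output-stage chains convert from the mixer mask down to the HAL mask:
        // only the first effect consumes the mixer-format buffer.
        if (session != SESSION_OUTPUT_STAGE || isFirstEffect) {
            return info.mixerMask;
        }
        return info.halMask;
    }

    // Every effect produces the HAL channel mask on its output side.
    uint32_t outChannelMask(const ChannelInfo& info) {
        return info.halMask;
    }
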
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index a727e04..1d0d00d 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -34,8 +34,10 @@
virtual bool isOffloadOrDirect() const = 0;
virtual bool isOffloadOrMmap() const = 0;
virtual uint32_t sampleRate() const = 0;
- virtual audio_channel_mask_t channelMask() const = 0;
- virtual uint32_t channelCount() const = 0;
+ virtual audio_channel_mask_t inChannelMask(int id) const = 0;
+ virtual uint32_t inChannelCount(int id) const = 0;
+ virtual audio_channel_mask_t outChannelMask() const = 0;
+ virtual uint32_t outChannelCount() const = 0;
virtual audio_channel_mask_t hapticChannelMask() const = 0;
virtual size_t frameCount() const = 0;
@@ -64,6 +66,8 @@
virtual void resetVolume() = 0;
virtual wp<EffectChain> chain() const = 0;
+
+ virtual bool isAudioPolicyReady() const = 0;
};
// EffectBase(EffectModule) and EffectChain classes both have their own mutex to protect
@@ -164,6 +168,16 @@
void dump(int fd, const Vector<String16>& args);
+protected:
+ bool isInternal_l() const {
+ for (auto handle : mHandles) {
+ if (handle->client() != nullptr) {
+ return false;
+ }
+ }
+ return true;
+ }
+
private:
friend class AudioFlinger; // for mHandles
bool mPinned = false;
@@ -259,7 +273,7 @@
bool isHapticGenerator() const;
status_t setHapticIntensity(int id, int intensity);
- status_t setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo);
+ status_t setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo);
void dump(int fd, const Vector<String16>& args);
@@ -342,6 +356,8 @@
android::binder::Status disconnect() override;
android::binder::Status getCblk(media::SharedFileRegion* _aidl_return) override;
+ sp<Client> client() const { return mClient; }
+
private:
void disconnect(bool unpinIfLast);
@@ -511,6 +527,8 @@
sp<EffectCallbackInterface> effectCallback() const { return mEffectCallback; }
wp<ThreadBase> thread() const { return mEffectCallback->thread(); }
+ bool isFirstEffect(int id) const { return !mEffects.isEmpty() && id == mEffects[0]->id(); }
+
void dump(int fd, const Vector<String16>& args);
private:
@@ -544,8 +562,10 @@
bool isOffloadOrMmap() const override;
uint32_t sampleRate() const override;
- audio_channel_mask_t channelMask() const override;
- uint32_t channelCount() const override;
+ audio_channel_mask_t inChannelMask(int id) const override;
+ uint32_t inChannelCount(int id) const override;
+ audio_channel_mask_t outChannelMask() const override;
+ uint32_t outChannelCount() const override;
audio_channel_mask_t hapticChannelMask() const override;
size_t frameCount() const override;
uint32_t latency() const override;
@@ -566,6 +586,10 @@
wp<EffectChain> chain() const override { return mChain; }
+ bool isAudioPolicyReady() const override {
+ return mAudioFlinger.isAudioPolicyReady();
+ }
+
wp<ThreadBase> thread() const { return mThread.load(); }
void setThread(const wp<ThreadBase>& thread) {
@@ -694,8 +718,10 @@
bool isOffloadOrMmap() const override { return false; }
uint32_t sampleRate() const override;
- audio_channel_mask_t channelMask() const override;
- uint32_t channelCount() const override;
+ audio_channel_mask_t inChannelMask(int id) const override;
+ uint32_t inChannelCount(int id) const override;
+ audio_channel_mask_t outChannelMask() const override;
+ uint32_t outChannelCount() const override;
audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
size_t frameCount() const override { return 0; }
uint32_t latency() const override { return 0; }
@@ -716,6 +742,10 @@
wp<EffectChain> chain() const override { return nullptr; }
+ bool isAudioPolicyReady() const override {
+ return mManagerCallback->isAudioPolicyReady();
+ }
+
int newEffectId();
private:
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 88d4eaf..fc34d95 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -204,6 +204,8 @@
(void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_INTENSITY,
(void *)(uintptr_t)fastTrack->mHapticIntensity);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_MAX_AMPLITUDE,
+ (void *)(&(fastTrack->mHapticMaxAmplitude)));
mMixer->enable(index);
break;
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 857d3de..ce3cc14 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIO_FAST_MIXER_STATE_H
#define ANDROID_AUDIO_FAST_MIXER_STATE_H
+#include <math.h>
+
#include <audio_utils/minifloat.h>
#include <system/audio.h>
#include <media/AudioMixer.h>
@@ -51,6 +53,7 @@
int mGeneration; // increment when any field is assigned
bool mHapticPlaybackEnabled = false; // haptic playback is enabled or not
os::HapticScale mHapticIntensity = os::HapticScale::MUTE; // intensity of haptic data
+ float mHapticMaxAmplitude = NAN; // max amplitude allowed for haptic data
};
// Represents a single state of the fast mixer
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index a381c7d..93118b8 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -258,6 +258,7 @@
reinterpret_cast<PlaybackThread*>(thread.get()), false /*closeThread*/);
} else {
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
@@ -276,6 +277,7 @@
patch->sinks[0].ext.device.hw_module,
&output,
&config,
+ &mixerConfig,
outputDevice,
outputDeviceAddress,
flags);
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 0929055..e9e98ca 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -19,6 +19,8 @@
#error This header file should only be included from AudioFlinger.h
#endif
+#include <math.h>
+
// Checks and monitors OP_PLAY_AUDIO
class OpPlayAudioMonitor : public RefBase {
public:
@@ -161,6 +163,8 @@
}
/** Return at what intensity to play haptics, used in mixer. */
os::HapticScale getHapticIntensity() const { return mHapticIntensity; }
+ /** Return the maximum amplitude allowed for haptic data, used in mixer. */
+ float getHapticMaxAmplitude() const { return mHapticMaxAmplitude; }
/** Set intensity of haptic playback, should be set after querying vibrator service. */
void setHapticIntensity(os::HapticScale hapticIntensity) {
if (os::isValidHapticScale(hapticIntensity)) {
@@ -168,6 +172,12 @@
setHapticPlaybackEnabled(mHapticIntensity != os::HapticScale::MUTE);
}
}
+ /** Set maximum amplitude allowed for haptic data, should be set after querying
+ * vibrator service.
+ */
+ void setHapticMaxAmplitude(float maxAmplitude) {
+ mHapticMaxAmplitude = maxAmplitude;
+ }
sp<os::ExternalVibration> getExternalVibration() const { return mExternalVibration; }
void setTeePatches(TeePatches teePatches);
@@ -282,6 +292,8 @@
bool mHapticPlaybackEnabled = false; // indicates haptic playback enabled or not
// intensity to play haptic data
os::HapticScale mHapticIntensity = os::HapticScale::MUTE;
+ // max amplitude allowed for haptic data
+ float mHapticMaxAmplitude = NAN;
class AudioVibrationController : public os::BnExternalVibrationController {
public:
explicit AudioVibrationController(Track* track) : mTrack(track) {}
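
mHapticMaxAmplitude defaults to NAN, which downstream code treats as "no limit"; when the vibrator reports a maximum, haptic samples are expected to stay within ±maxAmplitude. A minimal sketch of that convention (the actual scaling happens in the AudioMixer haptic processing; this only illustrates the clamp rule and the NAN default):

    #include <algorithm>
    #include <cmath>

    // Clamp one haptic sample to the vibrator's maximum amplitude.
    // A NaN maximum means "no limit", matching the NAN default above.
    float clampHapticSample(float sample, float maxAmplitude) {
        if (std::isnan(maxAmplitude)) {
            return sample;  // no restriction known for this vibrator
        }
        return std::clamp(sample, -maxAmplitude, maxAmplitude);
    }
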
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b9cdab8..9665424 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -50,8 +50,10 @@
#include <audio_utils/format.h>
#include <audio_utils/minifloat.h>
#include <audio_utils/safe_math.h>
-#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_downmix.h>
+#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_virtualizer_stage.h>
#include <system/audio.h>
// NBAIO implementations
@@ -507,6 +509,8 @@
return "MMAP_PLAYBACK";
case MMAP_CAPTURE:
return "MMAP_CAPTURE";
+ case VIRTUALIZER_STAGE:
+ return "VIRTUALIZER_STAGE";
default:
return "unknown";
}
@@ -622,7 +626,7 @@
return status;
}
-void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event event, pid_t pid,
+void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId)
{
Mutex::Autolock _l(mLock);
@@ -630,7 +634,7 @@
}
// sendIoConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event, pid_t pid,
+void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId)
{
// The audio statistics history is exponentially weighted to forget events
@@ -722,6 +726,19 @@
sendConfigEvent_l(configEvent);
}
+void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent()
+{
+ Mutex::Autolock _l(mLock);
+ sendCheckOutputStageEffectsEvent_l();
+}
+
+void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent_l()
+{
+ sp<ConfigEvent> configEvent =
+ (ConfigEvent *)new CheckOutputStageEffectsEvent();
+ sendConfigEvent_l(configEvent);
+}
+
// post condition: mConfigEvents.isEmpty()
void AudioFlinger::ThreadBase::processConfigEvents_l()
{
@@ -784,6 +801,11 @@
(ResizeBufferConfigEventData *)event->mData.get();
resizeInputBuffer_l(data->mMaxSharedAudioHistoryMs);
} break;
+
+ case CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS: {
+ setCheckOutputStageEffects();
+ } break;
+
default:
ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
break;
@@ -1008,6 +1030,8 @@
return String16("MmapPlayback");
case MMAP_CAPTURE:
return String16("MmapCapture");
+ case VIRTUALIZER_STAGE:
+ return String16("AudioVirt");
default:
ALOG_ASSERT(false);
return String16("AudioUnknown");
@@ -1401,6 +1425,13 @@
return BAD_VALUE;
}
break;
+ case VIRTUALIZER_STAGE:
+ if (!audio_is_global_session(sessionId)) {
+ ALOGW("checkEffectCompatibility_l(): non global effect %s on VIRTUALIZER_STAGE"
+ " thread %s", desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+ break;
default:
LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
}
@@ -1477,11 +1508,11 @@
if (effect->isHapticGenerator()) {
// TODO(b/184194057): Use the vibrator information from the vibrator that will be used
// for the HapticGenerator.
- const media::AudioVibratorInfo* defaultVibratorInfo =
- mAudioFlinger->getDefaultVibratorInfo_l();
- if (defaultVibratorInfo != nullptr) {
+ const std::optional<media::AudioVibratorInfo> defaultVibratorInfo =
+ std::move(mAudioFlinger->getDefaultVibratorInfo_l());
+ if (defaultVibratorInfo) {
// Only set the vibrator info when it is a valid one.
- effect->setVibratorInfo(defaultVibratorInfo);
+ effect->setVibratorInfo(*defaultVibratorInfo);
}
}
// create effect handle and connect it to effect module
@@ -1489,6 +1520,7 @@
lStatus = handle->initCheck();
if (lStatus == OK) {
lStatus = effect->addHandle(handle.get());
+ sendCheckOutputStageEffectsEvent_l();
}
if (enabled != NULL) {
*enabled = (int)effect->isEnabled();
@@ -1531,6 +1563,7 @@
if (remove) {
removeEffect_l(effect, true);
}
+ sendCheckOutputStageEffectsEvent_l();
}
if (remove) {
mAudioFlinger->updateOrphanEffectChains(effect);
@@ -1888,6 +1921,14 @@
item->selfrecord();
}
+product_strategy_t AudioFlinger::ThreadBase::getStrategyForStream(audio_stream_type_t stream) const
+{
+ if (!mAudioFlinger->isAudioPolicyReady()) {
+ return PRODUCT_STRATEGY_NONE;
+ }
+ return AudioSystem::getStrategyForStream(stream);
+}
+
// ----------------------------------------------------------------------------
// Playback
// ----------------------------------------------------------------------------
@@ -1896,15 +1937,16 @@
AudioStreamOut* output,
audio_io_handle_t id,
type_t type,
- bool systemReady)
+ bool systemReady,
+ audio_config_base_t *mixerConfig)
: ThreadBase(audioFlinger, id, type, systemReady, true /* isOut */),
mNormalFrameCount(0), mSinkBuffer(NULL),
- mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
+ mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == VIRTUALIZER_STAGE),
mMixerBuffer(NULL),
mMixerBufferSize(0),
mMixerBufferFormat(AUDIO_FORMAT_INVALID),
mMixerBufferValid(false),
- mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
+ mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == VIRTUALIZER_STAGE),
mEffectBuffer(NULL),
mEffectBufferSize(0),
mEffectBufferFormat(AUDIO_FORMAT_INVALID),
@@ -1956,8 +1998,18 @@
mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
}
+ if (mixerConfig != nullptr && mixerConfig->channel_mask != AUDIO_CHANNEL_NONE) {
+ mMixerChannelMask = mixerConfig->channel_mask;
+ }
+
readOutputParameters_l();
+ if (mType != VIRTUALIZER_STAGE
+ && mMixerChannelMask != mChannelMask) {
+ LOG_ALWAYS_FATAL("HAL channel mask %#x does not match mixer channel mask %#x",
+ mChannelMask, mMixerChannelMask);
+ }
+
// TODO: We may also match on address as well as device type for
// AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
if (type == MIXER || type == DIRECT || type == OFFLOAD) {
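
The constructor changes above accept an optional mixer config: when it carries a channel mask the thread mixes in that mask, otherwise the mixer mask later falls back to the HAL mask in readOutputParameters_l(), and for every thread type except VIRTUALIZER_STAGE the two must end up identical. A small sketch of that resolution rule, using a runtime error where the real code uses LOG_ALWAYS_FATAL:

    #include <stdexcept>

    // Hypothetical reduced check, illustration only.
    unsigned resolveMixerMask(unsigned halMask, unsigned requestedMixerMask,
                              bool isVirtualizerStage) {
        const unsigned mixerMask = (requestedMixerMask != 0) ? requestedMixerMask : halMask;
        if (!isVirtualizerStage && mixerMask != halMask) {
            // Only the virtualizer stage thread may mix in a wider layout
            // than the HAL stream consumes.
            throw std::runtime_error("mixer channel mask does not match HAL channel mask");
        }
        return mixerMask;
    }
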
@@ -2080,10 +2132,12 @@
write(fd, result.string(), result.size());
}
-void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
{
dprintf(fd, " Master volume: %f\n", mMasterVolume);
dprintf(fd, " Master mute: %s\n", mMasterMute ? "on" : "off");
+ dprintf(fd, " Mixer channel Mask: %#x (%s)\n",
+ mMixerChannelMask, channelMaskToString(mMixerChannelMask, true /* output */).c_str());
if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
dprintf(fd, " Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
channelMaskToString(mHapticChannelMask, true /* output */).c_str());
@@ -2109,7 +2163,7 @@
}
if (output != nullptr) {
dprintf(fd, " Hal stream dump:\n");
- (void)output->stream->dump(fd);
+ (void)output->stream->dump(fd, args);
}
}
@@ -2397,11 +2451,11 @@
// all tracks in same audio session must share the same routing strategy otherwise
// conflicts will happen when tracks are moved from one output to another by audio policy
// manager
- product_strategy_t strategy = AudioSystem::getStrategyForStream(streamType);
+ product_strategy_t strategy = getStrategyForStream(streamType);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> t = mTracks[i];
if (t != 0 && t->isExternalTrack()) {
- product_strategy_t actual = AudioSystem::getStrategyForStream(t->streamType());
+ product_strategy_t actual = getStrategyForStream(t->streamType());
if (sessionId == t->sessionId() && strategy != actual) {
ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
strategy, actual);
@@ -2445,7 +2499,7 @@
if (chain != 0) {
ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
track->setMainBuffer(chain->inBuffer());
- chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
+ chain->setStrategy(getStrategyForStream(track->streamType()));
chain->incTrackCnt();
}
@@ -2613,8 +2667,19 @@
mLock.unlock();
const int intensity = AudioFlinger::onExternalVibrationStart(
track->getExternalVibration());
+ std::optional<media::AudioVibratorInfo> vibratorInfo;
+ {
+ // TODO(b/184194780): Use the vibrator information from the vibrator that will be
+ // used to play this track.
+ Mutex::Autolock _l(mAudioFlinger->mLock);
+ vibratorInfo = std::move(mAudioFlinger->getDefaultVibratorInfo_l());
+ }
mLock.lock();
track->setHapticIntensity(static_cast<os::HapticScale>(intensity));
+ if (vibratorInfo) {
+ track->setHapticMaxAmplitude(vibratorInfo->maxAmplitude);
+ }
+
// Haptic playback should be enabled by vibrator service.
if (track->getHapticPlaybackEnabled()) {
// Disable haptic playback of all active track to ensure only
@@ -2707,36 +2772,26 @@
return mOutput->stream->selectPresentation(presentationId, programId);
}
-void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId) {
- sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
-
- desc->mIoHandle = mId;
- struct audio_patch patch = mPatch;
- if (isMsdDevice()) {
- patch = mDownStreamPatch;
- }
-
+ sp<AudioIoDescriptor> desc;
+ const struct audio_patch patch = isMsdDevice() ? mDownStreamPatch : mPatch;
switch (event) {
case AUDIO_OUTPUT_OPENED:
case AUDIO_OUTPUT_REGISTERED:
case AUDIO_OUTPUT_CONFIG_CHANGED:
- desc->mPatch = patch;
- desc->mChannelMask = mChannelMask;
- desc->mSamplingRate = mSampleRate;
- desc->mFormat = mFormat;
- desc->mFrameCount = mNormalFrameCount; // FIXME see
- // AudioFlinger::frameCount(audio_io_handle_t)
- desc->mFrameCountHAL = mFrameCount;
- desc->mLatency = latency_l();
+ desc = sp<AudioIoDescriptor>::make(mId, patch, false /*isInput*/,
+ mSampleRate, mFormat, mChannelMask,
+ // FIXME AudioFlinger::frameCount(audio_io_handle_t) instead of mNormalFrameCount?
+ mNormalFrameCount, mFrameCount, latency_l());
break;
case AUDIO_CLIENT_STARTED:
- desc->mPatch = patch;
- desc->mPortId = portId;
+ desc = sp<AudioIoDescriptor>::make(mId, patch, portId);
break;
case AUDIO_OUTPUT_CLOSED:
default:
+ desc = sp<AudioIoDescriptor>::make(mId);
break;
}
mAudioFlinger->ioConfigChanged(event, desc, pid);
@@ -2814,14 +2869,20 @@
if (!audio_is_output_channel(mChannelMask)) {
LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
}
- if ((mType == MIXER || mType == DUPLICATING)
- && !isValidPcmSinkChannelMask(mChannelMask)) {
+ if (hasMixer() && !isValidPcmSinkChannelMask(mChannelMask)) {
LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output",
mChannelMask);
}
+
+ if (mMixerChannelMask == AUDIO_CHANNEL_NONE) {
+ mMixerChannelMask = mChannelMask;
+ }
+
mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
mBalance.setChannelMask(mChannelMask);
+ uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mMixerChannelMask);
+
// Get actual HAL format.
status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
@@ -2831,8 +2892,7 @@
if (!audio_is_valid_format(mFormat)) {
LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
}
- if ((mType == MIXER || mType == DUPLICATING)
- && !isValidPcmSinkFormat(mFormat)) {
+ if (hasMixer() && !isValidPcmSinkFormat(mFormat)) {
LOG_FATAL("HAL format %#x not supported for mixed output",
mFormat);
}
@@ -2841,7 +2901,7 @@
LOG_ALWAYS_FATAL_IF(result != OK,
"Error when retrieving output stream buffer size: %d", result);
mFrameCount = mBufferSize / mFrameSize;
- if ((mType == MIXER || mType == DUPLICATING) && (mFrameCount & 15)) {
+ if (hasMixer() && (mFrameCount & 15)) {
ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
mFrameCount);
}
@@ -2914,7 +2974,7 @@
}
mNormalFrameCount = multiplier * mFrameCount;
// round up to nearest 16 frames to satisfy AudioMixer
- if (mType == MIXER || mType == DUPLICATING) {
+ if (hasMixer()) {
mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
}
ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames", mFrameCount,
@@ -2941,7 +3001,7 @@
mMixerBuffer = NULL;
if (mMixerBufferEnabled) {
mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // no longer valid: AUDIO_FORMAT_PCM_16_BIT.
- mMixerBufferSize = mNormalFrameCount * mChannelCount
+ mMixerBufferSize = mNormalFrameCount * mixerChannelCount
* audio_bytes_per_sample(mMixerBufferFormat);
(void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
}
@@ -2949,7 +3009,7 @@
mEffectBuffer = NULL;
if (mEffectBufferEnabled) {
mEffectBufferFormat = EFFECT_BUFFER_FORMAT;
- mEffectBufferSize = mNormalFrameCount * mChannelCount
+ mEffectBufferSize = mNormalFrameCount * mixerChannelCount
* audio_bytes_per_sample(mEffectBufferFormat);
(void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
}
@@ -2958,6 +3018,7 @@
mChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & ~mHapticChannelMask);
mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
mChannelCount -= mHapticChannelCount;
+ mMixerChannelMask = static_cast<audio_channel_mask_t>(mMixerChannelMask & ~mHapticChannelMask);
// force reconfiguration of effect chains and engines to take new buffer size and audio
// parameters into account
@@ -3051,15 +3112,15 @@
// session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
// it is moved to correct output by audio policy manager when A2DP is connected or disconnected
if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
- return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+ return getStrategyForStream(AUDIO_STREAM_MUSIC);
}
for (size_t i = 0; i < mTracks.size(); i++) {
sp<Track> track = mTracks[i];
if (sessionId == track->sessionId() && !track->isInvalid()) {
- return AudioSystem::getStrategyForStream(track->streamType());
+ return getStrategyForStream(track->streamType());
}
}
- return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+ return getStrategyForStream(AUDIO_STREAM_MUSIC);
}
@@ -3348,7 +3409,8 @@
// Only one effect chain can be present in direct output thread and it uses
// the sink buffer as input
if (mType != DIRECT) {
- size_t numSamples = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
+ size_t numSamples = mNormalFrameCount
+ * (audio_channel_count_from_out_mask(mMixerChannelMask) + mHapticChannelCount);
status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
numSamples * sizeof(effect_buffer_t),
&halInBuffer);
@@ -3531,6 +3593,8 @@
audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ sendCheckOutputStageEffectsEvent();
+
// loopCount is used for statistics and diagnostics.
for (int64_t loopCount = 0; !exitPending(); ++loopCount)
{
@@ -3587,11 +3651,18 @@
}
}
+ if (mCheckOutputStageEffects.exchange(false)) {
+ checkOutputStageEffects();
+ }
+
{ // scope for mLock
Mutex::Autolock _l(mLock);
processConfigEvents_l();
+ if (mCheckOutputStageEffects.load()) {
+ continue;
+ }
// See comment at declaration of logString for why this is done under mLock
if (logString != NULL) {
@@ -3757,6 +3828,8 @@
if (mMixerBufferValid) {
void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
+ uint32_t channelCount = mEffectBufferValid ?
+ audio_channel_count_from_out_mask(mMixerChannelMask) : mChannelCount;
// mono blend occurs for mixer threads only (not direct or offloaded)
// and is handled here if we're going directly to the sink.
@@ -3774,7 +3847,7 @@
}
memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
- mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+ mNormalFrameCount * (channelCount + mHapticChannelCount));
// If we're going directly to the sink and there are haptic channels,
// we should adjust channels as the sample data is partially interleaved
@@ -4448,8 +4521,8 @@
// ----------------------------------------------------------------------------
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, bool systemReady, type_t type)
- : PlaybackThread(audioFlinger, output, id, type, systemReady),
+ audio_io_handle_t id, bool systemReady, type_t type, audio_config_base_t *mixerConfig)
+ : PlaybackThread(audioFlinger, output, id, type, systemReady, mixerConfig),
// mAudioMixer below
// mFastMixer below
mFastMixerFutex(0),
@@ -4566,6 +4639,7 @@
fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
fastTrack->mHapticIntensity = os::HapticScale::NONE;
+ fastTrack->mHapticMaxAmplitude = NAN;
fastTrack->mGeneration++;
state->mFastTracksGen++;
state->mTrackMask = 1;
@@ -5103,6 +5177,7 @@
fastTrack->mFormat = track->mFormat;
fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
fastTrack->mHapticIntensity = track->getHapticIntensity();
+ fastTrack->mHapticMaxAmplitude = track->getHapticMaxAmplitude();
fastTrack->mGeneration++;
state->mTrackMask |= 1 << j;
didModify = true;
@@ -5356,7 +5431,7 @@
trackId,
AudioMixer::TRACK,
AudioMixer::MIXER_CHANNEL_MASK,
- (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
+ (void *)(uintptr_t)(mMixerChannelMask | mHapticChannelMask));
// limit track sample rate to 2 x output sample rate, which changes at re-configuration
uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
uint32_t reqSampleRate = proxy->getSampleRate();
@@ -5425,6 +5500,10 @@
trackId,
AudioMixer::TRACK,
AudioMixer::HAPTIC_INTENSITY, (void *)(uintptr_t)track->getHapticIntensity());
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::HAPTIC_MAX_AMPLITUDE, (void *)(&(track->mHapticMaxAmplitude)));
// reset retry count
track->mRetryCount = kMaxTrackRetries;
@@ -5575,7 +5654,8 @@
// remove all the tracks that need to be...
removeTracks_l(*tracksToRemove);
- if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0) {
+ if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0 ||
+ getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE) != 0) {
mEffectBufferValid = true;
}
@@ -6971,6 +7051,69 @@
MixerThread::cacheParameters_l();
}
+// ----------------------------------------------------------------------------
+
+AudioFlinger::VirtualizerStageThread::VirtualizerStageThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ audio_io_handle_t id,
+ bool systemReady,
+ audio_config_base_t *mixerConfig)
+ : MixerThread(audioFlinger, output, id, systemReady, VIRTUALIZER_STAGE, mixerConfig)
+{
+}
+
+void AudioFlinger::VirtualizerStageThread::checkOutputStageEffects()
+{
+ bool hasVirtualizer = false;
+ bool hasDownMixer = false;
+ sp<EffectHandle> finalDownMixer;
+ {
+ Mutex::Autolock _l(mLock);
+ sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
+ if (chain != 0) {
+ hasVirtualizer = chain->getEffectFromType_l(FX_IID_VIRTUALIZER_STAGE) != nullptr;
+ hasDownMixer = chain->getEffectFromType_l(EFFECT_UIID_DOWNMIX) != nullptr;
+ }
+
+ finalDownMixer = mFinalDownMixer;
+ mFinalDownMixer.clear();
+ }
+
+ if (hasVirtualizer) {
+ if (finalDownMixer != nullptr) {
+ int32_t ret;
+ finalDownMixer->disable(&ret);
+ }
+ finalDownMixer.clear();
+ } else if (!hasDownMixer) {
+ std::vector<effect_descriptor_t> descriptors;
+ status_t status = mAudioFlinger->mEffectsFactoryHal->getDescriptors(
+ EFFECT_UIID_DOWNMIX, &descriptors);
+ if (status != NO_ERROR) {
+ return;
+ }
+ ALOG_ASSERT(!descriptors.empty(),
+ "%s getDescriptors() returned no error but empty list", __func__);
+
+ finalDownMixer = createEffect_l(nullptr /*client*/, nullptr /*effectClient*/,
+ 0 /*priority*/, AUDIO_SESSION_OUTPUT_STAGE, &descriptors[0], nullptr /*enabled*/,
+ &status, false /*pinned*/, false /*probe*/);
+
+ if (finalDownMixer == nullptr || (status != NO_ERROR && status != ALREADY_EXISTS)) {
+ ALOGW("%s error creating downmixer %d", __func__, status);
+ finalDownMixer.clear();
+ } else {
+ int32_t ret;
+ finalDownMixer->enable(&ret);
+ }
+ }
+
+ {
+ Mutex::Autolock _l(mLock);
+ mFinalDownMixer = finalDownMixer;
+ }
+}
+
// ----------------------------------------------------------------------------
// Record
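
checkOutputStageEffects() above decides whether the virtualizer stage needs a final downmixer on AUDIO_SESSION_OUTPUT_STAGE: if the chain already holds a virtualizer (which renders to the HAL layout itself) any previously created downmixer is disabled and dropped, and if neither a virtualizer nor a downmixer is present a downmix effect is created and enabled. A condensed sketch of just that decision, with hypothetical names (the real code manipulates EffectHandle objects via createEffect_l()):

    // Illustration only; returns what should happen to the helper downmixer.
    enum class StageAction { kNone, kDropDownmixer, kCreateDownmixer };

    StageAction decideOutputStage(bool hasVirtualizer, bool hasDownMixer) {
        if (hasVirtualizer) {
            // The virtualizer produces the HAL channel layout itself,
            // so any helper downmixer created earlier can go away.
            return StageAction::kDropDownmixer;
        }
        if (!hasDownMixer) {
            // Nothing converts the wide mixer output to the HAL mask yet:
            // insert a downmix effect at the output stage.
            return StageAction::kCreateDownmixer;
        }
        return StageAction::kNone;
    }
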
@@ -8631,30 +8774,22 @@
return String8();
}
-void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId) {
- sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
-
- desc->mIoHandle = mId;
-
+ sp<AudioIoDescriptor> desc;
switch (event) {
case AUDIO_INPUT_OPENED:
case AUDIO_INPUT_REGISTERED:
case AUDIO_INPUT_CONFIG_CHANGED:
- desc->mPatch = mPatch;
- desc->mChannelMask = mChannelMask;
- desc->mSamplingRate = mSampleRate;
- desc->mFormat = mFormat;
- desc->mFrameCount = mFrameCount;
- desc->mFrameCountHAL = mFrameCount;
- desc->mLatency = 0;
+ desc = sp<AudioIoDescriptor>::make(mId, mPatch, true /*isInput*/,
+ mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
break;
case AUDIO_CLIENT_STARTED:
- desc->mPatch = mPatch;
- desc->mPortId = portId;
+ desc = sp<AudioIoDescriptor>::make(mId, mPatch, portId);
break;
case AUDIO_INPUT_CLOSED:
default:
+ desc = sp<AudioIoDescriptor>::make(mId);
break;
}
mAudioFlinger->ioConfigChanged(event, desc, pid);
@@ -9278,7 +9413,7 @@
mActiveTracks.add(track);
sp<EffectChain> chain = getEffectChain_l(mSessionId);
if (chain != 0) {
- chain->setStrategy(AudioSystem::getStrategyForStream(streamType()));
+ chain->setStrategy(getStrategyForStream(streamType()));
chain->incTrackCnt();
chain->incActiveTrackCnt();
}
@@ -9497,31 +9632,26 @@
return String8();
}
-void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event event, pid_t pid,
+void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId __unused) {
- sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
-
- desc->mIoHandle = mId;
-
+ sp<AudioIoDescriptor> desc;
+ bool isInput = false;
switch (event) {
case AUDIO_INPUT_OPENED:
case AUDIO_INPUT_REGISTERED:
case AUDIO_INPUT_CONFIG_CHANGED:
+ isInput = true;
+ FALLTHROUGH_INTENDED;
case AUDIO_OUTPUT_OPENED:
case AUDIO_OUTPUT_REGISTERED:
case AUDIO_OUTPUT_CONFIG_CHANGED:
- desc->mPatch = mPatch;
- desc->mChannelMask = mChannelMask;
- desc->mSamplingRate = mSampleRate;
- desc->mFormat = mFormat;
- desc->mFrameCount = mFrameCount;
- desc->mFrameCountHAL = mFrameCount;
- desc->mLatency = 0;
+ desc = sp<AudioIoDescriptor>::make(mId, mPatch, isInput,
+ mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
break;
-
case AUDIO_INPUT_CLOSED:
case AUDIO_OUTPUT_CLOSED:
default:
+ desc = sp<AudioIoDescriptor>::make(mId);
break;
}
mAudioFlinger->ioConfigChanged(event, desc, pid);
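
The Threads.cpp changes also defer output-stage effect checks off the fast path: adding or removing an effect posts a CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS event, the event handler merely sets an atomic flag, and the playback loop exchanges that flag before taking its own mutex so the potentially slow check never runs under the lock. A reduced model of that hand-off, with hypothetical names:

    #include <atomic>
    #include <mutex>

    // Hypothetical reduced model of the deferred check, illustration only.
    class StageEffectChecker {
    public:
        // Called from the config-event handler (thread mutex held by the caller).
        void requestCheck() { mPending.store(true); }

        // Called once per loop iteration, before the thread mutex is taken.
        void maybeRunCheck() {
            if (mPending.exchange(false)) {
                checkOutputStageEffects();
            }
        }

    private:
        void checkOutputStageEffects() {
            std::lock_guard<std::mutex> lock(mLock);
            // Inspect the output-stage chain and create/enable a downmixer if needed.
        }

        std::atomic_bool mPending{false};
        std::mutex mLock;
    };
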
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 16082a9..38e55a3 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -32,6 +32,7 @@
OFFLOAD, // Thread class is OffloadThread
MMAP_PLAYBACK, // Thread class for MMAP playback stream
MMAP_CAPTURE, // Thread class for MMAP capture stream
+ VIRTUALIZER_STAGE, // Thread class is VirtualizerStageThread
// If you add any values here, also update ThreadBase::threadTypeToString()
};
@@ -53,7 +54,8 @@
CFG_EVENT_CREATE_AUDIO_PATCH,
CFG_EVENT_RELEASE_AUDIO_PATCH,
CFG_EVENT_UPDATE_OUT_DEVICE,
- CFG_EVENT_RESIZE_BUFFER
+ CFG_EVENT_RESIZE_BUFFER,
+ CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS
};
class ConfigEventData: public RefBase {
@@ -87,7 +89,13 @@
public:
virtual ~ConfigEvent() {}
- void dump(char *buffer, size_t size) { mData->dump(buffer, size); }
+ void dump(char *buffer, size_t size) {
+ snprintf(buffer, size, "Event type: %d\n", mType);
+ if (mData != nullptr) {
+ snprintf(buffer, size, "Data:\n");
+ mData->dump(buffer, size);
+ }
+ }
const int mType; // event type e.g. CFG_EVENT_IO
Mutex mLock; // mutex associated with mCond
@@ -105,22 +113,22 @@
class IoConfigEventData : public ConfigEventData {
public:
- IoConfigEventData(audio_io_config_event event, pid_t pid,
+ IoConfigEventData(audio_io_config_event_t event, pid_t pid,
audio_port_handle_t portId) :
mEvent(event), mPid(pid), mPortId(portId) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "IO event: event %d\n", mEvent);
+ snprintf(buffer, size, "- IO event: event %d\n", mEvent);
}
- const audio_io_config_event mEvent;
+ const audio_io_config_event_t mEvent;
const pid_t mPid;
const audio_port_handle_t mPortId;
};
class IoConfigEvent : public ConfigEvent {
public:
- IoConfigEvent(audio_io_config_event event, pid_t pid, audio_port_handle_t portId) :
+ IoConfigEvent(audio_io_config_event_t event, pid_t pid, audio_port_handle_t portId) :
ConfigEvent(CFG_EVENT_IO) {
mData = new IoConfigEventData(event, pid, portId);
}
@@ -133,7 +141,7 @@
mPid(pid), mTid(tid), mPrio(prio), mForApp(forApp) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d, for app? %d\n",
+ snprintf(buffer, size, "- Prio event: pid %d, tid %d, prio %d, for app? %d\n",
mPid, mTid, mPrio, mForApp);
}
@@ -158,7 +166,7 @@
mKeyValuePairs(keyValuePairs) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "KeyValue: %s\n", mKeyValuePairs.string());
+ snprintf(buffer, size, "- KeyValue: %s\n", mKeyValuePairs.string());
}
const String8 mKeyValuePairs;
@@ -181,7 +189,7 @@
mPatch(patch), mHandle(handle) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Patch handle: %u\n", mHandle);
+ snprintf(buffer, size, "- Patch handle: %u\n", mHandle);
}
const struct audio_patch mPatch;
@@ -205,7 +213,7 @@
mHandle(handle) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Patch handle: %u\n", mHandle);
+ snprintf(buffer, size, "- Patch handle: %u\n", mHandle);
}
audio_patch_handle_t mHandle;
@@ -227,7 +235,7 @@
mOutDevices(outDevices) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Devices: %s", android::toString(mOutDevices).c_str());
+ snprintf(buffer, size, "- Devices: %s", android::toString(mOutDevices).c_str());
}
DeviceDescriptorBaseVector mOutDevices;
@@ -249,7 +257,7 @@
mMaxSharedAudioHistoryMs(maxSharedAudioHistoryMs) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "mMaxSharedAudioHistoryMs: %d", mMaxSharedAudioHistoryMs);
+ snprintf(buffer, size, "- mMaxSharedAudioHistoryMs: %d", mMaxSharedAudioHistoryMs);
}
int32_t mMaxSharedAudioHistoryMs;
@@ -265,6 +273,16 @@
virtual ~ResizeBufferConfigEvent() {}
};
+ class CheckOutputStageEffectsEvent : public ConfigEvent {
+ public:
+ CheckOutputStageEffectsEvent() :
+ ConfigEvent(CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS) {
+ }
+
+ virtual ~CheckOutputStageEffectsEvent() {}
+ };
+
+
class PMDeathRecipient : public IBinder::DeathRecipient {
public:
explicit PMDeathRecipient(const wp<ThreadBase>& thread) : mThread(thread) {}
@@ -290,8 +308,11 @@
// dynamic externally-visible
uint32_t sampleRate() const { return mSampleRate; }
audio_channel_mask_t channelMask() const { return mChannelMask; }
+ virtual audio_channel_mask_t mixerChannelMask() const { return mChannelMask; }
+
audio_format_t format() const { return mHALFormat; }
uint32_t channelCount() const { return mChannelCount; }
+
// Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
// and returns the [normal mix] buffer's frame count.
virtual size_t frameCount() const = 0;
@@ -311,15 +332,15 @@
status_t& status) = 0;
virtual status_t setParameters(const String8& keyValuePairs);
virtual String8 getParameters(const String8& keys) = 0;
- virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+ virtual void ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE) = 0;
// sendConfigEvent_l() must be called with ThreadBase::mLock held
// Can temporarily release the lock if waiting for a reply from
// processConfigEvents_l().
status_t sendConfigEvent_l(sp<ConfigEvent>& event);
- void sendIoConfigEvent(audio_io_config_event event, pid_t pid = 0,
+ void sendIoConfigEvent(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
- void sendIoConfigEvent_l(audio_io_config_event event, pid_t pid = 0,
+ void sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
void sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp);
void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio, bool forApp);
@@ -330,7 +351,11 @@
status_t sendUpdateOutDeviceConfigEvent(
const DeviceDescriptorBaseVector& outDevices);
void sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs);
+ void sendCheckOutputStageEffectsEvent();
+ void sendCheckOutputStageEffectsEvent_l();
+
void processConfigEvents_l();
+ virtual void setCheckOutputStageEffects() {}
virtual void cacheParameters_l() = 0;
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle) = 0;
@@ -574,6 +599,8 @@
return INVALID_OPERATION;
}
+ product_strategy_t getStrategyForStream(audio_stream_type_t stream) const;
+
virtual void dumpInternals_l(int fd __unused, const Vector<String16>& args __unused)
{ }
virtual void dumpTracks_l(int fd __unused, const Vector<String16>& args __unused) { }
@@ -824,7 +851,8 @@
static const nsecs_t kMaxNextBufferDelayNs = 100000000;
PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, type_t type, bool systemReady);
+ audio_io_handle_t id, type_t type, bool systemReady,
+ audio_config_base_t *mixerConfig = nullptr);
virtual ~PlaybackThread();
// Thread virtuals
@@ -881,6 +909,8 @@
mActiveTracks.updatePowerState(this, true /* force */);
}
+ virtual void checkOutputStageEffects() {}
+
void dumpInternals_l(int fd, const Vector<String16>& args) override;
void dumpTracks_l(int fd, const Vector<String16>& args) override;
@@ -942,7 +972,7 @@
{ return android_atomic_acquire_load(&mSuspended) > 0; }
virtual String8 getParameters(const String8& keys);
- virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+ virtual void ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
// Consider also removing and passing an explicit mMainBuffer initialization
@@ -973,6 +1003,10 @@
virtual size_t frameCount() const { return mNormalFrameCount; }
+ audio_channel_mask_t mixerChannelMask() const override {
+ return mMixerChannelMask;
+ }
+
status_t getTimestamp_l(AudioTimestamp& timestamp);
void addPatchTrack(const sp<PatchTrack>& track);
@@ -1015,6 +1049,9 @@
PlaybackThread::Track* getTrackById_l(audio_port_handle_t trackId);
+ bool hasMixer() const {
+ return mType == MIXER || mType == DUPLICATING || mType == VIRTUALIZER_STAGE;
+ }
protected:
// updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
@@ -1101,6 +1138,9 @@
// haptic playback.
audio_channel_mask_t mHapticChannelMask = AUDIO_CHANNEL_NONE;
uint32_t mHapticChannelCount = 0;
+
+ audio_channel_mask_t mMixerChannelMask = AUDIO_CHANNEL_NONE;
+
private:
// mMasterMute is in both PlaybackThread and in AudioFlinger. When a
// PlaybackThread needs to find out if master-muted, it checks its local
@@ -1134,6 +1174,9 @@
// Cache various calculated values, at threadLoop() entry and after a parameter change
virtual void cacheParameters_l();
+ void setCheckOutputStageEffects() override {
+ mCheckOutputStageEffects.store(true);
+ }
virtual uint32_t correctLatency_l(uint32_t latency) const;
@@ -1314,6 +1357,8 @@
// audio patch used by the downstream software patch.
// Only used if ThreadBase::mIsMsdDevice is true.
struct audio_patch mDownStreamPatch;
+
+ std::atomic_bool mCheckOutputStageEffects{};
};
class MixerThread : public PlaybackThread {
@@ -1322,7 +1367,8 @@
AudioStreamOut* output,
audio_io_handle_t id,
bool systemReady,
- type_t type = MIXER);
+ type_t type = MIXER,
+ audio_config_base_t *mixerConfig = nullptr);
virtual ~MixerThread();
// Thread virtuals
@@ -1611,6 +1657,24 @@
}
};
+class VirtualizerStageThread : public MixerThread {
+public:
+ VirtualizerStageThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ audio_io_handle_t id,
+ bool systemReady,
+ audio_config_base_t *mixerConfig);
+ ~VirtualizerStageThread() override {}
+
+ bool hasFastMixer() const override { return false; }
+
+protected:
+ void checkOutputStageEffects() override;
+
+private:
+ sp<EffectHandle> mFinalDownMixer;
+};
+
// record thread
class RecordThread : public ThreadBase
{
@@ -1721,7 +1785,7 @@
status_t& status);
virtual void cacheParameters_l() {}
virtual String8 getParameters(const String8& keys);
- virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+ virtual void ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle);
@@ -1930,7 +1994,7 @@
virtual bool checkForNewParameter_l(const String8& keyValuePair,
status_t& status);
virtual String8 getParameters(const String8& keys);
- virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0,
+ virtual void ioConfigChanged(audio_io_config_event_t event, pid_t pid = 0,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
void readHalParameters_l();
virtual void cacheParameters_l() {}
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 2e49e71..c9cf564 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -18,6 +18,7 @@
#define ANDROID_AUDIOPOLICY_INTERFACE_H
#include <media/AudioCommonTypes.h>
+#include <media/AudioContainers.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioSystem.h>
#include <media/AudioPolicy.h>
@@ -198,7 +199,7 @@
virtual product_strategy_t getStrategyForStream(audio_stream_type_t stream) = 0;
// return the enabled output devices for the given stream type
- virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0;
+ virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream) = 0;
// retrieves the list of enabled output devices for the given audio attributes
virtual status_t getDevicesForAttributes(const audio_attributes_t &attr,
@@ -359,7 +360,8 @@
// The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags) = 0;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 1f9b535..7c7f02d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -362,7 +362,8 @@
const struct audio_port_config *srcConfig = NULL) const;
virtual void toAudioPort(struct audio_port_v7 *port) const;
- status_t open(const audio_config_t *config,
+ status_t open(const audio_config_t *halConfig,
+ const audio_config_base_t *mixerConfig,
const DeviceVector &devices,
audio_stream_type_t stream,
audio_output_flags_t flags,
@@ -423,6 +424,7 @@
uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
audio_session_t mDirectClientSession; // session id of the direct output client
bool mPendingReopenToQueryProfiles = false;
+ audio_channel_mask_t mMixerChannelMask = AUDIO_CHANNEL_NONE;
};
// Audio output driven by an input device directly.
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index cf1f64c..9837336 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -202,6 +202,20 @@
{AUDIO_FORMAT_AC4, {}}};
}
+ //TODO: b/193496180 use virtualizer stage flag at audio HAL when available
+ // until then, use DEEP_BUFFER+FAST flag combo to indicate the virtualizer stage output profile
+ void convertVirtualizerStageFlag()
+ {
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& curProfile : hwModule->getOutputProfiles()) {
+ if (curProfile->getFlags()
+ == (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER)) {
+ curProfile->setFlags(AUDIO_OUTPUT_FLAG_VIRTUALIZER_STAGE);
+ }
+ }
+ }
+ }
+
private:
static const constexpr char* const kDefaultEngineLibraryNameSuffix = "default";
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 20b4044..58d05c6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -168,6 +168,10 @@
DeviceVector getDevicesFromDeviceTypeAddrVec(
const AudioDeviceTypeAddrVector& deviceTypeAddrVector) const;
+ // Return an AudioDeviceTypeAddrVector containing the type and address of each device
+ // descriptor in this DeviceVector
+ AudioDeviceTypeAddrVector toTypeAddrVector() const;
+
// If there are devices with the given type and the devices to add is not empty,
// remove all the devices with the given type and add all the devices to add.
void replaceDevicesByType(audio_devices_t typeToRemove, const DeviceVector &devicesToAdd);
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 6b08f7c..6c3386f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -491,7 +491,8 @@
return true;
}
-status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
+status_t SwAudioOutputDescriptor::open(const audio_config_t *halConfig,
+ const audio_config_base_t *mixerConfig,
const DeviceVector &devices,
audio_stream_type_t stream,
audio_output_flags_t flags,
@@ -504,45 +505,62 @@
"with the requested devices, all device types: %s",
__func__, dumpDeviceTypes(devices.types()).c_str());
- audio_config_t lConfig;
- if (config == nullptr) {
- lConfig = AUDIO_CONFIG_INITIALIZER;
- lConfig.sample_rate = mSamplingRate;
- lConfig.channel_mask = mChannelMask;
- lConfig.format = mFormat;
+ audio_config_t lHalConfig;
+ if (halConfig == nullptr) {
+ lHalConfig = AUDIO_CONFIG_INITIALIZER;
+ lHalConfig.sample_rate = mSamplingRate;
+ lHalConfig.channel_mask = mChannelMask;
+ lHalConfig.format = mFormat;
} else {
- lConfig = *config;
+ lHalConfig = *halConfig;
}
// if the selected profile is offloaded and no offload info was specified,
// create a default one
if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
- lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
+ lHalConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
- lConfig.offload_info = AUDIO_INFO_INITIALIZER;
- lConfig.offload_info.sample_rate = lConfig.sample_rate;
- lConfig.offload_info.channel_mask = lConfig.channel_mask;
- lConfig.offload_info.format = lConfig.format;
- lConfig.offload_info.stream_type = stream;
- lConfig.offload_info.duration_us = -1;
- lConfig.offload_info.has_video = true; // conservative
- lConfig.offload_info.is_streaming = true; // likely
- lConfig.offload_info.encapsulation_mode = lConfig.offload_info.encapsulation_mode;
- lConfig.offload_info.content_id = lConfig.offload_info.content_id;
- lConfig.offload_info.sync_id = lConfig.offload_info.sync_id;
+ lHalConfig.offload_info = AUDIO_INFO_INITIALIZER;
+ lHalConfig.offload_info.sample_rate = lHalConfig.sample_rate;
+ lHalConfig.offload_info.channel_mask = lHalConfig.channel_mask;
+ lHalConfig.offload_info.format = lHalConfig.format;
+ lHalConfig.offload_info.stream_type = stream;
+ lHalConfig.offload_info.duration_us = -1;
+ lHalConfig.offload_info.has_video = true; // conservative
+ lHalConfig.offload_info.is_streaming = true; // likely
+ lHalConfig.offload_info.encapsulation_mode = lHalConfig.offload_info.encapsulation_mode;
+ lHalConfig.offload_info.content_id = lHalConfig.offload_info.content_id;
+ lHalConfig.offload_info.sync_id = lHalConfig.offload_info.sync_id;
+ }
+
+ audio_config_base_t lMixerConfig;
+ if (mixerConfig == nullptr) {
+ lMixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ lMixerConfig.sample_rate = lHalConfig.sample_rate;
+ lMixerConfig.channel_mask = lHalConfig.channel_mask;
+ lMixerConfig.format = lHalConfig.format;
+ } else {
+ lMixerConfig = *mixerConfig;
}
mFlags = (audio_output_flags_t)(mFlags | flags);
+ //TODO: b/193496180 use virtualizer stage flag at audio HAL when available
+ audio_output_flags_t halFlags = mFlags;
+ if ((mFlags & AUDIO_OUTPUT_FLAG_VIRTUALIZER_STAGE) != 0) {
+ halFlags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
+ }
+
ALOGV("opening output for device %s profile %p name %s",
mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str());
status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
output,
- &lConfig,
+ &lHalConfig,
+ &lMixerConfig,
device,
&mLatency,
- mFlags);
+ halFlags);
if (status == NO_ERROR) {
LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
@@ -550,9 +568,10 @@
"selected device %s for opening",
__FUNCTION__, *output, devices.toString().c_str(),
device->toString().c_str());
- mSamplingRate = lConfig.sample_rate;
- mChannelMask = lConfig.channel_mask;
- mFormat = lConfig.format;
+ mSamplingRate = lHalConfig.sample_rate;
+ mChannelMask = lHalConfig.channel_mask;
+ mFormat = lHalConfig.format;
+ mMixerChannelMask = lMixerConfig.channel_mask;
mId = PolicyAudioPort::getNextUniqueId();
mIoHandle = *output;
mProfile->curOpenCount++;
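
SwAudioOutputDescriptor::open() above now carries two configurations: the HAL config for the stream itself and a mixer config describing the (possibly wider) intermediate mix. When no mixer config is supplied it is defaulted from the HAL config, and the temporary VIRTUALIZER_STAGE flag is translated to the FAST|DEEP_BUFFER combination before reaching the HAL, per the TODO above. A hedged sketch of those two steps, with simplified stand-in types and flag values:

    #include <cstdint>

    // Simplified stand-ins for audio_config_t / audio_config_base_t (illustration only).
    struct HalConfig   { uint32_t sampleRate; uint32_t channelMask; uint32_t format; };
    struct MixerConfig { uint32_t sampleRate; uint32_t channelMask; uint32_t format; };

    constexpr uint32_t kFlagFast             = 1u << 0;  // hypothetical bit values
    constexpr uint32_t kFlagDeepBuffer       = 1u << 1;
    constexpr uint32_t kFlagVirtualizerStage = 1u << 2;

    MixerConfig defaultMixerConfig(const HalConfig& hal, const MixerConfig* requested) {
        if (requested != nullptr) {
            return *requested;  // caller asked for a specific mixer format
        }
        // Otherwise mix in exactly the format the HAL stream uses.
        return MixerConfig{ hal.sampleRate, hal.channelMask, hal.format };
    }

    uint32_t toHalFlags(uint32_t flags) {
        // Until the HAL exposes a dedicated flag (b/193496180 in the diff),
        // the virtualizer stage profile is signalled as FAST | DEEP_BUFFER.
        if (flags & kFlagVirtualizerStage) {
            return kFlagFast | kFlagDeepBuffer;
        }
        return flags;
    }
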
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index a92d31e..1722032 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -451,6 +451,14 @@
return devices;
}
+AudioDeviceTypeAddrVector DeviceVector::toTypeAddrVector() const {
+ AudioDeviceTypeAddrVector result;
+ for (const auto& device : *this) {
+ result.push_back(AudioDeviceTypeAddr(device->type(), device->address()));
+ }
+ return result;
+}
+
void DeviceVector::replaceDevicesByType(
audio_devices_t typeToRemove, const DeviceVector &devicesToAdd) {
DeviceVector devicesToRemove = getDevicesFromType(typeToRemove);
diff --git a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
index 8c61b90..5986069 100644
--- a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
@@ -30,9 +30,9 @@
// --- PolicyAudioPort class implementation
void PolicyAudioPort::attach(const sp<HwModule>& module)
{
+ mModule = module;
ALOGV("%s: attaching module %s to port %s",
__FUNCTION__, getModuleName(), asAudioPort()->getName().c_str());
- mModule = module;
}
void PolicyAudioPort::detach()
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
index 98415b7..22ff954 100644
--- a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
@@ -22,6 +22,17 @@
samplingRates="8000,16000,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
</mixPort>
+ <mixPort name="le audio input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
</mixPorts>
<devicePorts>
<!-- A2DP Audio Ports -->
@@ -49,6 +60,7 @@
-->
<devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
<devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+ <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
</devicePorts>
<routes>
<route type="mix" sink="BT A2DP Out"
@@ -61,6 +73,8 @@
sources="hearing aid output"/>
<route type="mix" sink="BLE Headset Out"
sources="le audio output"/>
+ <route type="mix" sink="le audio input"
+ sources="BLE Headset In"/>
<route type="mix" sink="BLE Speaker Out"
sources="le audio output"/>
</routes>
diff --git a/services/audiopolicy/config/le_audio_policy_configuration.xml b/services/audiopolicy/config/le_audio_policy_configuration.xml
index a3dc72b..dcdd805 100644
--- a/services/audiopolicy/config/le_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/le_audio_policy_configuration.xml
@@ -7,13 +7,20 @@
samplingRates="8000,16000,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
</mixPort>
+ <mixPort name="le audio input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT,AUDIO_FORMAT_PCM_24_BIT,AUDIO_FORMAT_PCM_32_BIT"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
</mixPorts>
<devicePorts>
<devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
<devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
+ <devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
</devicePorts>
<routes>
<route type="mix" sink="BLE Headset Out" sources="le audio output"/>
<route type="mix" sink="BLE Speaker Out" sources="le audio output"/>
+ <route type="mix" sink="le audio input" sources="BLE Headset In"/>
</routes>
</module>
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index cc2d8e8..3833a4c 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -246,9 +246,11 @@
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
// close unused outputs after device disconnection or direct outputs that have
// been opened by checkOutputsForDevice() to query dynamic parameters
- if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
- (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
- (desc->mDirectOpenCount == 0))) {
+ if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE)
+ || (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+ (desc->mDirectOpenCount == 0))
+ || (((desc->mFlags & AUDIO_OUTPUT_FLAG_VIRTUALIZER_STAGE) != 0) &&
+ (desc != mVirtualizerStageOutput))) {
clearAudioSourcesForOutput(output);
closeOutput(output);
}
@@ -925,6 +927,36 @@
return profile;
}
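+// Return an output profile flagged AUDIO_OUTPUT_FLAG_VIRTUALIZER_STAGE whose supported devices
+// are currently available and match the requested devices (if any); when forOpening is true,
+// the profile must also be able to open a new IO handle.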
+sp<IOProfile> AudioPolicyManager::getVirtualizerStageOutputProfile(
+ const audio_config_t *config __unused, const AudioDeviceTypeAddrVector &devices,
+ bool forOpening) const
+{
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& curProfile : hwModule->getOutputProfiles()) {
+ if (curProfile->getFlags() != AUDIO_OUTPUT_FLAG_VIRTUALIZER_STAGE) {
+ continue;
+ }
+ // reject profiles not corresponding to a device currently available
+ DeviceVector supportedDevices = curProfile->getSupportedDevices();
+ if (!mAvailableOutputDevices.containsAtLeastOne(supportedDevices)) {
+ continue;
+ }
+ if (!devices.empty()) {
+ if (supportedDevices.getDevicesFromDeviceTypeAddrVec(devices).size()
+ != devices.size()) {
+ continue;
+ }
+ }
+ if (forOpening && !curProfile->canOpenNewIo()) {
+ continue;
+ }
+ ALOGV("%s found profile %s", __func__, curProfile->getName().c_str());
+ return curProfile;
+ }
+ }
+ return nullptr;
+}
+
audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
{
DeviceVector devices = mEngine->getOutputDevicesForStream(stream, false /*fromCache*/);
@@ -1094,7 +1126,7 @@
*output = AUDIO_IO_HANDLE_NONE;
if (!msdDevices.isEmpty()) {
- *output = getOutputForDevices(msdDevices, session, *stream, config, flags);
+ *output = getOutputForDevices(msdDevices, session, resultAttr, config, flags);
if (*output != AUDIO_IO_HANDLE_NONE && setMsdOutputPatches(&outputDevices) == NO_ERROR) {
ALOGV("%s() Using MSD devices %s instead of devices %s",
__func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
@@ -1103,7 +1135,7 @@
}
}
if (*output == AUDIO_IO_HANDLE_NONE) {
- *output = getOutputForDevices(outputDevices, session, *stream, config,
+ *output = getOutputForDevices(outputDevices, session, resultAttr, config,
flags, resultAttr->flags & AUDIO_FLAG_MUTE_HAPTIC);
}
if (*output == AUDIO_IO_HANDLE_NONE) {
@@ -1265,7 +1297,8 @@
// all MSD patches to prioritize this request over any active output on MSD.
releaseMsdOutputPatches(devices);
- status_t status = outputDesc->open(config, devices, stream, flags, output);
+ status_t status =
+ outputDesc->open(config, nullptr /* mixerConfig */, devices, stream, flags, output);
// only accept an output with the requested parameters
if (status != NO_ERROR ||
@@ -1300,7 +1333,7 @@
audio_io_handle_t AudioPolicyManager::getOutputForDevices(
const DeviceVector &devices,
audio_session_t session,
- audio_stream_type_t stream,
+ const audio_attributes_t *attr,
const audio_config_t *config,
audio_output_flags_t *flags,
bool forceMutingHaptic)
@@ -1322,6 +1355,9 @@
if ((*flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
+
+ audio_stream_type_t stream = mEngine->getStreamTypeForAttributes(*attr);
+
// only allow deep buffering for music stream type
if (stream != AUDIO_STREAM_MUSIC) {
*flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
@@ -1341,6 +1377,11 @@
ALOGV("Set VoIP and Direct output flags for PCM format");
}
+ if (mVirtualizerStageOutput != nullptr
+ && canBeVirtualized(attr, config, devices.toTypeAddrVector())) {
+ return mVirtualizerStageOutput->mIoHandle;
+ }
+
audio_config_t directConfig = *config;
directConfig.channel_mask = channelMask;
status_t status = openDirectOutput(stream, session, &directConfig, *flags, devices, &output);
@@ -2123,8 +2164,9 @@
audio_port_handle_t *portId)
{
ALOGV("%s() source %d, sampling rate %d, format %#x, channel mask %#x, session %d, "
- "flags %#x attributes=%s", __func__, attr->source, config->sample_rate,
- config->format, config->channel_mask, session, flags, toString(*attr).c_str());
+ "flags %#x attributes=%s requested device ID %d",
+ __func__, attr->source, config->sample_rate, config->format, config->channel_mask,
+ session, flags, toString(*attr).c_str(), *selectedDeviceId);
status_t status = NO_ERROR;
audio_source_t halInputSource;
@@ -4802,6 +4844,136 @@
return source;
}
+bool AudioPolicyManager::canBeVirtualized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const
+{
+ // The caller can have the audio attributes criteria ignored by either passing a null ptr or
+ // the AUDIO_ATTRIBUTES_INITIALIZER value.
+ // If attributes are specified, current policy is to only allow virtualization for media
+ // and game usages.
+ if (attr != nullptr && *attr != AUDIO_ATTRIBUTES_INITIALIZER &&
+ attr->usage != AUDIO_USAGE_MEDIA && attr->usage != AUDIO_USAGE_GAME) {
+ return false;
+ }
+
+ // The caller can have the devices criteria ignored by passing an empty vector, and
+ // getVirtualizerStageOutputProfile() will ignore the devices when looking for a match.
+ // Otherwise an output profile supporting a virtualizer stage effect that can be routed
+ // to the specified devices must exist.
+ sp<IOProfile> profile =
+ getVirtualizerStageOutputProfile(config, devices, false /*forOpening*/);
+ if (profile == nullptr) {
+ return false;
+ }
+
+ // The caller can have the audio config criteria ignored by either passing a null ptr or
+ // the AUDIO_CONFIG_INITIALIZER value.
+ // If an audio config is specified, current policy is to only allow virtualization for
+ // 5.1, 7.1 and 7.1.4 audio.
+ // If the virtualizer stage output is already opened, only channel masks included in the
+ // virtualizer stage output mixer channel mask are allowed.
+ if (config != nullptr && *config != AUDIO_CONFIG_INITIALIZER) {
+ if (config->channel_mask != AUDIO_CHANNEL_OUT_5POINT1
+ && config->channel_mask != AUDIO_CHANNEL_OUT_7POINT1
+ && config->channel_mask != AUDIO_CHANNEL_OUT_7POINT1POINT4) {
+ return false;
+ }
+ if (mVirtualizerStageOutput != nullptr) {
+ if ((config->channel_mask & mVirtualizerStageOutput->mMixerChannelMask)
+ != config->channel_mask) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
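+// Invalidate the streams of clients whose attributes, config and routed devices can now be
+// virtualized, so that they are re-routed (typically to the virtualizer stage output).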
+void AudioPolicyManager::checkVirtualizerClientRoutes() {
+ std::set<audio_stream_type_t> streamsToInvalidate;
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ const sp<SwAudioOutputDescriptor>& outputDescriptor = mOutputs[i];
+ for (const sp<TrackClientDescriptor>& client : outputDescriptor->getClientIterable()) {
+ audio_attributes_t attr = client->attributes();
+ DeviceVector devices = mEngine->getOutputDevicesForAttributes(attr, nullptr, false);
+ AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
+ audio_config_base_t clientConfig = client->config();
+ audio_config_t config = audio_config_initializer(&clientConfig);
+ if (canBeVirtualized(&attr, &config, devicesTypeAddress)) {
+ streamsToInvalidate.insert(client->stream());
+ }
+ }
+ }
+
+ for (audio_stream_type_t stream : streamsToInvalidate) {
+ mpClientInterface->invalidateStream(stream);
+ }
+}
+
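+// Open a virtualizer stage output (at most one may exist) for the given mixer config and
+// attributes, then invalidate matching client streams so they can migrate to it.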
+status_t AudioPolicyManager::getVirtualizerStageOutput(const audio_config_base_t *mixerConfig,
+ const audio_attributes_t *attr,
+ audio_io_handle_t *output) {
+ *output = AUDIO_IO_HANDLE_NONE;
+
+ if (mVirtualizerStageOutput != nullptr) {
+ return INVALID_OPERATION;
+ }
+
+ DeviceVector devices = mEngine->getOutputDevicesForAttributes(*attr, nullptr, false);
+ AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
+ audio_config_t *configPtr = nullptr;
+ audio_config_t config;
+ if (mixerConfig != nullptr) {
+ config = audio_config_initializer(mixerConfig);
+ configPtr = &config;
+ }
+ if (!canBeVirtualized(attr, configPtr, devicesTypeAddress)) {
+ return BAD_VALUE;
+ }
+
+ sp<IOProfile> profile =
+ getVirtualizerStageOutputProfile(configPtr, devicesTypeAddress, true /*forOpening*/);
+ if (profile == nullptr) {
+ return BAD_VALUE;
+ }
+
+ mVirtualizerStageOutput = new SwAudioOutputDescriptor(profile, mpClientInterface);
+ status_t status = mVirtualizerStageOutput->open(nullptr, mixerConfig, devices,
+ mEngine->getStreamTypeForAttributes(*attr),
+ AUDIO_OUTPUT_FLAG_VIRTUALIZER_STAGE, output);
+ if (status != NO_ERROR) {
+ ALOGV("%s failed opening output: status %d, output %d", __func__, status, *output);
+ if (*output != AUDIO_IO_HANDLE_NONE) {
+ mVirtualizerStageOutput->close();
+ }
+ mVirtualizerStageOutput.clear();
+ *output = AUDIO_IO_HANDLE_NONE;
+ return status;
+ }
+
+ checkVirtualizerClientRoutes();
+
+ addOutput(*output, mVirtualizerStageOutput);
+ mPreviousOutputs = mOutputs;
+ mpClientInterface->onAudioPortListUpdate();
+
+ ALOGV("%s returns new virtualizer stage output %d", __func__, *output);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::releaseVirtualizerStageOutput(audio_io_handle_t output) {
+ if (mVirtualizerStageOutput == nullptr) {
+ return INVALID_OPERATION;
+ }
+ if (mVirtualizerStageOutput->mIoHandle != output) {
+ return BAD_VALUE;
+ }
+ closeOutput(output);
+ mVirtualizerStageOutput.clear();
+ return NO_ERROR;
+}
+
// ----------------------------------------------------------------------------
// AudioPolicyManager
// ----------------------------------------------------------------------------
@@ -4851,6 +5023,8 @@
ALOGE("could not load audio policy configuration file, setting defaults");
getConfig().setDefault();
}
+ //TODO: b/193496180 use virtualizer stage flag at audio HAL when available
+ getConfig().convertVirtualizerStageFlag();
}
status_t AudioPolicyManager::initialize() {
@@ -4990,7 +5164,8 @@
sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
mpClientInterface);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = outputDesc->open(nullptr, DeviceVector(supportedDevice),
+ status_t status = outputDesc->open(nullptr /* halConfig */, nullptr /* mixerConfig */,
+ DeviceVector(supportedDevice),
AUDIO_STREAM_DEFAULT,
AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
@@ -5012,7 +5187,8 @@
outProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
mPrimaryOutput = outputDesc;
}
- if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+ if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_DIRECT) != 0
+ || (outProfile->getFlags() & AUDIO_OUTPUT_FLAG_VIRTUALIZER_STAGE) != 0 ) {
outputDesc->close();
} else {
addOutput(output, outputDesc);
@@ -5917,13 +6093,13 @@
return (stream1 == stream2);
}
-audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
+DeviceTypeSet AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
// By checking the range of stream before calling getStrategy, we avoid
// getOutputDevicesForStream's behavior for invalid streams.
// engine's getOutputDevicesForStream would fallback on its default behavior (most probably
// device for music stream), but we want to return the empty set.
if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
- return AUDIO_DEVICE_NONE;
+ return DeviceTypeSet{};
}
DeviceVector activeDevices;
DeviceVector devices;
@@ -5954,8 +6130,7 @@
devices.merge(mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER));
devices.remove(speakerSafeDevices);
}
- // FIXME: use DeviceTypeSet when Java layer is ready for it.
- return deviceTypesToBitMask(devices.types());
+ return devices.types();
}
status_t AudioPolicyManager::getDevicesForAttributes(
@@ -6995,7 +7170,7 @@
}
sp<SwAudioOutputDescriptor> desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = desc->open(nullptr, devices,
+ status_t status = desc->open(nullptr /* halConfig */, nullptr /* mixerConfig */, devices,
AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
return nullptr;
@@ -7025,7 +7200,7 @@
config.offload_info.channel_mask = config.channel_mask;
config.offload_info.format = config.format;
- status = desc->open(&config, devices,
+ status = desc->open(&config, nullptr /* mixerConfig */, devices,
AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
return nullptr;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 98f96d1..5cafef8 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -193,7 +193,7 @@
}
// return the enabled output devices for the given stream type
- virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+ virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
virtual status_t getDevicesForAttributes(
const audio_attributes_t &attributes,
@@ -356,6 +356,16 @@
BAD_VALUE : NO_ERROR;
}
+ virtual bool canBeVirtualized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const;
+
+ virtual status_t getVirtualizerStageOutput(const audio_config_base_t *config,
+ const audio_attributes_t *attr,
+ audio_io_handle_t *output);
+
+ virtual status_t releaseVirtualizerStageOutput(audio_io_handle_t output);
+
bool isCallScreenModeSupported() override;
void onNewAudioModulesAvailable() override;
@@ -797,6 +807,8 @@
sp<SwAudioOutputDescriptor> mPrimaryOutput; // primary output descriptor
// list of descriptors for outputs currently opened
+ sp<SwAudioOutputDescriptor> mVirtualizerStageOutput;
+
SwAudioOutputCollection mOutputs;
// copy of mOutputs before setDeviceConnectionState() opens new outputs
// reset to mOutputs when updateDevicesAndOutputs() is called.
@@ -933,7 +945,7 @@
audio_io_handle_t getOutputForDevices(
const DeviceVector &devices,
audio_session_t session,
- audio_stream_type_t stream,
+ const audio_attributes_t *attr,
const audio_config_t *config,
audio_output_flags_t *flags,
bool forceMutingHaptic = false);
@@ -948,6 +960,13 @@
audio_output_flags_t flags,
const DeviceVector &devices,
audio_io_handle_t *output);
+
+ sp<IOProfile> getVirtualizerStageOutputProfile(const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices,
+ bool forOpening) const;
+
+ void checkVirtualizerClientRoutes();
+
/**
* @brief getInputForDevice selects an input handle for a given input device and
* requester context
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index cd53073..c8db45b 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -40,7 +40,8 @@
status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags)
@@ -55,14 +56,18 @@
media::OpenOutputResponse response;
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
+ request.halConfig = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(*halConfig, false /*isInput*/));
+ request.mixerConfig = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(*mixerConfig, false /*isInput*/));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
status_t status = af->openOutput(request, &response);
if (status == OK) {
*output = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(response.output));
- *config = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioConfig_audio_config_t(response.config));
+ *halConfig = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioConfig_audio_config_t(response.config, false /*isInput*/));
*latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(response.latencyMs));
}
return status;
@@ -131,7 +136,8 @@
media::OpenInputRequest request;
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
request.input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(*input));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
+ request.config = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(*config, true /*isInput*/));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 9987252..4f920b1 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -99,13 +99,13 @@
const media::AudioDevice& deviceAidl,
media::AudioPolicyDeviceState stateAidl,
const std::string& deviceNameAidl,
- media::audio::common::AudioFormat encodedFormatAidl) {
+ const media::AudioFormatDescription& encodedFormatAidl) {
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
audio_policy_dev_state_t state = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioPolicyDeviceState_audio_policy_dev_state_t(stateAidl));
audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioFormat_audio_format_t(encodedFormatAidl));
+ aidl2legacy_AudioFormatDescription_audio_format_t(encodedFormatAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
@@ -131,7 +131,7 @@
Status AudioPolicyService::getDeviceConnectionState(const media::AudioDevice& deviceAidl,
media::AudioPolicyDeviceState* _aidl_return) {
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
if (mAudioPolicyManager == NULL) {
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_policy_dev_state_t_AudioPolicyDeviceState(
@@ -149,11 +149,11 @@
Status AudioPolicyService::handleDeviceConfigChange(
const media::AudioDevice& deviceAidl,
const std::string& deviceNameAidl,
- media::audio::common::AudioFormat encodedFormatAidl) {
+ const media::AudioFormatDescription& encodedFormatAidl) {
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl.type));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioFormat_audio_format_t(encodedFormatAidl));
+ aidl2legacy_AudioFormatDescription_audio_format_t(encodedFormatAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
@@ -292,7 +292,7 @@
aidl2legacy_int32_t_audio_session_t(sessionAidl));
audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
audio_config_t config = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfig_audio_config_t(configAidl));
+ aidl2legacy_AudioConfig_audio_config_t(configAidl, false /*isInput*/));
audio_output_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_output_flags_t_mask(flagsAidl));
audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
@@ -523,7 +523,7 @@
audio_session_t session = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_session_t(sessionAidl));
audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl));
+ aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl, true /*isInput*/));
audio_input_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_input_flags_t_mask(flagsAidl));
audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
@@ -925,12 +925,13 @@
}
Status AudioPolicyService::setStreamVolumeIndex(media::AudioStreamType streamAidl,
- int32_t deviceAidl, int32_t indexAidl) {
+ const media::AudioDeviceDescription& deviceAidl,
+ int32_t indexAidl) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
@@ -949,11 +950,12 @@
}
Status AudioPolicyService::getStreamVolumeIndex(media::AudioStreamType streamAidl,
- int32_t deviceAidl, int32_t* _aidl_return) {
+ const media::AudioDeviceDescription& deviceAidl,
+ int32_t* _aidl_return) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
int index;
if (mAudioPolicyManager == NULL) {
@@ -971,12 +973,13 @@
}
Status AudioPolicyService::setVolumeIndexForAttributes(
- const media::AudioAttributesInternal& attrAidl, int32_t deviceAidl, int32_t indexAidl) {
+ const media::AudioAttributesInternal& attrAidl,
+ const media::AudioDeviceDescription& deviceAidl, int32_t indexAidl) {
audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
AudioValidator::validateAudioAttributes(attributes, "169572641")));
@@ -993,11 +996,12 @@
}
Status AudioPolicyService::getVolumeIndexForAttributes(
- const media::AudioAttributesInternal& attrAidl, int32_t deviceAidl, int32_t* _aidl_return) {
+ const media::AudioAttributesInternal& attrAidl,
+ const media::AudioDeviceDescription& deviceAidl, int32_t* _aidl_return) {
audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
int index;
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
AudioValidator::validateAudioAttributes(attributes, "169572641")));
@@ -1075,14 +1079,14 @@
//audio policy: use audio_device_t appropriately
-Status AudioPolicyService::getDevicesForStream(media::AudioStreamType streamAidl,
- int32_t* _aidl_return) {
+Status AudioPolicyService::getDevicesForStream(
+ media::AudioStreamType streamAidl,
+ std::vector<media::AudioDeviceDescription>* _aidl_return) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- legacy2aidl_audio_devices_t_int32_t(AUDIO_DEVICE_NONE));
+ *_aidl_return = std::vector<media::AudioDeviceDescription>{};
return Status::ok();
}
if (mAudioPolicyManager == NULL) {
@@ -1091,7 +1095,9 @@
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- legacy2aidl_audio_devices_t_int32_t(mAudioPolicyManager->getDevicesForStream(stream)));
+ convertContainer<std::vector<media::AudioDeviceDescription>>(
+ mAudioPolicyManager->getDevicesForStream(stream),
+ legacy2aidl_audio_devices_t_AudioDeviceDescription));
return Status::ok();
}
@@ -1427,7 +1433,7 @@
const media::AudioAttributesInternal& attributesAidl,
bool* _aidl_return) {
audio_config_base_t config = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl));
+ aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl, false /*isInput*/));
audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(attributesAidl));
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
@@ -1602,7 +1608,7 @@
_aidl_return->ioHandle = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
_aidl_return->device = VALUE_OR_RETURN_BINDER_STATUS(
- legacy2aidl_audio_devices_t_int32_t(device));
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(device));
return Status::ok();
}
@@ -1802,13 +1808,14 @@
}
-Status AudioPolicyService::getStreamVolumeDB(media::AudioStreamType streamAidl, int32_t indexAidl,
- int32_t deviceAidl, float* _aidl_return) {
+Status AudioPolicyService::getStreamVolumeDB(
+ media::AudioStreamType streamAidl, int32_t indexAidl,
+ const media::AudioDeviceDescription& deviceAidl, float* _aidl_return) {
audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
int index = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int>(indexAidl));
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_int32_t_audio_devices_t(deviceAidl));
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
@@ -1820,7 +1827,7 @@
}
Status AudioPolicyService::getSurroundFormats(media::Int* count,
- std::vector<media::audio::common::AudioFormat>* formats,
+ std::vector<media::AudioFormatDescription>* formats,
std::vector<bool>* formatsEnabled) {
unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
convertIntegral<unsigned int>(count->value));
@@ -1842,7 +1849,8 @@
numSurroundFormatsReq = std::min(numSurroundFormats, numSurroundFormatsReq);
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
convertRange(surroundFormats.get(), surroundFormats.get() + numSurroundFormatsReq,
- std::back_inserter(*formats), legacy2aidl_audio_format_t_AudioFormat)));
+ std::back_inserter(*formats),
+ legacy2aidl_audio_format_t_AudioFormatDescription)));
formatsEnabled->insert(
formatsEnabled->begin(),
surroundFormatsEnabled.get(),
@@ -1852,7 +1860,7 @@
}
Status AudioPolicyService::getReportedSurroundFormats(
- media::Int* count, std::vector<media::audio::common::AudioFormat>* formats) {
+ media::Int* count, std::vector<media::AudioFormatDescription>* formats) {
unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
convertIntegral<unsigned int>(count->value));
if (numSurroundFormats > MAX_ITEMS_PER_LIST) {
@@ -1872,13 +1880,14 @@
numSurroundFormatsReq = std::min(numSurroundFormats, numSurroundFormatsReq);
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
convertRange(surroundFormats.get(), surroundFormats.get() + numSurroundFormatsReq,
- std::back_inserter(*formats), legacy2aidl_audio_format_t_AudioFormat)));
+ std::back_inserter(*formats),
+ legacy2aidl_audio_format_t_AudioFormatDescription)));
count->value = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<uint32_t>(numSurroundFormats));
return Status::ok();
}
Status AudioPolicyService::getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<media::audio::common::AudioFormat>* _aidl_return) {
+ std::vector<media::AudioFormatDescription>* _aidl_return) {
std::vector<audio_format_t> formats;
if (mAudioPolicyManager == NULL) {
@@ -1889,16 +1898,16 @@
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
mAudioPolicyManager->getHwOffloadEncodingFormatsSupportedForA2DP(&formats)));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- convertContainer<std::vector<media::audio::common::AudioFormat>>(
+ convertContainer<std::vector<media::AudioFormatDescription>>(
formats,
- legacy2aidl_audio_format_t_AudioFormat));
+ legacy2aidl_audio_format_t_AudioFormatDescription));
return Status::ok();
}
Status AudioPolicyService::setSurroundFormatEnabled(
- media::audio::common::AudioFormat audioFormatAidl, bool enabled) {
+ const media::AudioFormatDescription& audioFormatAidl, bool enabled) {
audio_format_t audioFormat = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioFormat_audio_format_t(audioFormatAidl));
+ aidl2legacy_AudioFormatDescription_audio_format_t(audioFormatAidl));
if (mAudioPolicyManager == NULL) {
return binderStatusFromStatusT(NO_INIT);
}
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 4d0e1f1..7313893 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -127,6 +127,7 @@
loadAudioPolicyManager();
mAudioPolicyManager = mCreateAudioPolicyManager(mAudioPolicyClient);
}
+
// load audio processing modules
sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects();
sp<UidPolicy> uidPolicy = new UidPolicy(this);
@@ -139,6 +140,8 @@
}
uidPolicy->registerSelf();
sensorPrivacyPolicy->registerSelf();
+
+ AudioSystem::audioPolicyReady();
}
void AudioPolicyService::unloadAudioPolicyManager()
@@ -443,13 +446,15 @@
media::RecordClientInfo clientInfoAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_record_client_info_t_RecordClientInfo(*clientInfo));
media::AudioConfigBase clientConfigAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_config_base_t_AudioConfigBase(*clientConfig));
+ legacy2aidl_audio_config_base_t_AudioConfigBase(
+ *clientConfig, true /*isInput*/));
std::vector<media::EffectDescriptor> clientEffectsAidl = VALUE_OR_RETURN_STATUS(
convertContainer<std::vector<media::EffectDescriptor>>(
clientEffects,
legacy2aidl_effect_descriptor_t_EffectDescriptor));
media::AudioConfigBase deviceConfigAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_config_base_t_AudioConfigBase(*deviceConfig));
+ legacy2aidl_audio_config_base_t_AudioConfigBase(
+ *deviceConfig, true /*isInput*/));
std::vector<media::EffectDescriptor> effectsAidl = VALUE_OR_RETURN_STATUS(
convertContainer<std::vector<media::EffectDescriptor>>(
effects,
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 3b77ed8..0b76936 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -71,13 +71,13 @@
const media::AudioDevice& device,
media::AudioPolicyDeviceState state,
const std::string& deviceName,
- media::audio::common::AudioFormat encodedFormat) override;
+ const media::AudioFormatDescription& encodedFormat) override;
binder::Status getDeviceConnectionState(const media::AudioDevice& device,
media::AudioPolicyDeviceState* _aidl_return) override;
binder::Status handleDeviceConfigChange(
const media::AudioDevice& device,
const std::string& deviceName,
- media::audio::common::AudioFormat encodedFormat) override;
+ const media::AudioFormatDescription& encodedFormat) override;
binder::Status setPhoneState(media::AudioMode state, int32_t uid) override;
binder::Status setForceUse(media::AudioPolicyForceUse usage,
media::AudioPolicyForcedConfig config) override;
@@ -103,22 +103,27 @@
binder::Status releaseInput(int32_t portId) override;
binder::Status initStreamVolume(media::AudioStreamType stream, int32_t indexMin,
int32_t indexMax) override;
- binder::Status setStreamVolumeIndex(media::AudioStreamType stream, int32_t device,
+ binder::Status setStreamVolumeIndex(media::AudioStreamType stream,
+ const media::AudioDeviceDescription& device,
int32_t index) override;
- binder::Status getStreamVolumeIndex(media::AudioStreamType stream, int32_t device,
+ binder::Status getStreamVolumeIndex(media::AudioStreamType stream,
+ const media::AudioDeviceDescription& device,
int32_t* _aidl_return) override;
binder::Status setVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
- int32_t device, int32_t index) override;
+ const media::AudioDeviceDescription& device,
+ int32_t index) override;
binder::Status getVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
- int32_t device, int32_t* _aidl_return) override;
+ const media::AudioDeviceDescription& device,
+ int32_t* _aidl_return) override;
binder::Status getMaxVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
int32_t* _aidl_return) override;
binder::Status getMinVolumeIndexForAttributes(const media::AudioAttributesInternal& attr,
int32_t* _aidl_return) override;
binder::Status getStrategyForStream(media::AudioStreamType stream,
int32_t* _aidl_return) override;
- binder::Status getDevicesForStream(media::AudioStreamType stream,
- int32_t* _aidl_return) override;
+ binder::Status getDevicesForStream(
+ media::AudioStreamType stream,
+ std::vector<media::AudioDeviceDescription>* _aidl_return) override;
binder::Status getDevicesForAttributes(const media::AudioAttributesEx& attr,
std::vector<media::AudioDevice>* _aidl_return) override;
binder::Status getOutputForEffect(const media::EffectDescriptor& desc,
@@ -187,16 +192,17 @@
binder::Status stopAudioSource(int32_t portId) override;
binder::Status setMasterMono(bool mono) override;
binder::Status getMasterMono(bool* _aidl_return) override;
- binder::Status getStreamVolumeDB(media::AudioStreamType stream, int32_t index, int32_t device,
+ binder::Status getStreamVolumeDB(media::AudioStreamType stream, int32_t index,
+ const media::AudioDeviceDescription& device,
float* _aidl_return) override;
binder::Status getSurroundFormats(media::Int* count,
- std::vector<media::audio::common::AudioFormat>* formats,
+ std::vector<media::AudioFormatDescription>* formats,
std::vector<bool>* formatsEnabled) override;
binder::Status getReportedSurroundFormats(
- media::Int* count, std::vector<media::audio::common::AudioFormat>* formats) override;
+ media::Int* count, std::vector<media::AudioFormatDescription>* formats) override;
binder::Status getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<media::audio::common::AudioFormat>* _aidl_return) override;
- binder::Status setSurroundFormatEnabled(media::audio::common::AudioFormat audioFormat,
+ std::vector<media::AudioFormatDescription>* _aidl_return) override;
+ binder::Status setSurroundFormatEnabled(const media::AudioFormatDescription& audioFormat,
bool enabled) override;
binder::Status setAssistantUid(int32_t uid) override;
binder::Status setA11yServicesUids(const std::vector<int32_t>& uids) override;
@@ -659,7 +665,8 @@
// The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags);
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index b296fb0..8fbe8b2 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -25,7 +25,7 @@
"libmedia_helper",
"libutils",
"libxml2",
- "libpermission",
+ "framework-permission-aidl-cpp",
"libbinder",
],
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
index f7b0565..84b40d2 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
@@ -37,7 +37,8 @@
status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t * /*config*/,
+ audio_config_t * /*halConfig*/,
+ audio_config_base_t * /*mixerConfig*/,
const sp<DeviceDescriptorBase>& /*device*/,
uint32_t * /*latencyMs*/,
audio_output_flags_t /*flags*/) override {
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index 1384864..4e0735b 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -30,7 +30,8 @@
}
status_t openOutput(audio_module_handle_t /*module*/,
audio_io_handle_t* /*output*/,
- audio_config_t* /*config*/,
+ audio_config_t* /*halConfig*/,
+ audio_config_base_t* /*mixerConfig*/,
const sp<DeviceDescriptorBase>& /*device*/,
uint32_t* /*latencyMs*/,
audio_output_flags_t /*flags*/) override { return NO_INIT; }
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index dc101ff..7b138a6 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1867,6 +1867,27 @@
CameraServiceProxyWrapper::logOpen(cameraId, facing, clientPackageName,
effectiveApiLevel, isNdk, openLatencyMs);
+ {
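+ // If an injection request is pending for this camera, its client now exists, so
+ // complete the deferred injection here.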
+ Mutex::Autolock lock(mInjectionParametersLock);
+ if (cameraId == mInjectionInternalCamId && mInjectionInitPending) {
+ mInjectionInitPending = false;
+ status_t res = NO_ERROR;
+ auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
+ if (clientDescriptor != nullptr) {
+ BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+ res = baseClientPtr->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+ if (res != OK) {
+ mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+ }
+ } else {
+ ALOGE("%s: Internal camera ID = %s 's client does not exist!",
+ __FUNCTION__, mInjectionInternalCamId.string());
+ res = NO_INIT;
+ mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+ }
+ }
+ }
+
return ret;
}
@@ -2527,7 +2548,7 @@
const String16& externalCamId,
const sp<ICameraInjectionCallback>& callback,
/*out*/
- sp<hardware::camera2::ICameraInjectionSession>* cameraInjectionSession) {
+ sp<ICameraInjectionSession>* cameraInjectionSession) {
ATRACE_CALL();
if (!checkCallingPermission(sCameraInjectExternalCameraPermission)) {
@@ -2544,18 +2565,30 @@
__FUNCTION__, String8(packageName).string(),
String8(internalCamId).string(), String8(externalCamId).string());
- binder::Status ret = binder::Status::ok();
- // TODO: Implement the injection camera function.
- // ret = internalInjectCamera(...);
- // if(!ret.isOk()) {
- // mInjectionStatusListener->notifyInjectionError(...);
- // return ret;
- // }
-
+ {
+ Mutex::Autolock lock(mInjectionParametersLock);
+ mInjectionInternalCamId = String8(internalCamId);
+ mInjectionExternalCamId = String8(externalCamId);
+ status_t res = NO_ERROR;
+ auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
+ // If the client already exists, we can directly connect to the camera device through the
+ // client's injectCamera(); otherwise we need to wait until the client is established
+ // (i.e. until connectHelper() has run) before injecting the camera into the camera device.
+ if (clientDescriptor != nullptr) {
+ mInjectionInitPending = false;
+ BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+ res = baseClientPtr->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+ if (res != OK) {
+ mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
+ }
+ } else {
+ mInjectionInitPending = true;
+ }
+ }
mInjectionStatusListener->addListener(callback);
*cameraInjectionSession = new CameraInjectionSession(this);
- return ret;
+ return binder::Status::ok();
}
void CameraService::removeByClient(const BasicClient* client) {
@@ -3820,22 +3853,62 @@
}
void CameraService::InjectionStatusListener::notifyInjectionError(
- int errorCode) {
- Mutex::Autolock lock(mListenerLock);
+ String8 injectedCamId, status_t err) {
if (mCameraInjectionCallback == nullptr) {
ALOGW("InjectionStatusListener: mCameraInjectionCallback == nullptr");
return;
}
- mCameraInjectionCallback->onInjectionError(errorCode);
+
+ switch (err) {
+ case -ENODEV:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+ ALOGE("No camera device with ID \"%s\" currently available!",
+ injectedCamId.string());
+ break;
+ case -EBUSY:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+ ALOGE("Higher-priority client using camera, ID \"%s\" currently unavailable!",
+ injectedCamId.string());
+ break;
+ case DEAD_OBJECT:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+ ALOGE("Camera ID \"%s\" object is dead!",
+ injectedCamId.string());
+ break;
+ case INVALID_OPERATION:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_SESSION);
+ ALOGE("Camera ID \"%s\" encountered an operating or internal error!",
+ injectedCamId.string());
+ break;
+ case UNKNOWN_TRANSACTION:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_UNSUPPORTED);
+ ALOGE("Camera ID \"%s\" method doesn't support!",
+ injectedCamId.string());
+ break;
+ default:
+ mCameraInjectionCallback->onInjectionError(
+ ICameraInjectionCallback::ERROR_INJECTION_INVALID_ERROR);
+ ALOGE("Unexpected error %s (%d) opening camera \"%s\"!",
+ strerror(-err), err, injectedCamId.string());
+ }
}
void CameraService::InjectionStatusListener::binderDied(
const wp<IBinder>& /*who*/) {
- Mutex::Autolock lock(mListenerLock);
ALOGV("InjectionStatusListener: ICameraInjectionCallback has died");
auto parent = mParent.promote();
if (parent != nullptr) {
- parent->stopInjectionImpl();
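+ // The injection callback has died: stop any active injection on the client and clear
+ // the stored injection parameters.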
+ auto clientDescriptor = parent->mActiveClientManager.get(parent->mInjectionInternalCamId);
+ if (clientDescriptor != nullptr) {
+ BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+ baseClientPtr->stopInjection();
+ }
+ parent->clearInjectionParameters();
}
}
@@ -3851,7 +3924,20 @@
return STATUS_ERROR(ICameraInjectionCallback::ERROR_INJECTION_SERVICE,
"Camera service encountered error");
}
- parent->stopInjectionImpl();
+
+ status_t res = NO_ERROR;
+ auto clientDescriptor = parent->mActiveClientManager.get(parent->mInjectionInternalCamId);
+ if (clientDescriptor != nullptr) {
+ BasicClient* baseClientPtr = clientDescriptor->getValue().get();
+ res = baseClientPtr->stopInjection();
+ if (res != OK) {
+ ALOGE("CameraInjectionSession: Failed to stop the injection camera!"
+ " ret != NO_ERROR: %d", res);
+ return STATUS_ERROR(ICameraInjectionCallback::ERROR_INJECTION_SESSION,
+ "Camera session encountered error");
+ }
+ }
+ parent->clearInjectionParameters();
return binder::Status::ok();
}
@@ -4560,10 +4646,14 @@
return mode;
}
-void CameraService::stopInjectionImpl() {
+void CameraService::clearInjectionParameters() {
+ {
+ Mutex::Autolock lock(mInjectionParametersLock);
+ mInjectionInitPending = true;
+ mInjectionInternalCamId = "";
+ }
+ mInjectionExternalCamId = "";
mInjectionStatusListener->removeListener();
-
- // TODO: Implement the stop injection function.
}
}; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 9021170..ca184a8 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -320,6 +320,14 @@
// Set/reset camera mute
virtual status_t setCameraMute(bool enabled) = 0;
+ // The injection camera session to replace the internal camera
+ // session.
+ virtual status_t injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) = 0;
+
+ // Stop the injection camera and restore to internal camera session.
+ virtual status_t stopInjection() = 0;
+
protected:
BasicClient(const sp<CameraService>& cameraService,
const sp<IBinder>& remoteCallback,
@@ -1191,7 +1199,7 @@
void addListener(const sp<hardware::camera2::ICameraInjectionCallback>& callback);
void removeListener();
- void notifyInjectionError(int errorCode);
+ void notifyInjectionError(String8 injectedCamId, status_t err);
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder>& who);
@@ -1218,7 +1226,15 @@
wp<CameraService> mParent;
};
- void stopInjectionImpl();
+ void clearInjectionParameters();
+
+ // This is the existing camera id being replaced.
+ String8 mInjectionInternalCamId;
+ // This is the external camera Id replacing the internalId.
+ String8 mInjectionExternalCamId;
+ bool mInjectionInitPending = true;
+ // Guard mInjectionInternalCamId and mInjectionInitPending.
+ Mutex mInjectionParametersLock;
};
} // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index ef15f2d..652842b 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -330,5 +330,19 @@
CaptureResultExtras());
}
+status_t CameraOfflineSessionClient::injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) {
+ ALOGV("%s: This client doesn't support the injection camera. injectedCamId: %s providerPtr: %p",
+ __FUNCTION__, injectedCamId.string(), manager.get());
+
+ return OK;
+}
+
+status_t CameraOfflineSessionClient::stopInjection() {
+ ALOGV("%s: This client doesn't support the injection camera.", __FUNCTION__);
+
+ return OK;
+}
+
// ----------------------------------------------------------------------------
}; // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index b219a4c..b5238b8 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -98,6 +98,9 @@
void notifyPrepared(int streamId) override;
void notifyRequestQueueEmpty() override;
void notifyRepeatingRequestError(long lastFrameNumber) override;
+ status_t injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) override;
+ status_t stopInjection() override;
private:
mutable Mutex mBinderSerializationLock;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 13d044a..1147e23 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -413,6 +413,17 @@
mRemoteCallback.clear();
}
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) {
+ return mDevice->injectCamera(injectedCamId, manager);
+}
+
+template <typename TClientBase>
+status_t Camera2ClientBase<TClientBase>::stopInjection() {
+ return mDevice->stopInjection();
+}
+
template class Camera2ClientBase<CameraService::Client>;
template class Camera2ClientBase<CameraDeviceClientBase>;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 6246f7b..b593bfa 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -114,6 +114,10 @@
mutable Mutex mRemoteCallbackLock;
} mSharedCameraCallbacks;
+ status_t injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) override;
+ status_t stopInjection() override;
+
protected:
// The PID provided in the constructor call
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 85b0cc2..3c95ed3 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -427,6 +427,18 @@
*/
void setImageDumpMask(int mask) { mImageDumpMask = mask; }
+ /**
+ * The injection camera session to replace the internal camera
+ * session.
+ */
+ virtual status_t injectCamera(const String8& injectedCamId,
+ sp<CameraProviderManager> manager) = 0;
+
+ /**
+ * Stop the injection camera and restore to internal camera session.
+ */
+ virtual status_t stopInjection() = 0;
+
protected:
bool mImageDumpMask = 0;
};
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index d572d57..877b683 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2835,17 +2835,28 @@
mRequestBufferSM.onStreamsConfigured();
}
+ // First call injectCamera() and then run configureStreamsLocked() case:
// Since the streams configuration of the injection camera is based on the internal camera, we
- // must wait until the internal camera configure streams before calling injectCamera() to
+ // must wait until the internal camera configures its streams before running the injection job to
// configure the injection streams.
if (mInjectionMethods->isInjecting()) {
- ALOGV("%s: Injection camera %s: Start to configure streams.",
+ ALOGD("%s: Injection camera %s: Start to configure streams.",
__FUNCTION__, mInjectionMethods->getInjectedCamId().string());
res = mInjectionMethods->injectCamera(config, bufferSizes);
if (res != OK) {
ALOGE("Can't finish inject camera process!");
return res;
}
+ } else {
+ // First run configureStreamsLocked() and then call injectCamera() case:
+ // If the stream configuration has been completed and the camera device is active, but the
+ // injection camera has not been injected yet, we need to store the stream configuration of
+ // the internal camera (because the stream configuration of the injection camera is based
+ // on the internal camera). When injection occurs later, this configuration can be used by
+ // the injection camera.
+ ALOGV("%s: The stream configuration is complete and the camera device is active, but the"
+ " injection camera has not been injected yet.", __FUNCTION__);
+ mInjectionMethods->storeInjectionConfig(config, bufferSizes);
}
return OK;
@@ -6584,6 +6595,13 @@
ALOGI("%s Injection camera: injectedCamId = %s", __FUNCTION__, injectedCamId.string());
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
+ // When the camera device is active, injectCamera() and stopInjection() call
+ // internalPauseAndWaitLocked() and internalResumeLocked(), which in turn call
+ // mStatusChanged.waitRelative(mLock, timeout) in waitUntilStateThenRelock(). That wait
+ // requires the mutex "mLock" to already be held, so take "Mutex::Autolock l(mLock)" here
+ // before calling waitUntilStateThenRelock().
+ Mutex::Autolock l(mLock);
status_t res = NO_ERROR;
if (mInjectionMethods->isInjecting()) {
@@ -6606,16 +6624,25 @@
return res;
}
- camera3::camera_stream_configuration injectionConfig;
- std::vector<uint32_t> injectionBufferSizes;
- mInjectionMethods->getInjectionConfig(&injectionConfig, &injectionBufferSizes);
// When the second display of android is cast to the remote device, and the opened camera is
// also cast to the second display, in this case, because the camera has configured the streams
// at this time, we can directly call injectCamera() to replace the internal camera with
// injection camera.
- if (mOperatingMode >= 0 && injectionConfig.num_streams > 0
- && injectionBufferSizes.size() > 0) {
- ALOGV("%s: The opened camera is directly cast to the remote device.", __FUNCTION__);
+ if (mInjectionMethods->isStreamConfigCompleteButNotInjected()) {
+ ALOGD("%s: The opened camera is directly cast to the remote device.", __FUNCTION__);
+
+ camera3::camera_stream_configuration injectionConfig;
+ std::vector<uint32_t> injectionBufferSizes;
+ mInjectionMethods->getInjectionConfig(&injectionConfig, &injectionBufferSizes);
+ if (mOperatingMode < 0 || injectionConfig.num_streams <= 0
+ || injectionBufferSizes.size() <= 0) {
+ ALOGE("Failed to inject camera due to abandoned configuration! "
+ "mOperatingMode: %d injectionConfig.num_streams: %d "
+ "injectionBufferSizes.size(): %zu", mOperatingMode,
+ injectionConfig.num_streams, injectionBufferSizes.size());
+ return DEAD_OBJECT;
+ }
+
res = mInjectionMethods->injectCamera(
injectionConfig, injectionBufferSizes);
if (res != OK) {
@@ -6630,6 +6657,7 @@
status_t Camera3Device::stopInjection() {
ALOGI("%s: Injection camera: stopInjection", __FUNCTION__);
Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
return mInjectionMethods->stopInjection();
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 733ed15..310ee27 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -1371,25 +1371,34 @@
// when device is IDLE and request thread is paused.
status_t injectCamera(
camera3::camera_stream_configuration& injectionConfig,
- std::vector<uint32_t>& injectionBufferSizes);
+ const std::vector<uint32_t>& injectionBufferSizes);
// Stop the injection camera and switch back to backup hal interface.
status_t stopInjection();
bool isInjecting();
+ bool isStreamConfigCompleteButNotInjected();
+
const String8& getInjectedCamId() const;
void getInjectionConfig(/*out*/ camera3::camera_stream_configuration* injectionConfig,
/*out*/ std::vector<uint32_t>* injectionBufferSizes);
+ // When the stream configuration is complete and the camera device is active, but the
+ // injection camera has not yet been injected, the stream configuration of the internal
+ // camera is stored first.
+ void storeInjectionConfig(
+ const camera3::camera_stream_configuration& injectionConfig,
+ const std::vector<uint32_t>& injectionBufferSizes);
+
private:
// Configure the streams of injection camera, it need wait until the
// output streams are created and configured to the original camera before
// proceeding.
status_t injectionConfigureStreams(
camera3::camera_stream_configuration& injectionConfig,
- std::vector<uint32_t>& injectionBufferSizes);
+ const std::vector<uint32_t>& injectionBufferSizes);
// Disconnect the injection camera and delete the hal interface.
void injectionDisconnectImpl();
@@ -1407,6 +1416,17 @@
// Generated injection camera hal interface.
sp<HalInterface> mInjectedCamHalInterface;
+ // Backup of the original camera hal result FMQ.
+ std::unique_ptr<ResultMetadataQueue> mBackupResultMetadataQueue;
+
+ // FMQ writes the result for the injection camera. Must be guarded by
+ // mProcessCaptureResultLock.
+ std::unique_ptr<ResultMetadataQueue> mInjectionResultMetadataQueue;
+
+ // The flag indicates that the stream configuration is complete, the camera device is
+ // active, but the injection camera has not yet been injected.
+ bool mIsStreamConfigCompleteButNotInjected = false;
+
// Copy the configuration of the internal camera.
camera3::camera_stream_configuration mInjectionConfig;
diff --git a/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
index f145dac..7026934 100644
--- a/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DeviceInjectionMethods.cpp
@@ -86,7 +86,7 @@
return DEAD_OBJECT;
}
- std::unique_ptr<ResultMetadataQueue>& resQueue = parent->mResultMetadataQueue;
+ std::unique_ptr<ResultMetadataQueue>& resQueue = mInjectionResultMetadataQueue;
auto resultQueueRet = session->getCaptureResultMetadataQueue(
[&resQueue](const auto& descriptor) {
resQueue = std::make_unique<ResultMetadataQueue>(descriptor);
@@ -127,10 +127,8 @@
status_t Camera3Device::Camera3DeviceInjectionMethods::injectCamera(
camera3::camera_stream_configuration& injectionConfig,
- std::vector<uint32_t>& injectionBufferSizes) {
+ const std::vector<uint32_t>& injectionBufferSizes) {
status_t res = NO_ERROR;
- mInjectionConfig = injectionConfig;
- mInjectionBufferSizes = injectionBufferSizes;
if (mInjectedCamHalInterface == nullptr) {
ALOGE("%s: mInjectedCamHalInterface does not exist!", __FUNCTION__);
@@ -148,7 +146,6 @@
if (parent->mStatus == STATUS_ACTIVE) {
ALOGV("%s: Let the device be IDLE and the request thread is paused",
__FUNCTION__);
- parent->mPauseStateNotify = true;
res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
if (res != OK) {
ALOGE("%s: Can't pause captures to inject camera!", __FUNCTION__);
@@ -188,7 +185,7 @@
ALOGV("%s: Restarting activity to inject camera", __FUNCTION__);
// Reuse current operating mode and session parameters for new stream
// config.
- parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+ parent->internalResumeLocked();
}
return OK;
@@ -196,6 +193,11 @@
status_t Camera3Device::Camera3DeviceInjectionMethods::stopInjection() {
status_t res = NO_ERROR;
+ mIsStreamConfigCompleteButNotInjected = false;
+ if (mInjectionConfig.streams != nullptr) {
+ delete [] mInjectionConfig.streams;
+ mInjectionConfig.streams = nullptr;
+ }
sp<Camera3Device> parent = mParent.promote();
if (parent == nullptr) {
@@ -208,7 +210,6 @@
if (parent->mStatus == STATUS_ACTIVE) {
ALOGV("%s: Let the device be IDLE and the request thread is paused",
__FUNCTION__);
- parent->mPauseStateNotify = true;
res = parent->internalPauseAndWaitLocked(maxExpectedDuration);
if (res != OK) {
ALOGE("%s: Can't pause captures to stop injection!", __FUNCTION__);
@@ -229,7 +230,7 @@
ALOGV("%s: Restarting activity to stop injection", __FUNCTION__);
// Reuse current operating mode and session parameters for new stream
// config.
- parent->internalUpdateStatusLocked(STATUS_ACTIVE);
+ parent->internalResumeLocked();
}
return OK;
@@ -243,6 +244,10 @@
}
}
+bool Camera3Device::Camera3DeviceInjectionMethods::isStreamConfigCompleteButNotInjected() {
+ return mIsStreamConfigCompleteButNotInjected;
+}
+
const String8& Camera3Device::Camera3DeviceInjectionMethods::getInjectedCamId()
const {
return mInjectedCamId;
@@ -260,10 +265,26 @@
*injectionBufferSizes = mInjectionBufferSizes;
}
+void Camera3Device::Camera3DeviceInjectionMethods::storeInjectionConfig(
+ const camera3::camera_stream_configuration& injectionConfig,
+ const std::vector<uint32_t>& injectionBufferSizes) {
+ mIsStreamConfigCompleteButNotInjected = true;
+ if (mInjectionConfig.streams != nullptr) {
+ delete [] mInjectionConfig.streams;
+ mInjectionConfig.streams = nullptr;
+ }
+ mInjectionConfig = injectionConfig;
+ mInjectionConfig.streams =
+ (android::camera3::camera_stream_t **) new camera_stream_t*[injectionConfig.num_streams];
+ for (size_t i = 0; i < injectionConfig.num_streams; i++) {
+ mInjectionConfig.streams[i] = injectionConfig.streams[i];
+ }
+ mInjectionBufferSizes = injectionBufferSizes;
+}
status_t Camera3Device::Camera3DeviceInjectionMethods::injectionConfigureStreams(
camera3::camera_stream_configuration& injectionConfig,
- std::vector<uint32_t>& injectionBufferSizes) {
+ const std::vector<uint32_t>& injectionBufferSizes) {
ATRACE_CALL();
status_t res = NO_ERROR;
@@ -326,7 +347,6 @@
mInjectedCamId.string());
auto rc = parent->mPreparerThread->resume();
-
if (rc != OK) {
ALOGE("%s: Injection camera %s: Preparer thread failed to resume!",
__FUNCTION__, mInjectedCamId.string());
@@ -380,10 +400,18 @@
return INVALID_OPERATION;
}
- if (keepBackup && mBackupHalInterface == nullptr) {
- mBackupHalInterface = parent->mInterface;
- } else if (!keepBackup) {
+ if (keepBackup) {
+ if (mBackupHalInterface == nullptr) {
+ mBackupHalInterface = parent->mInterface;
+ }
+ if (mBackupResultMetadataQueue == nullptr) {
+ mBackupResultMetadataQueue = std::move(parent->mResultMetadataQueue);
+ parent->mResultMetadataQueue = std::move(mInjectionResultMetadataQueue);
+ }
+ } else {
mBackupHalInterface = nullptr;
+ parent->mResultMetadataQueue = std::move(mBackupResultMetadataQueue);
+ mBackupResultMetadataQueue = nullptr;
}
parent->mInterface = newHalInterface;
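As a side note, the keepBackup branch above follows a simple save/restore pattern for owning pointers: move the original into a backup slot when switching to the injected resource, and move it back when restoring. Below is a minimal sketch of that pattern with std::unique_ptr; the type and method names are illustrative stand-ins, not the Camera3Device types.
```
#include <memory>
#include <utility>

struct ResultQueue { /* stand-in for the result metadata FMQ */ };

class QueueHolder {
public:
    // Switch to the injected queue, saving the original once.
    void useInjected(std::unique_ptr<ResultQueue> injected) {
        if (mBackup == nullptr) {
            mBackup = std::move(mCurrent);   // keep the original for later
        }
        mCurrent = std::move(injected);      // the injected queue is now active
    }

    // Switch back to the original queue and clear the backup slot.
    void restoreOriginal() {
        mCurrent = std::move(mBackup);
        mBackup = nullptr;
    }

private:
    std::unique_ptr<ResultQueue> mCurrent = std::make_unique<ResultQueue>();
    std::unique_ptr<ResultQueue> mBackup;
};
```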
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 0204d49..2f4d669 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -82,7 +82,7 @@
camera_stream::width, camera_stream::height,
camera_stream::format, camera_stream::data_space);
lines.appendFormat(" Max size: %zu\n", mMaxSize);
- lines.appendFormat(" Combined usage: %" PRIu64 ", max HAL buffers: %d\n",
+ lines.appendFormat(" Combined usage: 0x%" PRIx64 ", max HAL buffers: %d\n",
mUsage | consumerUsage, camera_stream::max_buffers);
if (strlen(camera_stream::physical_camera_id) > 0) {
lines.appendFormat(" Physical camera id: %s\n", camera_stream::physical_camera_id);
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index 696b967..d10e339 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -7,7 +7,7 @@
default_applicable_licenses: ["frameworks_av_services_mediacodec_license"],
}
-cc_library_shared {
+cc_library {
name: "libmedia_codecserviceregistrant",
vendor_available: true,
srcs: [
diff --git a/services/mediacodec/registrant/fuzzer/Android.bp b/services/mediacodec/registrant/fuzzer/Android.bp
new file mode 100644
index 0000000..22c10a8
--- /dev/null
+++ b/services/mediacodec/registrant/fuzzer/Android.bp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_fuzz {
+ name: "codecServiceRegistrant_fuzzer",
+ srcs: [
+ "codecServiceRegistrant_fuzzer.cpp",
+ ],
+ static_libs: [
+ "libmedia_codecserviceregistrant",
+ ],
+ header_libs: [
+ "libmedia_headers",
+ ],
+ defaults: [
+ "libcodec2-hidl-defaults",
+ ],
+ fuzz_config: {
+ cc: [
+ "android-media-fuzzing-reports@google.com",
+ ],
+ componentid: 155276,
+ },
+}
diff --git a/services/mediacodec/registrant/fuzzer/README.md b/services/mediacodec/registrant/fuzzer/README.md
new file mode 100644
index 0000000..0ffa063
--- /dev/null
+++ b/services/mediacodec/registrant/fuzzer/README.md
@@ -0,0 +1,56 @@
+# Fuzzer for libmedia_codecserviceregistrant
+
+## Plugin Design Considerations
+The fuzzer plugin for libmedia_codecserviceregistrant is designed based on the understanding of the library and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+libmedia_codecserviceregistrant supports the following parameters:
+1. C2String (parameter name: `c2String`)
+2. Width (parameter name: `width`)
+3. Height (parameter name: `height`)
+4. SamplingRate (parameter name: `samplingRate`)
+5. Channels (parameter name: `channels`)
+6. Stream (parameter name: `stream`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `c2String` |`String` | Value obtained from FuzzedDataProvider|
+| `width` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `height` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `samplingRate` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `channels` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `stream` |`UINT32_MIN` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+
+This also ensures that the plugin is always deterministic for any given input.
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the libmedia_codecserviceregistrant module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the codecServiceRegistrant_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+ $ mm -j$(nproc) codecServiceRegistrant_fuzzer
+```
+#### Steps to run
+
+To run on device
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/${TARGET_ARCH}/codecServiceRegistrant_fuzzer/codecServiceRegistrant_fuzzer
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/services/mediacodec/registrant/fuzzer/codecServiceRegistrant_fuzzer.cpp b/services/mediacodec/registrant/fuzzer/codecServiceRegistrant_fuzzer.cpp
new file mode 100644
index 0000000..c8a8d8a
--- /dev/null
+++ b/services/mediacodec/registrant/fuzzer/codecServiceRegistrant_fuzzer.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "../CodecServiceRegistrant.cpp"
+#include "fuzzer/FuzzedDataProvider.h"
+#include <C2Config.h>
+#include <C2Param.h>
+
+using namespace std;
+
+constexpr char kServiceName[] = "software";
+
+class CodecServiceRegistrantFuzzer {
+public:
+ void process(const uint8_t *data, size_t size);
+ ~CodecServiceRegistrantFuzzer() {
+ delete mH2C2;
+ if (mInputSize) {
+ delete mInputSize;
+ }
+ if (mSampleRateInfo) {
+ delete mSampleRateInfo;
+ }
+ if (mChannelCountInfo) {
+ delete mChannelCountInfo;
+ }
+ }
+
+private:
+ void initH2C2ComponentStore();
+ void invokeH2C2ComponentStore();
+ void invokeConfigSM();
+ void invokeQuerySM();
+ H2C2ComponentStore *mH2C2 = nullptr;
+ C2StreamPictureSizeInfo::input *mInputSize = nullptr;
+ C2StreamSampleRateInfo::output *mSampleRateInfo = nullptr;
+ C2StreamChannelCountInfo::output *mChannelCountInfo = nullptr;
+ C2Param::Index mIndex = C2StreamProfileLevelInfo::output::PARAM_TYPE;
+ C2StreamFrameRateInfo::output mFrameRate;
+ FuzzedDataProvider *mFDP = nullptr;
+};
+
+void CodecServiceRegistrantFuzzer::initH2C2ComponentStore() {
+ using namespace ::android::hardware::media::c2;
+ shared_ptr<C2ComponentStore> store =
+ android::GetCodec2PlatformComponentStore();
+ if (!store) {
+ return;
+ }
+ android::sp<V1_1::IComponentStore> storeV1_1 =
+ new V1_1::utils::ComponentStore(store);
+ if (storeV1_1->registerAsService(string(kServiceName)) != android::OK) {
+ return;
+ }
+ string const preferredStoreName = string(kServiceName);
+ sp<IComponentStore> preferredStore =
+ IComponentStore::getService(preferredStoreName.c_str());
+ mH2C2 = new H2C2ComponentStore(preferredStore);
+}
+
+void CodecServiceRegistrantFuzzer::invokeConfigSM() {
+ vector<C2Param *> configParams;
+ uint32_t width = mFDP->ConsumeIntegral<uint32_t>();
+ uint32_t height = mFDP->ConsumeIntegral<uint32_t>();
+ uint32_t samplingRate = mFDP->ConsumeIntegral<uint32_t>();
+ uint32_t channels = mFDP->ConsumeIntegral<uint32_t>();
+ if (mFDP->ConsumeBool()) {
+ mInputSize = new C2StreamPictureSizeInfo::input(0u, width, height);
+ configParams.push_back(mInputSize);
+ } else {
+ if (mFDP->ConsumeBool()) {
+ mSampleRateInfo = new C2StreamSampleRateInfo::output(0u, samplingRate);
+ configParams.push_back(mSampleRateInfo);
+ }
+ if (mFDP->ConsumeBool()) {
+ mChannelCountInfo = new C2StreamChannelCountInfo::output(0u, channels);
+ configParams.push_back(mChannelCountInfo);
+ }
+ }
+ vector<unique_ptr<C2SettingResult>> failures;
+ mH2C2->config_sm(configParams, &failures);
+}
+
+void CodecServiceRegistrantFuzzer::invokeQuerySM() {
+ vector<C2Param *> stackParams;
+ vector<C2Param::Index> heapParamIndices;
+ if (mFDP->ConsumeBool()) {
+ stackParams = {};
+ heapParamIndices = {};
+ } else {
+ uint32_t stream = mFDP->ConsumeIntegral<uint32_t>();
+ mFrameRate.setStream(stream);
+ stackParams.push_back(&mFrameRate);
+ heapParamIndices.push_back(mIndex);
+ }
+ vector<unique_ptr<C2Param>> heapParams;
+ mH2C2->query_sm(stackParams, heapParamIndices, &heapParams);
+}
+
+void CodecServiceRegistrantFuzzer::invokeH2C2ComponentStore() {
+ initH2C2ComponentStore();
+ shared_ptr<C2Component> component;
+ shared_ptr<C2ComponentInterface> interface;
+ string c2String = mFDP->ConsumeRandomLengthString();
+ mH2C2->createComponent(c2String, &component);
+ mH2C2->createInterface(c2String, &interface);
+ invokeConfigSM();
+ invokeQuerySM();
+
+ vector<shared_ptr<C2ParamDescriptor>> params;
+ mH2C2->querySupportedParams_nb(&params);
+
+ C2StoreIonUsageInfo usageInfo;
+ std::vector<C2FieldSupportedValuesQuery> query = {
+ C2FieldSupportedValuesQuery::Possible(
+ C2ParamField::Make(usageInfo, usageInfo.usage)),
+ C2FieldSupportedValuesQuery::Possible(
+ C2ParamField::Make(usageInfo, usageInfo.capacity)),
+ };
+ mH2C2->querySupportedValues_sm(query);
+
+ mH2C2->getName();
+ mH2C2->getParamReflector();
+ mH2C2->listComponents();
+ shared_ptr<C2GraphicBuffer> src;
+ shared_ptr<C2GraphicBuffer> dst;
+ mH2C2->copyBuffer(src, dst);
+}
+
+void CodecServiceRegistrantFuzzer::process(const uint8_t *data, size_t size) {
+ mFDP = new FuzzedDataProvider(data, size);
+ invokeH2C2ComponentStore();
+ /** RegisterCodecServices is called here to improve code coverage */
+ /** as currently it is not called by codecServiceRegistrant */
+ RegisterCodecServices();
+ delete mFDP;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ CodecServiceRegistrantFuzzer codecServiceRegistrantFuzzer;
+ codecServiceRegistrantFuzzer.process(data, size);
+ return 0;
+}
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 5989181..0351d2d 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -148,7 +148,8 @@
"statsd_mediaparser.cpp",
"statsd_nuplayer.cpp",
"statsd_recorder.cpp",
- "StringUtils.cpp"
+ "StringUtils.cpp",
+ "ValidateId.cpp",
],
proto: {
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 11ec993..a0dcb55 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -28,6 +28,7 @@
#include "AudioTypes.h" // string to int conversions
#include "MediaMetricsService.h" // package info
#include "StringUtils.h"
+#include "ValidateId.h"
#define PROP_AUDIO_ANALYTICS_CLOUD_ENABLED "persist.audio.analytics.cloud.enabled"
@@ -562,7 +563,7 @@
const auto flagsForStats = types::lookup<types::INPUT_FLAG, short_enum_type_t>(flags);
const auto sourceForStats = types::lookup<types::SOURCE_TYPE, short_enum_type_t>(source);
// Android S
- const auto logSessionIdForStats = stringutils::sanitizeLogSessionId(logSessionId);
+ const auto logSessionIdForStats = ValidateId::get()->validateId(logSessionId);
LOG(LOG_LEVEL) << "key:" << key
<< " id:" << id
@@ -717,7 +718,7 @@
types::lookup<types::TRACK_TRAITS, short_enum_type_t>(traits);
const auto usageForStats = types::lookup<types::USAGE, short_enum_type_t>(usage);
// Android S
- const auto logSessionIdForStats = stringutils::sanitizeLogSessionId(logSessionId);
+ const auto logSessionIdForStats = ValidateId::get()->validateId(logSessionId);
LOG(LOG_LEVEL) << "key:" << key
<< " id:" << id
diff --git a/services/mediametrics/LruSet.h b/services/mediametrics/LruSet.h
new file mode 100644
index 0000000..1f0ab60
--- /dev/null
+++ b/services/mediametrics/LruSet.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <list>
+#include <sstream>
+#include <unordered_map>
+
+namespace android::mediametrics {
+
+/**
+ * LruSet keeps a set of the last "Size" elements added or accessed.
+ *
+ * (Lru stands for least-recently-used eviction policy).
+ *
+ * Runs in O(1) time for add, remove, and check. Internally implemented
+ * with an unordered_map and a list. In order to remove elements,
+ * a list iterator is stored in the unordered_map
+ * (noting that std::list::erase() contractually
+ * does not affect iterators other than the one erased).
+ */
+
+template <typename T>
+class LruSet {
+ const size_t mMaxSize;
+ std::list<T> mAccessOrder; // front is the most recent, back is the oldest.
+ // item T with its access order iterator.
+ std::unordered_map<T, typename std::list<T>::iterator> mMap;
+
+public:
+ /**
+ * Constructs a LruSet which checks whether the element was
+ * accessed or added recently.
+ *
+ * The parameter maxSize is used to cap growth of LruSet;
+ * eviction follows a least-recently-used (LRU) policy.
+ * If maxSize is zero, the LruSet contains no elements
+ * and check() always returns false.
+ *
+ * \param maxSize the maximum number of elements that are tracked.
+ */
+ explicit LruSet(size_t maxSize) : mMaxSize(maxSize) {}
+
+ /**
+ * Returns the number of entries in the LruSet.
+ *
+ * This is a number between 0 and maxSize.
+ */
+ size_t size() const {
+ return mMap.size();
+ }
+
+ /** Clears the container contents. */
+ void clear() {
+ mMap.clear();
+ mAccessOrder.clear();
+ }
+
+ /** Returns a string dump of the last n entries. */
+ std::string dump(size_t n) const {
+ std::stringstream ss;
+ auto it = mAccessOrder.cbegin();
+ for (size_t i = 0; i < n && it != mAccessOrder.cend(); ++i) {
+ ss << *it++ << "\n";
+ }
+ return ss.str();
+ }
+
+ /** Adds a new item to the set. */
+ void add(const T& t) {
+ if (mMaxSize == 0) return;
+ auto it = mMap.find(t);
+ if (it != mMap.end()) { // already exists.
+ mAccessOrder.erase(it->second); // move-to-front on the chronologically ordered list.
+ } else if (mAccessOrder.size() >= mMaxSize) {
+ const T last = mAccessOrder.back();
+ mAccessOrder.pop_back();
+ mMap.erase(last);
+ }
+ mAccessOrder.push_front(t);
+ mMap[t] = mAccessOrder.begin();
+ }
+
+ /**
+ * Removes an item from the set.
+ *
+ * \param t item to be removed.
+ * \return false if the item doesn't exist.
+ */
+ bool remove(const T& t) {
+ auto it = mMap.find(t);
+ if (it == mMap.end()) return false;
+ mAccessOrder.erase(it->second);
+ mMap.erase(it);
+ return true;
+ }
+
+ /** Returns true if t is present (and moves the access order of t to the front). */
+ bool check(const T& t) { // not const, as it adjusts the least-recently-used order.
+ auto it = mMap.find(t);
+ if (it == mMap.end()) return false;
+ mAccessOrder.erase(it->second);
+ mAccessOrder.push_front(it->first);
+ it->second = mAccessOrder.begin();
+ return true;
+ }
+};
+
+} // namespace android::mediametrics
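To make the intended usage concrete, here is a small, hypothetical snippet exercising the LruSet added above; the include path and main() harness are illustrative, and the behavior follows the implementation in this file.
```
#include "LruSet.h"

#include <cassert>
#include <string>

int main() {
    android::mediametrics::LruSet<std::string> recentIds(2 /* maxSize */);

    recentIds.add("id-A");
    recentIds.add("id-B");
    recentIds.check("id-A");          // refreshes "id-A", so "id-B" is now oldest
    recentIds.add("id-C");            // evicts "id-B" (least recently used)

    assert(recentIds.check("id-A"));
    assert(!recentIds.check("id-B")); // evicted
    assert(recentIds.check("id-C"));
    assert(recentIds.size() == 2);
    return 0;
}
```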
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index 1d64878..35e0ae4 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include "MediaMetricsService.h"
+#include "ValidateId.h"
#include "iface_statsd.h"
#include <pwd.h> //getpwuid
@@ -204,6 +205,15 @@
// now attach either the item or its dup to a const shared pointer
std::shared_ptr<const mediametrics::Item> sitem(release ? item : item->dup());
+ // register log session ids with singleton.
+ if (startsWith(item->getKey(), "metrics.manager")) {
+ std::string logSessionId;
+ if (item->get("logSessionId", &logSessionId)
+ && mediametrics::stringutils::isLogSessionId(logSessionId.c_str())) {
+ mediametrics::ValidateId::get()->registerId(logSessionId);
+ }
+ }
+
(void)mAudioAnalytics.submit(sitem, isTrusted);
(void)dump2Statsd(sitem, mStatsdLog); // failure should be logged in function.
@@ -309,6 +319,9 @@
result << "-- some lines may be truncated --\n";
}
+ result << "LogSessionId:\n"
+ << mediametrics::ValidateId::get()->dump();
+
// Dump the statsd atoms we sent out.
result << "Statsd atoms:\n"
<< mStatsdLog->dumpToString(" " /* prefix */,
diff --git a/services/mediametrics/TransactionLog.h b/services/mediametrics/TransactionLog.h
index 0ca4639..fd42518 100644
--- a/services/mediametrics/TransactionLog.h
+++ b/services/mediametrics/TransactionLog.h
@@ -158,7 +158,7 @@
++it) {
if (ll <= 0) break;
if (prefix != nullptr && !startsWith(it->first, prefix)) break;
- auto [s, l] = dumpMapTimeItem(it->second, ll - 1, sinceNs, prefix);
+ std::tie(s, l) = dumpMapTimeItem(it->second, ll - 1, sinceNs, prefix);
if (l == 0) continue; // don't show empty groups (due to sinceNs).
ss << " " << it->first << "\n" << s;
ll -= l + 1;
diff --git a/services/mediametrics/ValidateId.cpp b/services/mediametrics/ValidateId.cpp
new file mode 100644
index 0000000..0cc8593
--- /dev/null
+++ b/services/mediametrics/ValidateId.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaMetricsService" // not ValidateId
+#include <utils/Log.h>
+
+#include "ValidateId.h"
+
+namespace android::mediametrics {
+
+std::string ValidateId::dump() const
+{
+ std::stringstream ss;
+ ss << "Entries:" << mIdSet.size() << " InvalidIds:" << mInvalidIds << "\n";
+ ss << mIdSet.dump(10);
+ return ss.str();
+}
+
+void ValidateId::registerId(const std::string& id)
+{
+ if (id.empty()) return;
+ if (!mediametrics::stringutils::isLogSessionId(id.c_str())) {
+ ALOGW("%s: rejecting malformed id %s", __func__, id.c_str());
+ return;
+ }
+ ALOGV("%s: registering %s", __func__, id.c_str());
+ mIdSet.add(id);
+}
+
+const std::string& ValidateId::validateId(const std::string& id)
+{
+ static const std::string empty{};
+ if (id.empty()) return empty;
+
+ // reject because the id is malformed
+ if (!mediametrics::stringutils::isLogSessionId(id.c_str())) {
+ ALOGW("%s: rejecting malformed id %s", __func__, id.c_str());
+ ++mInvalidIds;
+ return empty;
+ }
+
+ // reject because the id is unregistered
+ if (!mIdSet.check(id)) {
+ ALOGW("%s: rejecting unregistered id %s", __func__, id.c_str());
+ ++mInvalidIds;
+ return empty;
+ }
+ return id;
+}
+
+} // namespace android::mediametrics
diff --git a/services/mediametrics/ValidateId.h b/services/mediametrics/ValidateId.h
new file mode 100644
index 0000000..166b39a
--- /dev/null
+++ b/services/mediametrics/ValidateId.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "LruSet.h"
+#include "StringUtils.h"
+#include "Wrap.h"
+
+namespace android::mediametrics {
+
+/*
+ * ValidateId is used to check whether the log session id is properly formed
+ * and has been registered (i.e. from the Java MediaMetricsManagerService).
+ *
+ * The default memory window to track registered ids is set to SINGLETON_LRU_SET_SIZE.
+ *
+ * This class is not thread-safe, but the singleton returned by get() uses LockWrap<>
+ * to ensure thread-safety.
+ */
+class ValidateId {
+ mediametrics::LruSet<std::string> mIdSet;
+ size_t mInvalidIds = 0; // count invalid ids encountered.
+public:
+ /** Creates a ValidateId object with a memory window of the given size. */
+ explicit ValidateId(size_t size) : mIdSet{size} {}
+
+ /** Returns a string dump of recent contents and stats. */
+ std::string dump() const;
+
+ /**
+ * Registers the id string.
+ *
+ * If the id string is malformed (not 16 Base64Url chars), it is ignored.
+ * Once registered, calling validateId() will return the id (instead of the empty string).
+ * ValidateId may "forget" the id after not encountering it within the past N ids,
+ * where N is the size set in the constructor.
+ *
+ * \param id string (from MediaMetricsManagerService).
+ */
+ void registerId(const std::string& id);
+
+ /**
+ * Returns the empty string if the id string is malformed (not 16 Base64Url chars)
+ * or if the id string has not been seen among the most recently tracked ids
+ * (bounded by the size set in the constructor); otherwise it returns the same id parameter.
+ *
+ * \param id string (to be sent to statsd).
+ */
+ const std::string& validateId(const std::string& id);
+
+ /** Singleton set size */
+ static inline constexpr size_t SINGLETON_LRU_SET_SIZE = 2000;
+
+ using LockedValidateId = mediametrics::LockWrap<ValidateId>;
+ /**
+ * Returns a singleton locked ValidateId object that is thread-safe using LockWrap<>.
+ *
+ * The singleton ValidateId object is created with size SINGLETON_LRU_SET_SIZE (on first call).
+ */
+ static inline LockedValidateId& get() {
+ static LockedValidateId privateSet{SINGLETON_LRU_SET_SIZE};
+ return privateSet;
+ }
+};
+
+} // namespace android::mediametrics
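A brief sketch of how the two halves of ValidateId are meant to be used together, mirroring the MediaMetricsService and statsd_* call sites in this change; the wrapper function below is hypothetical and exists only to show the flow.
```
#include "ValidateId.h"

#include <string>

// Hypothetical helper: register an id delivered on the trusted
// "metrics.manager" path, then validate an id reported later by an app.
std::string idForStatsd(const std::string& trustedId,
                        const std::string& reportedId) {
    using android::mediametrics::ValidateId;

    // Registration: normally done when a metrics.manager item arrives.
    ValidateId::get()->registerId(trustedId);

    // Validation: returns reportedId only if it is a well-formed, registered
    // log session id; otherwise the empty string is substituted.
    return ValidateId::get()->validateId(reportedId);
}
```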
diff --git a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
index 8b0b479..06ab16e 100644
--- a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
+++ b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
@@ -48,6 +48,7 @@
void invokeAudioAnalytics(const uint8_t *data, size_t size);
void invokeTimedAction(const uint8_t *data, size_t size);
void process(const uint8_t *data, size_t size);
+ std::atomic_int mValue = 0;
};
void MediaMetricsServiceFuzzer::invokeStartsWith(const uint8_t *data, size_t size) {
@@ -342,11 +343,10 @@
void MediaMetricsServiceFuzzer::invokeTimedAction(const uint8_t *data, size_t size) {
FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
android::mediametrics::TimedAction timedAction;
- std::atomic_int value = 0;
while (fdp.remaining_bytes()) {
timedAction.postIn(std::chrono::seconds(fdp.ConsumeIntegral<int32_t>()),
- [&value] { ++value; });
+ [this] { ++mValue; });
timedAction.size();
}
}
diff --git a/services/mediametrics/statsd_audiorecord.cpp b/services/mediametrics/statsd_audiorecord.cpp
index 41efcaa..c53b6f3 100644
--- a/services/mediametrics/statsd_audiorecord.cpp
+++ b/services/mediametrics/statsd_audiorecord.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -143,8 +143,7 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiorecord.logSessionId", &logSessionId);
- const auto log_session_id =
- mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+ const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED,
diff --git a/services/mediametrics/statsd_audiotrack.cpp b/services/mediametrics/statsd_audiotrack.cpp
index 59627ae..707effd 100644
--- a/services/mediametrics/statsd_audiotrack.cpp
+++ b/services/mediametrics/statsd_audiotrack.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -137,8 +137,7 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiotrack.logSessionId", &logSessionId);
- const auto log_session_id =
- mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+ const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED,
diff --git a/services/mediametrics/statsd_codec.cpp b/services/mediametrics/statsd_codec.cpp
index 46cbdc8..8581437 100644
--- a/services/mediametrics/statsd_codec.cpp
+++ b/services/mediametrics/statsd_codec.cpp
@@ -34,7 +34,7 @@
#include "cleaner.h"
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -228,7 +228,7 @@
std::string sessionId;
if (item->getString("android.media.mediacodec.log-session-id", &sessionId)) {
- sessionId = mediametrics::stringutils::sanitizeLogSessionId(sessionId);
+ sessionId = mediametrics::ValidateId::get()->validateId(sessionId);
metrics_proto.set_log_session_id(sessionId);
}
AStatsEvent_writeString(event, codec.c_str());
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index bcf2e0a..a8bfeaa 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -86,7 +86,7 @@
std::string log_session_id;
if (item->getString("android.media.mediaextractor.logSessionId", &log_session_id)) {
- log_session_id = mediametrics::stringutils::sanitizeLogSessionId(log_session_id);
+ log_session_id = mediametrics::ValidateId::get()->validateId(log_session_id);
metrics_proto.set_log_session_id(log_session_id);
}
diff --git a/services/mediametrics/statsd_mediaparser.cpp b/services/mediametrics/statsd_mediaparser.cpp
index 921b320..67ca874b 100644
--- a/services/mediametrics/statsd_mediaparser.cpp
+++ b/services/mediametrics/statsd_mediaparser.cpp
@@ -31,7 +31,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/enums/stats/mediametrics/mediametrics.pb.h"
#include "iface_statsd.h"
@@ -81,7 +81,7 @@
std::string logSessionId;
item->getString("android.media.mediaparser.logSessionId", &logSessionId);
- logSessionId = mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+ logSessionId = mediametrics::ValidateId::get()->validateId(logSessionId);
int result = android::util::stats_write(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED,
timestamp_nanos,
diff --git a/services/mediametrics/statsd_recorder.cpp b/services/mediametrics/statsd_recorder.cpp
index b29ad73..5f54a68 100644
--- a/services/mediametrics/statsd_recorder.cpp
+++ b/services/mediametrics/statsd_recorder.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -59,7 +59,7 @@
// string kRecorderLogSessionId = "android.media.mediarecorder.log-session-id";
std::string log_session_id;
if (item->getString("android.media.mediarecorder.log-session-id", &log_session_id)) {
- log_session_id = mediametrics::stringutils::sanitizeLogSessionId(log_session_id);
+ log_session_id = mediametrics::ValidateId::get()->validateId(log_session_id);
metrics_proto.set_log_session_id(log_session_id);
}
// string kRecorderAudioMime = "android.media.mediarecorder.audio.mime";
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index 2336d6f..69ec947 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -28,6 +28,7 @@
#include "AudioTypes.h"
#include "StringUtils.h"
+#include "ValidateId.h"
using namespace android;
@@ -1127,3 +1128,100 @@
validId2[3] = '!';
ASSERT_EQ("", mediametrics::stringutils::sanitizeLogSessionId(validId2));
}
+
+TEST(mediametrics_tests, LruSet) {
+ constexpr size_t LRU_SET_SIZE = 2;
+ mediametrics::LruSet<std::string> lruSet(LRU_SET_SIZE);
+
+ // test adding a couple strings.
+ lruSet.add("abc");
+ ASSERT_EQ(1u, lruSet.size());
+ ASSERT_TRUE(lruSet.check("abc"));
+ lruSet.add("def");
+ ASSERT_EQ(2u, lruSet.size());
+
+ // now adding the third string causes eviction of the oldest.
+ lruSet.add("ghi");
+ ASSERT_FALSE(lruSet.check("abc"));
+ ASSERT_TRUE(lruSet.check("ghi"));
+ ASSERT_TRUE(lruSet.check("def")); // "def" is most recent.
+ ASSERT_EQ(2u, lruSet.size()); // "abc" is correctly discarded.
+
+ // adding another string will evict the oldest.
+ lruSet.add("foo");
+ ASSERT_FALSE(lruSet.check("ghi")); // note: "ghi" discarded when "foo" added.
+ ASSERT_TRUE(lruSet.check("foo"));
+ ASSERT_TRUE(lruSet.check("def"));
+
+ // manual removing of a string works, too.
+ ASSERT_TRUE(lruSet.remove("def"));
+ ASSERT_FALSE(lruSet.check("def")); // we manually removed "def".
+ ASSERT_TRUE(lruSet.check("foo")); // "foo" is still there.
+ ASSERT_EQ(1u, lruSet.size());
+
+ // you can't remove a string that has not been added.
+ ASSERT_FALSE(lruSet.remove("bar")); // Note: "bar" doesn't exist, so remove returns false.
+ ASSERT_EQ(1u, lruSet.size());
+
+ lruSet.add("foo"); // adding "foo" (which already exists) doesn't change size.
+ ASSERT_EQ(1u, lruSet.size());
+ lruSet.add("bar"); // add "bar"
+ ASSERT_EQ(2u, lruSet.size());
+ lruSet.add("glorp"); // add "glorp" evicts "foo".
+ ASSERT_EQ(2u, lruSet.size());
+ ASSERT_TRUE(lruSet.check("bar"));
+ ASSERT_TRUE(lruSet.check("glorp"));
+ ASSERT_FALSE(lruSet.check("foo"));
+}
+
+TEST(mediametrics_tests, LruSet0) {
+ constexpr size_t LRU_SET_SIZE = 0;
+ mediametrics::LruSet<std::string> lruSet(LRU_SET_SIZE);
+
+ lruSet.add("a");
+ ASSERT_EQ(0u, lruSet.size());
+ ASSERT_FALSE(lruSet.check("a"));
+ ASSERT_FALSE(lruSet.remove("a")); // never added.
+ ASSERT_EQ(0u, lruSet.size());
+}
+
+// Returns a 16 Base64Url string representing the decimal representation of value
+// (with leading 0s) e.g. 0000000000000000, 0000000000000001, 0000000000000002, ...
+static std::string generateId(size_t value)
+{
+ char id[16 + 1]; // to be filled with 16 Base64Url chars (and zero termination)
+ char *sptr = id + 16; // start at the end.
+ *sptr-- = 0; // zero terminate.
+ // output the digits from least significant to most significant.
+ while (value) {
+ *sptr-- = '0' + (value % 10); // store the ASCII digit, not the raw value.
+ value /= 10;
+ }
+ // add leading 0's (the first character may also need a '0').
+ while (sptr >= id) {
+ *sptr-- = '0';
+ }
+ return std::string(id);
+}
+
+TEST(mediametrics_tests, ValidateId) {
+ constexpr size_t LRU_SET_SIZE = 3;
+ constexpr size_t IDS = 10;
+ static_assert(IDS > LRU_SET_SIZE); // IDS must be greater than LRU_SET_SIZE.
+ mediametrics::ValidateId validateId(LRU_SET_SIZE);
+
+ // register IDs as integer strings counting from 0.
+ for (size_t i = 0; i < IDS; ++i) {
+ validateId.registerId(generateId(i));
+ }
+
+ // only the last LRU_SET_SIZE exist.
+ for (size_t i = 0; i < IDS - LRU_SET_SIZE; ++i) {
+ ASSERT_EQ("", validateId.validateId(generateId(i)));
+ }
+ for (size_t i = IDS - LRU_SET_SIZE; i < IDS; ++i) {
+ const std::string id = generateId(i);
+ ASSERT_EQ(id, validateId.validateId(id));
+ }
+}
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 13dd3d3..340076e 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -59,6 +59,7 @@
result << " Device Id: " << getDeviceId() << "\n";
result << " Sample Rate: " << getSampleRate() << "\n";
result << " Channel Count: " << getSamplesPerFrame() << "\n";
+ result << " Channel Mask: 0x" << std::hex << getChannelMask() << std::dec << "\n";
result << " Format: " << getFormat() << "\n";
result << " Frames Per Burst: " << mFramesPerBurst << "\n";
result << " Usage: " << getUsage() << "\n";
@@ -164,6 +165,10 @@
configuration.getSamplesPerFrame() != getSamplesPerFrame()) {
return false;
}
+ if (configuration.getChannelMask() != AAUDIO_UNSPECIFIED &&
+ configuration.getChannelMask() != getChannelMask()) {
+ return false;
+ }
return true;
}
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index a08098c..35a0890 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -126,20 +126,15 @@
}
config.sample_rate = aaudioSampleRate;
- int32_t aaudioSamplesPerFrame = getSamplesPerFrame();
-
const aaudio_direction_t direction = getDirection();
+ config.channel_mask = AAudio_getChannelMaskForOpen(
+ getChannelMask(), getSamplesPerFrame(), direction == AAUDIO_DIRECTION_INPUT);
+
if (direction == AAUDIO_DIRECTION_OUTPUT) {
- config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
- ? AUDIO_CHANNEL_OUT_STEREO
- : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later
} else if (direction == AAUDIO_DIRECTION_INPUT) {
- config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
- ? AUDIO_CHANNEL_IN_STEREO
- : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier
} else {
@@ -225,9 +220,9 @@
}
// Get information about the stream and pass it back to the caller.
- setSamplesPerFrame((direction == AAUDIO_DIRECTION_OUTPUT)
- ? audio_channel_count_from_out_mask(config.channel_mask)
- : audio_channel_count_from_in_mask(config.channel_mask));
+ setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+ config.channel_mask, getDirection() == AAUDIO_DIRECTION_INPUT,
+ AAudio_isChannelIndexMask(config.channel_mask)));
// AAudio creates a copy of this FD and retains ownership of the copy.
// Assume that AudioFlinger will close the original shared_memory_fd.
@@ -247,9 +242,9 @@
setFormat(config.format);
setSampleRate(config.sample_rate);
- ALOGD("%s() actual rate = %d, channels = %d"
- ", deviceId = %d, capacity = %d\n",
- __func__, getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());
+ ALOGD("%s() actual rate = %d, channels = %d channelMask = %#x, deviceId = %d, capacity = %d\n",
+ __func__, getSampleRate(), getSamplesPerFrame(), getChannelMask(),
+ deviceId, getBufferCapacity());
ALOGD("%s() format = 0x%08x, frame size = %d, burst size = %d",
__func__, getFormat(), calculateBytesPerFrame(), mFramesPerBurst);
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 5fbcadb..5af0a91 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -78,7 +78,7 @@
result = mStreamInternal->open(builder);
setSampleRate(mStreamInternal->getSampleRate());
- setSamplesPerFrame(mStreamInternal->getSamplesPerFrame());
+ setChannelMask(mStreamInternal->getChannelMask());
setDeviceId(mStreamInternal->getDeviceId());
setSessionId(mStreamInternal->getSessionId());
setFormat(AUDIO_FORMAT_PCM_FLOAT); // force for mixer
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 34ddd4d..4ffc127 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -73,7 +73,8 @@
}
std::string AAudioServiceStreamBase::dumpHeader() {
- return std::string(" T Handle UId Port Run State Format Burst Chan Capacity");
+ return std::string(
+ " T Handle UId Port Run State Format Burst Chan Mask Capacity");
}
std::string AAudioServiceStreamBase::dump() const {
@@ -88,6 +89,7 @@
result << std::setw(7) << getFormat();
result << std::setw(6) << mFramesPerBurst;
result << std::setw(5) << getSamplesPerFrame();
+ result << std::setw(8) << std::hex << getChannelMask() << std::dec;
result << std::setw(9) << getBufferCapacity();
return result.str();
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index c665cda..ad06d97 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -164,11 +164,11 @@
goto error;
}
- setSamplesPerFrame(configurationInput.getSamplesPerFrame());
- if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
- setSamplesPerFrame(endpoint->getSamplesPerFrame());
+ setChannelMask(configurationInput.getChannelMask());
+ if (getChannelMask() == AAUDIO_UNSPECIFIED) {
+ setChannelMask(endpoint->getChannelMask());
} else if (getSamplesPerFrame() != endpoint->getSamplesPerFrame()) {
- ALOGD("%s() mSamplesPerFrame = %d, need %d",
+ ALOGD("%s() mSamplesPerFrame = %#x, need %#x",
__func__, getSamplesPerFrame(), endpoint->getSamplesPerFrame());
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
diff --git a/services/oboeservice/fuzzer/README.md b/services/oboeservice/fuzzer/README.md
index 00b85df..ae7af3eb 100644
--- a/services/oboeservice/fuzzer/README.md
+++ b/services/oboeservice/fuzzer/README.md
@@ -15,7 +15,7 @@
4. InService
5. DeviceId
6. SampleRate
-7. SamplesPerFrame
+7. ChannelMask
8. Direction
9. SharingMode
10. Usage
@@ -31,7 +31,7 @@
| `InService` | `bool` | Value obtained from FuzzedDataProvider |
| `DeviceId` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
| `SampleRate` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
-| `SamplesPerFrame` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
+| `ChannelMask` | `AAUDIO_UNSPECIFIED`, `AAUDIO_CHANNEL_INDEX_MASK_1`, `AAUDIO_CHANNEL_INDEX_MASK_2`, `AAUDIO_CHANNEL_INDEX_MASK_3`, `AAUDIO_CHANNEL_INDEX_MASK_4`, `AAUDIO_CHANNEL_INDEX_MASK_5`, `AAUDIO_CHANNEL_INDEX_MASK_6`, `AAUDIO_CHANNEL_INDEX_MASK_7`, `AAUDIO_CHANNEL_INDEX_MASK_8`, `AAUDIO_CHANNEL_INDEX_MASK_9`, `AAUDIO_CHANNEL_INDEX_MASK_10`, `AAUDIO_CHANNEL_INDEX_MASK_11`, `AAUDIO_CHANNEL_INDEX_MASK_12`, `AAUDIO_CHANNEL_INDEX_MASK_13`, `AAUDIO_CHANNEL_INDEX_MASK_14`, `AAUDIO_CHANNEL_INDEX_MASK_15`, `AAUDIO_CHANNEL_INDEX_MASK_16`, `AAUDIO_CHANNEL_INDEX_MASK_17`, `AAUDIO_CHANNEL_INDEX_MASK_18`, `AAUDIO_CHANNEL_INDEX_MASK_19`, `AAUDIO_CHANNEL_INDEX_MASK_20`, `AAUDIO_CHANNEL_INDEX_MASK_21`, `AAUDIO_CHANNEL_INDEX_MASK_22`, `AAUDIO_CHANNEL_INDEX_MASK_23`, `AAUDIO_CHANNEL_INDEX_MASK_24`, `AAUDIO_CHANNEL_MONO`, `AAUDIO_CHANNEL_STEREO`, `AAUDIO_CHANNEL_FRONT_BACK`, `AAUDIO_CHANNEL_2POINT0POINT2`, `AAUDIO_CHANNEL_2POINT1POINT2`, `AAUDIO_CHANNEL_3POINT0POINT2`, `AAUDIO_CHANNEL_3POINT1POINT2`, `AAUDIO_CHANNEL_5POINT1`, `AAUDIO_CHANNEL_MONO`, `AAUDIO_CHANNEL_STEREO`, `AAUDIO_CHANNEL_2POINT1`, `AAUDIO_CHANNEL_TRI`, `AAUDIO_CHANNEL_TRI_BACK`, `AAUDIO_CHANNEL_3POINT1`, `AAUDIO_CHANNEL_2POINT0POINT2`, `AAUDIO_CHANNEL_2POINT1POINT2`, `AAUDIO_CHANNEL_3POINT0POINT2`, `AAUDIO_CHANNEL_3POINT1POINT2`, `AAUDIO_CHANNEL_QUAD`, `AAUDIO_CHANNEL_QUAD_SIDE`, `AAUDIO_CHANNEL_SURROUND`, `AAUDIO_CHANNEL_PENTA`, `AAUDIO_CHANNEL_5POINT1`, `AAUDIO_CHANNEL_5POINT1_SIDE`, `AAUDIO_CHANNEL_5POINT1POINT2`, `AAUDIO_CHANNEL_5POINT1POINT4`, `AAUDIO_CHANNEL_6POINT1`, `AAUDIO_CHANNEL_7POINT1`, `AAUDIO_CHANNEL_7POINT1POINT2`, `AAUDIO_CHANNEL_7POINT1POINT4`, `AAUDIO_CHANNEL_9POINT1POINT4`, `AAUDIO_CHANNEL_9POINT1POINT6` | Value obtained from FuzzedDataProvider |
| `Direction` | `AAUDIO_DIRECTION_OUTPUT`, `AAUDIO_DIRECTION_INPUT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
| `SharingMode` | `AAUDIO_SHARING_MODE_EXCLUSIVE`, `AAUDIO_SHARING_MODE_SHARED` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
| `Usage` | `AAUDIO_USAGE_MEDIA`, `AAUDIO_USAGE_VOICE_COMMUNICATION`, `AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING`, `AAUDIO_USAGE_ALARM`, `AAUDIO_USAGE_NOTIFICATION`, `AAUDIO_USAGE_NOTIFICATION_RINGTONE`, `AAUDIO_USAGE_NOTIFICATION_EVENT`, `AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY`, `AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE`, `AAUDIO_USAGE_ASSISTANCE_SONIFICATION`, `AAUDIO_USAGE_GAME`, `AAUDIO_USAGE_ASSISTANT`, `AAUDIO_SYSTEM_USAGE_EMERGENCY`, `AAUDIO_SYSTEM_USAGE_SAFETY`, `AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS`, `AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
diff --git a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
index 4bc661c..17e8d36 100644
--- a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
+++ b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
@@ -68,10 +68,71 @@
AAUDIO_INPUT_PRESET_UNPROCESSED, AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE,
};
+aaudio_channel_mask_t kAAudioChannelMasks[] = {
+ AAUDIO_UNSPECIFIED,
+ AAUDIO_CHANNEL_INDEX_MASK_1,
+ AAUDIO_CHANNEL_INDEX_MASK_2,
+ AAUDIO_CHANNEL_INDEX_MASK_3,
+ AAUDIO_CHANNEL_INDEX_MASK_4,
+ AAUDIO_CHANNEL_INDEX_MASK_5,
+ AAUDIO_CHANNEL_INDEX_MASK_6,
+ AAUDIO_CHANNEL_INDEX_MASK_7,
+ AAUDIO_CHANNEL_INDEX_MASK_8,
+ AAUDIO_CHANNEL_INDEX_MASK_9,
+ AAUDIO_CHANNEL_INDEX_MASK_10,
+ AAUDIO_CHANNEL_INDEX_MASK_11,
+ AAUDIO_CHANNEL_INDEX_MASK_12,
+ AAUDIO_CHANNEL_INDEX_MASK_13,
+ AAUDIO_CHANNEL_INDEX_MASK_14,
+ AAUDIO_CHANNEL_INDEX_MASK_15,
+ AAUDIO_CHANNEL_INDEX_MASK_16,
+ AAUDIO_CHANNEL_INDEX_MASK_17,
+ AAUDIO_CHANNEL_INDEX_MASK_18,
+ AAUDIO_CHANNEL_INDEX_MASK_19,
+ AAUDIO_CHANNEL_INDEX_MASK_20,
+ AAUDIO_CHANNEL_INDEX_MASK_21,
+ AAUDIO_CHANNEL_INDEX_MASK_22,
+ AAUDIO_CHANNEL_INDEX_MASK_23,
+ AAUDIO_CHANNEL_INDEX_MASK_24,
+ AAUDIO_CHANNEL_MONO,
+ AAUDIO_CHANNEL_STEREO,
+ AAUDIO_CHANNEL_FRONT_BACK,
+ AAUDIO_CHANNEL_2POINT0POINT2,
+ AAUDIO_CHANNEL_2POINT1POINT2,
+ AAUDIO_CHANNEL_3POINT0POINT2,
+ AAUDIO_CHANNEL_3POINT1POINT2,
+ AAUDIO_CHANNEL_5POINT1,
+ AAUDIO_CHANNEL_MONO,
+ AAUDIO_CHANNEL_STEREO,
+ AAUDIO_CHANNEL_2POINT1,
+ AAUDIO_CHANNEL_TRI,
+ AAUDIO_CHANNEL_TRI_BACK,
+ AAUDIO_CHANNEL_3POINT1,
+ AAUDIO_CHANNEL_2POINT0POINT2,
+ AAUDIO_CHANNEL_2POINT1POINT2,
+ AAUDIO_CHANNEL_3POINT0POINT2,
+ AAUDIO_CHANNEL_3POINT1POINT2,
+ AAUDIO_CHANNEL_QUAD,
+ AAUDIO_CHANNEL_QUAD_SIDE,
+ AAUDIO_CHANNEL_SURROUND,
+ AAUDIO_CHANNEL_PENTA,
+ AAUDIO_CHANNEL_5POINT1,
+ AAUDIO_CHANNEL_5POINT1_SIDE,
+ AAUDIO_CHANNEL_5POINT1POINT2,
+ AAUDIO_CHANNEL_5POINT1POINT4,
+ AAUDIO_CHANNEL_6POINT1,
+ AAUDIO_CHANNEL_7POINT1,
+ AAUDIO_CHANNEL_7POINT1POINT2,
+ AAUDIO_CHANNEL_7POINT1POINT4,
+ AAUDIO_CHANNEL_9POINT1POINT4,
+ AAUDIO_CHANNEL_9POINT1POINT6,
+};
+
const size_t kNumAAudioFormats = std::size(kAAudioFormats);
const size_t kNumAAudioUsages = std::size(kAAudioUsages);
const size_t kNumAAudioContentTypes = std::size(kAAudioContentTypes);
const size_t kNumAAudioInputPresets = std::size(kAAudioInputPresets);
+const size_t kNumAAudioChannelMasks = std::size(kAAudioChannelMasks);
class FuzzAAudioClient : public virtual RefBase, public AAudioServiceInterface {
public:
@@ -305,7 +366,11 @@
request.getConfiguration().setDeviceId(fdp.ConsumeIntegral<int32_t>());
request.getConfiguration().setSampleRate(fdp.ConsumeIntegral<int32_t>());
- request.getConfiguration().setSamplesPerFrame(fdp.ConsumeIntegral<int32_t>());
+ request.getConfiguration().setChannelMask((aaudio_channel_mask_t)(
+ fdp.ConsumeBool()
+ ? fdp.ConsumeIntegral<int32_t>()
+ : kAAudioChannelMasks[fdp.ConsumeIntegralInRange<int32_t>(
+ 0, kNumAAudioChannelMasks - 1)]));
request.getConfiguration().setDirection(
fdp.ConsumeBool() ? fdp.ConsumeIntegral<int32_t>()
: (fdp.ConsumeBool() ? AAUDIO_DIRECTION_OUTPUT : AAUDIO_DIRECTION_INPUT));