Merge "libbinder: split out PackageManagerNative aidl"
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..a7614d2
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,13 @@
+BasedOnStyle: Google
+Standard: Cpp11
+AccessModifierOffset: -2
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+IncludeBlocks: Preserve
+IndentWidth: 4
+ContinuationIndentWidth: 8
+PointerAlignment: Left
+TabWidth: 4
+UseTab: Never
diff --git a/camera/OWNERS b/camera/OWNERS
index d6b95da..47d1d19 100644
--- a/camera/OWNERS
+++ b/camera/OWNERS
@@ -2,7 +2,7 @@
etalvala@google.com
jchowdhary@google.com
shuzhenwang@google.com
-yinchiayeh@google.com
+ruchamk@google.com
# backup owner
cychen@google.com
zhijunhe@google.com
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
index 87becb6..fd4ef95 100644
--- a/drm/mediadrm/plugins/TEST_MAPPING
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -1,19 +1,19 @@
{
"presubmit": [
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaDrmTestCases",
"options" : [
{
"include-annotation": "android.platform.test.annotations.Presubmit"
},
{
- "include-filter": "android.media.cts.MediaDrmClearkeyTest"
+ "include-filter": "android.mediadrm.cts.MediaDrmClearkeyTest"
},
{
- "include-filter": "android.media.cts.MediaDrmMetricsTest"
+ "include-filter": "android.mediadrm.cts.MediaDrmMetricsTest"
},
{
- "include-filter": "android.media.cts.NativeMediaDrmClearkeyTest"
+ "include-filter": "android.mediadrm.cts.NativeMediaDrmClearkeyTest"
}
]
}
diff --git a/media/codec2/components/aac/C2SoftAacDec.cpp b/media/codec2/components/aac/C2SoftAacDec.cpp
index 342d771..57cdcd0 100644
--- a/media/codec2/components/aac/C2SoftAacDec.cpp
+++ b/media/codec2/components/aac/C2SoftAacDec.cpp
@@ -287,6 +287,7 @@
mOutputDelayRingBufferWritePos = 0;
mOutputDelayRingBufferReadPos = 0;
mOutputDelayRingBufferFilled = 0;
+ mOutputDelayRingBuffer.reset();
mBuffersInfo.clear();
status_t status = UNKNOWN_ERROR;
@@ -308,10 +309,7 @@
aacDecoder_Close(mAACDecoder);
mAACDecoder = nullptr;
}
- if (mOutputDelayRingBuffer) {
- delete[] mOutputDelayRingBuffer;
- mOutputDelayRingBuffer = nullptr;
- }
+ mOutputDelayRingBuffer.reset();
}
status_t C2SoftAacDec::initDecoder() {
@@ -327,7 +325,7 @@
mOutputDelayCompensated = 0;
mOutputDelayRingBufferSize = 2048 * MAX_CHANNEL_COUNT * kNumDelayBlocksMax;
- mOutputDelayRingBuffer = new short[mOutputDelayRingBufferSize];
+ mOutputDelayRingBuffer.reset(new short[mOutputDelayRingBufferSize]);
mOutputDelayRingBufferWritePos = 0;
mOutputDelayRingBufferReadPos = 0;
mOutputDelayRingBufferFilled = 0;
diff --git a/media/codec2/components/aac/C2SoftAacDec.h b/media/codec2/components/aac/C2SoftAacDec.h
index 986187c..a03fc70 100644
--- a/media/codec2/components/aac/C2SoftAacDec.h
+++ b/media/codec2/components/aac/C2SoftAacDec.h
@@ -93,7 +93,7 @@
bool mEndOfOutput;
int32_t mOutputDelayCompensated;
int32_t mOutputDelayRingBufferSize;
- short *mOutputDelayRingBuffer;
+ std::unique_ptr<short[]> mOutputDelayRingBuffer;
int32_t mOutputDelayRingBufferWritePos;
int32_t mOutputDelayRingBufferReadPos;
int32_t mOutputDelayRingBufferFilled;
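
The two hunks above move the AAC decoder's output-delay ring buffer from a raw new[]/delete[] pair to std::unique_ptr<short[]>, so both the teardown path and initDecoder() simply call reset(). A minimal standalone sketch of that ownership pattern (the type and member names here are illustrative, not the component's actual ones):

    #include <cstdint>
    #include <memory>

    // Sketch of the ownership pattern adopted above: std::unique_ptr<short[]>
    // replaces a raw pointer plus manual new[]/delete[].
    struct DelayRingBuffer {
        std::unique_ptr<short[]> mBuffer;   // owns the array; null when released
        int32_t mSize = 0;

        void allocate(int32_t size) {
            mSize = size;
            mBuffer.reset(new short[size]());   // frees any previous array first
        }

        void release() {
            mBuffer.reset();                    // replaces the delete[] / nullptr pair
            mSize = 0;
        }
    };

    int main() {
        DelayRingBuffer buf;
        buf.allocate(2048);
        buf.mBuffer[0] = 1;   // indexable like the raw array it replaces
        buf.release();        // explicit teardown; the destructor would also free it
        return 0;
    }
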
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 2953d90..45e2ca8 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -149,8 +149,16 @@
#else
addParameter(
DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withConstValue(new C2StreamProfileLevelInfo::input(0u,
- C2Config::PROFILE_UNUSED, C2Config::LEVEL_UNUSED))
+ .withDefault(new C2StreamProfileLevelInfo::input(0u,
+ C2Config::PROFILE_VP8_0, C2Config::LEVEL_UNUSED))
+ .withFields({
+ C2F(mProfileLevel, profile).equalTo(
+ PROFILE_VP8_0
+ ),
+ C2F(mProfileLevel, level).equalTo(
+ LEVEL_UNUSED),
+ })
+ .withSetter(ProfileLevelSetter, mSize)
.build());
#endif
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index c98b802..926b2e9 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -324,21 +324,35 @@
.withConstValue(new C2StreamIntraRefreshTuning::output(
0u, C2Config::INTRA_REFRESH_DISABLED, 0.))
.build());
-
+#ifdef VP9
addParameter(
- DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withDefault(new C2StreamProfileLevelInfo::output(
- 0u, PROFILE_VP9_0, LEVEL_VP9_4_1))
- .withFields({
- C2F(mProfileLevel, profile).equalTo(
- PROFILE_VP9_0
- ),
- C2F(mProfileLevel, level).equalTo(
- LEVEL_VP9_4_1),
- })
- .withSetter(ProfileLevelSetter)
- .build());
-
+ DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::output(
+ 0u, PROFILE_VP9_0, LEVEL_VP9_4_1))
+ .withFields({
+ C2F(mProfileLevel, profile).equalTo(
+ PROFILE_VP9_0
+ ),
+ C2F(mProfileLevel, level).equalTo(
+ LEVEL_VP9_4_1),
+ })
+ .withSetter(ProfileLevelSetter)
+ .build());
+#else
+ addParameter(
+ DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::output(
+ 0u, PROFILE_VP8_0, LEVEL_UNUSED))
+ .withFields({
+ C2F(mProfileLevel, profile).equalTo(
+ PROFILE_VP8_0
+ ),
+ C2F(mProfileLevel, level).equalTo(
+ LEVEL_UNUSED),
+ })
+ .withSetter(ProfileLevelSetter)
+ .build());
+#endif
addParameter(
DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
.withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
diff --git a/media/codec2/hidl/1.0/vts/.clang-format b/media/codec2/hidl/1.0/vts/.clang-format
deleted file mode 120000
index 136279c..0000000
--- a/media/codec2/hidl/1.0/vts/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
index c487fa3..1dc037a 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
@@ -81,6 +81,7 @@
mEos = false;
mCsd = false;
mFramesReceived = 0;
+ mEncoderFrameSize = 0;
mWorkResult = C2_OK;
mOutputSize = 0u;
getInputMaxBufSize();
@@ -146,6 +147,7 @@
uint32_t mFramesReceived;
int32_t mInputMaxBufSize;
uint64_t mOutputSize;
+ uint32_t mEncoderFrameSize;
std::list<uint64_t> mFlushedIndices;
C2BlockPool::local_id_t mBlockPoolId;
@@ -304,20 +306,21 @@
c2_status_t c2err = mComponent->query({}, {C2StreamAudioFrameSizeInfo::input::PARAM_TYPE},
C2_DONT_BLOCK, &queried);
size_t offset = sizeof(C2Param);
- uint32_t maxInputSize = 0;
if (c2err == C2_OK && queried.size()) {
C2Param* param = queried[0].get();
- maxInputSize = *(uint32_t*)((uint8_t*)param + offset);
+ mEncoderFrameSize = *(uint32_t*)((uint8_t*)param + offset);
+ if (mEncoderFrameSize) {
+ *samplesPerFrame = mEncoderFrameSize;
+ return C2_OK;
+ }
}
- if (0 == maxInputSize) {
- c2err = mComponent->query({}, {C2StreamMaxBufferSizeInfo::input::PARAM_TYPE}, C2_DONT_BLOCK,
- &queried);
- if (c2err != C2_OK || queried.size() == 0) return c2err;
+ c2err = mComponent->query({}, {C2StreamMaxBufferSizeInfo::input::PARAM_TYPE}, C2_DONT_BLOCK,
+ &queried);
+ if (c2err != C2_OK || queried.size() == 0) return c2err;
- C2Param* param = queried[0].get();
- maxInputSize = *(uint32_t*)((uint8_t*)param + offset);
- }
+ C2Param* param = queried[0].get();
+ uint32_t maxInputSize = *(uint32_t*)((uint8_t*)param + offset);
*samplesPerFrame = std::min((maxInputSize / (nChannels * 2)), kMaxSamplesPerFrame);
return C2_OK;
@@ -450,10 +453,13 @@
ALOGV("EncodeTest");
if (mDisableTest) GTEST_SKIP() << "Test is disabled";
bool signalEOS = std::get<2>(GetParam());
- // Ratio w.r.t to mInputMaxBufSize
- int32_t inputMaxBufRatio = std::get<3>(GetParam());
- mSamplesPerFrame = ((mInputMaxBufSize / inputMaxBufRatio) / (mNumChannels * 2));
-
+ // Set samples per frame based on inputMaxBufRatio if component does not
+ // advertise supported frame size
+ if (!mEncoderFrameSize) {
+ // Ratio w.r.t to mInputMaxBufSize
+ int32_t inputMaxBufRatio = std::get<3>(GetParam());
+ mSamplesPerFrame = ((mInputMaxBufSize / inputMaxBufRatio) / (mNumChannels * 2));
+ }
ALOGV("signalEOS %d mInputMaxBufSize %d mSamplesPerFrame %d", signalEOS, mInputMaxBufSize,
mSamplesPerFrame);
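
The test change above prefers a frame size advertised by the component (C2StreamAudioFrameSizeInfo) and only falls back to deriving one from C2StreamMaxBufferSizeInfo when the component reports none. A self-contained sketch of that query-then-fallback selection (the constant and names are illustrative, not the test's actual values):

    #include <algorithm>
    #include <cstdint>
    #include <optional>

    constexpr uint32_t kMaxSamplesPerFrame = 1024;  // illustrative limit only

    // Prefer a frame size advertised by the component; otherwise derive one
    // from the maximum input buffer size, mirroring the updated test logic.
    uint32_t chooseSamplesPerFrame(std::optional<uint32_t> advertisedFrameSize,
                                   uint32_t maxInputSize, uint32_t nChannels) {
        if (advertisedFrameSize && *advertisedFrameSize != 0) {
            return *advertisedFrameSize;   // component declared its frame size
        }
        // 16-bit PCM: 2 bytes per sample per channel
        return std::min(maxInputSize / (nChannels * 2), kMaxSamplesPerFrame);
    }
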
diff --git a/media/codec2/vndk/C2PlatformStorePluginLoader.cpp b/media/codec2/vndk/C2PlatformStorePluginLoader.cpp
index bee028a..2a888a8 100644
--- a/media/codec2/vndk/C2PlatformStorePluginLoader.cpp
+++ b/media/codec2/vndk/C2PlatformStorePluginLoader.cpp
@@ -59,13 +59,14 @@
c2_status_t C2PlatformStorePluginLoader::createBlockPool(
::C2Allocator::id_t allocatorId, ::C2BlockPool::local_id_t blockPoolId,
- std::shared_ptr<C2BlockPool>* pool) {
+ std::shared_ptr<C2BlockPool>* pool,
+ std::function<void(C2BlockPool *)> deleter) {
if (mCreateBlockPool == nullptr) {
ALOGD("Handle or CreateBlockPool symbol is null");
return C2_NOT_FOUND;
}
- std::shared_ptr<::C2BlockPool> ptr(mCreateBlockPool(allocatorId, blockPoolId));
+ std::shared_ptr<::C2BlockPool> ptr(mCreateBlockPool(allocatorId, blockPoolId), deleter);
if (ptr) {
*pool = ptr;
return C2_OK;
@@ -75,14 +76,16 @@
}
c2_status_t C2PlatformStorePluginLoader::createAllocator(
- ::C2Allocator::id_t allocatorId, std::shared_ptr<C2Allocator>* const allocator) {
+ ::C2Allocator::id_t allocatorId,
+ std::shared_ptr<C2Allocator>* const allocator,
+ std::function<void(C2Allocator *)> deleter) {
if (mCreateAllocator == nullptr) {
ALOGD("Handle or CreateAllocator symbol is null");
return C2_NOT_FOUND;
}
c2_status_t res = C2_CORRUPTED;
- allocator->reset(mCreateAllocator(allocatorId, &res));
+ allocator->reset(mCreateAllocator(allocatorId, &res), deleter);
if (res != C2_OK) {
ALOGD("Failed to CreateAllocator by id: %u, res: %d", allocatorId, res);
allocator->reset();
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index c07c09e..1660c38 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -443,6 +443,7 @@
public:
_C2BlockPoolCache() : mBlockPoolSeqId(C2BlockPool::PLATFORM_START + 1) {}
+private:
c2_status_t _createBlockPool(
C2PlatformAllocatorStore::id_t allocatorId,
std::vector<std::shared_ptr<const C2Component>> components,
@@ -456,14 +457,19 @@
if (allocatorId == C2AllocatorStore::DEFAULT_LINEAR) {
allocatorId = GetPreferredLinearAllocatorId(GetCodec2PoolMask());
}
+ auto deleter = [this, poolId](C2BlockPool *pool) {
+ std::unique_lock lock(mMutex);
+ mBlockPools.erase(poolId);
+ mComponents.erase(poolId);
+ delete pool;
+ };
switch(allocatorId) {
case C2PlatformAllocatorStore::ION: /* also ::DMABUFHEAP */
res = allocatorStore->fetchAllocator(
C2PlatformAllocatorStore::ION, &allocator);
if (res == C2_OK) {
- std::shared_ptr<C2BlockPool> ptr =
- std::make_shared<C2PooledBlockPool>(
- allocator, poolId);
+ std::shared_ptr<C2BlockPool> ptr(
+ new C2PooledBlockPool(allocator, poolId), deleter);
*pool = ptr;
mBlockPools[poolId] = ptr;
mComponents[poolId].insert(
@@ -475,9 +481,8 @@
res = allocatorStore->fetchAllocator(
C2PlatformAllocatorStore::BLOB, &allocator);
if (res == C2_OK) {
- std::shared_ptr<C2BlockPool> ptr =
- std::make_shared<C2PooledBlockPool>(
- allocator, poolId);
+ std::shared_ptr<C2BlockPool> ptr(
+ new C2PooledBlockPool(allocator, poolId), deleter);
*pool = ptr;
mBlockPools[poolId] = ptr;
mComponents[poolId].insert(
@@ -490,8 +495,8 @@
res = allocatorStore->fetchAllocator(
C2AllocatorStore::DEFAULT_GRAPHIC, &allocator);
if (res == C2_OK) {
- std::shared_ptr<C2BlockPool> ptr =
- std::make_shared<C2PooledBlockPool>(allocator, poolId);
+ std::shared_ptr<C2BlockPool> ptr(
+ new C2PooledBlockPool(allocator, poolId), deleter);
*pool = ptr;
mBlockPools[poolId] = ptr;
mComponents[poolId].insert(
@@ -503,9 +508,8 @@
res = allocatorStore->fetchAllocator(
C2PlatformAllocatorStore::BUFFERQUEUE, &allocator);
if (res == C2_OK) {
- std::shared_ptr<C2BlockPool> ptr =
- std::make_shared<C2BufferQueueBlockPool>(
- allocator, poolId);
+ std::shared_ptr<C2BlockPool> ptr(
+ new C2BufferQueueBlockPool(allocator, poolId), deleter);
*pool = ptr;
mBlockPools[poolId] = ptr;
mComponents[poolId].insert(
@@ -517,7 +521,7 @@
// Try to create block pool from platform store plugins.
std::shared_ptr<C2BlockPool> ptr;
res = C2PlatformStorePluginLoader::GetInstance()->createBlockPool(
- allocatorId, poolId, &ptr);
+ allocatorId, poolId, &ptr, deleter);
if (res == C2_OK) {
*pool = ptr;
mBlockPools[poolId] = ptr;
@@ -530,17 +534,20 @@
return res;
}
+public:
c2_status_t createBlockPool(
C2PlatformAllocatorStore::id_t allocatorId,
std::vector<std::shared_ptr<const C2Component>> components,
std::shared_ptr<C2BlockPool> *pool) {
+ std::unique_lock lock(mMutex);
return _createBlockPool(allocatorId, components, mBlockPoolSeqId++, pool);
}
- bool getBlockPool(
+ c2_status_t getBlockPool(
C2BlockPool::local_id_t blockPoolId,
std::shared_ptr<const C2Component> component,
std::shared_ptr<C2BlockPool> *pool) {
+ std::unique_lock lock(mMutex);
// TODO: use one iterator for multiple blockpool type scalability.
std::shared_ptr<C2BlockPool> ptr;
auto it = mBlockPools.find(blockPoolId);
@@ -558,14 +565,22 @@
});
if (found != mComponents[blockPoolId].end()) {
*pool = ptr;
- return true;
+ return C2_OK;
}
}
}
- return false;
+ // TODO: remove this. this is temporary
+ if (blockPoolId == C2BlockPool::PLATFORM_START) {
+ return _createBlockPool(
+ C2PlatformAllocatorStore::BUFFERQUEUE, {component}, blockPoolId, pool);
+ }
+ return C2_NOT_FOUND;
}
private:
+ // Deleter needs to hold this mutex, and there is a small chance that deleter
+ // is invoked while the mutex is held.
+ std::recursive_mutex mMutex;
C2BlockPool::local_id_t mBlockPoolSeqId;
std::map<C2BlockPool::local_id_t, std::weak_ptr<C2BlockPool>> mBlockPools;
@@ -574,7 +589,6 @@
static std::unique_ptr<_C2BlockPoolCache> sBlockPoolCache =
std::make_unique<_C2BlockPoolCache>();
-static std::mutex sBlockPoolCacheMutex;
} // anonymous namespace
@@ -582,15 +596,12 @@
C2BlockPool::local_id_t id, std::shared_ptr<const C2Component> component,
std::shared_ptr<C2BlockPool> *pool) {
pool->reset();
- std::lock_guard<std::mutex> lock(sBlockPoolCacheMutex);
std::shared_ptr<C2AllocatorStore> allocatorStore = GetCodec2PlatformAllocatorStore();
std::shared_ptr<C2Allocator> allocator;
c2_status_t res = C2_NOT_FOUND;
if (id >= C2BlockPool::PLATFORM_START) {
- if (sBlockPoolCache->getBlockPool(id, component, pool)) {
- return C2_OK;
- }
+ return sBlockPoolCache->getBlockPool(id, component, pool);
}
switch (id) {
@@ -606,11 +617,6 @@
*pool = std::make_shared<C2BasicGraphicBlockPool>(allocator);
}
break;
- // TODO: remove this. this is temporary
- case C2BlockPool::PLATFORM_START:
- res = sBlockPoolCache->_createBlockPool(
- C2PlatformAllocatorStore::BUFFERQUEUE, {component}, id, pool);
- break;
default:
break;
}
@@ -623,7 +629,6 @@
std::shared_ptr<C2BlockPool> *pool) {
pool->reset();
- std::lock_guard<std::mutex> lock(sBlockPoolCacheMutex);
return sBlockPoolCache->createBlockPool(allocatorId, components, pool);
}
@@ -633,7 +638,6 @@
std::shared_ptr<C2BlockPool> *pool) {
pool->reset();
- std::lock_guard<std::mutex> lock(sBlockPoolCacheMutex);
return sBlockPoolCache->createBlockPool(allocatorId, {component}, pool);
}
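
In the block-pool cache above, each pool created is handed a custom deleter that erases its weak_ptr entry from mBlockPools/mComponents; because that deleter can fire while a cache method already holds the lock, the per-cache lock becomes a std::recursive_mutex. A self-contained sketch of the pattern, using hypothetical names rather than the C2Store types:

    #include <cstdio>
    #include <map>
    #include <memory>
    #include <mutex>

    struct Resource { int id; };   // stand-in for a block pool

    class Cache {
    public:
        std::shared_ptr<Resource> create(int id) {
            std::unique_lock lock(mMutex);
            auto deleter = [this, id](Resource* r) {
                std::unique_lock innerLock(mMutex);  // may already be held by this thread
                mEntries.erase(id);                  // unregister on last release
                delete r;
            };
            std::shared_ptr<Resource> ptr(new Resource{id}, deleter);
            mEntries[id] = ptr;                      // cache keeps only a weak reference
            return ptr;
        }

    private:
        std::recursive_mutex mMutex;   // deleter can run while a method holds the lock
        std::map<int, std::weak_ptr<Resource>> mEntries;
    };

    int main() {
        Cache cache;
        auto res = cache.create(7);
        res.reset();                   // last strong ref gone: deleter erases the entry
        std::printf("entry released\n");
        return 0;
    }
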
diff --git a/media/codec2/vndk/include/C2PlatformStorePluginLoader.h b/media/codec2/vndk/include/C2PlatformStorePluginLoader.h
index 4c10643..73d1b5e 100644
--- a/media/codec2/vndk/include/C2PlatformStorePluginLoader.h
+++ b/media/codec2/vndk/include/C2PlatformStorePluginLoader.h
@@ -61,9 +61,11 @@
* \retval C2_NOT_FOUND the extension symbol was not found.
* \retval C2_BAD_INDEX the input allocatorId is not defined in platform store extension.
*/
- c2_status_t createBlockPool(::C2Allocator::id_t allocatorId,
- ::C2BlockPool::local_id_t blockPoolId,
- std::shared_ptr<C2BlockPool>* pool);
+ c2_status_t createBlockPool(
+ ::C2Allocator::id_t allocatorId,
+ ::C2BlockPool::local_id_t blockPoolId,
+ std::shared_ptr<C2BlockPool>* pool,
+ std::function<void(C2BlockPool *)> deleter = std::default_delete<C2BlockPool>());
/**
* Creates allocator from platform store extension.
@@ -81,8 +83,10 @@
* \retval C2_BAD_INDEX the input allocatorId is not defined in platform store extension.
* \retval C2_NO_MEMORY not enough memory to create the allocator
*/
- c2_status_t createAllocator(::C2Allocator::id_t allocatorId,
- std::shared_ptr<C2Allocator>* const allocator);
+ c2_status_t createAllocator(
+ ::C2Allocator::id_t allocatorId,
+ std::shared_ptr<C2Allocator>* const allocator,
+ std::function<void(C2Allocator *)> deleter = std::default_delete<C2Allocator>());
private:
explicit C2PlatformStorePluginLoader(const char *libPath);
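
The deleter parameters added above default to std::default_delete<T>(), so existing callers keep plain delete behaviour while C2Store can inject its cache-aware deleter. A small sketch of that defaulted-deleter API shape (Widget and makeWidget are hypothetical):

    #include <functional>
    #include <memory>

    struct Widget {};   // hypothetical stand-in for C2BlockPool / C2Allocator

    // Callers that don't care get a regular delete; callers that do can inject
    // a deleter, e.g. one that also unregisters the object from a cache.
    void makeWidget(std::shared_ptr<Widget>* out,
                    std::function<void(Widget*)> deleter = std::default_delete<Widget>()) {
        out->reset(new Widget(), deleter);   // shared_ptr::reset(ptr, deleter)
    }

    int main() {
        std::shared_ptr<Widget> plain, tracked;
        makeWidget(&plain);                                          // default: plain delete
        makeWidget(&tracked, [](Widget* w) { /* bookkeeping */ delete w; });
        return 0;
    }
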
diff --git a/media/extractors/tests/Android.bp b/media/extractors/tests/Android.bp
index 5d97d9a..23c74f7 100644
--- a/media/extractors/tests/Android.bp
+++ b/media/extractors/tests/Android.bp
@@ -45,14 +45,11 @@
"libdatasource",
"libwatchdog",
- "libstagefright",
"libstagefright_id3",
"libstagefright_flacdec",
"libstagefright_esds",
"libstagefright_mpeg2support",
- "libstagefright_mpeg2extractor",
"libstagefright_foundation_colorutils_ndk",
- "libstagefright_foundation",
"libstagefright_metadatautils",
"libmedia_midiiowrapper",
@@ -74,6 +71,8 @@
"libcutils",
"libmediandk",
"libmedia",
+ "libstagefright",
+ "libstagefright_foundation",
"libcrypto",
"libhidlmemory",
"libhidlbase",
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index da59da6..01aaa2a 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -168,6 +168,7 @@
"libbinder",
"framework-permission-aidl-cpp",
"aaudio-aidl-cpp",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
],
@@ -246,6 +247,7 @@
"binding/aidl/aaudio/IAAudioService.aidl",
],
imports: [
+ "android.media.audio.common.types",
"audioclient-types-aidl",
"shared-file-region-aidl",
"framework-permission-aidl",
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index cf76225..14d1671 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -29,7 +29,7 @@
using namespace aaudio;
-using android::media::AudioFormatDescription;
+using android::media::audio::common::AudioFormatDescription;
AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
setChannelMask(parcelable.channelMask);
@@ -76,7 +76,8 @@
result.audioFormat = convAudioFormat.value();
} else {
result.audioFormat = AudioFormatDescription{};
- result.audioFormat.type = android::media::AudioFormatType::SYS_RESERVED_INVALID;
+ result.audioFormat.type =
+ android::media::audio::common::AudioFormatType::SYS_RESERVED_INVALID;
}
static_assert(sizeof(aaudio_direction_t) == sizeof(result.direction));
result.direction = getDirection();
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
index 3b274f2..cc4f138 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -16,7 +16,7 @@
package aaudio;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioFormatDescription;
parcelable StreamParameters {
int channelMask; // = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 98e9727..ea00a5a 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -48,7 +48,7 @@
shared_libs: ["libaaudio_internal"],
}
-cc_test {
+cc_binary {
name: "test_timestamps",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_timestamps.cpp"],
@@ -60,121 +60,71 @@
name: "test_open_params",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_open_params.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_no_close",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_no_close.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_aaudio_recovery",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_recovery.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_n_streams",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_n_streams.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_bad_disconnect",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_bad_disconnect.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_various",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_various.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_session_id",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_session_id.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
-cc_test {
+cc_binary {
name: "test_aaudio_monkey",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_aaudio_monkey.cpp"],
header_libs: ["libaaudio_example_utils"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_attributes",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_attributes.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_interference",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_interference.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
@@ -196,28 +146,18 @@
],
}
-cc_test {
+cc_binary {
name: "test_return_stop",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_return_stop.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
name: "test_callback_race",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_callback_race.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
cc_test {
@@ -238,7 +178,7 @@
],
}
-cc_test {
+cc_binary {
name: "test_steal_exclusive",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_steal_exclusive.cpp"],
@@ -251,15 +191,9 @@
],
}
-
-cc_test {
+cc_binary {
name: "test_disconnect_race",
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_disconnect_race.cpp"],
- shared_libs: [
- "libaaudio",
- "libbinder",
- "libcutils",
- "libutils",
- ],
+ shared_libs: ["libaaudio"],
}
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index b0eb7bd..80b9820 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -34,6 +34,10 @@
namespace android {
using base::unexpected;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::PcmType;
namespace {
@@ -388,21 +392,21 @@
namespace {
namespace detail {
-using AudioChannelPair = std::pair<audio_channel_mask_t, media::AudioChannelLayout>;
+using AudioChannelPair = std::pair<audio_channel_mask_t, AudioChannelLayout>;
using AudioChannelPairs = std::vector<AudioChannelPair>;
using AudioDevicePair = std::pair<audio_devices_t, media::AudioDeviceDescription>;
using AudioDevicePairs = std::vector<AudioDevicePair>;
-using AudioFormatPair = std::pair<audio_format_t, media::AudioFormatDescription>;
+using AudioFormatPair = std::pair<audio_format_t, AudioFormatDescription>;
using AudioFormatPairs = std::vector<AudioFormatPair>;
}
const detail::AudioChannelPairs& getInAudioChannelPairs() {
static const detail::AudioChannelPairs pairs = {
-#define DEFINE_INPUT_LAYOUT(n) \
- { \
- AUDIO_CHANNEL_IN_##n, \
- media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>( \
- media::AudioChannelLayout::LAYOUT_##n) \
+#define DEFINE_INPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+ AudioChannelLayout::LAYOUT_##n) \
}
DEFINE_INPUT_LAYOUT(MONO),
@@ -421,11 +425,11 @@
const detail::AudioChannelPairs& getOutAudioChannelPairs() {
static const detail::AudioChannelPairs pairs = {
-#define DEFINE_OUTPUT_LAYOUT(n) \
- { \
- AUDIO_CHANNEL_OUT_##n, \
- media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>( \
- media::AudioChannelLayout::LAYOUT_##n) \
+#define DEFINE_OUTPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_OUT_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+ AudioChannelLayout::LAYOUT_##n) \
}
DEFINE_OUTPUT_LAYOUT(MONO),
@@ -464,11 +468,11 @@
const detail::AudioChannelPairs& getVoiceAudioChannelPairs() {
static const detail::AudioChannelPairs pairs = {
-#define DEFINE_VOICE_LAYOUT(n) \
- { \
- AUDIO_CHANNEL_IN_VOICE_##n, \
- media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::voiceMask>( \
- media::AudioChannelLayout::VOICE_##n) \
+#define DEFINE_VOICE_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_VOICE_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>( \
+ AudioChannelLayout::VOICE_##n) \
}
DEFINE_VOICE_LAYOUT(UPLINK_MONO),
DEFINE_VOICE_LAYOUT(DNLINK_MONO),
@@ -671,25 +675,25 @@
return pairs;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::AudioFormatType type) {
- media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+ AudioFormatDescription result;
result.type = type;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType pcm) {
- auto result = make_AudioFormatDescription(media::AudioFormatType::PCM);
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+ auto result = make_AudioFormatDescription(AudioFormatType::PCM);
result.pcm = pcm;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
- media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+ AudioFormatDescription result;
result.encoding = encoding;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType transport,
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
const std::string& encoding) {
auto result = make_AudioFormatDescription(encoding);
result.pcm = transport;
@@ -700,31 +704,30 @@
static const detail::AudioFormatPairs pairs = {{
{
AUDIO_FORMAT_INVALID,
- make_AudioFormatDescription(media::AudioFormatType::SYS_RESERVED_INVALID)
+ make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID)
},
{
- AUDIO_FORMAT_DEFAULT, media::AudioFormatDescription{}
+ AUDIO_FORMAT_DEFAULT, AudioFormatDescription{}
},
{
- AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(media::PcmType::INT_16_BIT)
+ AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(PcmType::INT_16_BIT)
},
{
- AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(media::PcmType::UINT_8_BIT)
+ AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(PcmType::UINT_8_BIT)
},
{
- AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(media::PcmType::INT_32_BIT)
+ AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(PcmType::INT_32_BIT)
},
{
- AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(media::PcmType::FIXED_Q_8_24)
+ AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(PcmType::FIXED_Q_8_24)
},
{
- AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(media::PcmType::FLOAT_32_BIT)
+ AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(PcmType::FLOAT_32_BIT)
},
{
- AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(media::PcmType::INT_24_BIT)
+ AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(PcmType::INT_24_BIT)
},
{
- // See the comment in MediaDefs.h.
AUDIO_FORMAT_MP3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG)
},
{
@@ -734,52 +737,41 @@
AUDIO_FORMAT_AMR_WB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB)
},
{
- // Note: in MediaDefs.cpp MEDIA_MIMETYPE_AUDIO_AAC = "audio/mp4a-latm".
- AUDIO_FORMAT_AAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_FORMAT)
+ AUDIO_FORMAT_AAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MP4)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_MAIN, make_AudioFormatDescription("audio/aac.main")
+ AUDIO_FORMAT_AAC_MAIN, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MAIN)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_LC, make_AudioFormatDescription("audio/aac.lc")
+ AUDIO_FORMAT_AAC_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LC)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_SSR, make_AudioFormatDescription("audio/aac.ssr")
+ AUDIO_FORMAT_AAC_SSR, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SSR)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_LTP, make_AudioFormatDescription("audio/aac.ltp")
+ AUDIO_FORMAT_AAC_LTP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LTP)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_HE_V1, make_AudioFormatDescription("audio/aac.he.v1")
+ AUDIO_FORMAT_AAC_HE_V1, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V1)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_SCALABLE, make_AudioFormatDescription("audio/aac.scalable")
+ AUDIO_FORMAT_AAC_SCALABLE,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ERLC, make_AudioFormatDescription("audio/aac.erlc")
+ AUDIO_FORMAT_AAC_ERLC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ERLC)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_LD, make_AudioFormatDescription("audio/aac.ld")
+ AUDIO_FORMAT_AAC_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LD)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_HE_V2, make_AudioFormatDescription("audio/aac.he.v2")
+ AUDIO_FORMAT_AAC_HE_V2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V2)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ELD, make_AudioFormatDescription("audio/aac.eld")
+ AUDIO_FORMAT_AAC_ELD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ELD)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_XHE, make_AudioFormatDescription("audio/aac.xhe")
+ AUDIO_FORMAT_AAC_XHE, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_XHE)
},
// AUDIO_FORMAT_HE_AAC_V1 and HE_AAC_V2 are removed since they were deprecated long time
// ago.
@@ -796,7 +788,6 @@
AUDIO_FORMAT_E_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3)
},
{
- // Note: not in the IANA registry.
AUDIO_FORMAT_E_AC3_JOC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3_JOC)
},
{
@@ -809,13 +800,12 @@
// nested AudioFormatDescriptions. The legacy 'AUDIO_FORMAT_IEC61937' type doesn't
// specify the format of the encapsulated bitstream.
{
- // Note: not in the IANA registry.
AUDIO_FORMAT_IEC61937,
- make_AudioFormatDescription(media::PcmType::INT_16_BIT, "audio/x-iec61937")
+ make_AudioFormatDescription(PcmType::INT_16_BIT, MEDIA_MIMETYPE_AUDIO_IEC61937)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_DOLBY_TRUEHD, make_AudioFormatDescription("audio/vnd.dolby.truehd")
+ AUDIO_FORMAT_DOLBY_TRUEHD,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD)
},
{
AUDIO_FORMAT_EVRC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRC)
@@ -830,11 +820,9 @@
AUDIO_FORMAT_EVRCNW, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCNW)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADIF, make_AudioFormatDescription("audio/aac.adif")
+ AUDIO_FORMAT_AAC_ADIF, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADIF)
},
{
- // Note: not in the IANA registry.
AUDIO_FORMAT_WMA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_WMA)
},
{
@@ -845,7 +833,6 @@
AUDIO_FORMAT_AMR_WB_PLUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)
},
{
- // Note: not in the IANA registry.
AUDIO_FORMAT_MP2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)
},
{
@@ -859,7 +846,6 @@
AUDIO_FORMAT_FLAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_FLAC)
},
{
- // Note: not in the IANA registry.
AUDIO_FORMAT_ALAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_ALAC)
},
{
@@ -867,52 +853,49 @@
AUDIO_FORMAT_APE, make_AudioFormatDescription("audio/x-ape")
},
{
- // Note: not in the IANA registry.
AUDIO_FORMAT_AAC_ADTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_MAIN, make_AudioFormatDescription("audio/aac-adts.main")
+ AUDIO_FORMAT_AAC_ADTS_MAIN,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_LC, make_AudioFormatDescription("audio/aac-adts.lc")
+ AUDIO_FORMAT_AAC_ADTS_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_SSR, make_AudioFormatDescription("audio/aac-adts.ssr")
+ AUDIO_FORMAT_AAC_ADTS_SSR,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_LTP, make_AudioFormatDescription("audio/aac-adts.ltp")
+ AUDIO_FORMAT_AAC_ADTS_LTP,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_HE_V1, make_AudioFormatDescription("audio/aac-adts.he.v1")
+ AUDIO_FORMAT_AAC_ADTS_HE_V1,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_SCALABLE, make_AudioFormatDescription("audio/aac-adts.scalable")
+ AUDIO_FORMAT_AAC_ADTS_SCALABLE,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_ERLC, make_AudioFormatDescription("audio/aac-adts.erlc")
+ AUDIO_FORMAT_AAC_ADTS_ERLC,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_LD, make_AudioFormatDescription("audio/aac-adts.ld")
+ AUDIO_FORMAT_AAC_ADTS_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_HE_V2, make_AudioFormatDescription("audio/aac-adts.he.v2")
+ AUDIO_FORMAT_AAC_ADTS_HE_V2,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_ELD, make_AudioFormatDescription("audio/aac-adts.eld")
+ AUDIO_FORMAT_AAC_ADTS_ELD,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_ADTS_XHE, make_AudioFormatDescription("audio/aac-adts.xhe")
+ AUDIO_FORMAT_AAC_ADTS_XHE,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE)
},
{
// Note: not in the IANA registry. "vnd.octel.sbc" is not BT SBC.
@@ -926,7 +909,6 @@
AUDIO_FORMAT_APTX_HD, make_AudioFormatDescription("audio/vnd.qcom.aptx.hd")
},
{
- // Note: not in the IANA registry. Matches MediaDefs.cpp.
AUDIO_FORMAT_AC4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC4)
},
{
@@ -934,35 +916,36 @@
AUDIO_FORMAT_LDAC, make_AudioFormatDescription("audio/vnd.sony.ldac")
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_MAT, make_AudioFormatDescription("audio/vnd.dolby.mat")
+ AUDIO_FORMAT_MAT, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT)
},
{
// Note: not in the IANA registry.
- AUDIO_FORMAT_MAT_1_0, make_AudioFormatDescription("audio/vnd.dolby.mat.1.0")
+ AUDIO_FORMAT_MAT_1_0,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".1.0"))
},
{
// Note: not in the IANA registry.
- AUDIO_FORMAT_MAT_2_0, make_AudioFormatDescription("audio/vnd.dolby.mat.2.0")
+ AUDIO_FORMAT_MAT_2_0,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.0"))
},
{
// Note: not in the IANA registry.
- AUDIO_FORMAT_MAT_2_1, make_AudioFormatDescription("audio/vnd.dolby.mat.2.1")
+ AUDIO_FORMAT_MAT_2_1,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.1"))
},
{
AUDIO_FORMAT_AAC_LATM, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_LATM_LC, make_AudioFormatDescription("audio/mp4a-latm.lc")
+ AUDIO_FORMAT_AAC_LATM_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_LATM_HE_V1, make_AudioFormatDescription("audio/mp4a-latm.he.v1")
+ AUDIO_FORMAT_AAC_LATM_HE_V1,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_AAC_LATM_HE_V2, make_AudioFormatDescription("audio/mp4a-latm.he.v2")
+ AUDIO_FORMAT_AAC_LATM_HE_V2,
+ make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2)
},
{
// Note: not in the IANA registry.
@@ -989,29 +972,23 @@
AUDIO_FORMAT_LC3, make_AudioFormatDescription("audio/x-lc3")
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_MPEGH, make_AudioFormatDescription("audio/x-mpegh")
+ AUDIO_FORMAT_MPEGH, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_MPEGH_BL_L3, make_AudioFormatDescription("audio/x-mpegh.bl.l3")
+ AUDIO_FORMAT_MPEGH_BL_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_MPEGH_BL_L4, make_AudioFormatDescription("audio/x-mpegh.bl.l4")
+ AUDIO_FORMAT_MPEGH_BL_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_MPEGH_LC_L3, make_AudioFormatDescription("audio/x-mpegh.lc.l3")
+ AUDIO_FORMAT_MPEGH_LC_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3)
},
{
- // Note: not in the IANA registry.
- AUDIO_FORMAT_MPEGH_LC_L4, make_AudioFormatDescription("audio/x-mpegh.lc.l4")
+ AUDIO_FORMAT_MPEGH_LC_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4)
},
{
- // Note: not in the IANA registry.
AUDIO_FORMAT_IEC60958,
- make_AudioFormatDescription(media::PcmType::INT_24_BIT, "audio/x-iec60958")
+ make_AudioFormatDescription(PcmType::INT_24_BIT, MEDIA_MIMETYPE_AUDIO_IEC60958)
},
{
AUDIO_FORMAT_DTS_UHD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_UHD)
@@ -1055,14 +1032,14 @@
} // namespace
ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
- const media::AudioChannelLayout& aidl, bool isInput) {
- using ReverseMap = std::unordered_map<media::AudioChannelLayout, audio_channel_mask_t>;
- using Tag = media::AudioChannelLayout::Tag;
+ const AudioChannelLayout& aidl, bool isInput) {
+ using ReverseMap = std::unordered_map<AudioChannelLayout, audio_channel_mask_t>;
+ using Tag = AudioChannelLayout::Tag;
static const ReverseMap mIn = make_ReverseMap(getInAudioChannelPairs());
static const ReverseMap mOut = make_ReverseMap(getOutAudioChannelPairs());
static const ReverseMap mVoice = make_ReverseMap(getVoiceAudioChannelPairs());
- auto convert = [](const media::AudioChannelLayout& aidl, const ReverseMap& m,
+ auto convert = [](const AudioChannelLayout& aidl, const ReverseMap& m,
const char* func, const char* type) -> ConversionResult<audio_channel_mask_t> {
if (auto it = m.find(aidl); it != m.end()) {
return it->second;
@@ -1100,16 +1077,16 @@
return unexpected(BAD_VALUE);
}
-ConversionResult<media::AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ConversionResult<AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
audio_channel_mask_t legacy, bool isInput) {
- using DirectMap = std::unordered_map<audio_channel_mask_t, media::AudioChannelLayout>;
- using Tag = media::AudioChannelLayout::Tag;
+ using DirectMap = std::unordered_map<audio_channel_mask_t, AudioChannelLayout>;
+ using Tag = AudioChannelLayout::Tag;
static const DirectMap mInAndVoice = make_DirectMap(
getInAudioChannelPairs(), getVoiceAudioChannelPairs());
static const DirectMap mOut = make_DirectMap(getOutAudioChannelPairs());
auto convert = [](const audio_channel_mask_t legacy, const DirectMap& m,
- const char* func, const char* type) -> ConversionResult<media::AudioChannelLayout> {
+ const char* func, const char* type) -> ConversionResult<AudioChannelLayout> {
if (auto it = m.find(legacy); it != m.end()) {
return it->second;
} else {
@@ -1120,9 +1097,9 @@
};
if (legacy == AUDIO_CHANNEL_NONE) {
- return media::AudioChannelLayout{};
+ return AudioChannelLayout{};
} else if (legacy == AUDIO_CHANNEL_INVALID) {
- return media::AudioChannelLayout::make<Tag::invalid>(0);
+ return AudioChannelLayout::make<Tag::invalid>(0);
}
const audio_channel_representation_t repr = audio_channel_mask_get_representation(legacy);
@@ -1130,7 +1107,7 @@
if (audio_channel_mask_is_valid(legacy)) {
const int indexMask = VALUE_OR_RETURN(
convertIntegral<int>(audio_channel_mask_get_bits(legacy)));
- return media::AudioChannelLayout::make<Tag::indexMask>(indexMask);
+ return AudioChannelLayout::make<Tag::indexMask>(indexMask);
} else {
ALOGE("%s: legacy audio_channel_mask_t value 0x%x is invalid", __func__, legacy);
return unexpected(BAD_VALUE);
@@ -1171,8 +1148,8 @@
}
ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
- const media::AudioFormatDescription& aidl) {
- static const std::unordered_map<media::AudioFormatDescription, audio_format_t> m =
+ const AudioFormatDescription& aidl) {
+ static const std::unordered_map<AudioFormatDescription, audio_format_t> m =
make_ReverseMap(getAudioFormatPairs());
if (auto it = m.find(aidl); it != m.end()) {
return it->second;
@@ -1182,9 +1159,9 @@
}
}
-ConversionResult<media::AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
+ConversionResult<AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
audio_format_t legacy) {
- static const std::unordered_map<audio_format_t, media::AudioFormatDescription> m =
+ static const std::unordered_map<audio_format_t, AudioFormatDescription> m =
make_DirectMap(getAudioFormatPairs());
if (auto it = m.find(legacy); it != m.end()) {
return it->second;
@@ -2153,6 +2130,10 @@
return AUDIO_FLAG_NO_SYSTEM_CAPTURE;
case media::AudioFlag::CAPTURE_PRIVATE:
return AUDIO_FLAG_CAPTURE_PRIVATE;
+ case media::AudioFlag::CONTENT_SPATIALIZED:
+ return AUDIO_FLAG_CONTENT_SPATIALIZED;
+ case media::AudioFlag::NEVER_SPATIALIZE:
+ return AUDIO_FLAG_NEVER_SPATIALIZE;
}
return unexpected(BAD_VALUE);
}
@@ -2190,6 +2171,10 @@
return media::AudioFlag::NO_SYSTEM_CAPTURE;
case AUDIO_FLAG_CAPTURE_PRIVATE:
return media::AudioFlag::CAPTURE_PRIVATE;
+ case AUDIO_FLAG_CONTENT_SPATIALIZED:
+ return media::AudioFlag::CONTENT_SPATIALIZED;
+ case AUDIO_FLAG_NEVER_SPATIALIZE:
+ return media::AudioFlag::NEVER_SPATIALIZE;
}
return unexpected(BAD_VALUE);
}
@@ -2711,7 +2696,7 @@
}
RETURN_IF_ERROR(
convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
- [isInput](const media::AudioChannelLayout& l) {
+ [isInput](const AudioChannelLayout& l) {
return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
}));
legacy.num_channel_masks = aidl.channelMasks.size();
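
The conversion code above keeps a single table of (legacy, AIDL) pairs and derives both lookup directions from it via make_DirectMap/make_ReverseMap. A minimal sketch of that table-driven approach, substituting plain ints and strings for the real audio_format_t and AudioFormatDescription types:

    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    // One table of equivalences, two derived lookup maps -- the same idea the
    // AIDL conversion code uses for audio_format_t <-> AudioFormatDescription.
    using FormatPair = std::pair<int, std::string>;   // (legacy value, description)

    static const std::vector<FormatPair>& formatPairs() {
        static const std::vector<FormatPair> pairs = {
            {1, "audio/mpeg"},        // illustrative entries only
            {2, "audio/mp4a-latm"},
        };
        return pairs;
    }

    template <typename Pairs>
    auto makeDirectMap(const Pairs& pairs) {
        std::unordered_map<typename Pairs::value_type::first_type,
                           typename Pairs::value_type::second_type> m;
        for (const auto& [legacy, aidl] : pairs) m.emplace(legacy, aidl);
        return m;
    }

    template <typename Pairs>
    auto makeReverseMap(const Pairs& pairs) {
        std::unordered_map<typename Pairs::value_type::second_type,
                           typename Pairs::value_type::first_type> m;
        for (const auto& [legacy, aidl] : pairs) m.emplace(aidl, legacy);
        return m;
    }

    int main() {
        static const auto direct = makeDirectMap(formatPairs());    // legacy -> description
        static const auto reverse = makeReverseMap(formatPairs());  // description -> legacy
        return direct.at(1) == "audio/mpeg" && reverse.at("audio/mpeg") == 1 ? 0 : 1;
    }
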
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index fef6bec..0a26051 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -51,6 +51,7 @@
"PolicyAidlConversion.cpp"
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -71,6 +72,7 @@
include_dirs: ["system/media/audio_utils/include"],
export_include_dirs: ["include"],
export_shared_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -111,6 +113,7 @@
"TrackPlayerBase.cpp",
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -228,6 +231,7 @@
"libaudioclient_aidl_conversion_util",
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libbase",
"libbinder",
@@ -239,6 +243,7 @@
"framework-permission-aidl-cpp",
],
export_shared_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libbase",
"shared-file-region-aidl-cpp",
@@ -307,7 +312,6 @@
local_include_dir: "aidl",
srcs: [
"aidl/android/media/AudioAttributesInternal.aidl",
- "aidl/android/media/AudioChannelLayout.aidl",
"aidl/android/media/AudioClient.aidl",
"aidl/android/media/AudioConfig.aidl",
"aidl/android/media/AudioConfigBase.aidl",
@@ -319,8 +323,6 @@
"aidl/android/media/AudioEncapsulationMode.aidl",
"aidl/android/media/AudioEncapsulationMetadataType.aidl",
"aidl/android/media/AudioEncapsulationType.aidl",
- "aidl/android/media/AudioFormatDescription.aidl",
- "aidl/android/media/AudioFormatType.aidl",
"aidl/android/media/AudioFlag.aidl",
"aidl/android/media/AudioGain.aidl",
"aidl/android/media/AudioGainConfig.aidl",
@@ -360,7 +362,6 @@
"aidl/android/media/AudioVibratorInfo.aidl",
"aidl/android/media/EffectDescriptor.aidl",
"aidl/android/media/ExtraAudioDescriptor.aidl",
- "aidl/android/media/PcmType.aidl",
"aidl/android/media/TrackSecondaryOutputInfo.aidl",
],
imports: [
@@ -407,6 +408,7 @@
"aidl/android/media/SpatializerHeadTrackingMode.aidl",
],
imports: [
+ "android.media.audio.common.types",
"audioclient-types-aidl",
],
backend: {
@@ -449,6 +451,7 @@
"aidl/android/media/IAudioTrackCallback.aidl",
],
imports: [
+ "android.media.audio.common.types",
"audioclient-types-aidl",
"av-types-aidl",
"effect-aidl",
@@ -486,6 +489,7 @@
"aidl/android/media/IAudioPolicyServiceClient.aidl",
],
imports: [
+ "android.media.audio.common.types",
"audioclient-types-aidl",
"audiopolicy-types-aidl",
"capture_state_listener-aidl",
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 65bf97d..057befd 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -51,8 +51,9 @@
namespace android {
using aidl_utils::statusTFromBinderStatus;
using binder::Status;
+using content::AttributionSourceState;
using media::IAudioPolicyService;
-using android::content::AttributionSourceState;
+using media::audio::common::AudioFormatDescription;
// client singleton for AudioFlinger binder interface
Mutex AudioSystem::gLock;
@@ -1877,7 +1878,7 @@
media::Int numSurroundFormatsAidl;
numSurroundFormatsAidl.value =
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
- std::vector<media::AudioFormatDescription> surroundFormatsAidl;
+ std::vector<AudioFormatDescription> surroundFormatsAidl;
std::vector<bool> surroundFormatsEnabledAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl,
@@ -1904,7 +1905,7 @@
media::Int numSurroundFormatsAidl;
numSurroundFormatsAidl.value =
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*numSurroundFormats));
- std::vector<media::AudioFormatDescription> surroundFormatsAidl;
+ std::vector<AudioFormatDescription> surroundFormatsAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getReportedSurroundFormats(&numSurroundFormatsAidl, &surroundFormatsAidl)));
@@ -1920,7 +1921,7 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
+ AudioFormatDescription audioFormatAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_format_t_AudioFormatDescription(audioFormat));
return statusTFromBinderStatus(
aps->setSurroundFormatEnabled(audioFormatAidl, enabled));
@@ -1982,7 +1983,7 @@
& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- std::vector<media::AudioFormatDescription> formatsAidl;
+ std::vector<AudioFormatDescription> formatsAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getHwOffloadEncodingFormatsSupportedForA2DP(&formatsAidl)));
*formats = VALUE_OR_RETURN_STATUS(
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 6d2ec93..ee64894 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -30,6 +30,8 @@
using aidl_utils::statusTFromBinderStatus;
using binder::Status;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
#define MAX_ITEMS_PER_LIST 1024
@@ -252,7 +254,7 @@
audio_format_t AudioFlingerClientAdapter::format(audio_io_handle_t output) const {
auto result = [&]() -> ConversionResult<audio_format_t> {
int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
- media::AudioFormatDescription aidlRet;
+ AudioFormatDescription aidlRet;
RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->format(outputAidl, &aidlRet)));
return aidl2legacy_AudioFormatDescription_audio_format_t(aidlRet);
}();
@@ -420,9 +422,9 @@
audio_channel_mask_t channelMask) const {
auto result = [&]() -> ConversionResult<size_t> {
int32_t sampleRateAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
- media::AudioFormatDescription formatAidl = VALUE_OR_RETURN(
+ AudioFormatDescription formatAidl = VALUE_OR_RETURN(
legacy2aidl_audio_format_t_AudioFormatDescription(format));
- media::AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
+ AudioChannelLayout channelMaskAidl = VALUE_OR_RETURN(
legacy2aidl_audio_channel_mask_t_AudioChannelLayout(channelMask, true /*isInput*/));
int64_t aidlRet;
RETURN_IF_ERROR(statusTFromBinderStatus(
@@ -820,7 +822,7 @@
}
Status AudioFlingerServerAdapter::format(int32_t output,
- media::AudioFormatDescription* _aidl_return) {
+ AudioFormatDescription* _aidl_return) {
audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_io_handle_t(output));
*_aidl_return = VALUE_OR_RETURN_BINDER(
@@ -948,8 +950,8 @@
}
Status AudioFlingerServerAdapter::getInputBufferSize(int32_t sampleRate,
- const media::AudioFormatDescription& format,
- const media::AudioChannelLayout& channelMask,
+ const AudioFormatDescription& format,
+ const AudioChannelLayout& channelMask,
int64_t* _aidl_return) {
uint32_t sampleRateLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(sampleRate));
audio_format_t formatLegacy = VALUE_OR_RETURN_BINDER(
diff --git a/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl b/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl
deleted file mode 100644
index 3259105..0000000
--- a/media/libaudioclient/aidl/android/media/AudioChannelLayout.aidl
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * This structure describes a layout of a multi-channel stream.
- * There are two possible ways for representing a layout:
- *
- * - indexed mask, which tells what channels of an audio frame are used, but
- * doesn't label them in any way, thus a correspondence between channels in
- * the same position of frames originating from different streams must be
- * established externally;
- *
- * - layout mask, which gives a label to each channel, thus allowing to
- * match channels between streams of different layouts.
- *
- * Both representations are agnostic of the direction of audio transfer. Also,
- * by construction, the number of bits set to '1' in the mask indicates the
- * number of channels in the audio frame. A channel mask per se only defines the
- * presence or absence of a channel, not the order. Please see 'INTERLEAVE_*'
- * constants for the platform convention of order.
- *
- * The structure also defines a "voice mask" which is a special case of
- * layout mask, intended for processing voice audio from telecommunication
- * use cases.
- */
-union AudioChannelLayout {
- /**
- * This variant is used for representing the "null" ("none") value
- * for the channel layout. The field value must always be '0'.
- */
- int none = 0;
- /**
- * This variant is used for indicating an "invalid" layout for use by the
- * framework only. HAL implementations must not accept or emit
- * AudioChannelLayout values for this variant. The field value must always
- * be '0'.
- */
- int invalid = 0;
- /**
- * This variant is used for representing indexed masks. The mask indicates
- * what channels are used. For example, the mask that specifies to use only
- * channels 1 and 3 when interacting with a multi-channel device is defined
- * as a combination of the 1st and the 3rd bits and thus is equal to 5. See
- * also the 'INDEX_MASK_*' constants. The 'indexMask' field must have at
- * least one bit set.
- */
- int indexMask;
- /**
- * This variant is used for representing layout masks.
- * It is recommended to use one of 'LAYOUT_*' values. The 'layoutMask' field
- * must have at least one bit set.
- */
- int layoutMask;
- /**
- * This variant is used for processing of voice audio input and output.
- * It is recommended to use one of 'VOICE_*' values. The 'voiceMask' field
- * must have at least one bit set.
- */
- int voiceMask;
-
- /**
- * 'INDEX_MASK_*' constants define how many channels are used.
- * The mask constants below are 'canonical' masks. Each 'INDEX_MASK_N'
- * constant declares that all N channels are used and arranges
- * them starting from the LSB.
- */
- const int INDEX_MASK_1 = (1 << 1) - 1;
- const int INDEX_MASK_2 = (1 << 2) - 1;
- const int INDEX_MASK_3 = (1 << 3) - 1;
- const int INDEX_MASK_4 = (1 << 4) - 1;
- const int INDEX_MASK_5 = (1 << 5) - 1;
- const int INDEX_MASK_6 = (1 << 6) - 1;
- const int INDEX_MASK_7 = (1 << 7) - 1;
- const int INDEX_MASK_8 = (1 << 8) - 1;
- const int INDEX_MASK_9 = (1 << 9) - 1;
- const int INDEX_MASK_10 = (1 << 10) - 1;
- const int INDEX_MASK_11 = (1 << 11) - 1;
- const int INDEX_MASK_12 = (1 << 12) - 1;
- const int INDEX_MASK_13 = (1 << 13) - 1;
- const int INDEX_MASK_14 = (1 << 14) - 1;
- const int INDEX_MASK_15 = (1 << 15) - 1;
- const int INDEX_MASK_16 = (1 << 16) - 1;
- const int INDEX_MASK_17 = (1 << 17) - 1;
- const int INDEX_MASK_18 = (1 << 18) - 1;
- const int INDEX_MASK_19 = (1 << 19) - 1;
- const int INDEX_MASK_20 = (1 << 20) - 1;
- const int INDEX_MASK_21 = (1 << 21) - 1;
- const int INDEX_MASK_22 = (1 << 22) - 1;
- const int INDEX_MASK_23 = (1 << 23) - 1;
- const int INDEX_MASK_24 = (1 << 24) - 1;
-
- /**
- * 'LAYOUT_*' constants define channel layouts recognized by
- * the audio system. The order of the channels in the frame is assumed
- * to be from the LSB to MSB for all the bits set to '1'.
- */
- const int LAYOUT_MONO = CHANNEL_FRONT_LEFT;
- const int LAYOUT_STEREO =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT;
- const int LAYOUT_2POINT1 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_LOW_FREQUENCY;
- const int LAYOUT_TRI =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_FRONT_CENTER;
- const int LAYOUT_TRI_BACK =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_BACK_CENTER;
- const int LAYOUT_3POINT1 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT | CHANNEL_FRONT_CENTER |
- CHANNEL_LOW_FREQUENCY;
- const int LAYOUT_2POINT0POINT2 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_2POINT1POINT2 =
- LAYOUT_2POINT0POINT2 | CHANNEL_LOW_FREQUENCY;
- const int LAYOUT_3POINT0POINT2 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_3POINT1POINT2 =
- LAYOUT_3POINT0POINT2 | CHANNEL_LOW_FREQUENCY;
- const int LAYOUT_QUAD =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT;
- const int LAYOUT_QUAD_SIDE =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
- const int LAYOUT_SURROUND =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER | CHANNEL_BACK_CENTER;
- const int LAYOUT_PENTA = LAYOUT_QUAD | CHANNEL_FRONT_CENTER;
- const int LAYOUT_5POINT1 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
- CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT;
- const int LAYOUT_5POINT1_SIDE =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
- CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
- const int LAYOUT_5POINT1POINT2 = LAYOUT_5POINT1 |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_5POINT1POINT4 = LAYOUT_5POINT1 |
- CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
- CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT;
- const int LAYOUT_6POINT1 =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER | CHANNEL_LOW_FREQUENCY |
- CHANNEL_BACK_LEFT | CHANNEL_BACK_RIGHT | CHANNEL_BACK_CENTER;
- const int LAYOUT_7POINT1 = LAYOUT_5POINT1 |
- CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT;
- const int LAYOUT_7POINT1POINT2 = LAYOUT_7POINT1 |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_7POINT1POINT4 = LAYOUT_7POINT1 |
- CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
- CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT;
- const int LAYOUT_9POINT1POINT4 = LAYOUT_7POINT1POINT4 |
- CHANNEL_FRONT_WIDE_LEFT | CHANNEL_FRONT_WIDE_RIGHT;
- const int LAYOUT_9POINT1POINT6 = LAYOUT_9POINT1POINT4 |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT;
- const int LAYOUT_13POINT_360RA =
- CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT |
- CHANNEL_FRONT_CENTER |
- CHANNEL_SIDE_LEFT | CHANNEL_SIDE_RIGHT |
- CHANNEL_TOP_FRONT_LEFT | CHANNEL_TOP_FRONT_RIGHT |
- CHANNEL_TOP_FRONT_CENTER |
- CHANNEL_TOP_BACK_LEFT | CHANNEL_TOP_BACK_RIGHT |
- CHANNEL_BOTTOM_FRONT_LEFT | CHANNEL_BOTTOM_FRONT_RIGHT |
- CHANNEL_BOTTOM_FRONT_CENTER;
- const int LAYOUT_22POINT2 = LAYOUT_7POINT1POINT4 |
- CHANNEL_FRONT_LEFT_OF_CENTER | CHANNEL_FRONT_RIGHT_OF_CENTER |
- CHANNEL_BACK_CENTER | CHANNEL_TOP_CENTER |
- CHANNEL_TOP_FRONT_CENTER | CHANNEL_TOP_BACK_CENTER |
- CHANNEL_TOP_SIDE_LEFT | CHANNEL_TOP_SIDE_RIGHT |
- CHANNEL_BOTTOM_FRONT_LEFT | CHANNEL_BOTTOM_FRONT_RIGHT |
- CHANNEL_BOTTOM_FRONT_CENTER |
- CHANNEL_LOW_FREQUENCY_2;
- const int LAYOUT_MONO_HAPTIC_A =
- LAYOUT_MONO | CHANNEL_HAPTIC_A;
- const int LAYOUT_STEREO_HAPTIC_A =
- LAYOUT_STEREO | CHANNEL_HAPTIC_A;
- const int LAYOUT_HAPTIC_AB =
- CHANNEL_HAPTIC_A | CHANNEL_HAPTIC_B;
- const int LAYOUT_MONO_HAPTIC_AB =
- LAYOUT_MONO | LAYOUT_HAPTIC_AB;
- const int LAYOUT_STEREO_HAPTIC_AB =
- LAYOUT_STEREO | LAYOUT_HAPTIC_AB;
- const int LAYOUT_FRONT_BACK =
- CHANNEL_FRONT_CENTER | CHANNEL_BACK_CENTER;
-
- /**
- * Expresses the convention when stereo audio samples are stored interleaved
- * in an array. This should improve readability by allowing code to use
- * symbolic indices instead of hard-coded [0] and [1].
- *
- * For multi-channel beyond stereo, the platform convention is that channels
- * are interleaved in order from least significant channel mask bit to most
- * significant channel mask bit, with unused bits skipped. Any exceptions
- * to this convention will be noted at the appropriate API.
- */
- const int INTERLEAVE_LEFT = 0;
- const int INTERLEAVE_RIGHT = 1;
-
- /**
- * 'CHANNEL_*' constants are used to build 'LAYOUT_*' masks. Each constant
- * must have exactly one bit set. The values do not match
- * 'android.media.AudioFormat.CHANNEL_OUT_*' constants from the SDK
- * for better efficiency in masks processing.
- */
- const int CHANNEL_FRONT_LEFT = 1 << 0;
- const int CHANNEL_FRONT_RIGHT = 1 << 1;
- const int CHANNEL_FRONT_CENTER = 1 << 2;
- const int CHANNEL_LOW_FREQUENCY = 1 << 3;
- const int CHANNEL_BACK_LEFT = 1 << 4;
- const int CHANNEL_BACK_RIGHT = 1 << 5;
- const int CHANNEL_FRONT_LEFT_OF_CENTER = 1 << 6;
- const int CHANNEL_FRONT_RIGHT_OF_CENTER = 1 << 7;
- const int CHANNEL_BACK_CENTER = 1 << 8;
- const int CHANNEL_SIDE_LEFT = 1 << 9;
- const int CHANNEL_SIDE_RIGHT = 1 << 10;
- const int CHANNEL_TOP_CENTER = 1 << 11;
- const int CHANNEL_TOP_FRONT_LEFT = 1 << 12;
- const int CHANNEL_TOP_FRONT_CENTER = 1 << 13;
- const int CHANNEL_TOP_FRONT_RIGHT = 1 << 14;
- const int CHANNEL_TOP_BACK_LEFT = 1 << 15;
- const int CHANNEL_TOP_BACK_CENTER = 1 << 16;
- const int CHANNEL_TOP_BACK_RIGHT = 1 << 17;
- const int CHANNEL_TOP_SIDE_LEFT = 1 << 18;
- const int CHANNEL_TOP_SIDE_RIGHT = 1 << 19;
- const int CHANNEL_BOTTOM_FRONT_LEFT = 1 << 20;
- const int CHANNEL_BOTTOM_FRONT_CENTER = 1 << 21;
- const int CHANNEL_BOTTOM_FRONT_RIGHT = 1 << 22;
- const int CHANNEL_LOW_FREQUENCY_2 = 1 << 23;
- const int CHANNEL_FRONT_WIDE_LEFT = 1 << 24;
- const int CHANNEL_FRONT_WIDE_RIGHT = 1 << 25;
- /**
- * Haptic channels are not part of multichannel standards, however they
- * enhance user experience when playing so they are packed together with the
- * channels of the program. To avoid collision with positional channels the
- * values for haptic channels start at the MSB of an integer (after the sign
- * bit) and move down to LSB.
- */
- const int CHANNEL_HAPTIC_B = 1 << 29;
- const int CHANNEL_HAPTIC_A = 1 << 30;
-
- /**
- * 'VOICE_*' constants define layouts for voice audio. The order of the
- * channels in the frame is assumed to be from the LSB to MSB for all the
- * bits set to '1'.
- */
- const int VOICE_UPLINK_MONO = CHANNEL_VOICE_UPLINK;
- const int VOICE_DNLINK_MONO = CHANNEL_VOICE_DNLINK;
- const int VOICE_CALL_MONO = CHANNEL_VOICE_UPLINK | CHANNEL_VOICE_DNLINK;
-
- /**
- * 'CHANNEL_VOICE_*' constants are used to build 'VOICE_*' masks. Each
- * constant must have exactly one bit set. Use the same values as
- * 'android.media.AudioFormat.CHANNEL_IN_VOICE_*' constants from the SDK.
- */
- const int CHANNEL_VOICE_UPLINK = 0x4000;
- const int CHANNEL_VOICE_DNLINK = 0x8000;
-}
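The AudioChannelLayout description above is dropped from libaudioclient because the type now comes from the android.media.audio.common AIDL package (see the import changes below). As a quick illustration of the indexMask arithmetic it documents, here is a minimal C++ sketch; the header path and the make<Tag>() idiom are taken from the updated conversion tests further down in this change, so treat it as a sketch rather than shipped code.

```cpp
#include <android/media/audio/common/AudioChannelLayout.h>

using android::media::audio::common::AudioChannelLayout;

// LAYOUT_STEREO is CHANNEL_FRONT_LEFT | CHANNEL_FRONT_RIGHT.
AudioChannelLayout makeStereo() {
    return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
            AudioChannelLayout::LAYOUT_STEREO);
}

// Indexed mask selecting only channels 1 and 3: bits 0 and 2, i.e. 0b101 == 5,
// exactly the example from the comment above. By contrast, INDEX_MASK_2 is
// (1 << 2) - 1 == 0b11, covering channels 1 and 2.
AudioChannelLayout makeChannels1And3() {
    return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(
            (1 << 0) | (1 << 2));
}
```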
diff --git a/media/libaudioclient/aidl/android/media/AudioConfig.aidl b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
index 6996d42..aea7a34 100644
--- a/media/libaudioclient/aidl/android/media/AudioConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioConfig.aidl
@@ -16,9 +16,9 @@
package android.media;
-import android.media.AudioChannelLayout;
-import android.media.AudioFormatDescription;
import android.media.AudioOffloadInfo;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
index e84161b..54b1780 100644
--- a/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioConfigBase.aidl
@@ -16,8 +16,8 @@
package android.media;
-import android.media.AudioChannelLayout;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioFlag.aidl b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
index 58b493b..91361fb 100644
--- a/media/libaudioclient/aidl/android/media/AudioFlag.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
@@ -34,4 +34,6 @@
MUTE_HAPTIC = 11,
NO_SYSTEM_CAPTURE = 12,
CAPTURE_PRIVATE = 13,
+ CONTENT_SPATIALIZED = 14,
+ NEVER_SPATIALIZE = 15,
}
diff --git a/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl b/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl
deleted file mode 100644
index a656348..0000000
--- a/media/libaudioclient/aidl/android/media/AudioFormatDescription.aidl
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.AudioFormatType;
-import android.media.PcmType;
-
-/**
- * An extensible type for specifying audio formats. All formats are largely
- * divided into two classes: PCM and non-PCM (bitstreams). Bitstreams can
- * be encapsulated into PCM streams.
- *
- * The type defined in a way to make each format uniquely identifiable, so
- * that if the framework and the HAL construct a value for the same type
- * (e.g. PCM 16 bit), they will produce identical parcelables which will have
- * identical hashes. This makes possible deduplicating type descriptions
- * by the framework when they are received from different HAL modules without
- * relying on having some centralized registry of enumeration values.
- *
- * {@hide}
- */
-parcelable AudioFormatDescription {
- /**
- * The type of the audio format. See the 'AudioFormatType' for the
- * list of supported values.
- */
- AudioFormatType type = AudioFormatType.DEFAULT;
- /**
- * The type of the PCM stream or the transport stream for PCM
- * encapsulations. See 'PcmType' for the list of supported values.
- */
- PcmType pcm = PcmType.DEFAULT;
- /**
- * Optional encoding specification. Must be left empty when:
- *
- * - 'type == DEFAULT && pcm == DEFAULT' -- that means "default" type;
- * - 'type == PCM' -- that means a regular PCM stream (not an encapsulation
- * of an encoded bitstream).
- *
- * For PCM encapsulations of encoded bitstreams (e.g. an encapsulation
- * according to IEC-61937 standard), the value of the 'pcm' field must
- * be set accordingly, as an example, PCM_INT_16_BIT must be used for
- * IEC-61937. Note that 'type == NON_PCM' in this case.
- *
- * Encoding names mostly follow IANA standards for media types (MIME), and
- * frameworks/av/media/libstagefright/foundation/MediaDefs.cpp with the
- * latter having priority. Since there are still many audio types not found
- * in any of these lists, the following rules are applied:
- *
- * - If there is a direct MIME type for the encoding, the MIME type name
- * is used as is, e.g. "audio/eac3" for the EAC-3 format.
- * - If the encoding is a "subformat" of a MIME-registered format,
- * the latter is augmented with a suffix, e.g. "audio/eac3-joc" for the
- * JOC extension of EAC-3.
- * - If it's a proprietary format, a "vnd." prefix is added, similar to
- * IANA rules, e.g. "audio/vnd.dolby.truehd".
- * - Otherwise, "x-" prefix is added, e.g. "audio/x-iec61937".
- * - All MIME types not found in the IANA formats list have an associated
- * comment.
- *
- * For PCM encapsulations with a known bitstream format, the latter
- * is added to the encapsulation encoding as a suffix, after a "+" char.
- * For example, an IEC61937 encapsulation of AC3 has the following
- * representation:
- * type = NON_PCM,
- * pcm = PcmType.INT_16_BIT,
- * encoding = "audio/x-iec61937+audio/ac3"
- */
- @utf8InCpp String encoding;
-}
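The encoding rules above end with a worked case: an IEC-61937 encapsulation of AC3 uses type NON_PCM, a 16-bit PCM transport, and a "+"-joined encoding string. A minimal sketch of building that descriptor with the relocated type follows; the header path assumes the generated android.media.audio.common package that AidlConversion.h switches to later in this patch.

```cpp
#include <android/media/audio/common/AudioFormatDescription.h>

using android::media::audio::common::AudioFormatDescription;
using android::media::audio::common::AudioFormatType;
using android::media::audio::common::PcmType;

AudioFormatDescription makeIec61937Ac3() {
    AudioFormatDescription desc;
    desc.type = AudioFormatType::NON_PCM;   // encapsulated bitstream, not plain PCM
    desc.pcm = PcmType::INT_16_BIT;         // IEC-61937 is carried over 16-bit PCM
    desc.encoding = "audio/x-iec61937+audio/ac3";
    return desc;
}
```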
diff --git a/media/libaudioclient/aidl/android/media/AudioFormatType.aidl b/media/libaudioclient/aidl/android/media/AudioFormatType.aidl
deleted file mode 100644
index 31ed2be..0000000
--- a/media/libaudioclient/aidl/android/media/AudioFormatType.aidl
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * The type of the audio format. Only used as part of 'AudioFormatDescription'
- * structure.
- */
-@Backing(type="byte")
-enum AudioFormatType {
- /**
- * "Default" type is used when the client does not care about the actual
- * format. All fields of 'AudioFormatDescription' must have default / empty
- * / null values.
- */
- DEFAULT = 0,
- /**
- * When the 'encoding' field of 'AudioFormatDescription' is not empty, it
- * specifies the codec used for bitstream (non-PCM) data. It is also used
- * in the case when the bitstream data is encapsulated into a PCM stream,
- * see the documentation for 'AudioFormatDescription'.
- */
- NON_PCM = DEFAULT,
- /**
- * PCM type. The 'pcm' field of 'AudioFormatDescription' is used to specify
- * the actual sample size and representation.
- */
- PCM = 1,
- /**
- * Value reserved for system use only. HALs must never return this value to
- * the system or accept it from the system.
- */
- SYS_RESERVED_INVALID = -1,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioGain.aidl b/media/libaudioclient/aidl/android/media/AudioGain.aidl
index 4cfa96e..ff85b50 100644
--- a/media/libaudioclient/aidl/android/media/AudioGain.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGain.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioChannelLayout;
+import android.media.audio.common.AudioChannelLayout;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
index afa3aca..f60c461 100644
--- a/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioGainConfig.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioChannelLayout;
+import android.media.audio.common.AudioChannelLayout;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
index efdf99b..b01f902 100644
--- a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
@@ -16,9 +16,9 @@
package android.media;
-import android.media.AudioChannelLayout;
-import android.media.AudioFormatDescription;
import android.media.AudioPatch;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
index be32a69..3908cb1 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
@@ -16,14 +16,14 @@
package android.media;
-import android.media.AudioChannelLayout;
import android.media.AudioGainConfig;
import android.media.AudioIoFlags;
import android.media.AudioPortConfigExt;
import android.media.AudioPortConfigType;
import android.media.AudioPortRole;
import android.media.AudioPortType;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioChannelLayout;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/AudioProfile.aidl b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
index 9fb8d49..3e234de 100644
--- a/media/libaudioclient/aidl/android/media/AudioProfile.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioProfile.aidl
@@ -16,9 +16,9 @@
package android.media;
-import android.media.AudioChannelLayout;
import android.media.AudioEncapsulationType;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioChannelLayout;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index 16f70c1..7b02a9d 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -16,7 +16,6 @@
package android.media;
-import android.media.AudioChannelLayout;
import android.media.AudioMode;
import android.media.AudioPatch;
import android.media.AudioPort;
@@ -42,7 +41,8 @@
import android.media.MicrophoneInfoData;
import android.media.RenderPosition;
import android.media.TrackSecondaryOutputInfo;
-import android.media.AudioFormatDescription;
+import android.media.audio.common.AudioChannelLayout;
+import android.media.audio.common.AudioFormatDescription;
/**
* {@hide}
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index 7022b9d..74bfa05 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -16,7 +16,6 @@
package android.media;
-import android.media.AudioFormatDescription;
import android.content.AttributionSourceState;
import android.media.AudioAttributesEx;
@@ -53,6 +52,7 @@
import android.media.INativeSpatializerCallback;
import android.media.Int;
import android.media.SoundTriggerSession;
+import android.media.audio.common.AudioFormatDescription;
/**
* IAudioPolicyService interface (see AudioPolicyInterface for method descriptions).
diff --git a/media/libaudioclient/aidl/android/media/PcmType.aidl b/media/libaudioclient/aidl/android/media/PcmType.aidl
deleted file mode 100644
index c9e327c..0000000
--- a/media/libaudioclient/aidl/android/media/PcmType.aidl
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * The type of the encoding used for representing PCM samples. Only used as
- * part of 'AudioFormatDescription' structure.
- */
-@Backing(type="byte")
-enum PcmType {
- /**
- * "Default" value used when the type 'AudioFormatDescription' is "default".
- */
- DEFAULT = 0,
- /**
- * Unsigned 8-bit integer.
- */
- UINT_8_BIT = DEFAULT,
- /**
- * Signed 16-bit integer.
- */
- INT_16_BIT = 1,
- /**
- * Signed 32-bit integer.
- */
- INT_32_BIT = 2,
- /**
- * Q8.24 fixed point format.
- */
- FIXED_Q_8_24 = 3,
- /**
- * IEEE 754 32-bit floating point format.
- */
- FLOAT_32_BIT = 4,
- /**
- * Signed 24-bit integer.
- */
- INT_24_BIT = 5,
-}
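PcmType likewise moves to android.media.audio.common. For reference, the sample widths implied by the enumerators above are summarized in this hedged sketch (the header path is an assumption based on the relocated package; note that DEFAULT and UINT_8_BIT share one underlying value, so a switch needs only one case for them).

```cpp
#include <cstddef>

#include <android/media/audio/common/PcmType.h>

using android::media::audio::common::PcmType;

std::size_t bytesPerSample(PcmType pcm) {
    switch (pcm) {
        case PcmType::UINT_8_BIT:   return 1;  // same value as PcmType::DEFAULT
        case PcmType::INT_16_BIT:   return 2;
        case PcmType::INT_24_BIT:   return 3;
        case PcmType::INT_32_BIT:   return 4;
        case PcmType::FIXED_Q_8_24: return 4;  // Q8.24 fixed point occupies 32 bits
        case PcmType::FLOAT_32_BIT: return 4;
    }
    return 0;
}
```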
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
index b290aa8..969e3e6 100644
--- a/media/libaudioclient/fuzzer/Android.bp
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -46,6 +46,7 @@
],
shared_libs: [
"android.hardware.audio.common-util",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 2cf127c..7e87a4e 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -22,7 +22,6 @@
#include <system/audio.h>
#include <android/media/AudioAttributesInternal.h>
-#include <android/media/AudioChannelLayout.h>
#include <android/media/AudioClient.h>
#include <android/media/AudioConfig.h>
#include <android/media/AudioConfigBase.h>
@@ -32,7 +31,6 @@
#include <android/media/AudioEncapsulationMetadataType.h>
#include <android/media/AudioEncapsulationType.h>
#include <android/media/AudioFlag.h>
-#include <android/media/AudioFormatDescription.h>
#include <android/media/AudioGain.h>
#include <android/media/AudioGainMode.h>
#include <android/media/AudioInputFlags.h>
@@ -54,6 +52,8 @@
#include <android/media/EffectDescriptor.h>
#include <android/media/ExtraAudioDescriptor.h>
#include <android/media/TrackSecondaryOutputInfo.h>
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
#include <android/media/SharedFileRegion.h>
#include <binder/IMemory.h>
@@ -132,9 +132,9 @@
audio_port_type_t legacy);
ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
- const media::AudioChannelLayout& aidl, bool isInput);
-ConversionResult<media::AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
- audio_channel_mask_t legacy, bool isInput);
+ const android::media::audio::common::AudioChannelLayout& aidl, bool isInput);
+ConversionResult<android::media::audio::common::AudioChannelLayout>
+legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
const media::AudioDeviceDescription& aidl);
@@ -142,9 +142,9 @@
audio_devices_t legacy);
ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
- const media::AudioFormatDescription& aidl);
-ConversionResult<media::AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
- audio_format_t legacy);
+ const media::audio::common::AudioFormatDescription& aidl);
+ConversionResult<media::audio::common::AudioFormatDescription>
+legacy2aidl_audio_format_t_AudioFormatDescription(audio_format_t legacy);
ConversionResult<audio_gain_mode_t>
aidl2legacy_AudioGainMode_audio_gain_mode_t(media::AudioGainMode aidl);
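The conversion helpers now take and return the android.media.audio.common types. A minimal round-trip sketch, assuming VALUE_OR_RETURN from the AIDL conversion utilities is usable in a function returning ConversionResult (the same pattern AudioProfile.cpp uses later in this patch) and using the legacy AUDIO_CHANNEL_OUT_STEREO constant from system/audio.h:

```cpp
#include <media/AidlConversion.h>

using android::ConversionResult;
using android::media::audio::common::AudioChannelLayout;

ConversionResult<audio_channel_mask_t> roundTripStereo() {
    // legacy -> AIDL, then AIDL -> legacy; both directions use the new types.
    const AudioChannelLayout aidl = VALUE_OR_RETURN(
            android::legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
                    AUDIO_CHANNEL_OUT_STEREO, false /*isInput*/));
    return android::aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
            aidl, false /*isInput*/);
}
```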
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index bd341e3..b63f389 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -19,9 +19,9 @@
#include <functional>
-#include <android/media/AudioChannelLayout.h>
#include <android/media/AudioDeviceDescription.h>
-#include <android/media/AudioFormatDescription.h>
+#include <android/media/audio/common/AudioChannelLayout.h>
+#include <android/media/audio/common/AudioFormatDescription.h>
#include <binder/Parcelable.h>
#include <system/audio.h>
#include <system/audio_policy.h>
@@ -42,10 +42,11 @@
// possibility of processing types belonging to different versions of the type,
// e.g. a HAL may be using a previous version of the AIDL interface.
-template<> struct hash<android::media::AudioChannelLayout>
+template<> struct hash<android::media::audio::common::AudioChannelLayout>
{
- std::size_t operator()(const android::media::AudioChannelLayout& acl) const noexcept {
- using Tag = android::media::AudioChannelLayout::Tag;
+ std::size_t operator()(
+ const android::media::audio::common::AudioChannelLayout& acl) const noexcept {
+ using Tag = android::media::audio::common::AudioChannelLayout::Tag;
const size_t seed = std::hash<Tag>{}(acl.getTag());
switch (acl.getTag()) {
case Tag::none:
@@ -72,13 +73,14 @@
}
};
-template<> struct hash<android::media::AudioFormatDescription>
+template<> struct hash<android::media::audio::common::AudioFormatDescription>
{
- std::size_t operator()(const android::media::AudioFormatDescription& afd) const noexcept {
+ std::size_t operator()(
+ const android::media::audio::common::AudioFormatDescription& afd) const noexcept {
return hash_combine(
- std::hash<android::media::AudioFormatType>{}(afd.type),
+ std::hash<android::media::audio::common::AudioFormatType>{}(afd.type),
hash_combine(
- std::hash<android::media::PcmType>{}(afd.pcm),
+ std::hash<android::media::audio::common::PcmType>{}(afd.pcm),
std::hash<std::string>{}(afd.encoding)));
}
};
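With these specializations retargeted to the relocated types, std::hash works on them directly, which is what the deduplication mentioned in the comment relies on. A minimal sketch (not part of the patch), using the same std::hash<T>{}(t) idiom as the updated tests below:

```cpp
#include <cstddef>
#include <functional>

#include <media/AudioCommonTypes.h>

using android::media::audio::common::AudioFormatDescription;

std::size_t hashOfDefaultFormat() {
    // Picks up the std::hash specialization defined in AudioCommonTypes.h.
    return std::hash<AudioFormatDescription>{}(AudioFormatDescription{});
}
```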
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index a2ce145..a74661a 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -570,7 +570,8 @@
Status createRecord(const media::CreateRecordRequest& request,
media::CreateRecordResponse* _aidl_return) override;
Status sampleRate(int32_t ioHandle, int32_t* _aidl_return) override;
- Status format(int32_t output, media::AudioFormatDescription* _aidl_return) override;
+ Status format(int32_t output,
+ media::audio::common::AudioFormatDescription* _aidl_return) override;
Status frameCount(int32_t ioHandle, int64_t* _aidl_return) override;
Status latency(int32_t output, int32_t* _aidl_return) override;
Status setMasterVolume(float value) override;
@@ -592,8 +593,9 @@
Status
getParameters(int32_t ioHandle, const std::string& keys, std::string* _aidl_return) override;
Status registerClient(const sp<media::IAudioFlingerClient>& client) override;
- Status getInputBufferSize(int32_t sampleRate, const media::AudioFormatDescription& format,
- const media::AudioChannelLayout& channelMask,
+ Status getInputBufferSize(int32_t sampleRate,
+ const media::audio::common::AudioFormatDescription& format,
+ const media::audio::common::AudioChannelLayout& channelMask,
int64_t* _aidl_return) override;
Status openOutput(const media::OpenOutputRequest& request,
media::OpenOutputResponse* _aidl_return) override;
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 2279244..891293e 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -33,6 +33,7 @@
"libutils",
],
static_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
"libstagefright_foundation",
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
index 424b387..253ccac 100644
--- a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -22,38 +22,43 @@
using namespace android;
using namespace android::aidl_utils;
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::PcmType;
+
namespace {
template<typename T> size_t hash(const T& t) {
return std::hash<T>{}(t);
}
-media::AudioChannelLayout make_ACL_None() {
- return media::AudioChannelLayout{};
+AudioChannelLayout make_ACL_None() {
+ return AudioChannelLayout{};
}
-media::AudioChannelLayout make_ACL_Invalid() {
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::invalid>(0);
+AudioChannelLayout make_ACL_Invalid() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::invalid>(0);
}
-media::AudioChannelLayout make_ACL_Stereo() {
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::layoutMask>(
- media::AudioChannelLayout::LAYOUT_STEREO);
+AudioChannelLayout make_ACL_Stereo() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
+ AudioChannelLayout::LAYOUT_STEREO);
}
-media::AudioChannelLayout make_ACL_ChannelIndex2() {
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::indexMask>(
- media::AudioChannelLayout::INDEX_MASK_2);
+AudioChannelLayout make_ACL_ChannelIndex2() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(
+ AudioChannelLayout::INDEX_MASK_2);
}
-media::AudioChannelLayout make_ACL_ChannelIndexArbitrary() {
+AudioChannelLayout make_ACL_ChannelIndexArbitrary() {
// Use channels 1 and 3.
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::indexMask>(5);
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::indexMask>(5);
}
-media::AudioChannelLayout make_ACL_VoiceCall() {
- return media::AudioChannelLayout::make<media::AudioChannelLayout::Tag::voiceMask>(
- media::AudioChannelLayout::VOICE_CALL_MONO);
+AudioChannelLayout make_ACL_VoiceCall() {
+ return AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>(
+ AudioChannelLayout::VOICE_CALL_MONO);
}
media::AudioDeviceDescription make_AudioDeviceDescription(media::AudioDeviceType type,
@@ -86,52 +91,52 @@
media::AudioDeviceDescription::CONNECTION_BT_SCO());
}
-media::AudioFormatDescription make_AudioFormatDescription(media::AudioFormatType type) {
- media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+ AudioFormatDescription result;
result.type = type;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType pcm) {
- auto result = make_AudioFormatDescription(media::AudioFormatType::PCM);
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+ auto result = make_AudioFormatDescription(AudioFormatType::PCM);
result.pcm = pcm;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
- media::AudioFormatDescription result;
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+ AudioFormatDescription result;
result.encoding = encoding;
return result;
}
-media::AudioFormatDescription make_AudioFormatDescription(media::PcmType transport,
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
const std::string& encoding) {
auto result = make_AudioFormatDescription(encoding);
result.pcm = transport;
return result;
}
-media::AudioFormatDescription make_AFD_Default() {
- return media::AudioFormatDescription{};
+AudioFormatDescription make_AFD_Default() {
+ return AudioFormatDescription{};
}
-media::AudioFormatDescription make_AFD_Invalid() {
- return make_AudioFormatDescription(media::AudioFormatType::SYS_RESERVED_INVALID);
+AudioFormatDescription make_AFD_Invalid() {
+ return make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID);
}
-media::AudioFormatDescription make_AFD_Pcm16Bit() {
- return make_AudioFormatDescription(media::PcmType::INT_16_BIT);
+AudioFormatDescription make_AFD_Pcm16Bit() {
+ return make_AudioFormatDescription(PcmType::INT_16_BIT);
}
-media::AudioFormatDescription make_AFD_Bitstream() {
+AudioFormatDescription make_AFD_Bitstream() {
return make_AudioFormatDescription("example");
}
-media::AudioFormatDescription make_AFD_Encap() {
- return make_AudioFormatDescription(media::PcmType::INT_16_BIT, "example.encap");
+AudioFormatDescription make_AFD_Encap() {
+ return make_AudioFormatDescription(PcmType::INT_16_BIT, "example.encap");
}
-media::AudioFormatDescription make_AFD_Encap_with_Enc() {
+AudioFormatDescription make_AFD_Encap_with_Enc() {
auto afd = make_AFD_Encap();
afd.encoding += "+example";
return afd;
@@ -160,7 +165,7 @@
};
TEST_F(HashIdentityTest, AudioChannelLayoutHashIdentity) {
- verifyHashIdentity<media::AudioChannelLayout>({
+ verifyHashIdentity<AudioChannelLayout>({
make_ACL_None, make_ACL_Invalid, make_ACL_Stereo, make_ACL_ChannelIndex2,
make_ACL_ChannelIndexArbitrary, make_ACL_VoiceCall});
}
@@ -172,12 +177,12 @@
}
TEST_F(HashIdentityTest, AudioFormatDescriptionHashIdentity) {
- verifyHashIdentity<media::AudioFormatDescription>({
+ verifyHashIdentity<AudioFormatDescription>({
make_AFD_Default, make_AFD_Invalid, make_AFD_Pcm16Bit, make_AFD_Bitstream,
make_AFD_Encap, make_AFD_Encap_with_Enc});
}
-using ChannelLayoutParam = std::tuple<media::AudioChannelLayout, bool /*isInput*/>;
+using ChannelLayoutParam = std::tuple<AudioChannelLayout, bool /*isInput*/>;
class AudioChannelLayoutRoundTripTest :
public testing::TestWithParam<ChannelLayoutParam> {};
TEST_P(AudioChannelLayoutRoundTripTest, Aidl2Legacy2Aidl) {
@@ -192,7 +197,7 @@
INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutRoundTrip,
AudioChannelLayoutRoundTripTest,
testing::Combine(
- testing::Values(media::AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
+ testing::Values(AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
make_ACL_ChannelIndex2(), make_ACL_ChannelIndexArbitrary()),
testing::Values(false, true)));
INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip,
@@ -216,7 +221,7 @@
make_ADD_DefaultOut(), make_ADD_WiredHeadset(), make_ADD_BtScoHeadset()));
class AudioFormatDescriptionRoundTripTest :
- public testing::TestWithParam<media::AudioFormatDescription> {};
+ public testing::TestWithParam<AudioFormatDescription> {};
TEST_P(AudioFormatDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
const auto initial = GetParam();
auto conv = aidl2legacy_AudioFormatDescription_audio_format_t(initial);
@@ -227,4 +232,4 @@
}
INSTANTIATE_TEST_SUITE_P(AudioFormatDescriptionRoundTrip,
AudioFormatDescriptionRoundTripTest,
- testing::Values(make_AFD_Invalid(), media::AudioFormatDescription{}, make_AFD_Pcm16Bit()));
+ testing::Values(make_AFD_Invalid(), AudioFormatDescription{}, make_AFD_Pcm16Bit()));
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
index 3bef55b..727b86f 100644
--- a/media/libaudiofoundation/Android.bp
+++ b/media/libaudiofoundation/Android.bp
@@ -24,9 +24,11 @@
"libmedia_helper_headers",
],
static_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
],
export_static_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
],
host_supported: true,
@@ -52,6 +54,7 @@
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
"libaudioutils",
@@ -63,6 +66,7 @@
],
export_shared_lib_headers: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
],
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index 47b2d54..2d44f4a 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -27,6 +27,8 @@
namespace android {
+using media::audio::common::AudioChannelLayout;
+
bool operator == (const AudioProfile &left, const AudioProfile &right)
{
return (left.getFormat() == right.getFormat()) &&
@@ -160,7 +162,7 @@
parcelable.name = mName;
parcelable.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(mFormat));
parcelable.channelMasks = VALUE_OR_RETURN(
- convertContainer<std::vector<media::AudioChannelLayout>>(
+ convertContainer<std::vector<AudioChannelLayout>>(
mChannelMasks,
[isInput](audio_channel_mask_t m) {
return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
@@ -184,7 +186,7 @@
aidl2legacy_AudioFormatDescription_audio_format_t(parcelable.format));
legacy->mChannelMasks = VALUE_OR_RETURN(
convertContainer<ChannelMaskSet>(parcelable.channelMasks,
- [isInput](const media::AudioChannelLayout& l) {
+ [isInput](const AudioChannelLayout& l) {
return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
}));
legacy->mSamplingRates = VALUE_OR_RETURN(
diff --git a/media/libaudiofoundation/tests/Android.bp b/media/libaudiofoundation/tests/Android.bp
index f3cd446..3f1fbea 100644
--- a/media/libaudiofoundation/tests/Android.bp
+++ b/media/libaudiofoundation/tests/Android.bp
@@ -18,6 +18,7 @@
],
static_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
"libaudiofoundation",
diff --git a/media/libeffects/preprocessing/.clang-format b/media/libeffects/preprocessing/.clang-format
deleted file mode 120000
index f1b4f69..0000000
--- a/media/libeffects/preprocessing/.clang-format
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../build/soong/scripts/system-clang-format
\ No newline at end of file
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
index 0142d56..8e6c2ff 100644
--- a/media/libheadtracking/SensorPoseProvider.cpp
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -144,6 +144,9 @@
bool waitInitFinished() { return mInitPromise.get_future().get(); }
void threadFunc(const char* packageName) {
+ // Obtain looper.
+ mLooper = ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+
// The number 19 is arbitrary, only useful if using multiple objects on the same looper.
constexpr int kIdent = 19;
@@ -155,9 +158,6 @@
return;
}
- // Obtain looper.
- mLooper = ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
-
// Create event queue.
ASensorEventQueue* queue =
ASensorManager_createEventQueue(sensor_manager, mLooper, kIdent, nullptr, nullptr);
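The hunk above moves ALooper_prepare() to the very top of threadFunc(), ahead of the early-return path. A hedged, stand-alone sketch of that ordering using the public NDK sensor APIs (this is not the SensorPoseProvider code itself, only the prepare-before-createEventQueue pattern the reorder enforces):

```cpp
#include <android/looper.h>
#include <android/sensor.h>

void sensorThreadSketch(const char* packageName) {
    // Prepare the looper first, on the thread that will poll the queue.
    ALooper* looper = ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);

    ASensorManager* manager = ASensorManager_getInstanceForPackage(packageName);
    if (manager == nullptr) return;  // looper already set up even on this early exit

    constexpr int kIdent = 19;  // arbitrary ident, as in the original comment
    ASensorEventQueue* queue =
            ASensorManager_createEventQueue(manager, looper, kIdent, nullptr, nullptr);
    if (queue == nullptr) return;
    (void)queue;  // event polling elided
}
```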
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 9c1b563..4a2523f 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -347,6 +347,7 @@
shared_libs: [
"android.hidl.token@1.0-utils",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"av-types-aidl-cpp",
"liblog",
diff --git a/media/libmediahelper/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
index d3a517f..97b5b95 100644
--- a/media/libmediahelper/TypeConverter.cpp
+++ b/media/libmediahelper/TypeConverter.cpp
@@ -50,6 +50,8 @@
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_MUTE_HAPTIC),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_SYSTEM_CAPTURE),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CAPTURE_PRIVATE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CONTENT_SPATIALIZED),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NEVER_SPATIALIZE),
TERMINATOR
};
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 0ec5ad5..5c4ec17 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -60,6 +60,10 @@
const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1 = "audio/mha1";
const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1 = "audio/mhm1";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3 = "audio/mhm1.03";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4 = "audio/mhm1.04";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3 = "audio/mhm1.0d";
+const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4 = "audio/mhm1.0e";
const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
@@ -75,7 +79,47 @@
const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS = "audio/amr-wb+";
const char *MEDIA_MIMETYPE_AUDIO_APTX = "audio/aptx";
const char *MEDIA_MIMETYPE_AUDIO_DRA = "audio/vnd.dra";
-const char *MEDIA_MIMETYPE_AUDIO_AAC_FORMAT = "audio/aac";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT = "audio/vnd.dolby.mat";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0 = "audio/vnd.dolby.mat.1.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0 = "audio/vnd.dolby.mat.2.0";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1 = "audio/vnd.dolby.mat.2.1";
+const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD = "audio/vnd.dolby.mlp";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4 = "audio/mp4a.40";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN = "audio/mp4a.40.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LC = "audio/mp4a.40.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR = "audio/mp4a.40.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP = "audio/mp4a.40.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1 = "audio/mp4a.40.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE = "audio/mp4a.40.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC = "audio/mp4a.40.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LD = "audio/mp4a.40.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2 = "audio/mp4a.40.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD = "audio/mp4a.40.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE = "audio/mp4a.40.42";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF = "audio/aac-adif";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN = "audio/aac-adts.01";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC = "audio/aac-adts.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR = "audio/aac-adts.03";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP = "audio/aac-adts.04";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1 = "audio/aac-adts.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE = "audio/aac-adts.06";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC = "audio/aac-adts.17";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD = "audio/aac-adts.23";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2 = "audio/aac-adts.29";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD = "audio/aac-adts.39";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE = "audio/aac-adts.42";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC = "audio/mp4a-latm.02";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1 = "audio/mp4a-latm.05";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2 = "audio/mp4a-latm.29";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC61937 = "audio/x-iec61937";
+// Note: not in the IANA registry.
+const char *MEDIA_MIMETYPE_AUDIO_IEC60958 = "audio/x-iec60958";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index afa0c6d..fb8c299 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -62,6 +62,10 @@
extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1;
extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4;
extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
extern const char *MEDIA_MIMETYPE_AUDIO_WMA;
@@ -77,7 +81,40 @@
extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
extern const char *MEDIA_MIMETYPE_AUDIO_APTX;
extern const char *MEDIA_MIMETYPE_AUDIO_DRA;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC_FORMAT;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_1_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_0;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_MAT_2_1;
+extern const char *MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MP4;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADIF;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC61937;
+extern const char *MEDIA_MIMETYPE_AUDIO_IEC60958;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index 0b0acbf..7acf735 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -29,7 +29,6 @@
],
include_dirs: [
- "frameworks/av/media/libstagefright",
"frameworks/native/include/media/openmax",
],
@@ -65,6 +64,8 @@
header_libs: [
"libbase_headers",
+ "libstagefright_headers",
+ "libstagefright_httplive_headers",
],
static_libs: [
@@ -74,3 +75,8 @@
],
}
+
+cc_library_headers {
+ name: "libstagefright_httplive_headers",
+ export_include_dirs: ["."],
+}
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 3bad015..0d7cadd 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -23,7 +23,7 @@
#include "M3UParser.h"
#include "PlaylistFetcher.h"
-#include "mpeg2ts/AnotherPacketSource.h"
+#include <AnotherPacketSource.h>
#include <cutils/properties.h>
#include <media/MediaHTTPService.h>
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 7a6d487..ceea41d 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -24,7 +24,7 @@
#include <utils/String8.h>
-#include "mpeg2ts/ATSParser.h"
+#include <ATSParser.h>
namespace android {
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index b23aa8a..907b326 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -24,9 +24,9 @@
#include "HTTPDownloader.h"
#include "LiveSession.h"
#include "M3UParser.h"
-#include "include/ID3.h"
-#include "mpeg2ts/AnotherPacketSource.h"
-#include "mpeg2ts/HlsSampleDecryptor.h"
+#include <ID3.h>
+#include <AnotherPacketSource.h>
+#include <HlsSampleDecryptor.h>
#include <datasource/DataURISource.h>
#include <media/stagefright/foundation/ABitReader.h>
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 5d3f9c1..2e28164 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -21,7 +21,7 @@
#include <media/stagefright/foundation/AHandler.h>
#include <openssl/aes.h>
-#include "mpeg2ts/ATSParser.h"
+#include <ATSParser.h>
#include "LiveSession.h"
namespace android {
diff --git a/media/libstagefright/httplive/fuzzer/Android.bp b/media/libstagefright/httplive/fuzzer/Android.bp
new file mode 100644
index 0000000..14097b0
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/Android.bp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_media_libstagefright_httplive_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: [
+ "frameworks_av_media_libstagefright_httplive_license",
+ ],
+}
+
+cc_fuzz {
+ name: "httplive_fuzzer",
+ srcs: [
+ "httplive_fuzzer.cpp",
+ ],
+ static_libs: [
+ "libstagefright_httplive",
+ "libstagefright_id3",
+ "libstagefright_metadatautils",
+ "libstagefright_mpeg2support",
+ "liblog",
+ "libcutils",
+ "libdatasource",
+ "libmedia",
+ "libstagefright",
+ "libutils",
+ ],
+ header_libs: [
+ "libbase_headers",
+ "libstagefright_foundation_headers",
+ "libstagefright_headers",
+ "libstagefright_httplive_headers",
+ ],
+ shared_libs: [
+ "libcrypto",
+ "libstagefright_foundation",
+ "libhidlbase",
+ "libhidlmemory",
+ "android.hidl.allocator@1.0",
+ ],
+ corpus: ["corpus/*"],
+ dictionary: "httplive_fuzzer.dict",
+ fuzz_config: {
+ cc: [
+ "android-media-fuzzing-reports@google.com",
+ ],
+ componentid: 155276,
+ },
+}
diff --git a/media/libstagefright/httplive/fuzzer/README.md b/media/libstagefright/httplive/fuzzer/README.md
new file mode 100644
index 0000000..3a64ea4
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/README.md
@@ -0,0 +1,56 @@
+# Fuzzer for libstagefright_httplive
+
+## Plugin Design Considerations
+The fuzzer plugin for libstagefright_httplive is designed based on an understanding of the library and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. Also, several .m3u8 files are hand-crafted and added to the corpus directory to increase code coverage. This ensures more code paths are reached by the fuzzer.
+
+libstagefright_httplive supports the following parameters:
+1. Final Result (parameter name: `finalResult`)
+2. Flags (parameter name: `flags`)
+3. Time Us (parameter name: `timeUs`)
+4. Track Index (parameter name: `trackIndex`)
+5. Index (parameter name: `index`)
+6. Select (parameter name: `select`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `finalResult` | `-34` to `-1` | Value obtained from FuzzedDataProvider|
+| `flags` | `0` to `1` | Value obtained from FuzzedDataProvider|
+| `timeUs` | `0` to `10000000` | Value obtained from FuzzedDataProvider|
+| `trackIndex` | `0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `index` | `0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider|
+| `select` | `True` or `False` | Value obtained from FuzzedDataProvider|
+
+This also ensures that the plugin is always deterministic for any given input.
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the httplive module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the httplive_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+ $ mm -j$(nproc) httplive_fuzzer
+```
+#### Steps to run
+To run on device
+```
+ $ adb push $ANDROID_PRODUCT_OUT/data/fuzz/${TARGET_ARCH}/lib /data/fuzz/${TARGET_ARCH}/lib
+ $ adb push $ANDROID_PRODUCT_OUT/data/fuzz/${TARGET_ARCH}/httplive_fuzzer /data/fuzz/${TARGET_ARCH}/httplive_fuzzer
+ $ adb shell /data/fuzz/${TARGET_ARCH}/httplive_fuzzer/httplive_fuzzer /data/fuzz/${TARGET_ARCH}/httplive_fuzzer/corpus
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
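The parameter table in the README above maps fuzzer inputs to value ranges. A minimal sketch (not the shipped httplive_fuzzer.cpp) of how such values are typically drawn deterministically from FuzzedDataProvider; the variable names are illustrative only.

```cpp
#include <cstddef>
#include <cstdint>

#include <fuzzer/FuzzedDataProvider.h>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    FuzzedDataProvider fdp(data, size);
    // Ranges taken from the README table; the same input always yields the same values.
    const int32_t finalResult = fdp.ConsumeIntegralInRange<int32_t>(-34, -1);
    const int32_t flags = fdp.ConsumeIntegralInRange<int32_t>(0, 1);
    const int64_t timeUs = fdp.ConsumeIntegralInRange<int64_t>(0, 10000000);
    const bool select = fdp.ConsumeBool();
    (void)finalResult; (void)flags; (void)timeUs; (void)select;
    return 0;
}
```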
diff --git a/media/libstagefright/httplive/fuzzer/corpus/crypt.key b/media/libstagefright/httplive/fuzzer/corpus/crypt.key
new file mode 100644
index 0000000..f9d5d7f
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/crypt.key
@@ -0,0 +1,2 @@
+Û
+ÏüÐ5Ð_xïHÎ3
diff --git a/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8
new file mode 100644
index 0000000..32b0eac
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/encrypted.m3u8
@@ -0,0 +1,12 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:10
+#EXT-X-ALLOW-CACHE:YES
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXT-X-VERSION:3
+#EXT-X-MEDIA-SEQUENCE:1
+#EXT-X-KEY:METHOD=AES-128,URI="../../fuzz/arm64/httplive_fuzzer/corpus/crypt.key"
+#EXTINF:10.000,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5.092,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8
new file mode 100644
index 0000000..9338e04
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/hls.m3u8
@@ -0,0 +1,8 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:10
+#EXT-X-MEDIA-SEQUENCE:0
+#EXTINF:10, no desc
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10, no desc
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8
new file mode 100644
index 0000000..e1eff58
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index1.m3u8
@@ -0,0 +1,14 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence0.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8
new file mode 100644
index 0000000..37a0189
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index2.m3u8
@@ -0,0 +1,6 @@
+#EXTM3U
+#EXT-X-INDEPENDENT-SEGMENTS
+#EXT-X-STREAM-INF:CLOSED-CAPTIONS=NONE,BANDWIDTH=165340,RESOLUTION=256x144,CODECS="mp4a.40.5,avc1.42c00b"
+https://non.existentsite.com/test-doesnt-dereference-these-paths/prog_index.m3u8
+#EXT-X-STREAM-INF:CLOSED-CAPTIONS=NONE,BANDWIDTH=344388,RESOLUTION=426x240,CODECS="mp4a.40.5,avc1.4d4015"
+https://non.existentsite.com/test-doesnt-dereference-these-paths/prog_index1.m3u8
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8
new file mode 100644
index 0000000..1b7f489
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index3.m3u8
@@ -0,0 +1,13 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=AES-128,URI="https://demo.unified-streaming.com/video/tears-of-steel/aes.key",IV=0X99b74007b6254e4bd1c6e03631cad15b
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence3.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8
new file mode 100644
index 0000000..89ba37c
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index4.m3u8
@@ -0,0 +1,15 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;charset=utf-8,a4cd9995a1aa91e1",IV=0X99b74007b6254e4bd1c6e03631cad15b
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence0.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8
new file mode 100644
index 0000000..2120de4
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index5.m3u8
@@ -0,0 +1,14 @@
+#EXTM3U
+#EXT-X-TARGETDURATION:11
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-MEDIA-SEQUENCE:0
+#EXT-X-VERSION:4
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:10@0
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:20@10
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:10.0,
+#EXT-X-BYTERANGE:80
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8
new file mode 100644
index 0000000..588368a
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index6.m3u8
@@ -0,0 +1,12 @@
+#EXTM3U
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=AES-128,URI="data:text/plain;charset=utf-8,a4cd9995a1aa91e1",IV=0x30303030303030303030303030303030
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence2.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence3.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8
new file mode 100644
index 0000000..b09948e
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index7.m3u8
@@ -0,0 +1,46 @@
+#EXTM3U
+#EXT-X-VERSION:4
+## Created with Unified Streaming Platform (version=1.11.3-24438)
+#EXT-X-SESSION-KEY:METHOD=AES-128,URI="https://demo.unified-streaming.com/video/tears-of-steel/aes.key"
+
+# AUDIO groups
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-aacl-64",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,CHANNELS="2"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio-aacl-128",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,CHANNELS="2"
+
+# SUBTITLES groups
+#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="textstream",LANGUAGE="en",NAME="English",DEFAULT=YES,AUTOSELECT=YES,URI="tears-of-steel-aes-textstream_eng=1000.m3u8"
+#EXT-X-MEDIA:TYPE=SUBTITLES,GROUP-ID="textstream",LANGUAGE="ru",NAME="Russian",AUTOSELECT=YES,URI="tears-of-steel-aes-textstream_rus=1000.m3u8"
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=494000,CODECS="mp4a.40.2,avc1.42C00D",RESOLUTION=224x100,FRAME-RATE=24,AUDIO="audio-aacl-64",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=64008-video_eng=401000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=933000,CODECS="mp4a.40.2,avc1.42C016",RESOLUTION=448x200,FRAME-RATE=24,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=751000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1198000,CODECS="mp4a.40.2,avc1.4D401F",RESOLUTION=784x350,FRAME-RATE=24,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=1001000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1728000,CODECS="mp4a.40.2,avc1.640028",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=1501000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=2469000,CODECS="mp4a.40.2,avc1.640028",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng=2200000.m3u8
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=1025000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=1680x750,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-64",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=64008-video_eng_1=902000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1368000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=2576x1150,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng_1=1161000.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1815000,CODECS="mp4a.40.2,hvc1.1.6.L150.90",RESOLUTION=3360x1500,FRAME-RATE=24,VIDEO-RANGE=SDR,AUDIO="audio-aacl-128",SUBTITLES="textstream",CLOSED-CAPTIONS=NONE
+tears-of-steel-aes-audio_eng=128002-video_eng_1=1583000.m3u8
+
+# variants
+#EXT-X-STREAM-INF:BANDWIDTH=69000,CODECS="mp4a.40.2",AUDIO="audio-aacl-64",SUBTITLES="textstream"
+tears-of-steel-aes-audio_eng=64008.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=137000,CODECS="mp4a.40.2",AUDIO="audio-aacl-128",SUBTITLES="textstream"
+tears-of-steel-aes-audio_eng=128002.m3u8
+
+# keyframes
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=54000,CODECS="avc1.42C00D",RESOLUTION=224x100,URI="keyframes/tears-of-steel-aes-video_eng=401000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=100000,CODECS="avc1.42C016",RESOLUTION=448x200,URI="keyframes/tears-of-steel-aes-video_eng=751000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=133000,CODECS="avc1.4D401F",RESOLUTION=784x350,URI="keyframes/tears-of-steel-aes-video_eng=1001000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=120000,CODECS="hvc1.1.6.L150.90",RESOLUTION=1680x750,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=902000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=154000,CODECS="hvc1.1.6.L150.90",RESOLUTION=2576x1150,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=1161000.m3u8"
+#EXT-X-I-FRAME-STREAM-INF:BANDWIDTH=210000,CODECS="hvc1.1.6.L150.90",RESOLUTION=3360x1500,VIDEO-RANGE=SDR,URI="keyframes/tears-of-steel-aes-video_eng_1=1583000.m3u8"
diff --git a/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8
new file mode 100644
index 0000000..353d589
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/index8.m3u8
@@ -0,0 +1,13 @@
+#EXTM3U
+#EXT-X-VERSION:5
+
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="English stereo",LANGUAGE="en",AUTOSELECT=YES,URI="../../fuzz/arm64/httplive_fuzzer/index1.m3u8"
+
+#EXT-X-STREAM-INF:BANDWIDTH=628000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=320x180,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index1.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=928000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=480x270,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index2.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=1728000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=640x360,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index3.m3u8
+#EXT-X-STREAM-INF:BANDWIDTH=2528000,CODECS="avc1.42c00d,mp4a.40.2",RESOLUTION=960x540,AUDIO="audio"
+../../fuzz/arm64/httplive_fuzzer/index1.m3u8
diff --git a/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8 b/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8
new file mode 100644
index 0000000..eb88422
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/corpus/prog_index.m3u8
@@ -0,0 +1,17 @@
+#EXTM3U
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="eng",NAME="English",AUTOSELECT=YES,DEFAULT=YES,URI="corpus/index1.m3u8"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="fre",NAME="Français",AUTOSELECT=YES,DEFAULT=NO,URI="corpus/index1.m3u8"
+#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",LANGUAGE="sp",NAME="Espanol",AUTOSELECT=YES,DEFAULT=NO,URI="corpus/index1.m3u8"
+#EXT-X-VERSION:4
+#EXT-X-TARGETDURATION:5
+#EXT-X-KEY:METHOD=NONE
+#EXT-X-DISCONTINUITY-SEQUENCE:0
+#EXT-X-PLAYLIST-TYPE:VOD
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-DISCONTINUITY
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXTINF:5,
+https://non.existentsite.com/test-doesnt-dereference-these-paths/fileSequence1.ts
+#EXT-X-ENDLIST
diff --git a/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp
new file mode 100644
index 0000000..348d526
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <LiveDataSource.h>
+#include <LiveSession.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
+#include <media/mediaplayer_common.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/ALooperRoster.h>
+#include <string>
+#include <utils/Log.h>
+
+using namespace std;
+using namespace android;
+
+constexpr char kFileNamePrefix[] = "/data/local/tmp/httplive-";
+constexpr char kFileNameSuffix[] = ".m3u8";
+constexpr char kFileUrlPrefix[] = "file://";
+constexpr int64_t kOffSet = 0;
+constexpr int32_t kReadyMarkMs = 5000;
+constexpr int32_t kPrepareMarkMs = 1500;
+constexpr int32_t kErrorNoMax = -1;
+constexpr int32_t kErrorNoMin = -34;
+constexpr int32_t kMaxTimeUs = 1000;
+constexpr int32_t kRandomStringLength = 64;
+constexpr int32_t kRangeMin = 0;
+constexpr int32_t kRangeMax = 1000;
+
+constexpr LiveSession::StreamType kValidStreamType[] = {
+ LiveSession::STREAMTYPE_AUDIO, LiveSession::STREAMTYPE_VIDEO,
+ LiveSession::STREAMTYPE_SUBTITLES, LiveSession::STREAMTYPE_METADATA};
+
+constexpr MediaSource::ReadOptions::SeekMode kValidSeekMode[] = {
+ MediaSource::ReadOptions::SeekMode::SEEK_PREVIOUS_SYNC,
+ MediaSource::ReadOptions::SeekMode::SEEK_NEXT_SYNC,
+ MediaSource::ReadOptions::SeekMode::SEEK_CLOSEST_SYNC,
+ MediaSource::ReadOptions::SeekMode::SEEK_CLOSEST,
+ MediaSource::ReadOptions::SeekMode::SEEK_FRAME_INDEX};
+
+constexpr media_track_type kValidMediaTrackType[] = {
+ MEDIA_TRACK_TYPE_UNKNOWN, MEDIA_TRACK_TYPE_VIDEO,
+ MEDIA_TRACK_TYPE_AUDIO, MEDIA_TRACK_TYPE_TIMEDTEXT,
+ MEDIA_TRACK_TYPE_SUBTITLE, MEDIA_TRACK_TYPE_METADATA};
+
+struct TestAHandler : public AHandler {
+public:
+ TestAHandler(std::function<void()> signalEosFunction)
+ : mSignalEosFunction(signalEosFunction) {}
+ virtual ~TestAHandler() {}
+
+protected:
+ void onMessageReceived(const sp<AMessage> &msg) override {
+ int32_t what = -1;
+ msg->findInt32("what", &what);
+ switch (what) {
+ case LiveSession::kWhatError:
+ case LiveSession::kWhatPrepared:
+ case LiveSession::kWhatPreparationFailed: {
+ mSignalEosFunction();
+ break;
+ }
+ }
+ return;
+ }
+
+private:
+ std::function<void()> mSignalEosFunction;
+};
+
+struct TestMediaHTTPConnection : public MediaHTTPConnection {
+public:
+ TestMediaHTTPConnection() {}
+ virtual ~TestMediaHTTPConnection() {}
+
+ virtual bool connect(const char * /*uri*/,
+ const KeyedVector<String8, String8> * /*headers*/) {
+ return true;
+ }
+
+ virtual void disconnect() { return; }
+
+ virtual ssize_t readAt(off64_t /*offset*/, void * /*data*/, size_t size) {
+ return size;
+ }
+
+ virtual off64_t getSize() { return 0; }
+ virtual status_t getMIMEType(String8 * /*mimeType*/) { return NO_ERROR; }
+ virtual status_t getUri(String8 * /*uri*/) { return NO_ERROR; }
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPConnection);
+};
+
+struct TestMediaHTTPService : public MediaHTTPService {
+public:
+ TestMediaHTTPService() {}
+ ~TestMediaHTTPService() {}
+
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() {
+ mediaHTTPConnection = sp<TestMediaHTTPConnection>::make();
+ return mediaHTTPConnection;
+ }
+
+private:
+ sp<TestMediaHTTPConnection> mediaHTTPConnection = nullptr;
+ DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPService);
+};
+
+class HttpLiveFuzzer {
+public:
+ void process(const uint8_t *data, size_t size);
+ void deInitLiveSession();
+ ~HttpLiveFuzzer() { deInitLiveSession(); }
+
+private:
+ void invokeLiveDataSource();
+ void createM3U8File(const uint8_t *data, size_t size);
+ void initLiveDataSource();
+ void invokeLiveSession();
+ void initLiveSession();
+ void invokeDequeueAccessUnit();
+ void invokeConnectAsync();
+ void invokeSeekTo();
+ void invokeGetConfig();
+ void signalEos();
+ string generateFileName();
+ sp<LiveDataSource> mLiveDataSource = nullptr;
+ sp<LiveSession> mLiveSession = nullptr;
+ sp<ALooper> mLiveLooper = nullptr;
+ sp<TestMediaHTTPService> httpService = nullptr;
+ sp<TestAHandler> mHandler = nullptr;
+ FuzzedDataProvider *mFDP = nullptr;
+ bool mEosReached = false;
+ std::mutex mDownloadCompleteMutex;
+ std::condition_variable mConditionalVariable;
+};
+
+string HttpLiveFuzzer::generateFileName() {
+ return kFileNamePrefix + to_string(getpid()) + kFileNameSuffix;
+}
+
+void HttpLiveFuzzer::createM3U8File(const uint8_t *data, size_t size) {
+ ofstream m3u8File;
+ string currentFileName = generateFileName();
+ m3u8File.open(currentFileName, ios::out | ios::binary);
+ m3u8File.write((char *)data, size);
+ m3u8File.close();
+}
+
+void HttpLiveFuzzer::initLiveDataSource() {
+ mLiveDataSource = sp<LiveDataSource>::make();
+}
+
+void HttpLiveFuzzer::invokeLiveDataSource() {
+ initLiveDataSource();
+ size_t size = mFDP->ConsumeIntegralInRange<size_t>(kRangeMin, kRangeMax);
+ sp<ABuffer> buffer = new ABuffer(size);
+ mLiveDataSource->queueBuffer(buffer);
+ uint8_t *data = new uint8_t[size];
+ mLiveDataSource->readAtNonBlocking(kOffSet, data, size);
+ int32_t finalResult = mFDP->ConsumeIntegralInRange(kErrorNoMin, kErrorNoMax);
+ mLiveDataSource->queueEOS(finalResult);
+ mLiveDataSource->reset();
+ mLiveDataSource->countQueuedBuffers();
+ mLiveDataSource->initCheck();
+ delete[] data;
+}
+
+void HttpLiveFuzzer::initLiveSession() {
+ ALooperRoster looperRoster;
+ mHandler =
+ sp<TestAHandler>::make(std::bind(&HttpLiveFuzzer::signalEos, this));
+ mLiveLooper = sp<ALooper>::make();
+ mLiveLooper->setName("http live");
+ mLiveLooper->start();
+ sp<AMessage> notify = sp<AMessage>::make(0, mHandler);
+ httpService = new TestMediaHTTPService();
+ uint32_t flags = mFDP->ConsumeIntegral<uint32_t>();
+ mLiveSession = sp<LiveSession>::make(notify, flags, httpService);
+ mLiveLooper->registerHandler(mLiveSession);
+ looperRoster.registerHandler(mLiveLooper, mHandler);
+}
+
+void HttpLiveFuzzer::invokeDequeueAccessUnit() {
+ LiveSession::StreamType stream = mFDP->PickValueInArray(kValidStreamType);
+ sp<ABuffer> buffer;
+ mLiveSession->dequeueAccessUnit(stream, &buffer);
+}
+
+void HttpLiveFuzzer::invokeSeekTo() {
+ int64_t timeUs = mFDP->ConsumeIntegralInRange<int64_t>(0, kMaxTimeUs);
+ MediaSource::ReadOptions::SeekMode mode =
+ mFDP->PickValueInArray(kValidSeekMode);
+ mLiveSession->seekTo(timeUs, mode);
+}
+
+void HttpLiveFuzzer::invokeGetConfig() {
+ mLiveSession->getTrackCount();
+ size_t trackIndex = mFDP->ConsumeIntegral<size_t>();
+ mLiveSession->getTrackInfo(trackIndex);
+ media_track_type type = mFDP->PickValueInArray(kValidMediaTrackType);
+ mLiveSession->getSelectedTrack(type);
+ sp<MetaData> meta;
+ LiveSession::StreamType stream = mFDP->PickValueInArray(kValidStreamType);
+ mLiveSession->getStreamFormatMeta(stream, &meta);
+ mLiveSession->getKeyForStream(stream);
+ if (stream != LiveSession::STREAMTYPE_SUBTITLES) {
+ mLiveSession->getSourceTypeForStream(stream);
+ }
+}
+
+void HttpLiveFuzzer::invokeConnectAsync() {
+ // Build a "file://<path>" URL pointing at the playlist written by createM3U8File().
+ // std::string owns the concatenated buffer, so nothing has to be freed manually;
+ // connectAsync() takes a const char *, so url.c_str() is passed directly.
+ string url = kFileUrlPrefix + generateFileName();
+
+ // Random header key/value pairs exercise LiveSession's extra-headers path.
+ string str_1 = mFDP->ConsumeRandomLengthString(kRandomStringLength);
+ string str_2 = mFDP->ConsumeRandomLengthString(kRandomStringLength);
+
+ KeyedVector<String8, String8> headers;
+ headers.add(String8(str_1.c_str()), String8(str_2.c_str()));
+ mLiveSession->connectAsync(url.c_str(), &headers);
+}
+
+void HttpLiveFuzzer::invokeLiveSession() {
+ initLiveSession();
+ BufferingSettings bufferingSettings;
+ bufferingSettings.mInitialMarkMs = kPrepareMarkMs;
+ bufferingSettings.mResumePlaybackMarkMs = kReadyMarkMs;
+ mLiveSession->setBufferingSettings(bufferingSettings);
+ invokeConnectAsync();
+ std::unique_lock waitForDownloadComplete(mDownloadCompleteMutex);
+ mConditionalVariable.wait(waitForDownloadComplete,
+ [this] { return mEosReached; });
+ if (mLiveSession->isSeekable()) {
+ invokeSeekTo();
+ }
+ invokeDequeueAccessUnit();
+ size_t index = mFDP->ConsumeIntegral<size_t>();
+ bool select = mFDP->ConsumeBool();
+ mLiveSession->selectTrack(index, select);
+ mLiveSession->hasDynamicDuration();
+ int64_t firstTimeUs =
+ mFDP->ConsumeIntegralInRange<int64_t>(kRangeMin, kRangeMax);
+ int64_t timeUs = mFDP->ConsumeIntegralInRange<int64_t>(kRangeMin, kRangeMax);
+ int32_t discontinuitySeq = mFDP->ConsumeIntegral<int32_t>();
+ mLiveSession->calculateMediaTimeUs(firstTimeUs, timeUs, discontinuitySeq);
+ invokeGetConfig();
+}
+
+void HttpLiveFuzzer::process(const uint8_t *data, size_t size) {
+ mFDP = new FuzzedDataProvider(data, size);
+ createM3U8File(data, size);
+ invokeLiveDataSource();
+ invokeLiveSession();
+ delete mFDP;
+}
+
+void HttpLiveFuzzer::deInitLiveSession() {
+ if (mLiveSession != nullptr) {
+ mLiveSession->disconnect();
+ mLiveLooper->unregisterHandler(mLiveSession->id());
+ mLiveLooper->stop();
+ }
+ mLiveSession.clear();
+ mLiveLooper.clear();
+}
+
+void HttpLiveFuzzer::signalEos() {
+ {
+  // Update the flag under the same mutex the waiter uses so the notification is not missed.
+  std::lock_guard<std::mutex> lock(mDownloadCompleteMutex);
+  mEosReached = true;
+ }
+ mConditionalVariable.notify_one();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ HttpLiveFuzzer httpliveFuzzer;
+ httpliveFuzzer.process(data, size);
+ return 0;
+}
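For local debugging it can help to replay a single corpus playlist through the harness above without libFuzzer driving it. The sketch below is illustrative only and not part of the change; it assumes the harness is compiled into the same binary in place of libFuzzer's own main(), and it simply feeds one file (for example corpus/index1.m3u8) to LLVMFuzzerTestOneInput.

// Hypothetical standalone reproducer (not part of the change): link it with the harness above
// instead of libFuzzer and pass a corpus file on the command line.
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <iterator>
#include <vector>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);

int main(int argc, char **argv) {
  if (argc < 2) {
    std::fprintf(stderr, "usage: %s <corpus-file>\n", argv[0]);
    return 1;
  }
  // Read the whole playlist into memory and hand it to the harness exactly as libFuzzer would.
  std::ifstream in(argv[1], std::ios::binary);
  std::vector<uint8_t> input((std::istreambuf_iterator<char>(in)),
                             std::istreambuf_iterator<char>());
  return LLVMFuzzerTestOneInput(input.data(), input.size());
}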
diff --git a/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict
new file mode 100644
index 0000000..703cc7e
--- /dev/null
+++ b/media/libstagefright/httplive/fuzzer/httplive_fuzzer.dict
@@ -0,0 +1,15 @@
+#m3u8-Tags
+kw1="#EXTM3U"
+kw2="#EXT-X-VERSION:"
+kw3="#EXT-X-TARGETDURATION:"
+kw4="#EXT-X-PLAYLIST-TYPE:"
+kw5="#EXTINF:"
+kw6="#EXT-X-ENDLIST"
+kw7="#EXT-X-MEDIA-SEQUENCE:"
+kw8="#EXT-X-KEY:METHOD=NONE"
+kw9="#EXT-X-DISCONTINUITY:"
+kw10="#EXT-X-DISCONTINUITY-SEQUENCE:0"
+kw11="#EXT-X-STREAM-INF:BANDWIDTH="
+kw12="#EXT-X-STREAM-INF:CODECS="
+kw13="#EXT-X-BYTERANGE:"
+kw14="#EXT-X-MEDIA"
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index b035e5a..69ab242 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -200,8 +200,11 @@
AString tmp;
if (mData->mFormat->findString(name, &tmp)) {
String8 ret(tmp.c_str());
- mData->mStringCache.add(String8(name), ret);
- *out = ret.string();
+ ssize_t i = mData->mStringCache.add(String8(name), ret);
+ if (i < 0) {
+ return false;
+ }
+ *out = mData->mStringCache.valueAt(i).string();
return true;
}
return false;
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index 22e5d1a..ea85d04 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -54,6 +54,7 @@
],
shared_libs: [
+ "android.media.audio.common.types-V1-cpp",
"audioflinger-aidl-cpp",
"audioclient-types-aidl-cpp",
"av-types-aidl-cpp",
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 8a32b72..33b71c5 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -566,10 +566,12 @@
String8 result;
result.append("Clients:\n");
+ result.append(" pid heap_size\n");
for (size_t i = 0; i < mClients.size(); ++i) {
sp<Client> client = mClients.valueAt(i).promote();
if (client != 0) {
- result.appendFormat(" pid: %d\n", client->pid());
+ result.appendFormat("%6d %12zu\n", client->pid(),
+ client->heap()->getMemoryHeap()->getSize());
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 7c7f02d..123011a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -307,6 +307,8 @@
DeviceVector mDevices; /**< current devices this output is routed to */
wp<AudioPolicyMix> mPolicyMix; // non NULL when used by a dynamic policy
+ virtual uint32_t getRecommendedMuteDurationMs() const { return 0; }
+
protected:
const sp<PolicyAudioPort> mPolicyAudioPort;
AudioPolicyClientInterface * const mClientInterface;
@@ -415,6 +417,8 @@
*/
DeviceVector filterSupportedDevices(const DeviceVector &devices) const;
+ uint32_t getRecommendedMuteDurationMs() const override;
+
const sp<IOProfile> mProfile; // I/O profile this output derives from
audio_io_handle_t mIoHandle; // output handle
uint32_t mLatency; //
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index a74cefa..81828ed 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -207,6 +207,8 @@
// Number of streams currently active for this profile. This is not the number of active clients
// (AudioTrack or AudioRecord) but the number of active HAL streams.
uint32_t curActiveCount;
+ // Mute duration while changing device on this output profile.
+ uint32_t recommendedMuteDurationMs = 0;
private:
DeviceVector mSupportedDevices; // supported devices: this input/output can be routed from/to
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index f3d2326..34b5c1a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -670,6 +670,15 @@
return NO_ERROR;
}
+uint32_t SwAudioOutputDescriptor::getRecommendedMuteDurationMs() const
+{
+ if (isDuplicated()) {
+ return std::max(mOutput1->getRecommendedMuteDurationMs(),
+ mOutput2->getRecommendedMuteDurationMs());
+ }
+ return mProfile->recommendedMuteDurationMs;
+}
+
// HwAudioOutputDescriptor implementation
HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 09b614d..624ad95 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -137,6 +137,7 @@
maxOpenCount, curOpenCount);
dst->appendFormat(" - maxActiveCount: %u - curActiveCount: %u\n",
maxActiveCount, curActiveCount);
+ dst->appendFormat(" - recommendedMuteDurationMs: %u ms\n", recommendedMuteDurationMs);
}
void IOProfile::log()
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 84ed656..4dfef73 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -123,6 +123,7 @@
static constexpr const char *flags = "flags";
static constexpr const char *maxOpenCount = "maxOpenCount";
static constexpr const char *maxActiveCount = "maxActiveCount";
+ static constexpr const char *recommendedMuteDurationMs = "recommendedMuteDurationMs";
};
// Children: GainTraits
@@ -496,6 +497,13 @@
if (!maxActiveCount.empty()) {
convertTo(maxActiveCount, mixPort->maxActiveCount);
}
+
+ std::string recommendedMuteDurationMsLiteral =
+ getXmlAttribute(child, Attributes::recommendedMuteDurationMs);
+ if (!recommendedMuteDurationMsLiteral.empty()) {
+ convertTo(recommendedMuteDurationMsLiteral, mixPort->recommendedMuteDurationMs);
+ }
+
// Deserialize children
AudioGainTraits::Collection gains;
status = deserializeCollection<AudioGainTraits>(child, &gains, NULL);
diff --git a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
index 7000cd9..8584702 100644
--- a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
+++ b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
@@ -163,7 +163,9 @@
AUDIO_FLAG_BYPASS_MUTE, AUDIO_FLAG_LOW_LATENCY,
AUDIO_FLAG_DEEP_BUFFER, AUDIO_FLAG_NO_MEDIA_PROJECTION,
AUDIO_FLAG_MUTE_HAPTIC, AUDIO_FLAG_NO_SYSTEM_CAPTURE,
- AUDIO_FLAG_CAPTURE_PRIVATE};
+ AUDIO_FLAG_CAPTURE_PRIVATE, AUDIO_FLAG_CONTENT_SPATIALIZED,
+ AUDIO_FLAG_NEVER_SPATIALIZE,
+ };
std::vector<audio_policy_dev_state_t> kAudioPolicyDeviceStates = {
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 6f87bf0..87f4694 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -6294,11 +6294,18 @@
// different per device volumes
if (outputDesc->isActive() && (devices != prevDevices)) {
uint32_t tempMuteWaitMs = outputDesc->latency() * 2;
- // temporary mute duration is conservatively set to 4 times the reported latency
- uint32_t tempMuteDurationMs = outputDesc->latency() * 4;
+
if (muteWaitMs < tempMuteWaitMs) {
muteWaitMs = tempMuteWaitMs;
}
+
+ // If a recommended mute duration is defined, use it in place of the default to avoid
+ // truncating notifications at the start; the required duration depends on how long the HAL
+ // takes to change the audio path. Otherwise, conservatively use 4 times the reported latency.
+ uint32_t tempRecommendedMuteDuration = outputDesc->getRecommendedMuteDurationMs();
+ uint32_t tempMuteDurationMs = tempRecommendedMuteDuration > 0 ?
+ tempRecommendedMuteDuration : outputDesc->latency() * 4;
+
for (const auto &activeVs : outputDesc->getActiveVolumeSources()) {
// make sure that we do not start the temporary mute period too early in case of
// delayed device change
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index 2c8aaad..3723a69 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -17,6 +17,7 @@
"AudioPolicyService.cpp",
"CaptureStateNotifier.cpp",
"Spatializer.cpp",
+ "SpatializerPoseController.cpp",
],
include_dirs: [
@@ -25,6 +26,7 @@
shared_libs: [
"libactivitymanager_aidl",
+ "libandroid",
"libaudioclient",
"libaudioclient_aidl_conversion",
"libaudiofoundation",
@@ -36,14 +38,18 @@
"libcutils",
"libeffectsconfig",
"libhardware_legacy",
+ "libheadtracking",
+ "libheadtracking-binding",
"liblog",
"libmedia_helper",
"libmediametrics",
"libmediautils",
"libpermission",
+ "libsensor",
"libsensorprivacy",
"libshmemcompat",
"libutils",
+ "android.media.audio.common.types-V1-cpp",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -76,6 +82,8 @@
export_shared_lib_headers: [
"libactivitymanager_aidl",
+ "libheadtracking",
+ "libheadtracking-binding",
"libsensorprivacy",
"framework-permission-aidl-cpp",
],
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 95c1bab..3ff927d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -44,6 +44,7 @@
using binder::Status;
using aidl_utils::binderStatusFromStatusT;
using content::AttributionSourceState;
+using media::audio::common::AudioFormatDescription;
const std::vector<audio_usage_t>& SYSTEM_USAGES = {
AUDIO_USAGE_CALL_ASSISTANT,
@@ -99,7 +100,7 @@
const media::AudioDevice& deviceAidl,
media::AudioPolicyDeviceState stateAidl,
const std::string& deviceNameAidl,
- const media::AudioFormatDescription& encodedFormatAidl) {
+ const AudioFormatDescription& encodedFormatAidl) {
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
audio_policy_dev_state_t state = VALUE_OR_RETURN_BINDER_STATUS(
@@ -149,7 +150,7 @@
Status AudioPolicyService::handleDeviceConfigChange(
const media::AudioDevice& deviceAidl,
const std::string& deviceNameAidl,
- const media::AudioFormatDescription& encodedFormatAidl) {
+ const AudioFormatDescription& encodedFormatAidl) {
audio_devices_t device = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioDeviceDescription_audio_devices_t(deviceAidl.type));
audio_format_t encodedFormat = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1827,7 +1828,7 @@
}
Status AudioPolicyService::getSurroundFormats(media::Int* count,
- std::vector<media::AudioFormatDescription>* formats,
+ std::vector<AudioFormatDescription>* formats,
std::vector<bool>* formatsEnabled) {
unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
convertIntegral<unsigned int>(count->value));
@@ -1860,7 +1861,7 @@
}
Status AudioPolicyService::getReportedSurroundFormats(
- media::Int* count, std::vector<media::AudioFormatDescription>* formats) {
+ media::Int* count, std::vector<AudioFormatDescription>* formats) {
unsigned int numSurroundFormats = VALUE_OR_RETURN_BINDER_STATUS(
convertIntegral<unsigned int>(count->value));
if (numSurroundFormats > MAX_ITEMS_PER_LIST) {
@@ -1887,7 +1888,7 @@
}
Status AudioPolicyService::getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<media::AudioFormatDescription>* _aidl_return) {
+ std::vector<AudioFormatDescription>* _aidl_return) {
std::vector<audio_format_t> formats;
if (mAudioPolicyManager == NULL) {
@@ -1898,14 +1899,14 @@
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
mAudioPolicyManager->getHwOffloadEncodingFormatsSupportedForA2DP(&formats)));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- convertContainer<std::vector<media::AudioFormatDescription>>(
+ convertContainer<std::vector<AudioFormatDescription>>(
formats,
legacy2aidl_audio_format_t_AudioFormatDescription));
return Status::ok();
}
Status AudioPolicyService::setSurroundFormatEnabled(
- const media::AudioFormatDescription& audioFormatAidl, bool enabled) {
+ const AudioFormatDescription& audioFormatAidl, bool enabled) {
audio_format_t audioFormat = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioFormatDescription_audio_format_t(audioFormatAidl));
if (mAudioPolicyManager == NULL) {
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 97d4c00..129c757 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -49,6 +49,7 @@
namespace android {
using content::AttributionSourceState;
+using media::audio::common::AudioFormatDescription;
// ----------------------------------------------------------------------------
@@ -74,13 +75,13 @@
const media::AudioDevice& device,
media::AudioPolicyDeviceState state,
const std::string& deviceName,
- const media::AudioFormatDescription& encodedFormat) override;
+ const AudioFormatDescription& encodedFormat) override;
binder::Status getDeviceConnectionState(const media::AudioDevice& device,
media::AudioPolicyDeviceState* _aidl_return) override;
binder::Status handleDeviceConfigChange(
const media::AudioDevice& device,
const std::string& deviceName,
- const media::AudioFormatDescription& encodedFormat) override;
+ const AudioFormatDescription& encodedFormat) override;
binder::Status setPhoneState(media::AudioMode state, int32_t uid) override;
binder::Status setForceUse(media::AudioPolicyForceUse usage,
media::AudioPolicyForcedConfig config) override;
@@ -199,13 +200,13 @@
const media::AudioDeviceDescription& device,
float* _aidl_return) override;
binder::Status getSurroundFormats(media::Int* count,
- std::vector<media::AudioFormatDescription>* formats,
+ std::vector<AudioFormatDescription>* formats,
std::vector<bool>* formatsEnabled) override;
binder::Status getReportedSurroundFormats(
- media::Int* count, std::vector<media::AudioFormatDescription>* formats) override;
+ media::Int* count, std::vector<AudioFormatDescription>* formats) override;
binder::Status getHwOffloadEncodingFormatsSupportedForA2DP(
- std::vector<media::AudioFormatDescription>* _aidl_return) override;
- binder::Status setSurroundFormatEnabled(const media::AudioFormatDescription& audioFormat,
+ std::vector<AudioFormatDescription>* _aidl_return) override;
+ binder::Status setSurroundFormatEnabled(const AudioFormatDescription& audioFormat,
bool enabled) override;
binder::Status setAssistantUid(int32_t uid) override;
binder::Status setHotwordDetectionServiceUid(int32_t uid) override;
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
new file mode 100644
index 0000000..f0d7f7c
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "SpatializerPoseController.h"
+
+#define LOG_TAG "SpatializerPoseController"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+#include <utils/SystemClock.h>
+
+namespace android {
+
+using media::createHeadTrackingProcessor;
+using media::HeadTrackingMode;
+using media::HeadTrackingProcessor;
+using media::Pose3f;
+using media::SensorPoseProvider;
+using media::Twist3f;
+
+using namespace std::chrono_literals;
+
+namespace {
+
+// This is how fast, in m/s, we allow position to shift during rate-limiting.
+constexpr float kMaxTranslationalVelocity = 2;
+
+// This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
+constexpr auto kMaxRotationalVelocity = 4 * M_PI;
+
+// This should be set to the typical time scale over which the translation sensors in use drift. This
+// means, loosely, for how long we can trust the reading to be "accurate enough". This would
+// determine the time constants used for high-pass filtering those readings. If the value is set
+// too high, we may experience drift. If it is set too low, we may experience poses tending toward
+// identity too fast.
+constexpr auto kTranslationalDriftTimeConstant = 20s;
+
+// This should be set to the typical time scale over which the rotation sensors in use drift. This
+// means, loosely, for how long we can trust the reading to be "accurate enough". This would
+// determine the time constants used for high-pass filtering those readings. If the value is set
+// too high, we may experience drift. If it is set too low, we may experience poses tending toward
+// identity too fast.
+constexpr auto kRotationalDriftTimeConstant = 20s;
+
+// This is how far into the future we predict the head pose, using linear extrapolation based on
+// twist (velocity). It should be set to a value that matches the characteristic durations of moving
+// one's head. The higher we set this, the more latency we are able to reduce, but setting this too
+// high will result in high prediction errors whenever the head accelerates (changes velocity).
+constexpr auto kPredictionDuration = 10ms;
+
+// After losing this many consecutive samples from either sensor, we would treat the measurement as
+// stale.
+constexpr auto kMaxLostSamples = 4;
+
+// Time units for system clock ticks. This is what the Sensor Framework timestamps represent and
+// what we use for pose filtering.
+using Ticks = std::chrono::nanoseconds;
+
+// How many ticks in a second.
+constexpr auto kTicksPerSecond = Ticks::period::den;
+
+} // namespace
+
+SpatializerPoseController::SpatializerPoseController(Listener* listener,
+ std::chrono::microseconds sensorPeriod,
+ std::chrono::microseconds maxUpdatePeriod)
+ : mListener(listener),
+ mSensorPeriod(sensorPeriod),
+ mPoseProvider(SensorPoseProvider::create("headtracker", this)),
+ mProcessor(createHeadTrackingProcessor(HeadTrackingProcessor::Options{
+ .maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond,
+ .maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond,
+ .translationalDriftTimeConstant = Ticks(kTranslationalDriftTimeConstant).count(),
+ .rotationalDriftTimeConstant = Ticks(kRotationalDriftTimeConstant).count(),
+ .freshnessTimeout = Ticks(sensorPeriod * kMaxLostSamples).count(),
+ .predictionDuration = Ticks(kPredictionDuration).count(),
+ })),
+ mThread([this, maxUpdatePeriod] {
+ while (true) {
+ {
+ std::unique_lock lock(mMutex);
+ mCondVar.wait_for(lock, maxUpdatePeriod,
+ [this] { return mShouldExit || mShouldCalculate; });
+ if (mShouldExit) {
+ ALOGV("Exiting thread");
+ return;
+ }
+ calculate_l();
+ if (!mCalculated) {
+ mCalculated = true;
+ mCondVar.notify_all();
+ }
+ mShouldCalculate = false;
+ }
+ }
+ }) {}
+
+SpatializerPoseController::~SpatializerPoseController() {
+ {
+ std::unique_lock lock(mMutex);
+ mShouldExit = true;
+ mCondVar.notify_all();
+ }
+ mThread.join();
+}
+
+void SpatializerPoseController::setHeadSensor(const ASensor* sensor) {
+ std::lock_guard lock(mMutex);
+ // Stop current sensor, if valid.
+ if (mHeadSensor != SensorPoseProvider::INVALID_HANDLE) {
+ mPoseProvider->stopSensor(mHeadSensor);
+ }
+ // Start new sensor, if valid.
+ mHeadSensor = sensor != nullptr ? mPoseProvider->startSensor(sensor, mSensorPeriod)
+ : SensorPoseProvider::INVALID_HANDLE;
+ mProcessor->recenter();
+}
+
+void SpatializerPoseController::setScreenSensor(const ASensor* sensor) {
+ std::lock_guard lock(mMutex);
+ // Stop current sensor, if valid.
+ if (mScreenSensor != SensorPoseProvider::INVALID_HANDLE) {
+ mPoseProvider->stopSensor(mScreenSensor);
+ }
+ // Start new sensor, if valid.
+ mScreenSensor = sensor != nullptr ? mPoseProvider->startSensor(sensor, mSensorPeriod)
+ : SensorPoseProvider::INVALID_HANDLE;
+ mProcessor->recenter();
+}
+
+void SpatializerPoseController::setDesiredMode(HeadTrackingMode mode) {
+ std::lock_guard lock(mMutex);
+ mProcessor->setDesiredMode(mode);
+}
+
+void SpatializerPoseController::setScreenToStagePose(const Pose3f& screenToStage) {
+ std::lock_guard lock(mMutex);
+ mProcessor->setScreenToStagePose(screenToStage);
+}
+
+void SpatializerPoseController::setDisplayOrientation(float physicalToLogicalAngle) {
+ std::lock_guard lock(mMutex);
+ mProcessor->setDisplayOrientation(physicalToLogicalAngle);
+}
+
+void SpatializerPoseController::calculateAsync() {
+ std::lock_guard lock(mMutex);
+ mShouldCalculate = true;
+ mCondVar.notify_all();
+}
+
+void SpatializerPoseController::waitUntilCalculated() {
+ std::unique_lock lock(mMutex);
+ mCondVar.wait(lock, [this] { return mCalculated; });
+}
+
+void SpatializerPoseController::calculate_l() {
+ Pose3f headToStage;
+ HeadTrackingMode mode;
+ mProcessor->calculate(elapsedRealtimeNano());
+ headToStage = mProcessor->getHeadToStagePose();
+ mode = mProcessor->getActualMode();
+ mListener->onHeadToStagePose(headToStage);
+ if (!mActualMode.has_value() || mActualMode.value() != mode) {
+ mActualMode = mode;
+ mListener->onActualModeChange(mode);
+ }
+}
+
+void SpatializerPoseController::recenter() {
+ std::lock_guard lock(mMutex);
+ mProcessor->recenter();
+}
+
+void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose,
+ const std::optional<Twist3f>& twist) {
+ std::lock_guard lock(mMutex);
+ if (sensor == mHeadSensor) {
+ mProcessor->setWorldToHeadPose(timestamp, pose, twist.value_or(Twist3f()));
+ } else if (sensor == mScreenSensor) {
+ mProcessor->setWorldToScreenPose(timestamp, pose);
+ }
+}
+
+} // namespace android
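The constants above are expressed per second, while the HeadTrackingProcessor options are filled in per tick, which is why the initializer divides the velocity limits by kTicksPerSecond and wraps the time constants in Ticks. The standalone sketch below (illustrative only, not part of the change) just reproduces that arithmetic with the same literal values so the resulting magnitudes are visible.

// Illustrative only, not part of the change: shows the per-second -> per-tick conversion that the
// HeadTrackingProcessor::Options initializer above performs, with Ticks = std::chrono::nanoseconds.
#include <chrono>
#include <cmath>
#include <cstdio>

int main() {
  using Ticks = std::chrono::nanoseconds;
  using namespace std::chrono_literals;
  constexpr auto kTicksPerSecond = Ticks::period::den;   // 1'000'000'000 ticks per second
  constexpr float kMaxTranslationalVelocity = 2;         // m/s, as in the constants above
  const float kMaxRotationalVelocity = 4 * M_PI;         // rad/s, as in the constants above

  std::printf("translation limit: %.3e m/tick\n", kMaxTranslationalVelocity / kTicksPerSecond);
  std::printf("rotation limit:    %.3e rad/tick\n", kMaxRotationalVelocity / kTicksPerSecond);
  std::printf("drift time const:  %lld ticks\n", static_cast<long long>(Ticks(20s).count()));
  return 0;
}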
diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h
new file mode 100644
index 0000000..619dc7b
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <chrono>
+#include <condition_variable>
+#include <limits>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+#include <media/HeadTrackingProcessor.h>
+#include <media/SensorPoseProvider.h>
+
+namespace android {
+
+/**
+ * This class encapsulates the logic for pose processing, intended for driving a spatializer effect.
+ * This includes integration with the Sensor sub-system for retrieving sensor data, doing all the
+ * necessary processing, etc.
+ *
+ * Calculations happen on a dedicated thread and are published to the client via the Listener interface.
+ * A calculation may be triggered in one of two ways:
+ * - By calling calculateAsync() - calculation will be kicked off in the background.
+ * - By setting a timeout in the ctor, a calculation will be triggered after the timeout has
+ * elapsed since the last calculateAsync() call.
+ *
+ * This class is thread-safe. Callbacks are invoked with the lock held, so it is illegal to call
+ * into this module from the callbacks.
+ */
+class SpatializerPoseController : private media::SensorPoseProvider::Listener {
+ public:
+ /**
+ * Listener interface for getting pose and mode updates.
+ * Methods will always be invoked from a designated thread. Calling into the parent class from
+ * within the callbacks is disallowed (will result in a deadlock).
+ */
+ class Listener {
+ public:
+ virtual ~Listener() = default;
+
+ virtual void onHeadToStagePose(const media::Pose3f&) = 0;
+ virtual void onActualModeChange(media::HeadTrackingMode) = 0;
+ };
+
+ /**
+ * Ctor.
+ * sensorPeriod determines how often to receive updates from the sensors (input rate).
+ * maxUpdatePeriod determines how often to produce an output when calculateAsync() isn't
+ * invoked.
+ */
+ SpatializerPoseController(Listener* listener, std::chrono::microseconds sensorPeriod,
+ std::chrono::microseconds maxUpdatePeriod);
+
+ /** Dtor. */
+ ~SpatializerPoseController();
+
+ /**
+ * Set the sensor that is to be used for head-tracking.
+ * nullptr can be used to disable head-tracking.
+ */
+ void setHeadSensor(const ASensor* sensor);
+
+ /**
+ * Set the sensor that is to be used for screen-tracking.
+ * nullptr can be used to disable screen-tracking.
+ */
+ void setScreenSensor(const ASensor* sensor);
+
+ /** Sets the desired head-tracking mode. */
+ void setDesiredMode(media::HeadTrackingMode mode);
+
+ /**
+ * Set the screen-to-stage pose, used in all modes.
+ */
+ void setScreenToStagePose(const media::Pose3f& screenToStage);
+
+ /**
+ * Sets the display orientation.
+ * Orientation is expressed as the angle of rotation from the physical "up" side of the screen
+ * to the logical "up" side of the content displayed on the screen. Counterclockwise angles,
+ * as viewed while facing the screen, are positive.
+ */
+ void setDisplayOrientation(float physicalToLogicalAngle);
+
+ /**
+ * This causes the current poses for both the head and screen to be considered "center".
+ */
+ void recenter();
+
+ /**
+ * This call triggers the recalculation of the output and the invocation of the relevant
+ * callbacks. This call is async and the callbacks will be triggered shortly after.
+ */
+ void calculateAsync();
+
+ /**
+ * Blocks until calculation and invocation of the respective callbacks has happened at least
+ * once.
+ */
+ void waitUntilCalculated();
+
+ private:
+ mutable std::mutex mMutex;
+ Listener* const mListener;
+ const std::chrono::microseconds mSensorPeriod;
+ std::unique_ptr<media::SensorPoseProvider> mPoseProvider;
+ std::unique_ptr<media::HeadTrackingProcessor> mProcessor;
+ int32_t mHeadSensor = media::SensorPoseProvider::INVALID_HANDLE;
+ int32_t mScreenSensor = media::SensorPoseProvider::INVALID_HANDLE;
+ std::optional<media::HeadTrackingMode> mActualMode;
+ std::thread mThread;
+ std::condition_variable mCondVar;
+ bool mShouldCalculate = true;
+ bool mShouldExit = false;
+ bool mCalculated = false;
+
+ void onPose(int64_t timestamp, int32_t sensor, const media::Pose3f& pose,
+ const std::optional<media::Twist3f>& twist) override;
+
+ void calculate_l();
+};
+
+} // namespace android
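As a rough usage sketch of the interface declared above (illustrative only, not part of the change): a caller implements Listener, constructs the controller with its sensor and update periods, and drives it with calculateAsync()/waitUntilCalculated(). The listener body and the periods below are made up for the example; passing nullptr to setHeadSensor() disables head-tracking, as documented above.

// Hypothetical caller, illustrative only: exercises the public interface declared above.
#include <chrono>
#include "SpatializerPoseController.h"

namespace android {

class ExampleListener : public SpatializerPoseController::Listener {
  public:
    void onHeadToStagePose(const media::Pose3f& /*pose*/) override {
        // A real client (the Spatializer effect) would forward this pose to the effect.
    }
    void onActualModeChange(media::HeadTrackingMode /*mode*/) override {
        // A real client would report the new mode to interested parties.
    }
};

void exampleUsage() {
    using namespace std::chrono_literals;
    ExampleListener listener;
    // Made-up periods: poll sensors every 20 ms, force an output at least every 100 ms.
    SpatializerPoseController controller(&listener, 20ms, 100ms);
    controller.setHeadSensor(nullptr);   // nullptr disables head-tracking, per the docs above
    controller.setDisplayOrientation(0.0f);
    controller.calculateAsync();         // kick one calculation on the worker thread
    controller.waitUntilCalculated();    // block until the callbacks have fired at least once
}

}  // namespace android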
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 19a111d..8967fc7 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1336,7 +1336,7 @@
auto clientSp = current->getValue();
if (clientSp.get() != nullptr) { // should never be needed
if (!clientSp->canCastToApiClient(effectiveApiLevel)) {
- ALOGW("CameraService connect called from same client, but with a different"
+ ALOGW("CameraService connect called with a different"
" API level, evicting prior client...");
} else if (clientSp->getRemote() == remoteCallback) {
ALOGI("CameraService::connect X (PID %d) (second call from same"