Merge "Specify version for aidl_interface explicitly" into sc-dev
diff --git a/METADATA b/METADATA
index d97975c..1fbda08 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,7 @@
+# *** THIS PACKAGE HAS SPECIAL LICENSING CONDITIONS. PLEASE
+# CONSULT THE OWNERS AND opensource-licensing@google.com BEFORE
+# DEPENDING ON IT IN YOUR PROJECT. ***
third_party {
- license_type: NOTICE
+ # would be NOTICE save for drm/mediadrm/plugins/clearkey/hidl/
+ license_type: BY_EXCEPTION_ONLY
}
diff --git a/apex/Android.bp b/apex/Android.bp
index bf91bf7..d8a0b91 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -15,7 +15,10 @@
apex_defaults {
name: "com.android.media-defaults",
updatable: true,
- java_libs: ["updatable-media"],
+ java_libs: [
+ "updatable-media",
+ "service-media-s",
+ ],
multilib: {
first: {
// Extractor process runs only with the primary ABI.
diff --git a/camera/ndk/Android.bp b/camera/ndk/Android.bp
index 3cf94d0..5e1e43e 100644
--- a/camera/ndk/Android.bp
+++ b/camera/ndk/Android.bp
@@ -62,7 +62,7 @@
],
cflags: [
"-fvisibility=hidden",
- "-DEXPORT=__attribute__ ((visibility (\"default\")))",
+ "-DEXPORT=__attribute__((visibility(\"default\")))",
"-Wall",
"-Wextra",
"-Werror",
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index 07176cf..6c1cf33 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -45,8 +45,6 @@
__BEGIN_DECLS
-#if __ANDROID_API__ >= 24
-
/**
* ACameraCaptureSession is an opaque type that manages frame captures of a camera device.
*
@@ -593,10 +591,6 @@
camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session)
__INTRODUCED_IN(24);
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 28
-
typedef struct ACaptureSessionOutput ACaptureSessionOutput;
/**
@@ -641,9 +635,7 @@
*/
camera_status_t ACameraCaptureSession_updateSharedOutput(ACameraCaptureSession* session,
ACaptureSessionOutput* output) __INTRODUCED_IN(28);
-#endif /* __ANDROID_API__ >= 28 */
-#if __ANDROID_API__ >= 29
/**
* The definition of final capture result callback with logical multi-camera support.
*
@@ -788,8 +780,6 @@
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId) __INTRODUCED_IN(29);
-#endif /* __ANDROID_API__ >= 29 */
-
__END_DECLS
#endif /* _NDK_CAMERA_CAPTURE_SESSION_H */
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index 1537bde..f72fe8d 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -44,8 +44,6 @@
__BEGIN_DECLS
-#if __ANDROID_API__ >= 24
-
/**
* ACameraDevice is opaque type that provides access to a camera device.
*
@@ -687,10 +685,6 @@
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session) __INTRODUCED_IN(24);
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 28
-
/**
* Create a shared ACaptureSessionOutput object.
*
@@ -782,10 +776,6 @@
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session) __INTRODUCED_IN(28);
-#endif /* __ANDROID_API__ >= 28 */
-
-#if __ANDROID_API__ >= 29
-
/**
* Create a ACaptureSessionOutput object used for streaming from a physical
* camera as part of a logical camera device.
@@ -890,8 +880,6 @@
const ACameraDevice* device,
const ACaptureSessionOutputContainer* sessionOutputContainer) __INTRODUCED_IN(29);
-#endif /* __ANDROID_API__ >= 29 */
-
__END_DECLS
#endif /* _NDK_CAMERA_DEVICE_H */
diff --git a/camera/ndk/include/camera/NdkCameraError.h b/camera/ndk/include/camera/NdkCameraError.h
index fc618ee..9d77eb4 100644
--- a/camera/ndk/include/camera/NdkCameraError.h
+++ b/camera/ndk/include/camera/NdkCameraError.h
@@ -40,8 +40,6 @@
__BEGIN_DECLS
-#if __ANDROID_API__ >= 24
-
typedef enum {
ACAMERA_OK = 0,
@@ -138,8 +136,6 @@
ACAMERA_ERROR_UNSUPPORTED_OPERATION = ACAMERA_ERROR_BASE - 14,
} camera_status_t;
-#endif /* __ANDROID_API__ >= 24 */
-
__END_DECLS
#endif /* _NDK_CAMERA_ERROR_H */
diff --git a/camera/ndk/include/camera/NdkCameraManager.h b/camera/ndk/include/camera/NdkCameraManager.h
index 0a2ee57..be32b11 100644
--- a/camera/ndk/include/camera/NdkCameraManager.h
+++ b/camera/ndk/include/camera/NdkCameraManager.h
@@ -44,8 +44,6 @@
__BEGIN_DECLS
-#if __ANDROID_API__ >= 24
-
/**
* ACameraManager is opaque type that provides access to camera service.
*
@@ -293,10 +291,6 @@
ACameraDevice_StateCallbacks* callback,
/*out*/ACameraDevice** device) __INTRODUCED_IN(24);
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 29
-
/**
* Definition of camera access permission change callback.
*
@@ -419,8 +413,6 @@
__INTRODUCED_IN(29);
#endif
-#endif /* __ANDROID_API__ >= 29 */
-
__END_DECLS
#endif /* _NDK_CAMERA_MANAGER_H */
diff --git a/camera/ndk/include/camera/NdkCameraMetadata.h b/camera/ndk/include/camera/NdkCameraMetadata.h
index a840bd1..0d5e6c4 100644
--- a/camera/ndk/include/camera/NdkCameraMetadata.h
+++ b/camera/ndk/include/camera/NdkCameraMetadata.h
@@ -41,9 +41,7 @@
#include <sys/cdefs.h>
#ifndef __ANDROID_VNDK__
-#if __ANDROID_API__ >= 30
#include "jni.h"
-#endif /* __ANDROID_API__ >= 30 */
#endif /* __ANDROID_VNDK__ */
#include "NdkCameraError.h"
@@ -51,8 +49,6 @@
__BEGIN_DECLS
-#if __ANDROID_API__ >= 24
-
/**
* ACameraMetadata is opaque type that provides access to read-only camera metadata like camera
* characteristics (via {@link ACameraManager_getCameraCharacteristics}) or capture results (via
@@ -238,10 +234,6 @@
*/
void ACameraMetadata_free(ACameraMetadata* metadata) __INTRODUCED_IN(24);
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 29
-
/**
* Helper function to check if a camera is logical multi-camera.
*
@@ -260,10 +252,7 @@
/*out*/size_t* numPhysicalCameras, /*out*/const char* const** physicalCameraIds)
__INTRODUCED_IN(29);
-#endif /* __ANDROID_API__ >= 29 */
-
#ifndef __ANDROID_VNDK__
-#if __ANDROID_API__ >= 30
/**
* Return a {@link ACameraMetadata} that references the same data as
@@ -290,7 +279,6 @@
ACameraMetadata* ACameraMetadata_fromCameraMetadata(JNIEnv* env, jobject cameraMetadata)
__INTRODUCED_IN(30);
-#endif /* __ANDROID_API__ >= 30 */
#endif /* __ANDROID_VNDK__ */
__END_DECLS
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index c7c3dd5..a2aa529 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -40,8 +40,6 @@
__BEGIN_DECLS
-#if __ANDROID_API__ >= 24
-
typedef enum acamera_metadata_section {
ACAMERA_COLOR_CORRECTION,
ACAMERA_CONTROL,
@@ -9145,8 +9143,6 @@
-#endif /* __ANDROID_API__ >= 24 */
-
__END_DECLS
#endif /* _NDK_CAMERA_METADATA_TAGS_H */
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index d3f8826..a4dc374 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -44,8 +44,6 @@
__BEGIN_DECLS
-#if __ANDROID_API__ >= 24
-
// Container for output targets
typedef struct ACameraOutputTargets ACameraOutputTargets;
@@ -304,10 +302,6 @@
*/
void ACaptureRequest_free(ACaptureRequest* request) __INTRODUCED_IN(24);
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 28
-
/**
* Associate an arbitrary user context pointer to the {@link ACaptureRequest}
*
@@ -356,10 +350,6 @@
*/
ACaptureRequest* ACaptureRequest_copy(const ACaptureRequest* src) __INTRODUCED_IN(28);
-#endif /* __ANDROID_API__ >= 28 */
-
-#if __ANDROID_API__ >= 29
-
/**
* Get a metadata entry from input {@link ACaptureRequest} for
* a physical camera backing a logical multi-camera device.
@@ -569,8 +559,6 @@
ACaptureRequest* request, const char* physicalId, uint32_t tag,
uint32_t count, const ACameraMetadata_rational* data) __INTRODUCED_IN(29);
-#endif /* __ANDROID_API__ >= 29 */
-
__END_DECLS
#endif /* _NDK_CAPTURE_REQUEST_H */
diff --git a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
index 1b8b8c1..6ac3510 100644
--- a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
@@ -109,6 +109,7 @@
}
void DrmPlugin::setPlayPolicy() {
+ android::Mutex::Autolock lock(mPlayPolicyLock);
mPlayPolicy.clear();
mPlayPolicy.add(kQueryKeyLicenseType, kStreaming);
mPlayPolicy.add(kQueryKeyPlayAllowed, kTrue);
diff --git a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
index 4fa42e5..aa9b59d 100644
--- a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
@@ -262,6 +262,7 @@
void initProperties();
void setPlayPolicy();
+ android::Mutex mPlayPolicyLock;
android::KeyedVector<String8, String8> mPlayPolicy;
android::KeyedVector<String8, String8> mStringProperties;
android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
index 1495703..d278633 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -119,7 +119,11 @@
return Void();
}
- if (source.offset + offset + source.size > sourceBase->getSize()) {
+ size_t totalSize = 0;
+ if (__builtin_add_overflow(source.offset, offset, &totalSize) ||
+ __builtin_add_overflow(totalSize, source.size, &totalSize) ||
+ totalSize > sourceBase->getSize()) {
+ android_errorWriteLog(0x534e4554, "176496160");
_hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0, "invalid buffer size");
return Void();
}
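As a worked illustration of why the hardened bounds check matters: with a 32-bit size_t, source.offset = 0xFFFFF000 and offset = 0x2000 would previously sum to 0x1000 after wrapping and slip past the getSize() comparison; __builtin_add_overflow detects the wrap, so the request is rejected (and reported via android_errorWriteLog) instead.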
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index f87f830..a77759e 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -304,6 +304,7 @@
}
void DrmPlugin::setPlayPolicy() {
+ android::Mutex::Autolock lock(mPlayPolicyLock);
mPlayPolicy.clear();
KeyValue policy;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index 3de7589..076beb8 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -406,6 +406,7 @@
int64_t mCloseSessionOkCount;
int64_t mCloseSessionNotOpenedCount;
uint32_t mNextSecureStopId;
+ android::Mutex mPlayPolicyLock;
// set by property to mock error scenarios
Status_V1_2 mMockError;
diff --git a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
index 82c061a..b1cf388 100644
--- a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
+++ b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
@@ -30,6 +30,7 @@
namespace android {
constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
+constexpr size_t kMaxDimension = 1920;
constexpr char COMPONENT_NAME[] = "c2.android.mpeg2.decoder";
class C2SoftMpeg2Dec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -64,8 +65,8 @@
DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
.withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
.withFields({
- C2F(mSize, width).inRange(16, 1920, 4),
- C2F(mSize, height).inRange(16, 1088, 4),
+ C2F(mSize, width).inRange(16, kMaxDimension, 2),
+ C2F(mSize, height).inRange(16, kMaxDimension, 2),
})
.withSetter(SizeSetter)
.build());
@@ -91,8 +92,8 @@
DefineParam(mMaxSize, C2_PARAMKEY_MAX_PICTURE_SIZE)
.withDefault(new C2StreamMaxPictureSizeTuning::output(0u, 320, 240))
.withFields({
- C2F(mSize, width).inRange(2, 1920, 2),
- C2F(mSize, height).inRange(2, 1088, 2),
+ C2F(mSize, width).inRange(2, kMaxDimension, 2),
+ C2F(mSize, height).inRange(2, kMaxDimension, 2),
})
.withSetter(MaxPictureSizeSetter, mSize)
.build());
@@ -204,8 +205,8 @@
const C2P<C2StreamPictureSizeInfo::output> &size) {
(void)mayBlock;
// TODO: get max width/height from the size's field helpers vs. hardcoding
- me.set().width = c2_min(c2_max(me.v.width, size.v.width), 1920u);
- me.set().height = c2_min(c2_max(me.v.height, size.v.height), 1088u);
+ me.set().width = c2_min(c2_max(me.v.width, size.v.width), kMaxDimension);
+ me.set().height = c2_min(c2_max(me.v.height, size.v.height), kMaxDimension);
return C2R::Ok();
}
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index a7cc037..ddd312f 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -35,8 +35,10 @@
namespace android {
constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
#ifdef MPEG4
+constexpr size_t kMaxDimension = 1920;
constexpr char COMPONENT_NAME[] = "c2.android.mpeg4.decoder";
#else
+constexpr size_t kMaxDimension = 352;
constexpr char COMPONENT_NAME[] = "c2.android.h263.decoder";
#endif
@@ -75,13 +77,8 @@
DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
.withDefault(new C2StreamPictureSizeInfo::output(0u, 176, 144))
.withFields({
-#ifdef MPEG4
- C2F(mSize, width).inRange(2, 1920, 2),
- C2F(mSize, height).inRange(2, 1088, 2),
-#else
- C2F(mSize, width).inRange(2, 352, 2),
- C2F(mSize, height).inRange(2, 288, 2),
-#endif
+ C2F(mSize, width).inRange(2, kMaxDimension, 2),
+ C2F(mSize, height).inRange(2, kMaxDimension, 2),
})
.withSetter(SizeSetter)
.build());
@@ -130,19 +127,10 @@
addParameter(
DefineParam(mMaxSize, C2_PARAMKEY_MAX_PICTURE_SIZE)
-#ifdef MPEG4
- .withDefault(new C2StreamMaxPictureSizeTuning::output(0u, 1920, 1088))
-#else
.withDefault(new C2StreamMaxPictureSizeTuning::output(0u, 352, 288))
-#endif
.withFields({
-#ifdef MPEG4
- C2F(mSize, width).inRange(2, 1920, 2),
- C2F(mSize, height).inRange(2, 1088, 2),
-#else
- C2F(mSize, width).inRange(2, 352, 2),
- C2F(mSize, height).inRange(2, 288, 2),
-#endif
+ C2F(mSize, width).inRange(2, kMaxDimension, 2),
+ C2F(mSize, height).inRange(2, kMaxDimension, 2),
})
.withSetter(MaxPictureSizeSetter, mSize)
.build());
@@ -200,13 +188,8 @@
const C2P<C2StreamPictureSizeInfo::output> &size) {
(void)mayBlock;
// TODO: get max width/height from the size's field helpers vs. hardcoding
-#ifdef MPEG4
- me.set().width = c2_min(c2_max(me.v.width, size.v.width), 1920u);
- me.set().height = c2_min(c2_max(me.v.height, size.v.height), 1088u);
-#else
- me.set().width = c2_min(c2_max(me.v.width, size.v.width), 352u);
- me.set().height = c2_min(c2_max(me.v.height, size.v.height), 288u);
-#endif
+ me.set().width = c2_min(c2_max(me.v.width, size.v.width), kMaxDimension);
+ me.set().height = c2_min(c2_max(me.v.height, size.v.height), kMaxDimension);
return C2R::Ok();
}
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 38f7389..752140a 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -151,6 +151,7 @@
/* protected content */
kParamIndexSecureMode,
+ kParamIndexEncryptedBuffer, // info-buffer, used with SM_READ_PROTECTED_WITH_ENCRYPTED
// deprecated
kParamIndexDelayRequest = kParamIndexDelay | C2Param::CoreIndex::IS_REQUEST_FLAG,
@@ -221,6 +222,7 @@
kParamIndexDrcEffectType, // drc, enum
kParamIndexDrcOutputLoudness, // drc, float (dBFS)
kParamIndexDrcAlbumMode, // drc, enum
+ kParamIndexAudioFrameSize, // int
/* ============================== platform-defined parameters ============================== */
@@ -1144,6 +1146,8 @@
C2ENUM(C2Config::secure_mode_t, uint32_t,
SM_UNPROTECTED, ///< no content protection
SM_READ_PROTECTED, ///< input and output buffers shall be protected from reading
+ /// both read protected and readable encrypted buffers are used
+ SM_READ_PROTECTED_WITH_ENCRYPTED,
)
typedef C2GlobalParam<C2Tuning, C2SimpleValueStruct<C2Config::secure_mode_t>, kParamIndexSecureMode>
@@ -1969,9 +1973,20 @@
/**
* DRC output loudness in dBFS. Retrieved during decoding
*/
- typedef C2StreamParam<C2Info, C2FloatValue, kParamIndexDrcOutputLoudness>
+typedef C2StreamParam<C2Info, C2FloatValue, kParamIndexDrcOutputLoudness>
C2StreamDrcOutputLoudnessTuning;
- constexpr char C2_PARAMKEY_DRC_OUTPUT_LOUDNESS[] = "output.drc.output-loudness";
+constexpr char C2_PARAMKEY_DRC_OUTPUT_LOUDNESS[] = "output.drc.output-loudness";
+
+/**
+ * Audio frame size in samples.
+ *
+ * Audio encoders can expose this parameter to signal the desired audio frame
+ * size that corresponds to a single coded access unit.
+ * Default value is 0, meaning that the encoder accepts input buffers of any size.
+ */
+typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexAudioFrameSize>
+ C2StreamAudioFrameSizeInfo;
+constexpr char C2_PARAMKEY_AUDIO_FRAME_SIZE[] = "raw.audio-frame-size";
/* --------------------------------------- AAC components --------------------------------------- */
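A minimal sketch of the registration an audio encoder's IntfImpl constructor might add to advertise the new parameter; mAudioFrameSize is a hypothetical member of type std::shared_ptr<C2StreamAudioFrameSizeInfo::input> and the 1024-sample value is illustrative, not part of this change:

    // Hypothetical IntfImpl snippet: expose a constant 1024-sample frame size
    // so the framework can batch PCM input into full access units.
    addParameter(
            DefineParam(mAudioFrameSize, C2_PARAMKEY_AUDIO_FRAME_SIZE)
            .withConstValue(new C2StreamAudioFrameSizeInfo::input(0u, 1024))
            .build());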
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index 1f0c856..72f7c43 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -895,13 +895,12 @@
BufferPoolSender* bufferPoolSender,
std::list<BaseBlock>* baseBlocks,
std::map<const void*, uint32_t>* baseBlockIndices) {
- // TODO: C2InfoBuffer is not implemented.
- (void)d;
- (void)s;
- (void)bufferPoolSender;
- (void)baseBlocks;
- (void)baseBlockIndices;
- LOG(INFO) << "InfoBuffer not implemented.";
+ d->index = static_cast<ParamIndex>(s.index());
+ Buffer& dBuffer = d->buffer;
+ if (!objcpy(&dBuffer, s.data(), bufferPoolSender, baseBlocks, baseBlockIndices)) {
+ LOG(ERROR) << "Invalid C2InfoBuffer::data";
+ return false;
+ }
return true;
}
@@ -1336,6 +1335,68 @@
return true;
}
+// InfoBuffer -> C2InfoBuffer
+bool objcpy(std::vector<C2InfoBuffer> *d, const InfoBuffer& s,
+ const std::vector<C2BaseBlock>& baseBlocks) {
+
+    // Currently, a non-null C2InfoBuffer must contain exactly 1 block.
+ if (s.buffer.blocks.size() == 0) {
+ return true;
+ } else if (s.buffer.blocks.size() != 1) {
+ LOG(ERROR) << "Invalid InfoBuffer::Buffer "
+ "Currently, a C2InfoBuffer must contain exactly 1 block.";
+ return false;
+ }
+
+ const Block &sBlock = s.buffer.blocks[0];
+ if (sBlock.index >= baseBlocks.size()) {
+ LOG(ERROR) << "Invalid InfoBuffer::Buffer::blocks[0].index: "
+ "Array index out of range.";
+ return false;
+ }
+ const C2BaseBlock &baseBlock = baseBlocks[sBlock.index];
+
+ // Parse meta.
+ std::vector<C2Param*> sBlockMeta;
+ if (!parseParamsBlob(&sBlockMeta, sBlock.meta)) {
+ LOG(ERROR) << "Invalid InfoBuffer::Buffer::blocks[0].meta.";
+ return false;
+ }
+
+ // Copy fence.
+ C2Fence dFence;
+ if (!objcpy(&dFence, sBlock.fence)) {
+ LOG(ERROR) << "Invalid InfoBuffer::Buffer::blocks[0].fence.";
+ return false;
+ }
+
+ // Construct a block.
+ switch (baseBlock.type) {
+ case C2BaseBlock::LINEAR:
+ if (sBlockMeta.size() == 1 && sBlockMeta[0] != nullptr &&
+ sBlockMeta[0]->size() == sizeof(C2Hidl_RangeInfo)) {
+ C2Hidl_RangeInfo *rangeInfo =
+ reinterpret_cast<C2Hidl_RangeInfo*>(sBlockMeta[0]);
+ d->emplace_back(C2InfoBuffer::CreateLinearBuffer(
+ s.index,
+ baseBlock.linear->share(
+ rangeInfo->offset, rangeInfo->length, dFence)));
+ return true;
+ }
+ LOG(ERROR) << "Invalid Meta for C2BaseBlock::Linear InfoBuffer.";
+ break;
+ case C2BaseBlock::GRAPHIC:
+            // Graphic blocks are not used for info buffers at the moment.
+ LOG(ERROR) << "Non-Used C2BaseBlock::type for InfoBuffer.";
+ break;
+ default:
+ LOG(ERROR) << "Invalid C2BaseBlock::type for InfoBuffer.";
+ break;
+ }
+
+ return false;
+}
+
// FrameData -> C2FrameData
bool objcpy(C2FrameData* d, const FrameData& s,
const std::vector<C2BaseBlock>& baseBlocks) {
@@ -1370,8 +1431,18 @@
}
}
- // TODO: Implement this once C2InfoBuffer has constructors.
d->infoBuffers.clear();
+ if (s.infoBuffers.size() == 0) {
+ // InfoBuffer is optional
+ return true;
+ }
+ d->infoBuffers.reserve(s.infoBuffers.size());
+ for (const InfoBuffer &sInfoBuffer: s.infoBuffers) {
+ if (!objcpy(&(d->infoBuffers), sInfoBuffer, baseBlocks)) {
+            LOG(ERROR) << "Invalid FrameData::infoBuffers.";
+ return false;
+ }
+ }
return true;
}
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 94034b5..c3cfcce 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -11,6 +11,7 @@
"CCodecConfig.cpp",
"Codec2Buffer.cpp",
"Codec2InfoBuilder.cpp",
+ "FrameReassembler.cpp",
"PipelineWatcher.cpp",
"ReflectedParamUpdater.cpp",
],
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index c697b80..44ebf84 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -30,6 +30,7 @@
#include <android/hardware/cas/native/1.0/IDescrambler.h>
#include <android/hardware/drm/1.0/types.h>
+#include <android-base/properties.h>
#include <android-base/stringprintf.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryDealer.h>
@@ -143,7 +144,8 @@
mFrameIndex(0u),
mFirstValidFrameIndex(0u),
mMetaMode(MODE_NONE),
- mInputMetEos(false) {
+ mInputMetEos(false),
+ mSendEncryptedInfoBuffer(false) {
mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
{
Mutexed<Input>::Locked input(mInput);
@@ -159,6 +161,10 @@
output->outputDelay = 0u;
output->numSlots = kSmoothnessFactor;
}
+ {
+ Mutexed<BlockPools>::Locked pools(mBlockPools);
+ pools->outputPoolId = C2BlockPool::BASIC_LINEAR;
+ }
}
CCodecBufferChannel::~CCodecBufferChannel() {
@@ -188,7 +194,10 @@
return mInputSurface->signalEndOfInputStream();
}
-status_t CCodecBufferChannel::queueInputBufferInternal(sp<MediaCodecBuffer> buffer) {
+status_t CCodecBufferChannel::queueInputBufferInternal(
+ sp<MediaCodecBuffer> buffer,
+ std::shared_ptr<C2LinearBlock> encryptedBlock,
+ size_t blockSize) {
int64_t timeUs;
CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
@@ -209,6 +218,7 @@
flags |= C2FrameData::FLAG_CODEC_CONFIG;
}
ALOGV("[%s] queueInputBuffer: buffer->size() = %zu", mName, buffer->size());
+ std::list<std::unique_ptr<C2Work>> items;
std::unique_ptr<C2Work> work(new C2Work);
work->input.ordinal.timestamp = timeUs;
work->input.ordinal.frameIndex = mFrameIndex++;
@@ -218,9 +228,8 @@
work->input.ordinal.customOrdinal = timeUs;
work->input.buffers.clear();
- uint64_t queuedFrameIndex = work->input.ordinal.frameIndex.peeku();
- std::vector<std::shared_ptr<C2Buffer>> queuedBuffers;
sp<Codec2Buffer> copy;
+ bool usesFrameReassembler = false;
if (buffer->size() > 0u) {
Mutexed<Input>::Locked input(mInput);
@@ -245,38 +254,48 @@
"buffer starvation on component.", mName);
}
}
- int32_t cvo = 0;
- if (buffer->meta()->findInt32("cvo", &cvo)) {
- int32_t rotation = cvo % 360;
- // change rotation to counter-clock wise.
- rotation = ((rotation <= 0) ? 0 : 360) - rotation;
- Mutexed<OutputSurface>::Locked output(mOutputSurface);
- output->rotation[queuedFrameIndex] = rotation;
+ if (input->frameReassembler) {
+ usesFrameReassembler = true;
+ input->frameReassembler.process(buffer, &items);
+ } else {
+ int32_t cvo = 0;
+ if (buffer->meta()->findInt32("cvo", &cvo)) {
+ int32_t rotation = cvo % 360;
+                // change rotation to counter-clockwise.
+ rotation = ((rotation <= 0) ? 0 : 360) - rotation;
+
+ Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ uint64_t frameIndex = work->input.ordinal.frameIndex.peeku();
+ output->rotation[frameIndex] = rotation;
+ }
+ work->input.buffers.push_back(c2buffer);
+ if (encryptedBlock) {
+ work->input.infoBuffers.emplace_back(C2InfoBuffer::CreateLinearBuffer(
+ kParamIndexEncryptedBuffer,
+ encryptedBlock->share(0, blockSize, C2Fence())));
+ }
}
- work->input.buffers.push_back(c2buffer);
- queuedBuffers.push_back(c2buffer);
} else if (eos) {
flags |= C2FrameData::FLAG_END_OF_STREAM;
}
- work->input.flags = (C2FrameData::flags_t)flags;
- // TODO: fill info's
+ if (usesFrameReassembler) {
+ if (!items.empty()) {
+ items.front()->input.configUpdate = std::move(mParamsToBeSet);
+ mFrameIndex = (items.back()->input.ordinal.frameIndex + 1).peek();
+ }
+ } else {
+ work->input.flags = (C2FrameData::flags_t)flags;
+ // TODO: fill info's
- work->input.configUpdate = std::move(mParamsToBeSet);
- work->worklets.clear();
- work->worklets.emplace_back(new C2Worklet);
+ work->input.configUpdate = std::move(mParamsToBeSet);
+ work->worklets.clear();
+ work->worklets.emplace_back(new C2Worklet);
- std::list<std::unique_ptr<C2Work>> items;
- items.push_back(std::move(work));
- mPipelineWatcher.lock()->onWorkQueued(
- queuedFrameIndex,
- std::move(queuedBuffers),
- PipelineWatcher::Clock::now());
- c2_status_t err = mComponent->queue(&items);
- if (err != C2_OK) {
- mPipelineWatcher.lock()->onWorkDone(queuedFrameIndex);
+ items.push_back(std::move(work));
+
+ eos = eos && buffer->size() > 0u;
}
-
- if (err == C2_OK && eos && buffer->size() > 0u) {
+ if (eos) {
work.reset(new C2Work);
work->input.ordinal.timestamp = timeUs;
work->input.ordinal.frameIndex = mFrameIndex++;
@@ -285,23 +304,28 @@
work->input.buffers.clear();
work->input.flags = C2FrameData::FLAG_END_OF_STREAM;
work->worklets.emplace_back(new C2Worklet);
-
- queuedFrameIndex = work->input.ordinal.frameIndex.peeku();
- queuedBuffers.clear();
-
- items.clear();
items.push_back(std::move(work));
-
- mPipelineWatcher.lock()->onWorkQueued(
- queuedFrameIndex,
- std::move(queuedBuffers),
- PipelineWatcher::Clock::now());
- err = mComponent->queue(&items);
- if (err != C2_OK) {
- mPipelineWatcher.lock()->onWorkDone(queuedFrameIndex);
- }
}
- if (err == C2_OK) {
+ c2_status_t err = C2_OK;
+ if (!items.empty()) {
+ {
+ Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+ PipelineWatcher::Clock::time_point now = PipelineWatcher::Clock::now();
+ for (const std::unique_ptr<C2Work> &work : items) {
+ watcher->onWorkQueued(
+ work->input.ordinal.frameIndex.peeku(),
+ std::vector(work->input.buffers),
+ now);
+ }
+ }
+ err = mComponent->queue(&items);
+ }
+ if (err != C2_OK) {
+ Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+ for (const std::unique_ptr<C2Work> &work : items) {
+ watcher->onWorkDone(work->input.ordinal.frameIndex.peeku());
+ }
+ } else {
Mutexed<Input>::Locked input(mInput);
bool released = false;
if (buffer) {
@@ -522,6 +546,40 @@
}
sp<EncryptedLinearBlockBuffer> encryptedBuffer((EncryptedLinearBlockBuffer *)buffer.get());
+ std::shared_ptr<C2LinearBlock> block;
+ size_t allocSize = buffer->size();
+ size_t bufferSize = 0;
+ c2_status_t blockRes = C2_OK;
+ bool copied = false;
+ if (mSendEncryptedInfoBuffer) {
+ static const C2MemoryUsage kDefaultReadWriteUsage{
+ C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+ constexpr int kAllocGranule0 = 1024 * 64;
+ constexpr int kAllocGranule1 = 1024 * 1024;
+ std::shared_ptr<C2BlockPool> pool = mBlockPools.lock()->inputPool;
+ // round up encrypted sizes to limit fragmentation and encourage buffer reuse
+ if (allocSize <= kAllocGranule1) {
+ bufferSize = align(allocSize, kAllocGranule0);
+ } else {
+ bufferSize = align(allocSize, kAllocGranule1);
+ }
+ blockRes = pool->fetchLinearBlock(
+ bufferSize, kDefaultReadWriteUsage, &block);
+
+ if (blockRes == C2_OK) {
+ C2WriteView view = block->map().get();
+ if (view.error() == C2_OK && view.size() == bufferSize) {
+ copied = true;
+ // TODO: only copy clear sections
+ memcpy(view.data(), buffer->data(), allocSize);
+ }
+ }
+ }
+
+ if (!copied) {
+ block.reset();
+ }
+
ssize_t result = -1;
ssize_t codecDataOffset = 0;
if (numSubSamples == 1
@@ -613,7 +671,8 @@
}
buffer->setRange(codecDataOffset, result - codecDataOffset);
- return queueInputBufferInternal(buffer);
+
+ return queueInputBufferInternal(buffer, block, bufferSize);
}
void CCodecBufferChannel::feedInputBufferIfAvailable() {
@@ -837,7 +896,12 @@
}
return result;
}
- ALOGV("[%s] queue buffer successful", mName);
+
+    if (android::base::GetBoolProperty("debug.stagefright.fps", false)) {
+ ALOGD("[%s] queue buffer successful", mName);
+ } else {
+ ALOGV("[%s] queue buffer successful", mName);
+ }
int64_t mediaTimeUs = 0;
(void)buffer->meta()->findInt64("timeUs", &mediaTimeUs);
@@ -898,27 +962,31 @@
bool buffersBoundToCodec) {
C2StreamBufferTypeSetting::input iStreamFormat(0u);
C2StreamBufferTypeSetting::output oStreamFormat(0u);
+ C2ComponentKindSetting kind;
C2PortReorderBufferDepthTuning::output reorderDepth;
C2PortReorderKeySetting::output reorderKey;
C2PortActualDelayTuning::input inputDelay(0);
C2PortActualDelayTuning::output outputDelay(0);
C2ActualPipelineDelayTuning pipelineDelay(0);
+ C2SecureModeTuning secureMode(C2Config::SM_UNPROTECTED);
c2_status_t err = mComponent->query(
{
&iStreamFormat,
&oStreamFormat,
+ &kind,
&reorderDepth,
&reorderKey,
&inputDelay,
&pipelineDelay,
&outputDelay,
+ &secureMode,
},
{},
C2_DONT_BLOCK,
nullptr);
if (err == C2_BAD_INDEX) {
- if (!iStreamFormat || !oStreamFormat) {
+ if (!iStreamFormat || !oStreamFormat || !kind) {
return UNKNOWN_ERROR;
}
} else if (err != C2_OK) {
@@ -935,18 +1003,26 @@
// TODO: get this from input format
bool secure = mComponent->getName().find(".secure") != std::string::npos;
+ // secure mode is a static parameter (shall not change in the executing state)
+ mSendEncryptedInfoBuffer = secureMode.value == C2Config::SM_READ_PROTECTED_WITH_ENCRYPTED;
+
std::shared_ptr<C2AllocatorStore> allocatorStore = GetCodec2PlatformAllocatorStore();
int poolMask = GetCodec2PoolMask();
C2PlatformAllocatorStore::id_t preferredLinearId = GetPreferredLinearAllocatorId(poolMask);
if (inputFormat != nullptr) {
bool graphic = (iStreamFormat.value == C2BufferData::GRAPHIC);
+ bool audioEncoder = !graphic && (kind.value == C2Component::KIND_ENCODER);
C2Config::api_feature_t apiFeatures = C2Config::api_feature_t(
API_REFLECTION |
API_VALUES |
API_CURRENT_VALUES |
API_DEPENDENCY |
API_SAME_INPUT_BUFFER);
+ C2StreamAudioFrameSizeInfo::input encoderFrameSize(0u);
+ C2StreamSampleRateInfo::input sampleRate(0u);
+ C2StreamChannelCountInfo::input channelCount(0u);
+ C2StreamPcmEncodingInfo::input pcmEncoding(0u);
std::shared_ptr<C2BlockPool> pool;
{
Mutexed<BlockPools>::Locked pools(mBlockPools);
@@ -959,7 +1035,19 @@
// from component, create the input block pool with given ID. Otherwise, use default IDs.
std::vector<std::unique_ptr<C2Param>> params;
C2ApiFeaturesSetting featuresSetting{apiFeatures};
- err = mComponent->query({ &featuresSetting },
+ std::vector<C2Param *> stackParams({&featuresSetting});
+ if (audioEncoder) {
+ stackParams.push_back(&encoderFrameSize);
+ stackParams.push_back(&sampleRate);
+ stackParams.push_back(&channelCount);
+ stackParams.push_back(&pcmEncoding);
+ } else {
+ encoderFrameSize.invalidate();
+ sampleRate.invalidate();
+ channelCount.invalidate();
+ pcmEncoding.invalidate();
+ }
+ err = mComponent->query(stackParams,
{ C2PortAllocatorsTuning::input::PARAM_TYPE },
C2_DONT_BLOCK,
&params);
@@ -1017,10 +1105,21 @@
input->numSlots = numInputSlots;
input->extraBuffers.flush();
input->numExtraSlots = 0u;
+ if (audioEncoder && encoderFrameSize && sampleRate && channelCount) {
+ input->frameReassembler.init(
+ pool,
+ {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
+ encoderFrameSize.value,
+ sampleRate.value,
+ channelCount.value,
+ pcmEncoding ? pcmEncoding.value : C2Config::PCM_16);
+ }
bool conforming = (apiFeatures & API_SAME_INPUT_BUFFER);
// For encrypted content, framework decrypts source buffer (ashmem) into
// C2Buffers. Thus non-conforming codecs can process these.
- if (!buffersBoundToCodec && (hasCryptoOrDescrambler() || conforming)) {
+ if (!buffersBoundToCodec
+ && !input->frameReassembler
+ && (hasCryptoOrDescrambler() || conforming)) {
input->buffers.reset(new SlotInputBuffers(mName));
} else if (graphic) {
if (mInputSurface) {
@@ -1092,10 +1191,13 @@
bool graphic = (oStreamFormat.value == C2BufferData::GRAPHIC);
C2BlockPool::local_id_t outputPoolId_;
+ C2BlockPool::local_id_t prevOutputPoolId;
{
Mutexed<BlockPools>::Locked pools(mBlockPools);
+ prevOutputPoolId = pools->outputPoolId;
+
// set default allocator ID.
pools->outputAllocatorId = (graphic) ? C2PlatformAllocatorStore::GRALLOC
: preferredLinearId;
@@ -1189,6 +1291,15 @@
outputPoolId_ = pools->outputPoolId;
}
+ if (prevOutputPoolId != C2BlockPool::BASIC_LINEAR
+ && prevOutputPoolId != C2BlockPool::BASIC_GRAPHIC) {
+ c2_status_t err = mComponent->destroyBlockPool(prevOutputPoolId);
+ if (err != C2_OK) {
+ ALOGW("Failed to clean up previous block pool %llu - %s (%d)\n",
+ (unsigned long long) prevOutputPoolId, asString(err), err);
+ }
+ }
+
Mutexed<Output>::Locked output(mOutput);
output->outputDelay = outputDelayValue;
output->numSlots = numOutputSlots;
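As a worked example of the padding introduced above (and assuming align() rounds up to a multiple of its second argument): with kAllocGranule0 = 64 KiB and kAllocGranule1 = 1 MiB, a 70000-byte encrypted sample is padded to 131072 bytes (2 x 64 KiB) and a 1.5 MiB sample to 2 MiB; sizes up to 1 MiB snap to the next 64 KiB boundary, larger ones to the next 1 MiB boundary.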
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 1ef21aa..45da003 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -31,6 +31,7 @@
#include <media/stagefright/CodecBase.h>
#include "CCodecBuffers.h"
+#include "FrameReassembler.h"
#include "InputSurfaceWrapper.h"
#include "PipelineWatcher.h"
@@ -238,7 +239,9 @@
void feedInputBufferIfAvailable();
void feedInputBufferIfAvailableInternal();
- status_t queueInputBufferInternal(sp<MediaCodecBuffer> buffer);
+ status_t queueInputBufferInternal(sp<MediaCodecBuffer> buffer,
+ std::shared_ptr<C2LinearBlock> encryptedBlock = nullptr,
+ size_t blockSize = 0);
bool handleWork(
std::unique_ptr<C2Work> work, const sp<AMessage> &outputFormat,
const C2StreamInitDataInfo::output *initData);
@@ -269,6 +272,8 @@
size_t numExtraSlots;
uint32_t inputDelay;
uint32_t pipelineDelay;
+
+ FrameReassembler frameReassembler;
};
Mutexed<Input> mInput;
struct Output {
@@ -316,6 +321,7 @@
inline bool hasCryptoOrDescrambler() {
return mCrypto != nullptr || mDescrambler != nullptr;
}
+ std::atomic_bool mSendEncryptedInfoBuffer;
};
// Conversion of a c2_status_t value to a status_t value may depend on the
diff --git a/media/codec2/sfplugin/FrameReassembler.cpp b/media/codec2/sfplugin/FrameReassembler.cpp
new file mode 100644
index 0000000..9cec23f
--- /dev/null
+++ b/media/codec2/sfplugin/FrameReassembler.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameReassembler"
+
+#include <log/log.h>
+
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "FrameReassembler.h"
+
+namespace android {
+
+static constexpr uint64_t kToleranceUs = 1000; // 1ms
+
+FrameReassembler::FrameReassembler()
+ : mUsage{0, 0},
+ mSampleRate(0u),
+ mChannelCount(0u),
+ mEncoding(C2Config::PCM_16),
+ mCurrentOrdinal({0, 0, 0}) {
+}
+
+void FrameReassembler::init(
+ const std::shared_ptr<C2BlockPool> &pool,
+ C2MemoryUsage usage,
+ uint32_t frameSize,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ C2Config::pcm_encoding_t encoding) {
+ mBlockPool = pool;
+ mUsage = usage;
+ mFrameSize = frameSize;
+ mSampleRate = sampleRate;
+ mChannelCount = channelCount;
+ mEncoding = encoding;
+}
+
+void FrameReassembler::updateFrameSize(uint32_t frameSize) {
+ finishCurrentBlock(&mPendingWork);
+ mFrameSize = frameSize;
+}
+
+void FrameReassembler::updateSampleRate(uint32_t sampleRate) {
+ finishCurrentBlock(&mPendingWork);
+ mSampleRate = sampleRate;
+}
+
+void FrameReassembler::updateChannelCount(uint32_t channelCount) {
+ finishCurrentBlock(&mPendingWork);
+ mChannelCount = channelCount;
+}
+
+void FrameReassembler::updatePcmEncoding(C2Config::pcm_encoding_t encoding) {
+ finishCurrentBlock(&mPendingWork);
+ mEncoding = encoding;
+}
+
+void FrameReassembler::reset() {
+ flush();
+ mCurrentOrdinal = {0, 0, 0};
+ mBlockPool.reset();
+ mFrameSize.reset();
+ mSampleRate = 0u;
+ mChannelCount = 0u;
+ mEncoding = C2Config::PCM_16;
+}
+
+FrameReassembler::operator bool() const {
+ return mFrameSize.has_value();
+}
+
+c2_status_t FrameReassembler::process(
+ const sp<MediaCodecBuffer> &buffer,
+ std::list<std::unique_ptr<C2Work>> *items) {
+ int64_t timeUs;
+ if (buffer->size() == 0u
+ || !buffer->meta()->findInt64("timeUs", &timeUs)) {
+ return C2_BAD_VALUE;
+ }
+
+ items->splice(items->end(), mPendingWork);
+
+ // Fill mCurrentBlock
+ if (mCurrentBlock) {
+ // First check the timestamp
+ c2_cntr64_t endTimestampUs = mCurrentOrdinal.timestamp;
+ endTimestampUs += bytesToSamples(mWriteView->size()) * 1000000 / mSampleRate;
+ if (timeUs < endTimestampUs.peek()) {
+ uint64_t diffUs = (endTimestampUs - timeUs).peeku();
+ if (diffUs > kToleranceUs) {
+                // The timestamp is going back in time by a large amount.
+ // TODO: b/145702136
+ ALOGW("timestamp going back in time! from %lld to %lld",
+ endTimestampUs.peekll(), (long long)timeUs);
+ }
+ } else { // timeUs >= endTimestampUs.peek()
+ uint64_t diffUs = (timeUs - endTimestampUs).peeku();
+ if (diffUs > kToleranceUs) {
+ // The timestamp is going forward; add silence as necessary.
+ size_t gapSamples = usToSamples(diffUs);
+ size_t remainingSamples =
+ (mWriteView->capacity() - mWriteView->size())
+ / mChannelCount / bytesPerSample();
+ if (gapSamples < remainingSamples) {
+ size_t gapBytes = gapSamples * mChannelCount * bytesPerSample();
+ memset(mWriteView->base() + mWriteView->size(), 0u, gapBytes);
+ mWriteView->setSize(mWriteView->size() + gapBytes);
+ } else {
+ finishCurrentBlock(items);
+ }
+ }
+ }
+ }
+
+ if (mCurrentBlock) {
+ // Append the data at the end of the current block
+ size_t copySize = std::min(
+ buffer->size(),
+ size_t(mWriteView->capacity() - mWriteView->size()));
+ memcpy(mWriteView->base() + mWriteView->size(), buffer->data(), copySize);
+ buffer->setRange(buffer->offset() + copySize, buffer->size() - copySize);
+ mWriteView->setSize(mWriteView->size() + copySize);
+ if (mWriteView->size() == mWriteView->capacity()) {
+ finishCurrentBlock(items);
+ }
+ timeUs += bytesToSamples(copySize) * 1000000 / mSampleRate;
+ }
+
+ if (buffer->size() > 0) {
+ mCurrentOrdinal.timestamp = timeUs;
+ }
+
+ size_t frameSizeBytes = mFrameSize.value() * mChannelCount * bytesPerSample();
+ while (buffer->size() > 0) {
+ LOG_ALWAYS_FATAL_IF(
+ mCurrentBlock,
+ "There's remaining data but the pending block is not filled & finished");
+ std::unique_ptr<C2Work> work(new C2Work);
+ c2_status_t err = mBlockPool->fetchLinearBlock(frameSizeBytes, mUsage, &mCurrentBlock);
+ if (err != C2_OK) {
+ return err;
+ }
+ size_t copySize = std::min(buffer->size(), frameSizeBytes);
+ mWriteView = mCurrentBlock->map().get();
+ if (mWriteView->error() != C2_OK) {
+ return mWriteView->error();
+ }
+ ALOGV("buffer={offset=%zu size=%zu} copySize=%zu",
+ buffer->offset(), buffer->size(), copySize);
+ memcpy(mWriteView->base(), buffer->data(), copySize);
+ mWriteView->setOffset(0u);
+ mWriteView->setSize(copySize);
+ buffer->setRange(buffer->offset() + copySize, buffer->size() - copySize);
+ if (copySize == frameSizeBytes) {
+ finishCurrentBlock(items);
+ }
+ }
+
+ int32_t eos = 0;
+ if (buffer->meta()->findInt32("eos", &eos) && eos) {
+ finishCurrentBlock(items);
+ }
+
+ return C2_OK;
+}
+
+void FrameReassembler::flush() {
+ mPendingWork.clear();
+ mWriteView.reset();
+ mCurrentBlock.reset();
+}
+
+uint64_t FrameReassembler::bytesToSamples(size_t numBytes) const {
+ return numBytes / mChannelCount / bytesPerSample();
+}
+
+size_t FrameReassembler::usToSamples(uint64_t us) const {
+ return (us * mChannelCount * mSampleRate / 1000000);
+}
+
+uint32_t FrameReassembler::bytesPerSample() const {
+ return (mEncoding == C2Config::PCM_8) ? 1
+ : (mEncoding == C2Config::PCM_16) ? 2
+ : (mEncoding == C2Config::PCM_FLOAT) ? 4 : 0;
+}
+
+void FrameReassembler::finishCurrentBlock(std::list<std::unique_ptr<C2Work>> *items) {
+ if (!mCurrentBlock) {
+ // No-op
+ return;
+ }
+ if (mWriteView->size() < mWriteView->capacity()) {
+ memset(mWriteView->base() + mWriteView->size(), 0u,
+ mWriteView->capacity() - mWriteView->size());
+ mWriteView->setSize(mWriteView->capacity());
+ }
+ std::unique_ptr<C2Work> work{std::make_unique<C2Work>()};
+ work->input.ordinal = mCurrentOrdinal;
+ work->input.buffers.push_back(C2Buffer::CreateLinearBuffer(
+ mCurrentBlock->share(0, mCurrentBlock->capacity(), C2Fence())));
+ work->worklets.clear();
+ work->worklets.emplace_back(new C2Worklet);
+ items->push_back(std::move(work));
+
+ ++mCurrentOrdinal.frameIndex;
+ mCurrentOrdinal.timestamp += mFrameSize.value() * 1000000 / mSampleRate;
+ mCurrentBlock.reset();
+ mWriteView.reset();
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/FrameReassembler.h b/media/codec2/sfplugin/FrameReassembler.h
new file mode 100644
index 0000000..17ac06d
--- /dev/null
+++ b/media/codec2/sfplugin/FrameReassembler.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_REASSEMBLER_H_
+#define FRAME_REASSEMBLER_H_
+
+#include <set>
+#include <memory>
+
+#include <media/MediaCodecBuffer.h>
+
+#include <C2Config.h>
+#include <C2Work.h>
+
+namespace android {
+
+class FrameReassembler {
+public:
+ FrameReassembler();
+
+ void init(
+ const std::shared_ptr<C2BlockPool> &pool,
+ C2MemoryUsage usage,
+ uint32_t frameSize,
+ uint32_t sampleRate,
+ uint32_t channelCount,
+ C2Config::pcm_encoding_t encoding);
+ void updateFrameSize(uint32_t frameSize);
+ void updateSampleRate(uint32_t sampleRate);
+ void updateChannelCount(uint32_t channelCount);
+ void updatePcmEncoding(C2Config::pcm_encoding_t encoding);
+ void reset();
+ void flush();
+
+ explicit operator bool() const;
+
+ c2_status_t process(
+ const sp<MediaCodecBuffer> &buffer,
+ std::list<std::unique_ptr<C2Work>> *items);
+
+private:
+ std::shared_ptr<C2BlockPool> mBlockPool;
+ C2MemoryUsage mUsage;
+ std::optional<uint32_t> mFrameSize;
+ uint32_t mSampleRate;
+ uint32_t mChannelCount;
+ C2Config::pcm_encoding_t mEncoding;
+ std::list<std::unique_ptr<C2Work>> mPendingWork;
+ C2WorkOrdinalStruct mCurrentOrdinal;
+ std::shared_ptr<C2LinearBlock> mCurrentBlock;
+ std::optional<C2WriteView> mWriteView;
+
+ uint64_t bytesToSamples(size_t numBytes) const;
+ size_t usToSamples(uint64_t us) const;
+ uint32_t bytesPerSample() const;
+
+ void finishCurrentBlock(std::list<std::unique_ptr<C2Work>> *items);
+};
+
+} // namespace android
+
+#endif // FRAME_REASSEMBLER_H_
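A rough usage sketch of the new class, mirroring how CCodecBufferChannel drives it; the block pool and the input MediaCodecBuffer are assumed to come from the caller, and error handling is elided:

    FrameReassembler reassembler;
    reassembler.init(
            pool,                                  // std::shared_ptr<C2BlockPool>
            {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
            1024 /* frame size in samples */,
            48000 /* sample rate */,
            2 /* channel count */,
            C2Config::PCM_16);
    if (reassembler) {                             // true once a frame size is configured
        std::list<std::unique_ptr<C2Work>> items;  // one C2Work per completed frame
        c2_status_t err = reassembler.process(inputBuffer, &items);
        // on C2_OK, queue `items` to the component
    }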
diff --git a/media/codec2/sfplugin/tests/Android.bp b/media/codec2/sfplugin/tests/Android.bp
index 5c774a2..d705cfd 100644
--- a/media/codec2/sfplugin/tests/Android.bp
+++ b/media/codec2/sfplugin/tests/Android.bp
@@ -5,6 +5,7 @@
srcs: [
"CCodecBuffers_test.cpp",
"CCodecConfig_test.cpp",
+ "FrameReassembler_test.cpp",
"ReflectedParamUpdater_test.cpp",
],
diff --git a/media/codec2/sfplugin/tests/FrameReassembler_test.cpp b/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
new file mode 100644
index 0000000..6738ee7
--- /dev/null
+++ b/media/codec2/sfplugin/tests/FrameReassembler_test.cpp
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FrameReassembler.h"
+
+#include <gtest/gtest.h>
+
+#include <C2PlatformSupport.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
+static size_t BytesPerSample(C2Config::pcm_encoding_t encoding) {
+ return encoding == PCM_8 ? 1
+ : encoding == PCM_16 ? 2
+ : encoding == PCM_FLOAT ? 4 : 0;
+}
+
+static uint64_t Diff(c2_cntr64_t a, c2_cntr64_t b) {
+ return std::abs((a - b).peek());
+}
+
+class FrameReassemblerTest : public ::testing::Test {
+public:
+ static const C2MemoryUsage kUsage;
+ static constexpr uint64_t kTimestampToleranceUs = 100;
+
+ FrameReassemblerTest() {
+ mInitStatus = GetCodec2BlockPool(C2BlockPool::BASIC_LINEAR, nullptr, &mPool);
+ }
+
+ status_t initStatus() const { return mInitStatus; }
+
+ void testPushSameSize(
+ size_t encoderFrameSize,
+ size_t sampleRate,
+ size_t channelCount,
+ C2Config::pcm_encoding_t encoding,
+ size_t inputFrameSizeInBytes,
+ size_t count,
+ size_t expectedOutputSize) {
+ FrameReassembler frameReassembler;
+ frameReassembler.init(
+ mPool,
+ kUsage,
+ encoderFrameSize,
+ sampleRate,
+ channelCount,
+ encoding);
+
+ ASSERT_TRUE(frameReassembler) << "FrameReassembler init failed";
+
+ size_t inputIndex = 0, outputIndex = 0;
+ size_t expectCount = 0;
+ for (size_t i = 0; i < count; ++i) {
+ sp<MediaCodecBuffer> buffer = new MediaCodecBuffer(
+ new AMessage, new ABuffer(inputFrameSizeInBytes));
+ buffer->setRange(0, inputFrameSizeInBytes);
+ buffer->meta()->setInt64(
+ "timeUs",
+ inputIndex * 1000000 / sampleRate / channelCount / BytesPerSample(encoding));
+ if (i == count - 1) {
+ buffer->meta()->setInt32("eos", 1);
+ }
+ for (size_t j = 0; j < inputFrameSizeInBytes; ++j, ++inputIndex) {
+ buffer->base()[j] = (inputIndex & 0xFF);
+ }
+ std::list<std::unique_ptr<C2Work>> items;
+ ASSERT_EQ(C2_OK, frameReassembler.process(buffer, &items));
+ while (!items.empty()) {
+ std::unique_ptr<C2Work> work = std::move(*items.begin());
+ items.erase(items.begin());
+ // Verify timestamp
+ uint64_t expectedTimeUs =
+ outputIndex * 1000000 / sampleRate / channelCount / BytesPerSample(encoding);
+ EXPECT_GE(
+ kTimestampToleranceUs,
+ Diff(expectedTimeUs, work->input.ordinal.timestamp))
+ << "expected timestamp: " << expectedTimeUs
+ << " actual timestamp: " << work->input.ordinal.timestamp.peeku()
+ << " output index: " << outputIndex;
+
+ // Verify buffer
+ ASSERT_EQ(1u, work->input.buffers.size());
+ std::shared_ptr<C2Buffer> buffer = work->input.buffers.front();
+ ASSERT_EQ(C2BufferData::LINEAR, buffer->data().type());
+ ASSERT_EQ(1u, buffer->data().linearBlocks().size());
+ C2ReadView view = buffer->data().linearBlocks().front().map().get();
+ ASSERT_EQ(C2_OK, view.error());
+ ASSERT_EQ(encoderFrameSize * BytesPerSample(encoding), view.capacity());
+ for (size_t j = 0; j < view.capacity(); ++j, ++outputIndex) {
+ ASSERT_TRUE(outputIndex < inputIndex
+ || inputIndex == inputFrameSizeInBytes * count);
+ uint8_t expected = outputIndex < inputIndex ? (outputIndex & 0xFF) : 0;
+ if (expectCount < 10) {
+ ++expectCount;
+ EXPECT_EQ(expected, view.data()[j]) << "output index = " << outputIndex;
+ }
+ }
+ }
+ }
+
+ ASSERT_EQ(inputFrameSizeInBytes * count, inputIndex);
+ size_t encoderFrameSizeInBytes =
+ encoderFrameSize * channelCount * BytesPerSample(encoding);
+ ASSERT_EQ(0, outputIndex % encoderFrameSizeInBytes)
+ << "output size must be multiple of frame size: output size = " << outputIndex
+ << " frame size = " << encoderFrameSizeInBytes;
+ ASSERT_EQ(expectedOutputSize, outputIndex)
+ << "output size must be smallest multiple of frame size, "
+ << "equal to or larger than input size. output size = " << outputIndex
+ << " input size = " << inputIndex << " frame size = " << encoderFrameSizeInBytes;
+ }
+
+private:
+ status_t mInitStatus;
+ std::shared_ptr<C2BlockPool> mPool;
+};
+
+const C2MemoryUsage FrameReassemblerTest::kUsage{C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+
+// Push frames with exactly the same size as the encoder requested.
+TEST_F(FrameReassemblerTest, PushExactFrameSize) {
+ ASSERT_EQ(OK, initStatus());
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 1024 /* input frame size in bytes = 1024 samples * 1 channel * 1 bytes/sample */,
+ 10 /* count */,
+ 10240 /* expected output size = 10 * 1024 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 2048 /* input frame size in bytes = 1024 samples * 1 channel * 2 bytes/sample */,
+ 10 /* count */,
+ 20480 /* expected output size = 10 * 2048 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 4096 /* input frame size in bytes = 1024 samples * 1 channel * 4 bytes/sample */,
+ 10 /* count */,
+ 40960 /* expected output size = 10 * 4096 bytes/frame */);
+}
+
+// Push frames with half the size that the encoder requested.
+TEST_F(FrameReassemblerTest, PushHalfFrameSize) {
+ ASSERT_EQ(OK, initStatus());
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 512 /* input frame size in bytes = 512 samples * 1 channel * 1 bytes per sample */,
+ 10 /* count */,
+ 5120 /* expected output size = 5 * 1024 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 1024 /* input frame size in bytes = 512 samples * 1 channel * 2 bytes per sample */,
+ 10 /* count */,
+ 10240 /* expected output size = 5 * 2048 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 2048 /* input frame size in bytes = 512 samples * 1 channel * 4 bytes per sample */,
+ 10 /* count */,
+ 20480 /* expected output size = 5 * 4096 bytes/frame */);
+}
+
+// Push frames with twice the size that the encoder requested.
+TEST_F(FrameReassemblerTest, PushDoubleFrameSize) {
+ ASSERT_EQ(OK, initStatus());
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 2048 /* input frame size in bytes = 2048 samples * 1 channel * 1 bytes per sample */,
+ 10 /* count */,
+ 20480 /* expected output size = 20 * 1024 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 4096 /* input frame size in bytes = 2048 samples * 1 channel * 2 bytes per sample */,
+ 10 /* count */,
+ 40960 /* expected output size = 20 * 2048 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 8192 /* input frame size in bytes = 2048 samples * 1 channel * 4 bytes per sample */,
+ 10 /* count */,
+ 81920 /* expected output size = 20 * 4096 bytes/frame */);
+}
+
+// Push frames with a little bit larger (+5 samples) than the requested size.
+TEST_F(FrameReassemblerTest, PushLittleLargerFrameSize) {
+ ASSERT_EQ(OK, initStatus());
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 1029 /* input frame size in bytes = 1029 samples * 1 channel * 1 bytes per sample */,
+ 10 /* count */,
+ 11264 /* expected output size = 11 * 1024 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 2058 /* input frame size in bytes = 1029 samples * 1 channel * 2 bytes per sample */,
+ 10 /* count */,
+ 22528 /* expected output size = 11 * 2048 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 4116 /* input frame size in bytes = 1029 samples * 1 channel * 4 bytes per sample */,
+ 10 /* count */,
+ 45056 /* expected output size = 11 * 4096 bytes/frame */);
+}
+
+// Push frames with a little bit smaller (-5 samples) than the requested size.
+TEST_F(FrameReassemblerTest, PushLittleSmallerFrameSize) {
+ ASSERT_EQ(OK, initStatus());
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 1019 /* input frame size in bytes = 1019 samples * 1 channel * 1 bytes per sample */,
+ 10 /* count */,
+ 10240 /* expected output size = 10 * 1024 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 2038 /* input frame size in bytes = 1019 samples * 1 channel * 2 bytes per sample */,
+ 10 /* count */,
+ 20480 /* expected output size = 10 * 2048 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 4076 /* input frame size in bytes = 1019 samples * 1 channel * 4 bytes per sample */,
+ 10 /* count */,
+ 40960 /* expected output size = 10 * 4096 bytes/frame */);
+}
+
+// Push single-byte frames
+TEST_F(FrameReassemblerTest, PushSingleByte) {
+ ASSERT_EQ(OK, initStatus());
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 1 /* input frame size in bytes */,
+ 100000 /* count */,
+ 100352 /* expected output size = 98 * 1024 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 1 /* input frame size in bytes */,
+ 100000 /* count */,
+ 100352 /* expected output size = 49 * 2048 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 1 /* input frame size in bytes */,
+ 100000 /* count */,
+ 102400 /* expected output size = 25 * 4096 bytes/frame */);
+}
+
+// Push one big chunk.
+TEST_F(FrameReassemblerTest, PushBigChunk) {
+ ASSERT_EQ(OK, initStatus());
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_8,
+ 100000 /* input frame size in bytes */,
+ 1 /* count */,
+ 100352 /* expected output size = 98 * 1024 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_16,
+ 100000 /* input frame size in bytes */,
+ 1 /* count */,
+ 100352 /* expected output size = 49 * 2048 bytes/frame */);
+ testPushSameSize(
+ 1024 /* frame size in samples */,
+ 48000 /* sample rate */,
+ 1 /* channel count */,
+ PCM_FLOAT,
+ 100000 /* input frame size in bytes */,
+ 1 /* count */,
+ 102400 /* expected output size = 25 * 4096 bytes/frame */);
+}
+
+} // namespace android
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index dee3bf6..74ef9ea 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -102,16 +102,30 @@
}
static bool using_ion(void) {
- static int cached_result = -1;
-
- if (cached_result == -1) {
+ static int cached_result = []()->int {
struct stat buffer;
- cached_result = (stat("/dev/ion", &buffer) == 0);
- if (cached_result)
+ int ret = (stat("/dev/ion", &buffer) == 0);
+
+ if (property_get_int32("debug.c2.use_dmabufheaps", 0)) {
+ /*
+ * Double check that the system heap is present so we
+ * can gracefully fail back to ION if we cannot satisfy
+ * the override
+ */
+ ret = (stat("/dev/dma_heap/system", &buffer) != 0);
+ if (ret)
+ ALOGE("debug.c2.use_dmabufheaps set, but no system heap. Ignoring override!");
+ else
+ ALOGD("debug.c2.use_dmabufheaps set, forcing DMABUF Heaps");
+ }
+
+ if (ret)
ALOGD("Using ION\n");
else
ALOGD("Using DMABUF Heaps\n");
- }
+ return ret;
+ }();
+
return (cached_result == 1);
}
diff --git a/media/extractors/aac/AACExtractor.cpp b/media/extractors/aac/AACExtractor.cpp
index 8f60f6b..2fc4584 100644
--- a/media/extractors/aac/AACExtractor.cpp
+++ b/media/extractors/aac/AACExtractor.cpp
@@ -18,6 +18,8 @@
#define LOG_TAG "AACExtractor"
#include <utils/Log.h>
+#include <inttypes.h>
+
#include "AACExtractor.h"
#include <media/MediaExtractorPluginApi.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -277,7 +279,22 @@
ReadOptions::SeekMode mode;
if (options && options->getSeekTo(&seekTimeUs, &mode)) {
if (mFrameDurationUs > 0) {
- int64_t seekFrame = seekTimeUs / mFrameDurationUs;
+ int64_t seekFrame = 0;
+ switch(mode & 0x7) {
+ case ReadOptions::SEEK_NEXT_SYNC:
+ // "at or after"
+ seekFrame = (seekTimeUs + mFrameDurationUs - 1) / mFrameDurationUs;
+ break;
+ case ReadOptions::SEEK_CLOSEST_SYNC:
+ case ReadOptions::SEEK_CLOSEST:
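+ // round to the nearest frame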
+ seekFrame = (seekTimeUs + mFrameDurationUs/2) / mFrameDurationUs;
+ break;
+ case ReadOptions::SEEK_PREVIOUS_SYNC:
+ default:
+ // 'at or before'
+ seekFrame = seekTimeUs / mFrameDurationUs;
+ break;
+ }
if (seekFrame < 0 || seekFrame >= (int64_t)mOffsetVector.size()) {
android_errorWriteLog(0x534e4554, "70239507");
return AMEDIA_ERROR_MALFORMED;
diff --git a/media/extractors/amr/AMRExtractor.cpp b/media/extractors/amr/AMRExtractor.cpp
index 26431a4..e26ff0a 100644
--- a/media/extractors/amr/AMRExtractor.cpp
+++ b/media/extractors/amr/AMRExtractor.cpp
@@ -18,6 +18,8 @@
#define LOG_TAG "AMRExtractor"
#include <utils/Log.h>
+#include <inttypes.h>
+
#include "AMRExtractor.h"
#include <media/stagefright/foundation/ADebug.h>
@@ -283,8 +285,22 @@
ReadOptions::SeekMode mode;
if (mOffsetTableLength > 0 && options && options->getSeekTo(&seekTimeUs, &mode)) {
size_t size;
- int64_t seekFrame = seekTimeUs / 20000LL; // 20ms per frame.
- mCurrentTimeUs = seekFrame * 20000LL;
+ const int64_t frameDurationUs = 20000LL; // 20ms per frame.
+ int64_t seekFrame = 0;
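+ // Map the seek time to a frame index; rounding depends on the seek mode.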
+ switch(mode & 0x7) {
+ case ReadOptions::SEEK_NEXT_SYNC:
+ seekFrame = (seekTimeUs + frameDurationUs - 1) / frameDurationUs;
+ break;
+ case ReadOptions::SEEK_CLOSEST_SYNC:
+ case ReadOptions::SEEK_CLOSEST:
+ seekFrame = (seekTimeUs + frameDurationUs/2) / frameDurationUs;
+ break;
+ case ReadOptions::SEEK_PREVIOUS_SYNC:
+ default:
+ seekFrame = seekTimeUs / frameDurationUs;
+ break;
+ }
+ mCurrentTimeUs = seekFrame * frameDurationUs;
size_t index = seekFrame < 0 ? 0 : seekFrame / 50;
if (index >= mOffsetTableLength) {
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 5165822..5bbabdf 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -425,8 +425,7 @@
if (mInitCheck != OK || index != 0) {
return AMEDIA_ERROR_UNKNOWN;
}
- AMediaFormat_copy(meta, mMeta);
- return AMEDIA_OK;
+ return AMediaFormat_copy(meta, mMeta);
}
////////////////////////////////////////////////////////////////////////////////
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 221bf4f..314a822 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -640,8 +640,7 @@
}
}
- AMediaFormat_copy(meta, track->meta);
- return AMEDIA_OK;
+ return AMediaFormat_copy(meta, track->meta);
}
status_t MPEG4Extractor::readMetaData() {
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index 9e093eb..2e68809 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -268,6 +268,9 @@
media_status_t MPEG2TSExtractor::getTrackMetaData(
AMediaFormat *meta,
size_t index, uint32_t /* flags */) {
+ if (meta == nullptr) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
sp<MetaData> implMeta = index < mSourceImpls.size()
? mSourceImpls.editItemAt(index)->getFormat() : NULL;
if (implMeta == NULL) {
diff --git a/media/extractors/tests/ExtractorUnitTest.cpp b/media/extractors/tests/ExtractorUnitTest.cpp
index d91fffa..84ec1f2 100644
--- a/media/extractors/tests/ExtractorUnitTest.cpp
+++ b/media/extractors/tests/ExtractorUnitTest.cpp
@@ -18,6 +18,8 @@
#define LOG_TAG "ExtractorUnitTest"
#include <utils/Log.h>
+#include <inttypes.h>
+
#include <datasource/FileSource.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaCodecConstants.h>
@@ -503,7 +505,7 @@
&trackSampleRate));
ASSERT_EQ(exChannelCount, trackChannelCount) << "ChannelCount not as expected";
ASSERT_EQ(exSampleRate, trackSampleRate) << "SampleRate not as expected";
- } else {
+ } else if (!strncmp(extractorMime, "video/", 6)) {
int32_t exWidth, exHeight;
int32_t trackWidth, trackHeight;
ASSERT_TRUE(AMediaFormat_getInt32(extractorFormat, AMEDIAFORMAT_KEY_WIDTH, &exWidth));
@@ -512,6 +514,8 @@
ASSERT_TRUE(AMediaFormat_getInt32(trackFormat, AMEDIAFORMAT_KEY_HEIGHT, &trackHeight));
ASSERT_EQ(exWidth, trackWidth) << "Width not as expected";
ASSERT_EQ(exHeight, trackHeight) << "Height not as expected";
+ } else {
+ ALOGV("non a/v track");
}
status = cTrack->stop(track);
ASSERT_EQ(OK, status) << "Failed to stop the track";
@@ -568,8 +572,9 @@
TEST_P(ExtractorFunctionalityTest, SeekTest) {
if (mDisableTest) return;
- ALOGV("Validates %s Extractor behaviour for different seek modes", mContainer.c_str());
string inputFileName = gEnv->getRes() + get<1>(GetParam());
+ ALOGV("Validates %s Extractor behaviour for different seek modes filename %s",
+ mContainer.c_str(), inputFileName.c_str());
int32_t status = setDataSource(inputFileName);
ASSERT_EQ(status, 0) << "SetDataSource failed for" << mContainer << "extractor";
@@ -680,7 +685,8 @@
if (seekIdx >= seekablePointsSize) seekIdx = seekablePointsSize - 1;
int64_t seekToTimeStamp = seekablePoints[seekIdx];
- if (seekablePointsSize > 1) {
+ if (seekIdx > 1) {
+ // pick a time just earlier than this seek point
int64_t prevTimeStamp = seekablePoints[seekIdx - 1];
seekToTimeStamp = seekToTimeStamp - ((seekToTimeStamp - prevTimeStamp) >> 3);
}
@@ -711,11 +717,7 @@
// CMediaTrackReadOptions::SEEK is 8. Using mask 0111b to get true modes
switch (mode & 0x7) {
case CMediaTrackReadOptions::SEEK_PREVIOUS_SYNC:
- if (seekablePointsSize == 1) {
- EXPECT_EQ(timeStamp, seekablePoints[seekIdx]);
- } else {
- EXPECT_EQ(timeStamp, seekablePoints[seekIdx - 1]);
- }
+ EXPECT_EQ(timeStamp, seekablePoints[seekIdx > 0 ? (seekIdx - 1) : 0]);
break;
case CMediaTrackReadOptions::SEEK_NEXT_SYNC:
case CMediaTrackReadOptions::SEEK_CLOSEST_SYNC:
@@ -743,8 +745,9 @@
// TODO(b/155630778): Enable test for wav extractors
if (mExtractorName == WAV) return;
- ALOGV("Validates %s Extractor behaviour for invalid seek points", mContainer.c_str());
string inputFileName = gEnv->getRes() + get<1>(GetParam());
+ ALOGV("Validates %s Extractor behaviour for invalid seek points, filename %s",
+ mContainer.c_str(), inputFileName.c_str());
int32_t status = setDataSource(inputFileName);
ASSERT_EQ(status, 0) << "SetDataSource failed for" << mContainer << "extractor";
@@ -832,8 +835,9 @@
// TODO(b/155626946): Enable test for MPEG2 TS/PS extractors
if (mExtractorName == MPEG2TS || mExtractorName == MPEG2PS) return;
- ALOGV("Validates %s Extractor behaviour for invalid tracks", mContainer.c_str());
string inputFileName = gEnv->getRes() + get<1>(GetParam());
+ ALOGV("Validates %s Extractor behaviour for invalid tracks - file %s",
+ mContainer.c_str(), inputFileName.c_str());
int32_t status = setDataSource(inputFileName);
ASSERT_EQ(status, 0) << "SetDataSource failed for" << mContainer << "extractor";
@@ -872,13 +876,17 @@
TEST_P(ConfigParamTest, ConfigParamValidation) {
if (mDisableTest) return;
+ const int trackNumber = 0;
+
string container = GetParam().first;
- ALOGV("Validates %s Extractor for input's file properties", container.c_str());
string inputFileName = gEnv->getRes();
inputID inputFileId = GetParam().second;
configFormat configParam;
getFileProperties(inputFileId, inputFileName, configParam);
+ ALOGV("Validates %s Extractor for input's file properties, file %s",
+ container.c_str(), inputFileName.c_str());
+
int32_t status = setDataSource(inputFileName);
ASSERT_EQ(status, 0) << "SetDataSource failed for " << container << "extractor";
@@ -888,7 +896,7 @@
int32_t numTracks = mExtractor->countTracks();
ASSERT_GT(numTracks, 0) << "Extractor didn't find any track for the given clip";
- MediaTrackHelper *track = mExtractor->getTrack(0);
+ MediaTrackHelper *track = mExtractor->getTrack(trackNumber);
ASSERT_NE(track, nullptr) << "Failed to get track for index 0";
AMediaFormat *trackFormat = AMediaFormat_new();
@@ -910,7 +918,7 @@
AMediaFormat_getInt32(trackFormat, AMEDIAFORMAT_KEY_SAMPLE_RATE, &trackSampleRate));
ASSERT_EQ(configParam.sampleRate, trackSampleRate) << "SampleRate not as expected";
ASSERT_EQ(configParam.channelCount, trackChannelCount) << "ChannelCount not as expected";
- } else {
+ } else if (!strncmp(trackMime, "video/", 6)) {
int32_t trackWidth, trackHeight;
ASSERT_TRUE(AMediaFormat_getInt32(trackFormat, AMEDIAFORMAT_KEY_WIDTH, &trackWidth));
ASSERT_TRUE(AMediaFormat_getInt32(trackFormat, AMEDIAFORMAT_KEY_HEIGHT, &trackHeight));
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index ea4fe04..9072886 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -1049,7 +1049,6 @@
// Stream Control
// ============================================================
-#if __ANDROID_API__ >= 30
/**
* Free the audio resources associated with a stream created by
* AAudioStreamBuilder_openStream().
@@ -1067,11 +1066,12 @@
* On other "Legacy" streams some audio resources will still be in use
* and some callbacks may still be in process after this call.
*
+ * Available since API level 30.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
AAUDIO_API aaudio_result_t AAudioStream_release(AAudioStream* stream) __INTRODUCED_IN(30);
-#endif // __ANDROID_API__
/**
* Delete the internal data structures associated with the stream created
@@ -1079,6 +1079,8 @@
*
* If AAudioStream_release() has not been called then it will be called automatically.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 431f0fa..a4beaf4 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -56,6 +56,12 @@
ALOGE_IF(mHasThread, "%s() callback thread never join()ed", __func__);
+ if (!mMetricsId.empty()) {
+ android::mediametrics::LogItem(mMetricsId)
+ .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM)
+ .record();
+ }
+
// If the stream is deleted when OPEN or in use then audio resources will leak.
// This would indicate an internal error. So we want to find this ASAP.
LOG_ALWAYS_FATAL_IF(!(getState() == AAUDIO_STREAM_STATE_CLOSED
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 5ce5974..14578d6 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -980,12 +980,14 @@
legacy2aidl_audio_config_t_AudioConfig(*config));
int32_t flagsAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
+ int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_handle_t_int32_t(*selectedDeviceId));
media::GetOutputForAttrResponse responseAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getOutputForAttr(attrAidl, sessionAidl, pidAidl, uidAidl, configAidl, flagsAidl,
- &responseAidl)));
+ selectedDeviceIdAidl, &responseAidl)));
*output = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_io_handle_t(responseAidl.output));
@@ -1077,12 +1079,15 @@
media::AudioConfigBase configAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_config_base_t_AudioConfigBase(*config));
int32_t flagsAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
+ int32_t selectedDeviceIdAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_handle_t_int32_t(*selectedDeviceId));
media::GetInputForAttrResponse response;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
aps->getInputForAttr(attrAidl, inputAidl, riidAidl, sessionAidl, pidAidl, uidAidl,
- opPackageNameAidl, configAidl, flagsAidl, &response)));
+ opPackageNameAidl, configAidl, flagsAidl, selectedDeviceIdAidl,
+ &response)));
*input = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(response.input));
*selectedDeviceId = VALUE_OR_RETURN_STATUS(
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index f3a086d..75dfc36 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -83,7 +83,8 @@
int /* pid_t */ pid,
int /* uid_t */ uid,
in AudioConfig config,
- int /* Bitmask, indexed by AudioOutputFlags */ flags);
+ int /* Bitmask, indexed by AudioOutputFlags */ flags,
+ int /* audio_port_handle_t */ selectedDeviceId);
void startOutput(int /* audio_port_handle_t */ portId);
@@ -99,7 +100,9 @@
int /* uid_t */ uid,
@utf8InCpp String opPackageName,
in AudioConfigBase config,
- int /* Bitmask, indexed by AudioInputFlags */ flags);
+ int /* Bitmask, indexed by AudioInputFlags */ flags,
+ int /* audio_port_handle_t */ selectedDeviceId);
+
void startInput(int /* audio_port_handle_t */ portId);
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
new file mode 100644
index 0000000..edfc5a5
--- /dev/null
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_fuzz {
+ name: "audioflinger_fuzzer",
+ srcs: [
+ "audioflinger_fuzzer.cpp",
+ ],
+ static_libs: [
+ "android.hardware.audio.common@7.0-enums",
+ "effect-aidl-unstable-cpp",
+ "libaudioclient",
+ "libbase",
+ "libcgrouprc",
+ "libcgrouprc_format",
+ "libcutils",
+ "libjsoncpp",
+ "liblog",
+ "libmediametrics",
+ "libmediametricsservice",
+ "libmedia_helper",
+ "libprocessgroup",
+ "shared-file-region-aidl-unstable-cpp",
+ ],
+ shared_libs: [
+ "android.hardware.audio.common-util",
+ "audioclient-types-aidl-unstable-cpp",
+ "audioflinger-aidl-unstable-cpp",
+ "audiopolicy-aidl-unstable-cpp",
+ "audiopolicy-types-aidl-unstable-cpp",
+ "av-types-aidl-unstable-cpp",
+ "capture_state_listener-aidl-unstable-cpp",
+ "libaudioclient_aidl_conversion",
+ "libaudioflinger",
+ "libaudiofoundation",
+ "libaudiomanager",
+ "libaudiopolicy",
+ "libaudioutils",
+ "libbinder",
+ "libdl",
+ "libmediautils",
+ "libnblog",
+ "libutils",
+ "libxml2",
+ "mediametricsservice-aidl-unstable-cpp",
+ ],
+ header_libs: [
+ "libaudiofoundation_headers",
+ "libmedia_headers",
+ ],
+ fuzz_config: {
+ cc: [
+ "android-media-fuzzing-reports@google.com",
+ ],
+ componentid: 155276,
+ },
+}
diff --git a/media/libaudioclient/fuzzer/README.md b/media/libaudioclient/fuzzer/README.md
new file mode 100644
index 0000000..ada6c49
--- /dev/null
+++ b/media/libaudioclient/fuzzer/README.md
@@ -0,0 +1,80 @@
+# Fuzzer for libaudioflinger
+
+## Plugin Design Considerations
+The fuzzer plugin for libaudioflinger is designed with an understanding of the
+library and aims to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer. The fuzzer
+covers libaudioflinger APIs as called from libaudioclient through IPC.
+
+libaudioflinger supports the following parameters:
+1. Unique IDs (parameter name: `uniqueId`)
+2. Audio Mode (parameter name: `mode`)
+3. Session ID (parameter name: `sessionId`)
+4. Encapsulation Mode (parameter name: `encapsulationMode`)
+5. Audio Port Role (parameter name: `portRole`)
+6. Audio Port Type (parameter name: `portType`)
+7. Audio Stream Type (parameter name: `streamType`)
+8. Audio Format (parameter name: `format`)
+9. Audio Channel Mask (parameter name: `channelMask`)
+10. Usage (parameter name: `usage`)
+11. Audio Content Type (parameter name: `contentType`)
+12. Input Source (parameter name: `inputSource`)
+13. Input Flags (parameter name: `inputFlags`)
+14. Output Flags (parameter name: `outputFlags`)
+15. Audio Gain Mode (parameter name: `gainMode`)
+16. Audio Device (parameter name: `device`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `uniqueId` | 0. `AUDIO_UNIQUE_ID_USE_UNSPECIFIED` 1. `AUDIO_UNIQUE_ID_USE_SESSION` 2. `AUDIO_UNIQUE_ID_USE_MODULE` 3. `AUDIO_UNIQUE_ID_USE_EFFECT` 4. `AUDIO_UNIQUE_ID_USE_PATCH` 5. `AUDIO_UNIQUE_ID_USE_OUTPUT` 6. `AUDIO_UNIQUE_ID_USE_INPUT` 7. `AUDIO_UNIQUE_ID_USE_CLIENT` 8. `AUDIO_UNIQUE_ID_USE_MAX` | Value obtained from FuzzedDataProvider|
+| `mode` | 0. `AUDIO_MODE_INVALID` 1. `AUDIO_MODE_CURRENT` 2. `AUDIO_MODE_NORMAL` 3. `AUDIO_MODE_RINGTONE` 4. `AUDIO_MODE_IN_CALL` 5. `AUDIO_MODE_IN_COMMUNICATION` 6. `AUDIO_MODE_CALL_SCREEN` | Value obtained from FuzzedDataProvider|
+| `sessionId` | 0. `AUDIO_SESSION_NONE` 1. `AUDIO_SESSION_OUTPUT_STAGE` 2. `AUDIO_SESSION_DEVICE` | Value obtained from FuzzedDataProvider|
+| `encapsulationMode` | 0. `AUDIO_ENCAPSULATION_MODE_NONE` 1. `AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM` 2. `AUDIO_ENCAPSULATION_MODE_HANDLE` | Value obtained from FuzzedDataProvider|
+| `portRole` | 0. `AUDIO_PORT_ROLE_NONE` 1. `AUDIO_PORT_ROLE_SOURCE` 2. `AUDIO_PORT_ROLE_SINK` | Value obtained from FuzzedDataProvider|
+| `portType` | 0. `AUDIO_PORT_TYPE_NONE` 1. `AUDIO_PORT_TYPE_DEVICE` 2. `AUDIO_PORT_TYPE_MIX` 3. `AUDIO_PORT_TYPE_SESSION`| Value obtained from FuzzedDataProvider|
+| `streamType` | 15 values of type `audio_stream_type_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `format` | 77 values of type `audio_format_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `channelMask` | 83 values of type `audio_channel_mask_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `usage` | 22 values of type `audio_usage_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `contentType` | 5 values of type `audio_content_type_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `inputSource` | 14 values of type `audio_source_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `inputFlags` | 9 values of type `audio_input_flags_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `outputFlags` | 16 values of type `audio_output_flags_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `gainMode` | 3 values of type `audio_gain_mode_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `device` | 66 values of type `audio_devices_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+
+This also ensures that the plugin is always deterministic for any given input.
+
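+Each parameter is either taken verbatim from the fuzzed input or picked from the
+table of valid values above, mirroring the `getValue` helpers defined in
+`audioflinger_fuzzer.cpp` (a minimal sketch, not the exact implementation):
+```
+// Illustrative sketch; see getValue()/getValueFromVector() in audioflinger_fuzzer.cpp.
+#include <vector>
+
+#include "fuzzer/FuzzedDataProvider.h"
+
+template <typename T>
+T pickValue(FuzzedDataProvider *fdp, const std::vector<T> &valid) {
+    if (fdp->ConsumeBool()) {
+        // Occasionally feed an arbitrary value to exercise error paths.
+        return static_cast<T>(fdp->ConsumeIntegral<int32_t>());
+    }
+    // Otherwise pick an index into the list of valid values.
+    return valid[fdp->ConsumeIntegralInRange<int32_t>(0, valid.size() - 1)];
+}
+```
+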
+##### Maximize utilization of input data
+The plugin tolerates any kind of input (empty, huge, malformed, etc.) and
+doesn't `exit()` on any input, thereby increasing the chance of identifying
+vulnerabilities.
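+This relies in part on `FuzzedDataProvider` degrading gracefully once the input
+is exhausted: integral consumers return the minimum of their range (0 for
+unsigned types), `ConsumeBool()` returns false, and strings come back empty.
+For instance:
+```
+#include "fuzzer/FuzzedDataProvider.h"
+
+FuzzedDataProvider fdp(nullptr, 0);              // already exhausted
+uint32_t v = fdp.ConsumeIntegral<uint32_t>();    // 0 (minimum of the range)
+bool b = fdp.ConsumeBool();                      // false
+std::string s = fdp.ConsumeRandomLengthString(); // ""
+```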
+
+## Build
+
+This describes the steps to build the audioflinger_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer:
+```
+ $ mm -j$(nproc) audioflinger_fuzzer
+```
+
+#### Steps to run
+Create a directory CORPUS_DIR and copy some files into it.
+Push this directory to the device.
+
+To run on the device:
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/arm64/audioflinger_fuzzer/audioflinger_fuzzer CORPUS_DIR
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.co
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
new file mode 100644
index 0000000..db2b0b8
--- /dev/null
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -0,0 +1,730 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * NOTE
+ * 1) The input to AudioFlinger binder calls are fuzzed in this fuzzer
+ * 2) AudioFlinger crashes due to the fuzzer are detected by the
+ Binder DeathRecipient, where the fuzzer aborts if AudioFlinger dies
+ */
+
+#include <android_audio_policy_configuration_V7_0-enums.h>
+#include <binder/IServiceManager.h>
+#include <binder/MemoryDealer.h>
+#include <media/AudioEffect.h>
+#include <media/AudioRecord.h>
+#include <media/AudioSystem.h>
+#include <media/AudioTrack.h>
+#include <media/IAudioFlinger.h>
+#include "fuzzer/FuzzedDataProvider.h"
+
+#define MAX_STRING_LENGTH 256
+#define MAX_ARRAY_LENGTH 256
+
+using namespace std;
+using namespace android;
+
+namespace xsd {
+using namespace ::android::audio::policy::configuration::V7_0;
+}
+
+constexpr audio_unique_id_use_t kUniqueIds[] = {
+ AUDIO_UNIQUE_ID_USE_UNSPECIFIED, AUDIO_UNIQUE_ID_USE_SESSION, AUDIO_UNIQUE_ID_USE_MODULE,
+ AUDIO_UNIQUE_ID_USE_EFFECT, AUDIO_UNIQUE_ID_USE_PATCH, AUDIO_UNIQUE_ID_USE_OUTPUT,
+ AUDIO_UNIQUE_ID_USE_INPUT, AUDIO_UNIQUE_ID_USE_CLIENT, AUDIO_UNIQUE_ID_USE_MAX,
+};
+
+constexpr audio_mode_t kModes[] = {
+ AUDIO_MODE_INVALID, AUDIO_MODE_CURRENT, AUDIO_MODE_NORMAL, AUDIO_MODE_RINGTONE,
+ AUDIO_MODE_IN_CALL, AUDIO_MODE_IN_COMMUNICATION, AUDIO_MODE_CALL_SCREEN};
+
+constexpr audio_session_t kSessionId[] = {AUDIO_SESSION_NONE, AUDIO_SESSION_OUTPUT_STAGE,
+ AUDIO_SESSION_DEVICE};
+
+constexpr audio_encapsulation_mode_t kEncapsulation[] = {
+ AUDIO_ENCAPSULATION_MODE_NONE,
+ AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM,
+ AUDIO_ENCAPSULATION_MODE_HANDLE,
+};
+
+constexpr audio_port_role_t kPortRoles[] = {
+ AUDIO_PORT_ROLE_NONE,
+ AUDIO_PORT_ROLE_SOURCE,
+ AUDIO_PORT_ROLE_SINK,
+};
+
+constexpr audio_port_type_t kPortTypes[] = {
+ AUDIO_PORT_TYPE_NONE,
+ AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_PORT_TYPE_MIX,
+ AUDIO_PORT_TYPE_SESSION,
+};
+
+template <typename T, typename X, typename FUNC>
+std::vector<T> getFlags(const xsdc_enum_range<X> &range, const FUNC &func,
+ const std::string &findString = {}) {
+ std::vector<T> vec;
+ for (const auto &xsdEnumVal : range) {
+ T enumVal;
+ std::string enumString = toString(xsdEnumVal);
+ if (enumString.find(findString) != std::string::npos &&
+ func(enumString.c_str(), &enumVal)) {
+ vec.push_back(enumVal);
+ }
+ }
+ return vec;
+}
+
+static const std::vector<audio_stream_type_t> kStreamtypes =
+ getFlags<audio_stream_type_t, xsd::AudioStreamType, decltype(audio_stream_type_from_string)>(
+ xsdc_enum_range<xsd::AudioStreamType>{}, audio_stream_type_from_string);
+
+static const std::vector<audio_format_t> kFormats =
+ getFlags<audio_format_t, xsd::AudioFormat, decltype(audio_format_from_string)>(
+ xsdc_enum_range<xsd::AudioFormat>{}, audio_format_from_string);
+
+static const std::vector<audio_channel_mask_t> kChannelMasks =
+ getFlags<audio_channel_mask_t, xsd::AudioChannelMask, decltype(audio_channel_mask_from_string)>(
+ xsdc_enum_range<xsd::AudioChannelMask>{}, audio_channel_mask_from_string);
+
+static const std::vector<audio_usage_t> kUsages =
+ getFlags<audio_usage_t, xsd::AudioUsage, decltype(audio_usage_from_string)>(
+ xsdc_enum_range<xsd::AudioUsage>{}, audio_usage_from_string);
+
+static const std::vector<audio_content_type_t> kContentType =
+ getFlags<audio_content_type_t, xsd::AudioContentType, decltype(audio_content_type_from_string)>(
+ xsdc_enum_range<xsd::AudioContentType>{}, audio_content_type_from_string);
+
+static const std::vector<audio_source_t> kInputSources =
+ getFlags<audio_source_t, xsd::AudioSource, decltype(audio_source_from_string)>(
+ xsdc_enum_range<xsd::AudioSource>{}, audio_source_from_string);
+
+static const std::vector<audio_gain_mode_t> kGainModes =
+ getFlags<audio_gain_mode_t, xsd::AudioGainMode, decltype(audio_gain_mode_from_string)>(
+ xsdc_enum_range<xsd::AudioGainMode>{}, audio_gain_mode_from_string);
+
+static const std::vector<audio_devices_t> kDevices =
+ getFlags<audio_devices_t, xsd::AudioDevice, decltype(audio_device_from_string)>(
+ xsdc_enum_range<xsd::AudioDevice>{}, audio_device_from_string);
+
+static const std::vector<audio_input_flags_t> kInputFlags =
+ getFlags<audio_input_flags_t, xsd::AudioInOutFlag, decltype(audio_input_flag_from_string)>(
+ xsdc_enum_range<xsd::AudioInOutFlag>{}, audio_input_flag_from_string, "_INPUT_");
+
+static const std::vector<audio_output_flags_t> kOutputFlags =
+ getFlags<audio_output_flags_t, xsd::AudioInOutFlag, decltype(audio_output_flag_from_string)>(
+ xsdc_enum_range<xsd::AudioInOutFlag>{}, audio_output_flag_from_string, "_OUTPUT_");
+
+template <typename T, size_t size>
+T getValueFromArray(FuzzedDataProvider *fdp, const T (&arr)[size]) {
+ return arr[fdp->ConsumeIntegralInRange<int32_t>(0, size - 1)];
+}
+
+template <typename T, size_t size>
+T getValue(FuzzedDataProvider *fdp, const T (&arr)[size]) {
+ if (fdp->ConsumeBool()) {
+ return static_cast<T>(fdp->ConsumeIntegral<int32_t>());
+ }
+ return getValueFromArray(fdp, arr);
+}
+
+template <typename T>
+T getValueFromVector(FuzzedDataProvider *fdp, std::vector<T> vec) {
+ return vec[fdp->ConsumeIntegralInRange<int32_t>(0, vec.size() - 1)];
+}
+
+template <typename T>
+T getValue(FuzzedDataProvider *fdp, std::vector<T> vec) {
+ if (fdp->ConsumeBool()) {
+ return static_cast<T>(fdp->ConsumeIntegral<int32_t>());
+ }
+ return getValueFromVector(fdp, vec);
+}
+
+class DeathNotifier : public IBinder::DeathRecipient {
+ public:
+ void binderDied(const wp<IBinder> &) { abort(); }
+};
+
+class AudioFlingerFuzzer {
+ public:
+ AudioFlingerFuzzer(const uint8_t *data, size_t size);
+ void process();
+
+ private:
+ FuzzedDataProvider mFdp;
+ void invokeAudioTrack();
+ void invokeAudioRecord();
+ status_t invokeAudioEffect();
+ void invokeAudioSystem();
+ status_t invokeAudioInputDevice();
+ status_t invokeAudioOutputDevice();
+ void invokeAudioPatch();
+
+ sp<DeathNotifier> mDeathNotifier;
+};
+
+AudioFlingerFuzzer::AudioFlingerFuzzer(const uint8_t *data, size_t size) : mFdp(data, size) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.audio_flinger"));
+ if (binder == nullptr) {
+ return;
+ }
+ mDeathNotifier = new DeathNotifier();
+ binder->linkToDeath(mDeathNotifier);
+}
+
+void AudioFlingerFuzzer::invokeAudioTrack() {
+ uint32_t sampleRate = mFdp.ConsumeIntegral<uint32_t>();
+ audio_format_t format = getValue(&mFdp, kFormats);
+ audio_channel_mask_t channelMask = getValue(&mFdp, kChannelMasks);
+ size_t frameCount = static_cast<size_t>(mFdp.ConsumeIntegral<uint32_t>());
+ int32_t notificationFrames = mFdp.ConsumeIntegral<int32_t>();
+ uint32_t useSharedBuffer = mFdp.ConsumeBool();
+ audio_output_flags_t flags = getValue(&mFdp, kOutputFlags);
+ audio_session_t sessionId = getValue(&mFdp, kSessionId);
+ audio_usage_t usage = getValue(&mFdp, kUsages);
+ audio_content_type_t contentType = getValue(&mFdp, kContentType);
+ audio_attributes_t attributes = {};
+ sp<IMemory> sharedBuffer;
+ sp<MemoryDealer> heap = nullptr;
+ audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+
+ bool offload = false;
+ bool fast = ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0);
+
+ if (useSharedBuffer != 0) {
+ size_t heapSize = audio_channel_count_from_out_mask(channelMask) *
+ audio_bytes_per_sample(format) * frameCount;
+ heap = new MemoryDealer(heapSize, "AudioTrack Heap Base");
+ sharedBuffer = heap->allocate(heapSize);
+ frameCount = 0;
+ notificationFrames = 0;
+ }
+ if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ offloadInfo.sample_rate = sampleRate;
+ offloadInfo.channel_mask = channelMask;
+ offloadInfo.format = format;
+ offload = true;
+ }
+
+ attributes.content_type = contentType;
+ attributes.usage = usage;
+ sp<AudioTrack> track = new AudioTrack();
+
+ track->set(AUDIO_STREAM_DEFAULT, sampleRate, format, channelMask, frameCount, flags, nullptr,
+ nullptr, notificationFrames, sharedBuffer, false, sessionId,
+ ((fast && sharedBuffer == 0) || offload) ? AudioTrack::TRANSFER_CALLBACK
+ : AudioTrack::TRANSFER_DEFAULT,
+ offload ? &offloadInfo : nullptr, getuid(), getpid(), &attributes, false, 1.0f,
+ AUDIO_PORT_HANDLE_NONE);
+
+ status_t status = track->initCheck();
+ if (status != NO_ERROR) {
+ track.clear();
+ return;
+ }
+ track->getSampleRate();
+ track->latency();
+ track->getUnderrunCount();
+ track->streamType();
+ track->channelCount();
+ track->getNotificationPeriodInFrames();
+ uint32_t bufferSizeInFrames = mFdp.ConsumeIntegral<uint32_t>();
+ track->setBufferSizeInFrames(bufferSizeInFrames);
+ track->getBufferSizeInFrames();
+
+ int64_t duration = mFdp.ConsumeIntegral<int64_t>();
+ track->getBufferDurationInUs(&duration);
+ sp<IMemory> sharedBuffer2 = track->sharedBuffer();
+ track->setCallerName(mFdp.ConsumeRandomLengthString(MAX_STRING_LENGTH));
+
+ track->setVolume(mFdp.ConsumeFloatingPoint<float>(), mFdp.ConsumeFloatingPoint<float>());
+ track->setVolume(mFdp.ConsumeFloatingPoint<float>());
+ track->setAuxEffectSendLevel(mFdp.ConsumeFloatingPoint<float>());
+
+ float auxEffectSendLevel;
+ track->getAuxEffectSendLevel(&auxEffectSendLevel);
+ track->setSampleRate(mFdp.ConsumeIntegral<uint32_t>());
+ track->getSampleRate();
+ track->getOriginalSampleRate();
+
+ AudioPlaybackRate playbackRate = {};
+ playbackRate.mSpeed = mFdp.ConsumeFloatingPoint<float>();
+ playbackRate.mPitch = mFdp.ConsumeFloatingPoint<float>();
+ track->setPlaybackRate(playbackRate);
+ track->getPlaybackRate();
+ track->setLoop(mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeIntegral<uint32_t>(),
+ mFdp.ConsumeIntegral<uint32_t>());
+ track->setMarkerPosition(mFdp.ConsumeIntegral<uint32_t>());
+
+ uint32_t marker = {};
+ track->getMarkerPosition(&marker);
+ track->setPositionUpdatePeriod(mFdp.ConsumeIntegral<uint32_t>());
+
+ uint32_t updatePeriod = {};
+ track->getPositionUpdatePeriod(&updatePeriod);
+ track->setPosition(mFdp.ConsumeIntegral<uint32_t>());
+ uint32_t position = {};
+ track->getPosition(&position);
+ track->getBufferPosition(&position);
+ track->reload();
+ track->start();
+ track->pause();
+ track->flush();
+ track->stop();
+ track->stopped();
+}
+
+void AudioFlingerFuzzer::invokeAudioRecord() {
+ int32_t notificationFrames = mFdp.ConsumeIntegral<int32_t>();
+ uint32_t sampleRate = mFdp.ConsumeIntegral<uint32_t>();
+ size_t frameCount = static_cast<size_t>(mFdp.ConsumeIntegral<uint32_t>());
+ audio_format_t format = getValue(&mFdp, kFormats);
+ audio_channel_mask_t channelMask = getValue(&mFdp, kChannelMasks);
+ audio_input_flags_t flags = getValue(&mFdp, kInputFlags);
+ audio_session_t sessionId = getValue(&mFdp, kSessionId);
+ audio_source_t inputSource = getValue(&mFdp, kInputSources);
+
+ audio_attributes_t attributes = {};
+ bool fast = ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0);
+
+ attributes.source = inputSource;
+
+ sp<AudioRecord> record = new AudioRecord(String16(mFdp.ConsumeRandomLengthString().c_str()));
+ record->set(AUDIO_SOURCE_DEFAULT, sampleRate, format, channelMask, frameCount, nullptr, nullptr,
+ notificationFrames, false, sessionId,
+ fast ? AudioRecord::TRANSFER_CALLBACK : AudioRecord::TRANSFER_DEFAULT, flags,
+ getuid(), getpid(), &attributes, AUDIO_PORT_HANDLE_NONE);
+ status_t status = record->initCheck();
+ if (status != NO_ERROR) {
+ return;
+ }
+ record->latency();
+ record->format();
+ record->channelCount();
+ record->frameCount();
+ record->frameSize();
+ record->inputSource();
+ record->getNotificationPeriodInFrames();
+ record->start();
+ record->stop();
+ record->stopped();
+
+ uint32_t marker = mFdp.ConsumeIntegral<uint32_t>();
+ record->setMarkerPosition(marker);
+ record->getMarkerPosition(&marker);
+
+ uint32_t updatePeriod = mFdp.ConsumeIntegral<uint32_t>();
+ record->setPositionUpdatePeriod(updatePeriod);
+ record->getPositionUpdatePeriod(&updatePeriod);
+
+ uint32_t position;
+ record->getPosition(&position);
+
+ ExtendedTimestamp timestamp;
+ record->getTimestamp(&timestamp);
+ record->getSessionId();
+ record->getCallerName();
+ android::AudioRecord::Buffer audioBuffer;
+ int32_t waitCount = mFdp.ConsumeIntegral<int32_t>();
+ size_t nonContig = static_cast<size_t>(mFdp.ConsumeIntegral<uint32_t>());
+ audioBuffer.frameCount = static_cast<size_t>(mFdp.ConsumeIntegral<uint32_t>());
+ record->obtainBuffer(&audioBuffer, waitCount, &nonContig);
+ bool blocking = false;
+ record->read(audioBuffer.raw, audioBuffer.size, blocking);
+ record->getInputFramesLost();
+ record->getFlags();
+
+ std::vector<media::MicrophoneInfo> activeMicrophones;
+ record->getActiveMicrophones(&activeMicrophones);
+ record->releaseBuffer(&audioBuffer);
+
+ audio_port_handle_t deviceId =
+ static_cast<audio_port_handle_t>(mFdp.ConsumeIntegral<int32_t>());
+ record->setInputDevice(deviceId);
+ record->getInputDevice();
+ record->getRoutedDeviceId();
+ record->getPortId();
+}
+
+struct EffectClient : public android::media::BnEffectClient {
+ EffectClient() {}
+ binder::Status controlStatusChanged(bool controlGranted __unused) override {
+ return binder::Status::ok();
+ }
+ binder::Status enableStatusChanged(bool enabled __unused) override {
+ return binder::Status::ok();
+ }
+ binder::Status commandExecuted(int32_t cmdCode __unused,
+ const std::vector<uint8_t> &cmdData __unused,
+ const std::vector<uint8_t> &replyData __unused) override {
+ return binder::Status::ok();
+ }
+};
+
+status_t AudioFlingerFuzzer::invokeAudioEffect() {
+ effect_uuid_t type;
+ type.timeLow = mFdp.ConsumeIntegral<uint32_t>();
+ type.timeMid = mFdp.ConsumeIntegral<uint16_t>();
+ type.timeHiAndVersion = mFdp.ConsumeIntegral<uint16_t>();
+ type.clockSeq = mFdp.ConsumeIntegral<uint16_t>();
+ for (int i = 0; i < 6; ++i) {
+ type.node[i] = mFdp.ConsumeIntegral<uint8_t>();
+ }
+
+ effect_descriptor_t descriptor = {};
+ descriptor.type = type;
+ descriptor.uuid = *EFFECT_UUID_NULL;
+
+ sp<EffectClient> effectClient(new EffectClient());
+
+ const int32_t priority = mFdp.ConsumeIntegral<int32_t>();
+ audio_session_t sessionId = static_cast<audio_session_t>(mFdp.ConsumeIntegral<int32_t>());
+ const audio_io_handle_t io = mFdp.ConsumeIntegral<int32_t>();
+ String16 opPackageName = static_cast<String16>(mFdp.ConsumeRandomLengthString().c_str());
+ AudioDeviceTypeAddr device;
+
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (!af) {
+ return NO_ERROR;
+ }
+
+ media::CreateEffectRequest request{};
+ request.desc =
+ VALUE_OR_RETURN_STATUS(legacy2aidl_effect_descriptor_t_EffectDescriptor(descriptor));
+ request.client = effectClient;
+ request.priority = priority;
+ request.output = io;
+ request.sessionId = sessionId;
+ request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(device));
+ request.opPackageName = VALUE_OR_RETURN_STATUS(legacy2aidl_String16_string(opPackageName));
+ request.pid = getpid();
+ request.probe = false;
+
+ media::CreateEffectResponse response{};
+ status_t status = af->createEffect(request, &response);
+
+ if (status != OK) {
+ return NO_ERROR;
+ }
+
+ descriptor =
+ VALUE_OR_RETURN_STATUS(aidl2legacy_EffectDescriptor_effect_descriptor_t(response.desc));
+
+ uint32_t numEffects;
+ af->queryNumberEffects(&numEffects);
+
+ uint32_t queryIndex = mFdp.ConsumeIntegral<uint32_t>();
+ af->queryEffect(queryIndex, &descriptor);
+
+ effect_descriptor_t getDescriptor;
+ uint32_t preferredTypeFlag = mFdp.ConsumeIntegral<int32_t>();
+ af->getEffectDescriptor(&descriptor.uuid, &descriptor.type, preferredTypeFlag, &getDescriptor);
+
+ sessionId = static_cast<audio_session_t>(mFdp.ConsumeIntegral<int32_t>());
+ audio_io_handle_t srcOutput = mFdp.ConsumeIntegral<int32_t>();
+ audio_io_handle_t dstOutput = mFdp.ConsumeIntegral<int32_t>();
+ af->moveEffects(sessionId, srcOutput, dstOutput);
+
+ int effectId = mFdp.ConsumeIntegral<int32_t>();
+ sessionId = static_cast<audio_session_t>(mFdp.ConsumeIntegral<int32_t>());
+ af->setEffectSuspended(effectId, sessionId, mFdp.ConsumeBool());
+ return NO_ERROR;
+}
+
+void AudioFlingerFuzzer::invokeAudioSystem() {
+ AudioSystem::muteMicrophone(mFdp.ConsumeBool());
+ AudioSystem::setMasterMute(mFdp.ConsumeBool());
+ AudioSystem::setMasterVolume(mFdp.ConsumeFloatingPoint<float>());
+ AudioSystem::setMasterBalance(mFdp.ConsumeFloatingPoint<float>());
+ AudioSystem::setVoiceVolume(mFdp.ConsumeFloatingPoint<float>());
+
+ float volume;
+ AudioSystem::getMasterVolume(&volume);
+
+ bool state;
+ AudioSystem::getMasterMute(&state);
+ AudioSystem::isMicrophoneMuted(&state);
+
+ audio_stream_type_t stream = getValue(&mFdp, kStreamtypes);
+ AudioSystem::setStreamMute(getValue(&mFdp, kStreamtypes), mFdp.ConsumeBool());
+
+ stream = getValue(&mFdp, kStreamtypes);
+ AudioSystem::setStreamVolume(stream, mFdp.ConsumeFloatingPoint<float>(),
+ mFdp.ConsumeIntegral<int32_t>());
+
+ audio_mode_t mode = getValue(&mFdp, kModes);
+ AudioSystem::setMode(mode);
+
+ size_t frameCount;
+ stream = getValue(&mFdp, kStreamtypes);
+ AudioSystem::getOutputFrameCount(&frameCount, stream);
+
+ uint32_t latency;
+ stream = getValue(&mFdp, kStreamtypes);
+ AudioSystem::getOutputLatency(&latency, stream);
+
+ stream = getValue(&mFdp, kStreamtypes);
+ AudioSystem::getStreamVolume(stream, &volume, mFdp.ConsumeIntegral<int32_t>());
+
+ stream = getValue(&mFdp, kStreamtypes);
+ AudioSystem::getStreamMute(stream, &state);
+
+ uint32_t samplingRate;
+ AudioSystem::getSamplingRate(mFdp.ConsumeIntegral<int32_t>(), &samplingRate);
+
+ AudioSystem::getFrameCount(mFdp.ConsumeIntegral<int32_t>(), &frameCount);
+ AudioSystem::getLatency(mFdp.ConsumeIntegral<int32_t>(), &latency);
+ AudioSystem::setVoiceVolume(mFdp.ConsumeFloatingPoint<float>());
+
+ uint32_t halFrames;
+ uint32_t dspFrames;
+ AudioSystem::getRenderPosition(mFdp.ConsumeIntegral<int32_t>(), &halFrames, &dspFrames);
+
+ AudioSystem::getInputFramesLost(mFdp.ConsumeIntegral<int32_t>());
+ AudioSystem::getInputFramesLost(mFdp.ConsumeIntegral<int32_t>());
+
+ audio_unique_id_use_t uniqueIdUse = getValue(&mFdp, kUniqueIds);
+ AudioSystem::newAudioUniqueId(uniqueIdUse);
+
+ audio_session_t sessionId = getValue(&mFdp, kSessionId);
+ pid_t pid = mFdp.ConsumeBool() ? getpid() : mFdp.ConsumeIntegral<int32_t>();
+ uid_t uid = mFdp.ConsumeBool() ? getuid() : mFdp.ConsumeIntegral<int32_t>();
+ AudioSystem::acquireAudioSessionId(sessionId, pid, uid);
+
+ pid = mFdp.ConsumeBool() ? getpid() : mFdp.ConsumeIntegral<int32_t>();
+ sessionId = getValue(&mFdp, kSessionId);
+ AudioSystem::releaseAudioSessionId(sessionId, pid);
+
+ sessionId = getValue(&mFdp, kSessionId);
+ AudioSystem::getAudioHwSyncForSession(sessionId);
+
+ AudioSystem::systemReady();
+ AudioSystem::getFrameCountHAL(mFdp.ConsumeIntegral<int32_t>(), &frameCount);
+
+ size_t buffSize;
+ uint32_t sampleRate = mFdp.ConsumeIntegral<uint32_t>();
+ audio_format_t format = getValue(&mFdp, kFormats);
+ audio_channel_mask_t channelMask = getValue(&mFdp, kChannelMasks);
+ AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &buffSize);
+
+ AudioSystem::getPrimaryOutputSamplingRate();
+ AudioSystem::getPrimaryOutputFrameCount();
+ AudioSystem::setLowRamDevice(mFdp.ConsumeBool(), mFdp.ConsumeIntegral<int64_t>());
+
+ std::vector<media::MicrophoneInfo> microphones;
+ AudioSystem::getMicrophones(&microphones);
+
+ std::vector<pid_t> pids;
+ pids.insert(pids.begin(), getpid());
+ for (int i = 1; i < mFdp.ConsumeIntegralInRange<int32_t>(2, MAX_ARRAY_LENGTH); ++i) {
+ pids.insert(pids.begin() + i, static_cast<pid_t>(mFdp.ConsumeIntegral<int32_t>()));
+ }
+ AudioSystem::setAudioHalPids(pids);
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (!af) {
+ return;
+ }
+ af->setRecordSilenced(mFdp.ConsumeIntegral<uint32_t>(), mFdp.ConsumeBool());
+
+ float balance = mFdp.ConsumeFloatingPoint<float>();
+ af->getMasterBalance(&balance);
+ af->invalidateStream(static_cast<audio_stream_type_t>(mFdp.ConsumeIntegral<uint32_t>()));
+}
+
+status_t AudioFlingerFuzzer::invokeAudioInputDevice() {
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (!af) {
+ return NO_ERROR;
+ }
+
+ audio_config_t config = {};
+ audio_module_handle_t module = mFdp.ConsumeIntegral<int32_t>();
+ audio_io_handle_t input = mFdp.ConsumeIntegral<int32_t>();
+ config.frame_count = mFdp.ConsumeIntegral<uint32_t>();
+ String8 address = static_cast<String8>(mFdp.ConsumeRandomLengthString().c_str());
+
+ config.channel_mask = getValue(&mFdp, kChannelMasks);
+ config.format = getValue(&mFdp, kFormats);
+
+ config.offload_info = AUDIO_INFO_INITIALIZER;
+ config.offload_info.bit_rate = mFdp.ConsumeIntegral<uint32_t>();
+ config.offload_info.bit_width = mFdp.ConsumeIntegral<uint32_t>();
+ config.offload_info.content_id = mFdp.ConsumeIntegral<uint32_t>();
+ config.offload_info.channel_mask = getValue(&mFdp, kChannelMasks);
+ config.offload_info.duration_us = mFdp.ConsumeIntegral<int64_t>();
+ config.offload_info.encapsulation_mode = getValue(&mFdp, kEncapsulation);
+ config.offload_info.format = getValue(&mFdp, kFormats);
+ config.offload_info.has_video = mFdp.ConsumeBool();
+ config.offload_info.is_streaming = mFdp.ConsumeBool();
+ config.offload_info.sample_rate = (mFdp.ConsumeIntegral<uint32_t>());
+ config.offload_info.sync_id = mFdp.ConsumeIntegral<uint32_t>();
+ config.offload_info.stream_type = getValue(&mFdp, kStreamtypes);
+ config.offload_info.usage = getValue(&mFdp, kUsages);
+
+ config.sample_rate = mFdp.ConsumeIntegral<uint32_t>();
+
+ audio_devices_t device = getValue(&mFdp, kDevices);
+ audio_source_t source = getValue(&mFdp, kInputSources);
+ audio_input_flags_t flags = getValue(&mFdp, kInputFlags);
+
+ AudioDeviceTypeAddr deviceTypeAddr(device, address.c_str());
+
+ media::OpenInputRequest request{};
+ request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
+ request.input = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
+ request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
+ request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
+ request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
+
+ media::OpenInputResponse response{};
+ status_t status = af->openInput(request, &response);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+
+ input = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_module_handle_t(response.input));
+ af->closeInput(input);
+ return NO_ERROR;
+}
+
+status_t AudioFlingerFuzzer::invokeAudioOutputDevice() {
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (!af) {
+ return NO_ERROR;
+ }
+
+ audio_config_t config = {};
+ audio_module_handle_t module = mFdp.ConsumeIntegral<int32_t>();
+ audio_io_handle_t output = mFdp.ConsumeIntegral<int32_t>();
+ config.frame_count = mFdp.ConsumeIntegral<uint32_t>();
+ String8 address = static_cast<String8>(mFdp.ConsumeRandomLengthString().c_str());
+
+ config.channel_mask = getValue(&mFdp, kChannelMasks);
+
+ config.offload_info = AUDIO_INFO_INITIALIZER;
+ config.offload_info.bit_rate = mFdp.ConsumeIntegral<uint32_t>();
+ config.offload_info.bit_width = mFdp.ConsumeIntegral<uint32_t>();
+ config.offload_info.channel_mask = getValue(&mFdp, kChannelMasks);
+ config.offload_info.content_id = mFdp.ConsumeIntegral<uint32_t>();
+ config.offload_info.duration_us = mFdp.ConsumeIntegral<int64_t>();
+ config.offload_info.encapsulation_mode = getValue(&mFdp, kEncapsulation);
+ config.offload_info.format = getValue(&mFdp, kFormats);
+ config.offload_info.has_video = mFdp.ConsumeBool();
+ config.offload_info.is_streaming = mFdp.ConsumeBool();
+ config.offload_info.sample_rate = mFdp.ConsumeIntegral<uint32_t>();
+ config.offload_info.stream_type = getValue(&mFdp, kStreamtypes);
+ config.offload_info.sync_id = mFdp.ConsumeIntegral<uint32_t>();
+ config.offload_info.usage = getValue(&mFdp, kUsages);
+
+ config.format = getValue(&mFdp, kFormats);
+ config.sample_rate = mFdp.ConsumeIntegral<uint32_t>();
+
+ sp<DeviceDescriptorBase> device = new DeviceDescriptorBase(getValue(&mFdp, kDevices));
+ audio_output_flags_t flags = getValue(&mFdp, kOutputFlags);
+
+ media::OpenOutputRequest request{};
+ media::OpenOutputResponse response{};
+
+ request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
+ request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
+ request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
+
+ status_t status = af->openOutput(request, &response);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+ output = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(response.output));
+
+ audio_io_handle_t output1 = mFdp.ConsumeIntegral<int32_t>();
+ af->openDuplicateOutput(output, output1);
+ af->suspendOutput(output);
+ af->restoreOutput(output);
+ af->closeOutput(output);
+ return NO_ERROR;
+}
+
+void AudioFlingerFuzzer::invokeAudioPatch() {
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (!af) {
+ return;
+ }
+ struct audio_patch patch = {};
+ audio_patch_handle_t handle = mFdp.ConsumeIntegral<int32_t>();
+
+ patch.id = mFdp.ConsumeIntegral<int32_t>();
+ patch.num_sources = mFdp.ConsumeIntegral<uint32_t>();
+ patch.num_sinks = mFdp.ConsumeIntegral<uint32_t>();
+
+ for (int i = 0; i < AUDIO_PATCH_PORTS_MAX; ++i) {
+ patch.sources[i].config_mask = mFdp.ConsumeIntegral<uint32_t>();
+ patch.sources[i].channel_mask = getValue(&mFdp, kChannelMasks);
+ patch.sources[i].format = getValue(&mFdp, kFormats);
+ patch.sources[i].gain.channel_mask = getValue(&mFdp, kChannelMasks);
+ patch.sources[i].gain.index = mFdp.ConsumeIntegral<int32_t>();
+ patch.sources[i].gain.mode = getValue(&mFdp, kGainModes);
+ patch.sources[i].gain.ramp_duration_ms = mFdp.ConsumeIntegral<uint32_t>();
+ patch.sources[i].id = static_cast<audio_format_t>(mFdp.ConsumeIntegral<int32_t>());
+ patch.sources[i].role = getValue(&mFdp, kPortRoles);
+ patch.sources[i].sample_rate = mFdp.ConsumeIntegral<uint32_t>();
+ patch.sources[i].type = getValue(&mFdp, kPortTypes);
+
+ patch.sinks[i].config_mask = mFdp.ConsumeIntegral<uint32_t>();
+ patch.sinks[i].channel_mask = getValue(&mFdp, kChannelMasks);
+ patch.sinks[i].format = getValue(&mFdp, kFormats);
+ patch.sinks[i].gain.channel_mask = getValue(&mFdp, kChannelMasks);
+ patch.sinks[i].gain.index = mFdp.ConsumeIntegral<int32_t>();
+ patch.sinks[i].gain.mode = getValue(&mFdp, kGainModes);
+ patch.sinks[i].gain.ramp_duration_ms = mFdp.ConsumeIntegral<uint32_t>();
+ patch.sinks[i].id = static_cast<audio_format_t>(mFdp.ConsumeIntegral<int32_t>());
+ patch.sinks[i].role = getValue(&mFdp, kPortRoles);
+ patch.sinks[i].sample_rate = mFdp.ConsumeIntegral<uint32_t>();
+ patch.sinks[i].type = getValue(&mFdp, kPortTypes);
+ }
+
+ status_t status = af->createAudioPatch(&patch, &handle);
+ if (status != NO_ERROR) {
+ return;
+ }
+
+ unsigned int num_patches = mFdp.ConsumeIntegral<uint32_t>();
+ struct audio_patch patches = {};
+ af->listAudioPatches(&num_patches, &patches);
+ af->releaseAudioPatch(handle);
+}
+
+void AudioFlingerFuzzer::process() {
+ invokeAudioEffect();
+ invokeAudioInputDevice();
+ invokeAudioOutputDevice();
+ invokeAudioPatch();
+ invokeAudioRecord();
+ invokeAudioSystem();
+ invokeAudioTrack();
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ if (size < 1) {
+ return 0;
+ }
+ AudioFlingerFuzzer audioFuzzer(data, size);
+ audioFuzzer.process();
+ return 0;
+}
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index d9a7804..6b1b90a 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -14,11 +14,10 @@
],
required: [
- "libaudiohal@2.0",
"libaudiohal@4.0",
"libaudiohal@5.0",
"libaudiohal@6.0",
-// "libaudiohal@7.0",
+ "libaudiohal@7.0",
],
shared_libs: [
diff --git a/media/libaudiohal/FactoryHalHidl.cpp b/media/libaudiohal/FactoryHalHidl.cpp
index 7228b22..e420d07 100644
--- a/media/libaudiohal/FactoryHalHidl.cpp
+++ b/media/libaudiohal/FactoryHalHidl.cpp
@@ -35,7 +35,6 @@
"6.0",
"5.0",
"4.0",
- "2.0",
nullptr
};
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index 94e12b8..8975cc3 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -54,29 +54,15 @@
}
cc_library_shared {
- name: "libaudiohal@2.0",
- defaults: ["libaudiohal_default"],
- shared_libs: [
- "android.hardware.audio.common@2.0",
- "android.hardware.audio.common@2.0-util",
- "android.hardware.audio.effect@2.0",
- "android.hardware.audio@2.0",
- ],
- cflags: [
- "-DMAJOR_VERSION=2",
- "-DMINOR_VERSION=0",
- "-include common/all-versions/VersionMacro.h",
- ]
-}
-
-cc_library_shared {
name: "libaudiohal@4.0",
defaults: ["libaudiohal_default"],
shared_libs: [
"android.hardware.audio.common@4.0",
"android.hardware.audio.common@4.0-util",
"android.hardware.audio.effect@4.0",
+ "android.hardware.audio.effect@4.0-util",
"android.hardware.audio@4.0",
+ "android.hardware.audio@4.0-util",
],
cflags: [
"-DMAJOR_VERSION=4",
@@ -92,7 +78,9 @@
"android.hardware.audio.common@5.0",
"android.hardware.audio.common@5.0-util",
"android.hardware.audio.effect@5.0",
+ "android.hardware.audio.effect@5.0-util",
"android.hardware.audio@5.0",
+ "android.hardware.audio@5.0-util",
],
cflags: [
"-DMAJOR_VERSION=5",
@@ -108,7 +96,9 @@
"android.hardware.audio.common@6.0",
"android.hardware.audio.common@6.0-util",
"android.hardware.audio.effect@6.0",
+ "android.hardware.audio.effect@6.0-util",
"android.hardware.audio@6.0",
+ "android.hardware.audio@6.0-util",
],
cflags: [
"-DMAJOR_VERSION=6",
@@ -118,14 +108,15 @@
}
cc_library_shared {
- enabled: false,
name: "libaudiohal@7.0",
defaults: ["libaudiohal_default"],
shared_libs: [
"android.hardware.audio.common@7.0",
"android.hardware.audio.common@7.0-util",
"android.hardware.audio.effect@7.0",
+ "android.hardware.audio.effect@7.0-util",
"android.hardware.audio@7.0",
+ "android.hardware.audio@7.0-util",
],
cflags: [
"-DMAJOR_VERSION=7",
@@ -133,4 +124,3 @@
"-include common/all-versions/VersionMacro.h",
]
}
-
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
index 2927936..32eaa31 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -120,129 +120,5 @@
ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
}
-#if MAJOR_VERSION >= 4
-// TODO: Use the same implementation in the hal when it moves to a util library.
-static std::string deviceAddressToHal(const DeviceAddress& address) {
- // HAL assumes that the address is NUL-terminated.
- char halAddress[AUDIO_DEVICE_MAX_ADDRESS_LEN];
- memset(halAddress, 0, sizeof(halAddress));
- audio_devices_t halDevice = static_cast<audio_devices_t>(address.device);
- if (getAudioDeviceOutAllA2dpSet().count(halDevice) > 0 ||
- halDevice == AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
- snprintf(halAddress, sizeof(halAddress), "%02X:%02X:%02X:%02X:%02X:%02X",
- address.address.mac[0], address.address.mac[1], address.address.mac[2],
- address.address.mac[3], address.address.mac[4], address.address.mac[5]);
- } else if (halDevice == AUDIO_DEVICE_OUT_IP || halDevice == AUDIO_DEVICE_IN_IP) {
- snprintf(halAddress, sizeof(halAddress), "%d.%d.%d.%d", address.address.ipv4[0],
- address.address.ipv4[1], address.address.ipv4[2], address.address.ipv4[3]);
- } else if (getAudioDeviceOutAllUsbSet().count(halDevice) > 0 ||
- getAudioDeviceInAllUsbSet().count(halDevice) > 0) {
- snprintf(halAddress, sizeof(halAddress), "card=%d;device=%d", address.address.alsa.card,
- address.address.alsa.device);
- } else if (halDevice == AUDIO_DEVICE_OUT_BUS || halDevice == AUDIO_DEVICE_IN_BUS) {
- snprintf(halAddress, sizeof(halAddress), "%s", address.busAddress.c_str());
- } else if (halDevice == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ||
- halDevice == AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
- snprintf(halAddress, sizeof(halAddress), "%s", address.rSubmixAddress.c_str());
- } else {
- snprintf(halAddress, sizeof(halAddress), "%s", address.busAddress.c_str());
- }
- return halAddress;
-}
-
-//local conversion helpers
-
-static audio_microphone_channel_mapping_t channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
- switch (mapping) {
- case AudioMicrophoneChannelMapping::UNUSED:
- return AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
- case AudioMicrophoneChannelMapping::DIRECT:
- return AUDIO_MICROPHONE_CHANNEL_MAPPING_DIRECT;
- case AudioMicrophoneChannelMapping::PROCESSED:
- return AUDIO_MICROPHONE_CHANNEL_MAPPING_PROCESSED;
- default:
- LOG_ALWAYS_FATAL("Unknown channelMappingToHal conversion %d", mapping);
- }
-}
-
-static audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
- switch (location) {
- case AudioMicrophoneLocation::UNKNOWN:
- return AUDIO_MICROPHONE_LOCATION_UNKNOWN;
- case AudioMicrophoneLocation::MAINBODY:
- return AUDIO_MICROPHONE_LOCATION_MAINBODY;
- case AudioMicrophoneLocation::MAINBODY_MOVABLE:
- return AUDIO_MICROPHONE_LOCATION_MAINBODY_MOVABLE;
- case AudioMicrophoneLocation::PERIPHERAL:
- return AUDIO_MICROPHONE_LOCATION_PERIPHERAL;
- default:
- LOG_ALWAYS_FATAL("Unknown locationToHal conversion %d", location);
- }
-}
-static audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
- switch (dir) {
- case AudioMicrophoneDirectionality::UNKNOWN:
- return AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN;
- case AudioMicrophoneDirectionality::OMNI:
- return AUDIO_MICROPHONE_DIRECTIONALITY_OMNI;
- case AudioMicrophoneDirectionality::BI_DIRECTIONAL:
- return AUDIO_MICROPHONE_DIRECTIONALITY_BI_DIRECTIONAL;
- case AudioMicrophoneDirectionality::CARDIOID:
- return AUDIO_MICROPHONE_DIRECTIONALITY_CARDIOID;
- case AudioMicrophoneDirectionality::HYPER_CARDIOID:
- return AUDIO_MICROPHONE_DIRECTIONALITY_HYPER_CARDIOID;
- case AudioMicrophoneDirectionality::SUPER_CARDIOID:
- return AUDIO_MICROPHONE_DIRECTIONALITY_SUPER_CARDIOID;
- default:
- LOG_ALWAYS_FATAL("Unknown directionalityToHal conversion %d", dir);
- }
-}
-
-void microphoneInfoToHal(const MicrophoneInfo& src,
- audio_microphone_characteristic_t *pDst) {
- if (pDst != NULL) {
- snprintf(pDst->device_id, sizeof(pDst->device_id),
- "%s", src.deviceId.c_str());
- pDst->device = static_cast<audio_devices_t>(src.deviceAddress.device);
- snprintf(pDst->address, sizeof(pDst->address),
- "%s", deviceAddressToHal(src.deviceAddress).c_str());
- if (src.channelMapping.size() > AUDIO_CHANNEL_COUNT_MAX) {
- ALOGW("microphoneInfoToStruct found %zu channelMapping elements. Max expected is %d",
- src.channelMapping.size(), AUDIO_CHANNEL_COUNT_MAX);
- }
- size_t ch;
- for (ch = 0; ch < src.channelMapping.size() && ch < AUDIO_CHANNEL_COUNT_MAX; ch++) {
- pDst->channel_mapping[ch] = channelMappingToHal(src.channelMapping[ch]);
- }
- for (; ch < AUDIO_CHANNEL_COUNT_MAX; ch++) {
- pDst->channel_mapping[ch] = AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
- }
- pDst->location = locationToHal(src.location);
- pDst->group = (audio_microphone_group_t)src.group;
- pDst->index_in_the_group = (unsigned int)src.indexInTheGroup;
- pDst->sensitivity = src.sensitivity;
- pDst->max_spl = src.maxSpl;
- pDst->min_spl = src.minSpl;
- pDst->directionality = directionalityToHal(src.directionality);
- pDst->num_frequency_responses = (unsigned int)src.frequencyResponse.size();
- if (pDst->num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
- ALOGW("microphoneInfoToStruct found %d frequency responses. Max expected is %d",
- pDst->num_frequency_responses, AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES);
- pDst->num_frequency_responses = AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES;
- }
- for (size_t k = 0; k < pDst->num_frequency_responses; k++) {
- pDst->frequency_responses[0][k] = src.frequencyResponse[k].frequency;
- pDst->frequency_responses[1][k] = src.frequencyResponse[k].level;
- }
- pDst->geometric_location.x = src.position.x;
- pDst->geometric_location.y = src.position.y;
- pDst->geometric_location.z = src.position.z;
- pDst->orientation.x = src.orientation.x;
- pDst->orientation.y = src.orientation.y;
- pDst->orientation.z = src.orientation.z;
- }
-}
-#endif
-
} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.h b/media/libaudiohal/impl/ConversionHelperHidl.h
index fb3bb9d..59122c7 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.h
+++ b/media/libaudiohal/impl/ConversionHelperHidl.h
@@ -82,12 +82,6 @@
void emitError(const char* funcName, const char* description);
};
-#if MAJOR_VERSION >= 4
-using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
-void microphoneInfoToHal(const MicrophoneInfo& src,
- audio_microphone_characteristic_t *pDst);
-#endif
-
} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 0108816..a8fbe58 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -19,22 +19,24 @@
#define LOG_TAG "DeviceHalHidl"
//#define LOG_NDEBUG 0
-#include PATH(android/hardware/audio/FILE_VERSION/IPrimaryDevice.h)
#include <cutils/native_handle.h>
#include <hwbinder/IPCThreadState.h>
#include <media/AudioContainers.h>
#include <utils/Log.h>
+#include PATH(android/hardware/audio/FILE_VERSION/IPrimaryDevice.h)
+#include <HidlUtils.h>
#include <common/all-versions/VersionUtils.h>
+#include <util/CoreUtils.h>
#include "DeviceHalHidl.h"
#include "EffectHalHidl.h"
-#include "HidlUtils.h"
+#include "ParameterUtils.h"
#include "StreamHalHidl.h"
-#include "VersionUtils.h"
using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
using ::android::hardware::audio::common::utils::EnumBitfield;
+using ::android::hardware::audio::CPP_VERSION::implementation::CoreUtils;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
@@ -46,50 +48,6 @@
using EffectHalHidl = ::android::effect::CPP_VERSION::EffectHalHidl;
-namespace {
-
-using ::android::hardware::audio::common::CPP_VERSION::AudioPort;
-using ::android::hardware::audio::common::CPP_VERSION::AudioPortConfig;
-
-status_t deviceAddressFromHal(
- audio_devices_t device, const char* halAddress, DeviceAddress* address) {
- address->device = AudioDevice(device);
-
- if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
- return OK;
- }
- if (getAudioDeviceOutAllA2dpSet().count(device) > 0
- || device == AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
- int status = sscanf(halAddress,
- "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX",
- &address->address.mac[0], &address->address.mac[1], &address->address.mac[2],
- &address->address.mac[3], &address->address.mac[4], &address->address.mac[5]);
- return status == 6 ? OK : BAD_VALUE;
- } else if (device == AUDIO_DEVICE_OUT_IP || device == AUDIO_DEVICE_IN_IP) {
- int status = sscanf(halAddress,
- "%hhu.%hhu.%hhu.%hhu",
- &address->address.ipv4[0], &address->address.ipv4[1],
- &address->address.ipv4[2], &address->address.ipv4[3]);
- return status == 4 ? OK : BAD_VALUE;
- } else if (getAudioDeviceOutAllUsbSet().count(device) > 0
- || getAudioDeviceInAllUsbSet().count(device) > 0) {
- int status = sscanf(halAddress,
- "card=%d;device=%d",
- &address->address.alsa.card, &address->address.alsa.device);
- return status == 2 ? OK : BAD_VALUE;
- } else if (device == AUDIO_DEVICE_OUT_BUS || device == AUDIO_DEVICE_IN_BUS) {
- address->busAddress = halAddress;
- return OK;
- } else if (device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX
- || device == AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
- address->rSubmixAddress = halAddress;
- return OK;
- }
- return OK;
-}
-
-} // namespace
-
DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
: ConversionHelperHidl("Device"), mDevice(device),
mPrimaryDevice(IPrimaryDevice::castFrom(device)) {
@@ -237,16 +195,22 @@
sp<StreamOutHalInterface> *outStream) {
if (mDevice == 0) return NO_INIT;
DeviceAddress hidlDevice;
- status_t status = deviceAddressFromHal(deviceType, address, &hidlDevice);
- if (status != OK) return status;
+ if (status_t status = CoreUtils::deviceAddressFromHal(deviceType, address, &hidlDevice);
+ status != OK) {
+ return status;
+ }
AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, false /*isInput*/, &hidlConfig);
+ if (status_t status = HidlUtils::audioConfigFromHal(*config, false /*isInput*/, &hidlConfig);
+ status != OK) {
+ return status;
+ }
+ CoreUtils::AudioOutputFlags hidlFlags;
+ if (status_t status = CoreUtils::audioOutputFlagsFromHal(flags, &hidlFlags); status != OK) {
+ return status;
+ }
Result retval = Result::NOT_INITIALIZED;
Return<void> ret = mDevice->openOutputStream(
- handle,
- hidlDevice,
- hidlConfig,
- EnumBitfield<AudioOutputFlag>(flags),
+ handle, hidlDevice, hidlConfig, hidlFlags,
#if MAJOR_VERSION >= 4
{} /* metadata */,
#endif
@@ -272,17 +236,30 @@
sp<StreamInHalInterface> *inStream) {
if (mDevice == 0) return NO_INIT;
DeviceAddress hidlDevice;
- status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
- if (status != OK) return status;
+ if (status_t status = CoreUtils::deviceAddressFromHal(devices, address, &hidlDevice);
+ status != OK) {
+ return status;
+ }
AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, true /*isInput*/, &hidlConfig);
+ if (status_t status = HidlUtils::audioConfigFromHal(*config, true /*isInput*/, &hidlConfig);
+ status != OK) {
+ return status;
+ }
+ CoreUtils::AudioInputFlags hidlFlags;
+ if (status_t status = CoreUtils::audioInputFlagsFromHal(flags, &hidlFlags); status != OK) {
+ return status;
+ }
Result retval = Result::NOT_INITIALIZED;
#if MAJOR_VERSION == 2
auto sinkMetadata = AudioSource(source);
#elif MAJOR_VERSION >= 4
// TODO: correctly propagate the tracks sources and volume
// for now, only send the main source at 1dbfs
- SinkMetadata sinkMetadata = {{{ .source = AudioSource(source), .gain = 1 }}};
+ AudioSource hidlSource;
+ if (status_t status = HidlUtils::audioSourceFromHal(source, &hidlSource); status != OK) {
+ return status;
+ }
+ SinkMetadata sinkMetadata = {{{ .source = std::move(hidlSource), .gain = 1 }}};
#endif
#if MAJOR_VERSION < 5
(void)outputDevice;
@@ -290,8 +267,10 @@
#else
if (outputDevice != AUDIO_DEVICE_NONE) {
DeviceAddress hidlOutputDevice;
- status = deviceAddressFromHal(outputDevice, outputDeviceAddress, &hidlOutputDevice);
- if (status != OK) return status;
+ if (status_t status = CoreUtils::deviceAddressFromHal(
+ outputDevice, outputDeviceAddress, &hidlOutputDevice); status != OK) {
+ return status;
+ }
sinkMetadata.tracks[0].destination.device(std::move(hidlOutputDevice));
}
#endif
@@ -300,11 +279,7 @@
flags = static_cast<audio_input_flags_t>(flags & ~AUDIO_INPUT_FLAG_DIRECT);
#endif
Return<void> ret = mDevice->openInputStream(
- handle,
- hidlDevice,
- hidlConfig,
- EnumBitfield<AudioInputFlag>(flags),
- sinkMetadata,
+ handle, hidlDevice, hidlConfig, hidlFlags, sinkMetadata,
[&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
retval = r;
if (retval == Result::OK) {
@@ -441,7 +416,7 @@
for (size_t k = 0; k < micArrayHal.size(); k++) {
audio_microphone_characteristic_t dst;
//convert
- microphoneInfoToHal(micArrayHal[k], &dst);
+ (void)CoreUtils::microphoneInfoToHal(micArrayHal[k], &dst);
media::MicrophoneInfo microphone = media::MicrophoneInfo(dst);
microphonesInfo->push_back(microphone);
}
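
The DeviceHalHidl changes above drop the file-local deviceAddressFromHal helper in favor of CoreUtils and switch every conversion step to C++17 if-with-initializer checks. A minimal, self-contained sketch of that error-propagation idiom (the convert* functions below are placeholders, not real HAL APIs):

```cpp
#include <cstdio>

using status_t = int;
constexpr status_t OK = 0;
constexpr status_t BAD_VALUE = -22;

// Placeholder conversions standing in for the CoreUtils/HidlUtils calls above.
static status_t convertDeviceAddress() { return OK; }
static status_t convertAudioConfig() { return BAD_VALUE; }

// Each status is scoped to its own check, so a failing conversion returns
// immediately and no stale status value can leak into later checks.
static status_t openSomething() {
    if (status_t status = convertDeviceAddress(); status != OK) {
        return status;
    }
    if (status_t status = convertAudioConfig(); status != OK) {
        return status;
    }
    return OK;
}

int main() {
    printf("openSomething() -> %d\n", openSomething());  // prints -22
    return 0;
}
```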
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index 506feb8..c589a48 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -23,12 +23,13 @@
#include <media/EffectsFactoryApi.h>
#include <utils/Log.h>
+#include <util/EffectUtils.h>
+
#include "EffectBufferHalHidl.h"
#include "EffectHalHidl.h"
-#include "UuidUtils.h"
-using ::android::hardware::audio::common::CPP_VERSION::implementation::UuidUtils;
using ::android::hardware::audio::common::utils::EnumBitfield;
+using ::android::hardware::audio::effect::CPP_VERSION::implementation::EffectUtils;
using ::android::hardware::hidl_vec;
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
@@ -42,6 +43,10 @@
EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
: mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
+ effect_descriptor_t halDescriptor{};
+ if (EffectHalHidl::getDescriptor(&halDescriptor) == NO_ERROR) {
+ mIsInput = (halDescriptor.flags & EFFECT_FLAG_TYPE_PRE_PROC) == EFFECT_FLAG_TYPE_PRE_PROC;
+ }
}
EffectHalHidl::~EffectHalHidl() {
@@ -56,59 +61,6 @@
}
// static
-void EffectHalHidl::effectDescriptorToHal(
- const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
- UuidUtils::uuidToHal(descriptor.type, &halDescriptor->type);
- UuidUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
- halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
- halDescriptor->cpuLoad = descriptor.cpuLoad;
- halDescriptor->memoryUsage = descriptor.memoryUsage;
- memcpy(halDescriptor->name, descriptor.name.data(), descriptor.name.size());
- memcpy(halDescriptor->implementor,
- descriptor.implementor.data(), descriptor.implementor.size());
-}
-
-// TODO(mnaganov): These buffer conversion functions should be shared with Effect wrapper
-// via HidlUtils. Move them there when hardware/interfaces will get un-frozen again.
-
-// static
-void EffectHalHidl::effectBufferConfigFromHal(
- const buffer_config_t& halConfig, EffectBufferConfig* config) {
- config->samplingRateHz = halConfig.samplingRate;
- config->channels = EnumBitfield<AudioChannelMask>(halConfig.channels);
- config->format = AudioFormat(halConfig.format);
- config->accessMode = EffectBufferAccess(halConfig.accessMode);
- config->mask = EnumBitfield<EffectConfigParameters>(halConfig.mask);
-}
-
-// static
-void EffectHalHidl::effectBufferConfigToHal(
- const EffectBufferConfig& config, buffer_config_t* halConfig) {
- halConfig->buffer.frameCount = 0;
- halConfig->buffer.raw = NULL;
- halConfig->samplingRate = config.samplingRateHz;
- halConfig->channels = static_cast<uint32_t>(config.channels);
- halConfig->bufferProvider.cookie = NULL;
- halConfig->bufferProvider.getBuffer = NULL;
- halConfig->bufferProvider.releaseBuffer = NULL;
- halConfig->format = static_cast<uint8_t>(config.format);
- halConfig->accessMode = static_cast<uint8_t>(config.accessMode);
- halConfig->mask = static_cast<uint8_t>(config.mask);
-}
-
-// static
-void EffectHalHidl::effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config) {
- effectBufferConfigFromHal(halConfig.inputCfg, &config->inputCfg);
- effectBufferConfigFromHal(halConfig.outputCfg, &config->outputCfg);
-}
-
-// static
-void EffectHalHidl::effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig) {
- effectBufferConfigToHal(config.inputCfg, &halConfig->inputCfg);
- effectBufferConfigToHal(config.outputCfg, &halConfig->outputCfg);
-}
-
-// static
status_t EffectHalHidl::analyzeResult(const Result& result) {
switch (result) {
case Result::OK: return OK;
@@ -269,7 +221,7 @@
[&](Result r, const EffectDescriptor& result) {
retval = r;
if (retval == Result::OK) {
- effectDescriptorToHal(result, pDescriptor);
+ EffectUtils::effectDescriptorToHal(result, pDescriptor);
}
});
return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
@@ -301,14 +253,16 @@
ret = mEffect->getConfig([&] (Result r, const EffectConfig &hidlConfig) {
result = analyzeResult(r);
if (r == Result::OK) {
- effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+ EffectUtils::effectConfigToHal(
+ hidlConfig, static_cast<effect_config_t*>(pReplyData));
}
});
} else {
ret = mEffect->getConfigReverse([&] (Result r, const EffectConfig &hidlConfig) {
result = analyzeResult(r);
if (r == Result::OK) {
- effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+ EffectUtils::effectConfigToHal(
+ hidlConfig, static_cast<effect_config_t*>(pReplyData));
}
});
}
@@ -332,7 +286,7 @@
ALOGE("Buffer provider callbacks are not supported");
}
EffectConfig hidlConfig;
- effectConfigFromHal(*halConfig, &hidlConfig);
+ EffectUtils::effectConfigFromHal(*halConfig, mIsInput, &hidlConfig);
Return<Result> ret = cmdCode == EFFECT_CMD_SET_CONFIG ?
mEffect->setConfig(hidlConfig, nullptr, nullptr) :
mEffect->setConfigReverse(hidlConfig, nullptr, nullptr);
diff --git a/media/libaudiohal/impl/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
index 1f238c0..8e46638 100644
--- a/media/libaudiohal/impl/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -65,9 +65,6 @@
uint64_t effectId() const { return mEffectId; }
- static void effectDescriptorToHal(
- const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor);
-
private:
friend class EffectsFactoryHalHidl;
typedef MessageQueue<Result, hardware::kSynchronizedReadWrite> StatusMQ;
@@ -79,14 +76,9 @@
bool mBuffersChanged;
std::unique_ptr<StatusMQ> mStatusMQ;
EventFlag* mEfGroup;
+ bool mIsInput = false;
static status_t analyzeResult(const Result& result);
- static void effectBufferConfigFromHal(
- const buffer_config_t& halConfig, EffectBufferConfig* config);
- static void effectBufferConfigToHal(
- const EffectBufferConfig& config, buffer_config_t* halConfig);
- static void effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config);
- static void effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig);
// Can not be constructed directly by clients.
EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
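
For context on the new mIsInput member above: the EffectHalHidl.cpp constructor hunk derives it from the effect descriptor's flags, since pre-processing effects sit on the capture path and EffectUtils::effectConfigFromHal() needs that isInput hint. A hedged sketch of an equivalent check, written with the type mask from hardware/audio_effect.h (slightly stricter than the raw bit test in the constructor hunk):

```cpp
#include <hardware/audio_effect.h>

// Returns true when the descriptor marks the effect as a pre-processing
// (capture-path) effect; the type field of 'flags' is compared against
// EFFECT_FLAG_TYPE_PRE_PROC using the documented mask.
static bool isPreProcessingEffect(const effect_descriptor_t& desc) {
    return (desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC;
}
```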
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index 35ac332..9c4363c 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -19,13 +19,16 @@
#include <cutils/native_handle.h>
+#include <UuidUtils.h>
+#include <util/EffectUtils.h>
+
#include "ConversionHelperHidl.h"
#include "EffectBufferHalHidl.h"
#include "EffectHalHidl.h"
#include "EffectsFactoryHalHidl.h"
-#include "UuidUtils.h"
using ::android::hardware::audio::common::CPP_VERSION::implementation::UuidUtils;
+using ::android::hardware::audio::effect::CPP_VERSION::implementation::EffectUtils;
using ::android::hardware::Return;
namespace android {
@@ -76,7 +79,7 @@
if (queryResult != OK) return queryResult;
}
if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
- EffectHalHidl::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
+ EffectUtils::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
return OK;
}
@@ -91,7 +94,7 @@
[&](Result r, const EffectDescriptor& result) {
retval = r;
if (retval == Result::OK) {
- EffectHalHidl::effectDescriptorToHal(result, pDescriptor);
+ EffectUtils::effectDescriptorToHal(result, pDescriptor);
}
});
if (ret.isOk()) {
diff --git a/media/libaudiohal/impl/VersionUtils.h b/media/libaudiohal/impl/ParameterUtils.h
similarity index 91%
rename from media/libaudiohal/impl/VersionUtils.h
rename to media/libaudiohal/impl/ParameterUtils.h
index eb0a42a..9cab72e 100644
--- a/media/libaudiohal/impl/VersionUtils.h
+++ b/media/libaudiohal/impl/ParameterUtils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,8 +14,7 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_VERSION_UTILS_H
-#define ANDROID_HARDWARE_VERSION_UTILS_H
+#pragma once
#include PATH(android/hardware/audio/FILE_VERSION/types.h)
#include <hidl/HidlSupport.h>
@@ -59,5 +58,3 @@
} // namespace utils
} // namespace CPP_VERSION
} // namespace android
-
-#endif // ANDROID_HARDWARE_VERSION_UTILS_H
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 097bd12..2a3e2b6 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -17,18 +17,23 @@
#define LOG_TAG "StreamHalHidl"
//#define LOG_NDEBUG 0
-#include PATH(android/hardware/audio/FILE_VERSION/IStreamOutCallback.h)
+#include <android/hidl/manager/1.0/IServiceManager.h>
#include <hwbinder/IPCThreadState.h>
#include <media/AudioParameter.h>
#include <mediautils/SchedulingPolicyService.h>
#include <utils/Log.h>
+#include PATH(android/hardware/audio/FILE_VERSION/IStreamOutCallback.h)
+#include <HidlUtils.h>
+#include <util/CoreUtils.h>
+
#include "DeviceHalHidl.h"
#include "EffectHalHidl.h"
-#include "HidlUtils.h"
+#include "ParameterUtils.h"
#include "StreamHalHidl.h"
-#include "VersionUtils.h"
+using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
+using ::android::hardware::audio::CPP_VERSION::implementation::CoreUtils;
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
using ::android::hardware::Void;
@@ -50,14 +55,11 @@
// Instrument audio signal power logging.
// Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr /* && mStreamPowerLog.isUserDebugOrEngBuild() */) {
- // Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
- Return<void> ret = mStream->getAudioProperties(
- [&](auto sr, auto m, auto f) {
- mStreamPowerLog.init(sr,
- static_cast<audio_channel_mask_t>(m),
- static_cast<audio_format_t>(f));
- });
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ if (/* mStreamPowerLog.isUserDebugOrEngBuild() && */
+ StreamHalHidl::getAudioProperties(
+ &config.sample_rate, &config.channel_mask, &config.format) == NO_ERROR) {
+ mStreamPowerLog.init(config.sample_rate, config.channel_mask, config.format);
}
}
@@ -67,9 +69,12 @@
hardware::IPCThreadState::self()->flushCommands();
}
+// Note: this method will be removed
status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
- if (!mStream) return NO_INIT;
- return processReturn("getSampleRate", mStream->getSampleRate(), rate);
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
+ *rate = config.sample_rate;
+ return status;
}
status_t StreamHalHidl::getBufferSize(size_t *size) {
@@ -81,19 +86,26 @@
return status;
}
+// Note: this method will be removed
status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
- if (!mStream) return NO_INIT;
- return processReturn("getChannelMask", mStream->getChannelMask(), mask);
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
+ *mask = config.channel_mask;
+ return status;
}
+// Note: this method will be removed
status_t StreamHalHidl::getFormat(audio_format_t *format) {
- if (!mStream) return NO_INIT;
- return processReturn("getFormat", mStream->getFormat(), format);
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ status_t status = getAudioProperties(&config.sample_rate, &config.channel_mask, &config.format);
+ *format = config.format;
+ return status;
}
status_t StreamHalHidl::getAudioProperties(
uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
if (!mStream) return NO_INIT;
+#if MAJOR_VERSION <= 6
Return<void> ret = mStream->getAudioProperties(
[&](uint32_t sr, auto m, auto f) {
*sampleRate = sr;
@@ -101,6 +113,26 @@
*format = static_cast<audio_format_t>(f);
});
return processReturn("getAudioProperties", ret);
+#else
+ Result retval;
+ status_t conversionStatus = BAD_VALUE;
+ audio_config_base_t halConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ Return<void> ret = mStream->getAudioProperties(
+ [&](Result r, const AudioConfigBase& config) {
+ retval = r;
+ if (retval == Result::OK) {
+ conversionStatus = HidlUtils::audioConfigBaseToHal(config, &halConfig);
+ }
+ });
+ if (status_t status = processReturn("getAudioProperties", ret, retval); status == NO_ERROR) {
+ *sampleRate = halConfig.sample_rate;
+ *mask = halConfig.channel_mask;
+ *format = halConfig.format;
+ return conversionStatus;
+ } else {
+ return status;
+ }
+#endif
}
status_t StreamHalHidl::setParameters(const String8& kvPairs) {
@@ -228,6 +260,24 @@
return getBufferSize(size);
}
+status_t StreamHalHidl::getHalPid(pid_t *pid) {
+ using ::android::hidl::base::V1_0::DebugInfo;
+ using ::android::hidl::manager::V1_0::IServiceManager;
+
+ DebugInfo debugInfo;
+ auto ret = mStream->getDebugInfo([&] (const auto &info) {
+ debugInfo = info;
+ });
+ if (!ret.isOk()) {
+ return INVALID_OPERATION;
+ }
+ if (debugInfo.pid != (int)IServiceManager::PidConstant::NO_PID) {
+ *pid = debugInfo.pid;
+ return NO_ERROR;
+ }
+ return NAME_NOT_FOUND;
+}
+
bool StreamHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
if (mHalThreadPriority == HAL_THREAD_PRIORITY_DEFAULT) {
return true;
@@ -454,7 +504,7 @@
const CommandMQ::Descriptor& commandMQ,
const DataMQ::Descriptor& dataMQ,
const StatusMQ::Descriptor& statusMQ,
- const ThreadInfo& halThreadInfo) {
+ const auto& halThreadInfo) {
retval = r;
if (retval == Result::OK) {
tempCommandMQ.reset(new CommandMQ(commandMQ));
@@ -463,8 +513,12 @@
if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
}
+#if MAJOR_VERSION <= 6
halThreadPid = halThreadInfo.pid;
halThreadTid = halThreadInfo.tid;
+#else
+ halThreadTid = halThreadInfo;
+#endif
}
});
if (!ret.isOk() || retval != Result::OK) {
@@ -485,6 +539,11 @@
ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
return NO_INIT;
}
+#if MAJOR_VERSION >= 7
+ if (status_t status = getHalPid(&halThreadPid); status != NO_ERROR) {
+ return status;
+ }
+#endif
requestHalThreadPriority(halThreadPid, halThreadTid);
mCommandMQ = std::move(tempCommandMQ);
@@ -598,40 +657,15 @@
return INVALID_OPERATION;
}
#elif MAJOR_VERSION >= 4
-/** Transform a standard collection to an HIDL vector. */
-template <class Values, class ElementConverter>
-static auto transformToHidlVec(const Values& values, ElementConverter converter) {
- hidl_vec<decltype(converter(*values.begin()))> result{values.size()};
- using namespace std;
- transform(begin(values), end(values), begin(result), converter);
- return result;
-}
-
status_t StreamOutHalHidl::updateSourceMetadata(
const StreamOutHalInterface::SourceMetadata& sourceMetadata) {
- CPP_VERSION::SourceMetadata halMetadata = {
- .tracks = transformToHidlVec(sourceMetadata.tracks,
- [](const playback_track_metadata_v7& metadata) -> PlaybackTrackMetadata {
- PlaybackTrackMetadata halTrackMetadata = {
- .usage=static_cast<AudioUsage>(metadata.base.usage),
- .contentType=static_cast<AudioContentType>(metadata.base.content_type),
- .gain=metadata.base.gain,
- };
-#if MAJOR_VERSION >= 7
- HidlUtils::audioChannelMaskFromHal(metadata.channel_mask, false /*isInput*/,
- &halTrackMetadata.channelMask);
-
- std::istringstream tags{metadata.tags};
- std::string tag;
- while (std::getline(tags, tag, HidlUtils::sAudioTagSeparator)) {
- if (!tag.empty()) {
- halTrackMetadata.tags.push_back(tag);
- }
- }
-#endif
- return halTrackMetadata;
- })};
- return processReturn("updateSourceMetadata", mStream->updateSourceMetadata(halMetadata));
+ CPP_VERSION::SourceMetadata hidlMetadata;
+ if (status_t status = CoreUtils::sourceMetadataFromHalV7(
+ sourceMetadata.tracks, true /*ignoreNonVendorTags*/, &hidlMetadata);
+ status != OK) {
+ return status;
+ }
+ return processReturn("updateSourceMetadata", mStream->updateSourceMetadata(hidlMetadata));
}
#endif
@@ -902,7 +936,7 @@
const CommandMQ::Descriptor& commandMQ,
const DataMQ::Descriptor& dataMQ,
const StatusMQ::Descriptor& statusMQ,
- const ThreadInfo& halThreadInfo) {
+ const auto& halThreadInfo) {
retval = r;
if (retval == Result::OK) {
tempCommandMQ.reset(new CommandMQ(commandMQ));
@@ -911,8 +945,12 @@
if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
}
+#if MAJOR_VERSION <= 6
halThreadPid = halThreadInfo.pid;
halThreadTid = halThreadInfo.tid;
+#else
+ halThreadTid = halThreadInfo;
+#endif
}
});
if (!ret.isOk() || retval != Result::OK) {
@@ -933,6 +971,11 @@
ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
return NO_INIT;
}
+#if MAJOR_VERSION >= 7
+ if (status_t status = getHalPid(&halThreadPid); status != NO_ERROR) {
+ return status;
+ }
+#endif
requestHalThreadPriority(halThreadPid, halThreadTid);
mCommandMQ = std::move(tempCommandMQ);
@@ -995,7 +1038,7 @@
for (size_t k = 0; k < micArrayHal.size(); k++) {
audio_microphone_characteristic_t dst;
// convert
- microphoneInfoToHal(micArrayHal[k], &dst);
+ (void)CoreUtils::microphoneInfoToHal(micArrayHal[k], &dst);
media::MicrophoneInfo microphone = media::MicrophoneInfo(dst);
microphonesInfo->push_back(microphone);
}
@@ -1005,27 +1048,13 @@
status_t StreamInHalHidl::updateSinkMetadata(const
StreamInHalInterface::SinkMetadata& sinkMetadata) {
- CPP_VERSION::SinkMetadata halMetadata = {
- .tracks = transformToHidlVec(sinkMetadata.tracks,
- [](const record_track_metadata_v7& metadata) -> RecordTrackMetadata {
- RecordTrackMetadata halTrackMetadata = {
- .source=static_cast<AudioSource>(metadata.base.source),
- .gain=metadata.base.gain,
- };
-#if MAJOR_VERSION >= 7
- HidlUtils::audioChannelMaskFromHal(metadata.channel_mask, true /*isInput*/,
- &halTrackMetadata.channelMask);
- std::istringstream tags{metadata.tags};
- std::string tag;
- while (std::getline(tags, tag, HidlUtils::sAudioTagSeparator)) {
- if (!tag.empty()) {
- halTrackMetadata.tags.push_back(tag);
- }
- }
-#endif
- return halTrackMetadata;
- })};
- return processReturn("updateSinkMetadata", mStream->updateSinkMetadata(halMetadata));
+ CPP_VERSION::SinkMetadata hidlMetadata;
+ if (status_t status = CoreUtils::sinkMetadataFromHalV7(
+ sinkMetadata.tracks, true /*ignoreNonVendorTags*/, &hidlMetadata);
+ status != OK) {
+ return status;
+ }
+ return processReturn("updateSinkMetadata", mStream->updateSinkMetadata(hidlMetadata));
}
#endif
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 7dfc78f..c6db6d6 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -105,6 +105,8 @@
status_t getCachedBufferSize(size_t *size);
+ status_t getHalPid(pid_t *pid);
+
bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
// mStreamPowerLog is used for audio signal power logging.
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index a3f2fb4..e89b288 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -22,8 +22,8 @@
#include <utils/Log.h>
#include "DeviceHalLocal.h"
+#include "ParameterUtils.h"
#include "StreamHalLocal.h"
-#include "VersionUtils.h"
namespace android {
namespace CPP_VERSION {
@@ -258,7 +258,7 @@
#if MAJOR_VERSION >= 7
void StreamOutHalLocal::doUpdateSourceMetadataV7(const SourceMetadata& sourceMetadata) {
- const source_metadata_t metadata {
+ const source_metadata_v7_t metadata {
.track_count = sourceMetadata.tracks.size(),
// const cast is fine as it is in a const structure
.tracks = const_cast<playback_track_metadata_v7*>(sourceMetadata.tracks.data()),
@@ -274,7 +274,7 @@
}
doUpdateSourceMetadata(sourceMetadata);
#else
- if (mDevice->version() < AUDIO_DEVICE_API_VERSION_3_2)
+ if (mDevice->version() < AUDIO_DEVICE_API_VERSION_3_2) {
if (mStream->update_source_metadata == nullptr) {
return INVALID_OPERATION;
}
@@ -446,13 +446,12 @@
status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
#if MAJOR_VERSION < 7
-
if (mStream->update_sink_metadata == nullptr) {
return INVALID_OPERATION; // not supported by the HAL
}
doUpdateSinkMetadata(sinkMetadata);
#else
- if (mDevice->version() < AUDIO_DEVICE_API_VERSION_3_2)
+ if (mDevice->version() < AUDIO_DEVICE_API_VERSION_3_2) {
if (mStream->update_sink_metadata == nullptr) {
return INVALID_OPERATION; // not supported by the HAL
}
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 097e9a2..b47f536 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -31,18 +31,22 @@
class StreamHalInterface : public virtual RefBase
{
public:
+ // TODO(mnaganov): Remove
// Return the sampling rate in Hz - eg. 44100.
virtual status_t getSampleRate(uint32_t *rate) = 0;
// Return size of input/output buffer in bytes for this stream - eg. 4800.
virtual status_t getBufferSize(size_t *size) = 0;
+ // TODO(mnaganov): Remove
// Return the channel mask.
virtual status_t getChannelMask(audio_channel_mask_t *mask) = 0;
+ // TODO(mnaganov): Remove
// Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
virtual status_t getFormat(audio_format_t *format) = 0;
+ // TODO(mnaganov): Change to use audio_config_base_t
// Convenience method.
virtual status_t getAudioProperties(
uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) = 0;
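
Given the TODOs above, callers are expected to migrate from the per-property getters to a single getAudioProperties() call. A hypothetical caller-side sketch (the fetchStreamConfig helper is illustrative, not part of the interface):

```cpp
#include <media/audiohal/StreamHalInterface.h>
#include <system/audio.h>
#include <utils/StrongPointer.h>

// Fills an audio_config_base_t from one getAudioProperties() call instead of
// calling getSampleRate()/getChannelMask()/getFormat() separately.
static android::status_t fetchStreamConfig(
        const android::sp<android::StreamHalInterface>& stream,
        audio_config_base_t* config) {
    *config = AUDIO_CONFIG_BASE_INITIALIZER;
    return stream->getAudioProperties(
            &config->sample_rate, &config->channel_mask, &config->format);
}
```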
diff --git a/media/libmediahelper/tests/typeconverter_tests.cpp b/media/libmediahelper/tests/typeconverter_tests.cpp
index d7bfb89..181d636 100644
--- a/media/libmediahelper/tests/typeconverter_tests.cpp
+++ b/media/libmediahelper/tests/typeconverter_tests.cpp
@@ -182,8 +182,9 @@
audio_format_t format;
EXPECT_TRUE(FormatConverter::fromString(stringVal, format))
<< "Conversion of \"" << stringVal << "\" failed";
- EXPECT_TRUE(audio_is_valid_format(format))
- << "Converted format \"" << stringVal << "\" is invalid";
+ EXPECT_EQ(enumVal != xsd::AudioFormat::AUDIO_FORMAT_DEFAULT,
+ audio_is_valid_format(format))
+ << "Validity of \"" << stringVal << "\" is not as expected";
EXPECT_EQ(stringVal, toString(format));
}
}
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index c2e1dc9..3dfd850 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -7,7 +7,6 @@
name: "libmediametrics",
srcs: [
- "IMediaMetricsService.cpp",
"MediaMetricsItem.cpp",
"MediaMetrics.cpp",
],
@@ -17,6 +16,7 @@
"libcutils",
"liblog",
"libutils",
+ "mediametricsservice-aidl-unstable-cpp",
],
export_include_dirs: ["include"],
@@ -58,3 +58,21 @@
"//frameworks/base/media/jni",
],
}
+
+aidl_interface {
+ name: "mediametricsservice-aidl",
+ unstable: true,
+ local_include_dir: "aidl",
+ vendor_available: true,
+ srcs: [
+ "aidl/android/media/IMediaMetricsService.aidl",
+ ],
+ double_loadable: true,
+ backend: {
+ cpp: {
+ apex_available: [
+ "//apex_available:platform",
+ ],
+ },
+ },
+}
diff --git a/media/libmediametrics/MediaMetricsItem.cpp b/media/libmediametrics/MediaMetricsItem.cpp
index 7cdbe5f..f4371fd 100644
--- a/media/libmediametrics/MediaMetricsItem.cpp
+++ b/media/libmediametrics/MediaMetricsItem.cpp
@@ -31,8 +31,9 @@
#include <utils/SortedVector.h>
#include <utils/threads.h>
+#include <android/media/BnMediaMetricsService.h> // for direct Binder access
+#include <android/media/IMediaMetricsService.h>
#include <binder/IServiceManager.h>
-#include <media/IMediaMetricsService.h>
#include <media/MediaMetricsItem.h>
#include <private/android_filesystem_config.h>
@@ -278,17 +279,18 @@
// calls the appropriate daemon
bool mediametrics::Item::selfrecord() {
ALOGD_IF(DEBUG_API, "%s: delivering %s", __func__, this->toString().c_str());
- sp<IMediaMetricsService> svc = getService();
- if (svc != NULL) {
- status_t status = svc->submit(this);
- if (status != NO_ERROR) {
- ALOGW("%s: failed to record: %s", __func__, this->toString().c_str());
- return false;
- }
- return true;
- } else {
+
+ char *str;
+ size_t size;
+ status_t status = writeToByteString(&str, &size);
+ if (status == NO_ERROR) {
+ status = submitBuffer(str, size);
+ }
+ if (status != NO_ERROR) {
+ ALOGW("%s: failed to record: %s", __func__, this->toString().c_str());
return false;
}
+ return true;
}
//static
@@ -327,7 +329,7 @@
static sp<MediaMetricsDeathNotifier> sNotifier;
// static
-sp<IMediaMetricsService> BaseItem::sMediaMetricsService;
+sp<media::IMediaMetricsService> BaseItem::sMediaMetricsService;
static std::mutex sServiceMutex;
static int sRemainingBindAttempts = SVC_TRIES;
@@ -339,29 +341,67 @@
}
// static
-bool BaseItem::submitBuffer(const char *buffer, size_t size) {
-/*
- mediametrics::Item item;
- status_t status = item.readFromByteString(buffer, size);
- ALOGD("%s: status:%d, size:%zu, item:%s", __func__, status, size, item.toString().c_str());
- return item.selfrecord();
- */
-
+status_t BaseItem::submitBuffer(const char *buffer, size_t size) {
ALOGD_IF(DEBUG_API, "%s: delivering %zu bytes", __func__, size);
- sp<IMediaMetricsService> svc = getService();
- if (svc != nullptr) {
- const status_t status = svc->submitBuffer(buffer, size);
- if (status != NO_ERROR) {
- ALOGW("%s: failed(%d) to record: %zu bytes", __func__, status, size);
- return false;
- }
- return true;
+
+ // Validate size
+ if (size > std::numeric_limits<int32_t>::max()) return BAD_VALUE;
+
+ // Do we have the service available?
+ sp<media::IMediaMetricsService> svc = getService();
+ if (svc == nullptr) return NO_INIT;
+
+ ::android::status_t status = NO_ERROR;
+ if constexpr (/* DISABLES CODE */ (false)) {
+ // THIS PATH IS FOR REFERENCE ONLY.
+ // It is compiled so that any change to IMediaMetricsService::submitBuffer()
+ // surfaces here as a build error. If this code is changed, the else branch must
+ // be changed as well.
+ //
+ // Use the AIDL calling interface - this is a bit slower as a byte vector must be
+ // constructed. As the call is one-way, only a transaction error can be observed.
+ status = svc->submitBuffer({buffer, buffer + size}).transactionError();
+ } else {
+ // Use the Binder calling interface - this direct implementation avoids
+ // malloc/copy/free for the vector and reduces the overhead for logging.
+ // We based this off of the AIDL generated file:
+ // out/soong/.intermediates/frameworks/av/media/libmediametrics/mediametricsservice-aidl-unstable-cpp-source/gen/android/media/IMediaMetricsService.cpp
+ // TODO: Create an AIDL C++ back end optimized form of vector writing.
+ ::android::Parcel _aidl_data;
+ ::android::Parcel _aidl_reply; // we don't care about this as it is one-way.
+
+ status = _aidl_data.writeInterfaceToken(svc->getInterfaceDescriptor());
+ if (status != ::android::OK) goto _aidl_error;
+
+ status = _aidl_data.writeInt32(static_cast<int32_t>(size));
+ if (status != ::android::OK) goto _aidl_error;
+
+ status = _aidl_data.write(buffer, static_cast<int32_t>(size));
+ if (status != ::android::OK) goto _aidl_error;
+
+ status = ::android::IInterface::asBinder(svc)->transact(
+ ::android::media::BnMediaMetricsService::TRANSACTION_submitBuffer,
+ _aidl_data, &_aidl_reply, ::android::IBinder::FLAG_ONEWAY);
+
+ // AIDL permits setting a default implementation for additional functionality.
+ // See go/aog/713984. This is not used here.
+ // if (status == ::android::UNKNOWN_TRANSACTION
+ // && ::android::media::IMediaMetricsService::getDefaultImpl()) {
+ // status = ::android::media::IMediaMetricsService::getDefaultImpl()
+ // ->submitBuffer(immutableByteVectorFromBuffer(buffer, size))
+ // .transactionError();
+ // }
}
- return false;
+
+ if (status == NO_ERROR) return NO_ERROR;
+
+ _aidl_error:
+ ALOGW("%s: failed(%d) to record: %zu bytes", __func__, status, size);
+ return status;
}
//static
-sp<IMediaMetricsService> BaseItem::getService() {
+sp<media::IMediaMetricsService> BaseItem::getService() {
static const char *servicename = "media.metrics";
static const bool enabled = isEnabled(); // singleton initialized
@@ -379,7 +419,7 @@
if (sm != nullptr) {
sp<IBinder> binder = sm->getService(String16(servicename));
if (binder != nullptr) {
- sMediaMetricsService = interface_cast<IMediaMetricsService>(binder);
+ sMediaMetricsService = interface_cast<media::IMediaMetricsService>(binder);
sNotifier = new MediaMetricsDeathNotifier();
binder->linkToDeath(sNotifier);
} else {
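
The selfrecord()/submitBuffer() rework above routes every item through a byte string and then hand-writes the Parcel for the one-way AIDL call. For comparison, a hedged sketch of the plain AIDL-generated client path, assuming the C++ backend proxy from mediametricsservice-aidl (this is the "reference only" branch in the hunk, expanded into a standalone helper):

```cpp
#include <vector>

#include <android/media/IMediaMetricsService.h>
#include <binder/IServiceManager.h>
#include <utils/Errors.h>
#include <utils/String16.h>

// Submits a serialized item through the generated proxy. The byte[] argument
// maps to std::vector<uint8_t> in the C++ backend, so the buffer is copied
// once here - the overhead the hand-rolled Parcel path above avoids.
static android::status_t submitViaAidl(const char* buffer, size_t size) {
    android::sp<android::media::IMediaMetricsService> svc =
            android::interface_cast<android::media::IMediaMetricsService>(
                    android::defaultServiceManager()->getService(
                            android::String16("media.metrics")));
    if (svc == nullptr) return android::NO_INIT;
    // submitBuffer() is oneway, so only a transaction error can be observed.
    return svc->submitBuffer(std::vector<uint8_t>(buffer, buffer + size))
            .transactionError();
}
```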
diff --git a/media/libmediametrics/aidl/android/media/IMediaMetricsService.aidl b/media/libmediametrics/aidl/android/media/IMediaMetricsService.aidl
new file mode 100644
index 0000000..b14962d
--- /dev/null
+++ b/media/libmediametrics/aidl/android/media/IMediaMetricsService.aidl
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * MediaMetrics service interface
+ *
+ * {@hide}
+ */
+interface IMediaMetricsService {
+ oneway void submitBuffer(in byte[] buffer);
+}
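
A hypothetical service-side counterpart of the interface above, assuming the C++ backend's generated BnMediaMetricsService (the class name and body are illustrative only; the real media.metrics service implementation lives elsewhere):

```cpp
#include <vector>

#include <android/media/BnMediaMetricsService.h>

// Minimal stub implementing the oneway submitBuffer() declared in the AIDL.
class MetricsServiceStub : public android::media::BnMediaMetricsService {
public:
    android::binder::Status submitBuffer(const std::vector<uint8_t>& buffer) override {
        // The call is oneway, so the returned Status is not observable by clients.
        (void)buffer;
        return android::binder::Status::ok();
    }
};
```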
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index 84388c9..2af7eee 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -170,6 +170,7 @@
#define AMEDIAMETRICS_PROP_EVENT_VALUE_CTOR "ctor"
#define AMEDIAMETRICS_PROP_EVENT_VALUE_DISCONNECT "disconnect"
#define AMEDIAMETRICS_PROP_EVENT_VALUE_DTOR "dtor"
+#define AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM "endAAudioStream" // AAudioStream
#define AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAUDIOINTERVALGROUP "endAudioIntervalGroup"
#define AMEDIAMETRICS_PROP_EVENT_VALUE_FLUSH "flush" // AudioTrack
#define AMEDIAMETRICS_PROP_EVENT_VALUE_INVALIDATE "invalidate" // server track, record
diff --git a/media/libmediametrics/include/media/IMediaMetricsService.h b/media/libmediametrics/include/media/IMediaMetricsService.h
deleted file mode 100644
index d6871ec..0000000
--- a/media/libmediametrics/include/media/IMediaMetricsService.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IMEDIAANALYTICSSERVICE_H
-#define ANDROID_IMEDIAANALYTICSSERVICE_H
-
-#include <utils/String8.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-
-#include <sys/types.h>
-#include <utils/Errors.h>
-#include <utils/Log.h>
-#include <utils/RefBase.h>
-#include <utils/List.h>
-
-#include <binder/IServiceManager.h>
-
-#include <media/MediaMetricsItem.h>
-
-namespace android {
-
-class IMediaMetricsService: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(MediaMetricsService);
-
- /**
- * Submits the indicated record to the mediaanalytics service, where
- * it will be merged (if appropriate) with incomplete records that
- * share the same key and sessionID.
- *
- * \param item the item to submit.
- * \return status which is negative if an error is detected (some errors
- may be silent and return 0 - success).
- */
- virtual status_t submit(mediametrics::Item *item) = 0;
-
- virtual status_t submitBuffer(const char *buffer, size_t length) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnMediaMetricsService: public BnInterface<IMediaMetricsService>
-{
-public:
- status_t onTransact(uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0) override;
-
-protected:
- // Internal call where release is true if the service is to delete the item.
- virtual status_t submitInternal(
- mediametrics::Item *item, bool release) = 0;
-};
-
-}; // namespace android
-
-#endif // ANDROID_IMEDIASTATISTICSSERVICE_H
diff --git a/media/libmediametrics/include/media/MediaMetricsItem.h b/media/libmediametrics/include/media/MediaMetricsItem.h
index 303343f..428992c 100644
--- a/media/libmediametrics/include/media/MediaMetricsItem.h
+++ b/media/libmediametrics/include/media/MediaMetricsItem.h
@@ -32,7 +32,8 @@
namespace android {
-class IMediaMetricsService;
+namespace media { class IMediaMetricsService; }
+
class Parcel;
/*
@@ -239,7 +240,10 @@
public:
// are we collecting metrics data
static bool isEnabled();
- static sp<IMediaMetricsService> getService();
+ // returns the MediaMetrics service if active.
+ static sp<media::IMediaMetricsService> getService();
+ // submits a raw buffer directly to the MediaMetrics service - this is highly optimized.
+ static status_t submitBuffer(const char *buffer, size_t len);
protected:
static constexpr const char * const EnabledProperty = "media.metrics.enabled";
@@ -247,10 +251,9 @@
static const int EnabledProperty_default = 1;
// let's reuse a binder connection
- static sp<IMediaMetricsService> sMediaMetricsService;
+ static sp<media::IMediaMetricsService> sMediaMetricsService;
static void dropInstance();
- static bool submitBuffer(const char *buffer, size_t len);
template <typename T>
struct is_item_type {
@@ -573,7 +576,7 @@
bool record() {
return updateHeader()
- && BaseItem::submitBuffer(getBuffer(), getLength());
+ && BaseItem::submitBuffer(getBuffer(), getLength()) == OK;
}
bool isValid () const {
diff --git a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
index e3c0b05..994695f 100644
--- a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
@@ -238,7 +238,12 @@
int32_t operatingRate = getDefaultOperatingRate(encoderFormat);
if (operatingRate != -1) {
- SetDefaultFormatValueInt32(AMEDIAFORMAT_KEY_OPERATING_RATE, encoderFormat, operatingRate);
+ float tmpf;
+ int32_t tmpi;
+ if (!AMediaFormat_getFloat(encoderFormat, AMEDIAFORMAT_KEY_OPERATING_RATE, &tmpf) &&
+ !AMediaFormat_getInt32(encoderFormat, AMEDIAFORMAT_KEY_OPERATING_RATE, &tmpi)) {
+ AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_OPERATING_RATE, operatingRate);
+ }
}
SetDefaultFormatValueInt32(AMEDIAFORMAT_KEY_PRIORITY, encoderFormat, kDefaultCodecPriority);
@@ -260,8 +265,8 @@
return AMEDIA_ERROR_INVALID_PARAMETER;
}
- // TODO: replace __ANDROID_API_FUTURE__with 31 when it's official (b/178144708)
- #define __TRANSCODING_MIN_API__ __ANDROID_API_FUTURE__
+// TODO: replace __ANDROID_API_FUTURE__ with 31 when it's official (b/178144708)
+#define __TRANSCODING_MIN_API__ __ANDROID_API_FUTURE__
AMediaCodec* encoder;
if (__builtin_available(android __TRANSCODING_MIN_API__, *)) {
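
The VideoTrackTranscoder hunk above stops using SetDefaultFormatValueInt32 for AMEDIAFORMAT_KEY_OPERATING_RATE because the key may already be present as either a float or an int32, and only a missing key should receive the default. A small sketch of that pattern as a standalone helper (setOperatingRateIfUnset is illustrative, not an existing function):

```cpp
#include <cstdint>

#include <media/NdkMediaFormat.h>

// Applies a default operating rate only when the caller has not already set
// the key, checking both numeric types the key can legitimately carry.
static void setOperatingRateIfUnset(AMediaFormat* format, int32_t defaultRate) {
    float asFloat;
    int32_t asInt;
    const bool alreadySet =
            AMediaFormat_getFloat(format, AMEDIAFORMAT_KEY_OPERATING_RATE, &asFloat) ||
            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_OPERATING_RATE, &asInt);
    if (!alreadySet) {
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_OPERATING_RATE, defaultRate);
    }
}
```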
diff --git a/media/libmediatranscoding/transcoder/benchmark/AndroidTestTemplate.xml b/media/libmediatranscoding/transcoder/benchmark/AndroidTestTemplate.xml
index 64085d8..683f07b 100644
--- a/media/libmediatranscoding/transcoder/benchmark/AndroidTestTemplate.xml
+++ b/media/libmediatranscoding/transcoder/benchmark/AndroidTestTemplate.xml
@@ -19,7 +19,7 @@
<option name="cleanup" value="false" />
<option name="push-file" key="{MODULE}" value="/data/local/tmp/{MODULE}" />
<option name="push-file"
- key="https://storage.googleapis.com/android_media/frameworks/av/media/libmediatranscoding/transcoder/benchmark/TranscodingBenchmark-1.1.zip?unzip=true"
+ key="https://storage.googleapis.com/android_media/frameworks/av/media/libmediatranscoding/transcoder/benchmark/TranscodingBenchmark-1.2.zip?unzip=true"
value="/data/local/tmp/TranscodingBenchmark/" />
</target_preparer>
diff --git a/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp b/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
index e0b2050..712f8fc 100644
--- a/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
+++ b/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
@@ -33,6 +33,7 @@
#include <binder/ProcessState.h>
#include <fcntl.h>
#include <media/MediaTranscoder.h>
+#include <media/NdkCommon.h>
#include <iostream>
@@ -87,6 +88,7 @@
AMediaFormat* videoFormat = AMediaFormat_new();
AMediaFormat_setInt32(videoFormat, AMEDIAFORMAT_KEY_BIT_RATE, kVideoBitRate);
+ AMediaFormat_setString(videoFormat, AMEDIAFORMAT_KEY_MIME, AMEDIA_MIMETYPE_VIDEO_AVC);
return videoFormat;
}
@@ -222,7 +224,7 @@
}
static void SetMaxOperatingRate(AMediaFormat* format) {
- AMediaFormat_setFloat(format, AMEDIAFORMAT_KEY_OPERATING_RATE, INT32_MAX);
+ AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_OPERATING_RATE, INT32_MAX);
AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_PRIORITY, 1);
}
@@ -314,6 +316,174 @@
false /* includeAudio */, false /* transcodeVideo */);
}
+//---------------------------- Codecs, Resolutions, Bitrate ---------------------------------------
+static void SetMimeBitrate(AMediaFormat* format, std::string mime, int32_t bitrate) {
+ AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, mime.c_str());
+ AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_BIT_RATE, bitrate);
+}
+
+static void BM_1920x1080_Avc22Mbps2Avc12Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_30fps_h264_22Mbps.mp4",
+ "tx_bm_1920_1080_30fps_h264_22Mbps_transcoded_h264_12Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 12000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1920x1080_Avc15Mbps2Avc8Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_30fps_h264_15Mbps.mp4",
+ "tx_bm_1920_1080_30fps_h264_15Mbps_transcoded_h264_8Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 8000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1920x1080_Avc15Mbps2AvcPassthrough(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_30fps_h264_15Mbps.mp4",
+ "tx_bm_1920_1080_30fps_h264_15Mbps_passthrough_V.mp4",
+ false /* includeAudio */, false /* transcodeVideo */);
+}
+
+static void BM_1920x1080_Avc15MbpsAac2Avc8Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_30fps_h264_15Mbps_aac.mp4",
+ "tx_bm_1920_1080_30fps_h264_15Mbps_aac_transcoded_h264_8Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 8000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1920x1080_Avc15MbpsAac2Avc8MbpsAac(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_30fps_h264_15Mbps_aac.mp4",
+ "tx_bm_1920_1080_30fps_h264_15Mbps_aac_transcoded_h264_8Mbps_aac.mp4",
+ true /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 8000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1920x1080_Avc15MbpsAac2AvcPassthrough(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_30fps_h264_15Mbps_aac.mp4",
+ "tx_bm_1920_1080_30fps_h264_15Mbps_aac_passthrough_V.mp4",
+ false /* includeAudio */, false /* transcodeVideo */);
+}
+
+static void BM_1920x1080_Avc15MbpsAac2AvcAacPassthrough(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_30fps_h264_15Mbps_aac.mp4",
+ "tx_bm_1920_1080_30fps_h264_15Mbps_aac_passthrough_AV.mp4",
+ true /* includeAudio */, false /* transcodeVideo */);
+}
+
+static void BM_1920x1080_Hevc17Mbps2Hevc8Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_30fps_hevc_17Mbps.mp4",
+ "tx_bm_1920_1080_30fps_hevc_17Mbps_transcoded_hevc_8Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/hevc", bitrate = 8000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1920x1080_Hevc17Mbps2Avc12Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_30fps_hevc_17Mbps.mp4",
+ "tx_bm_1920_1080_30fps_hevc_17Mbps_transcoded_h264_12Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 12000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1920x1080_60fps_Hevc28Mbps2Avc15Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1920_1080_60fps_hevc_28Mbps.mp4",
+ "tx_bm_1920_1080_60fps_hevc_28Mbps_transcoded_h264_15Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 15000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1280x720_Avc10Mbps2Avc4Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1280_720_30fps_h264_10Mbps.mp4",
+ "tx_bm_1280_720_30fps_h264_10Mbps_transcoded_h264_4Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 4000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1280x720_Avc10Mbps2AvcPassthrough(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1280_720_30fps_h264_10Mbps.mp4",
+ "tx_bm_1280_720_30fps_h264_10Mbps_passthrough_V.mp4",
+ false /* includeAudio */, false /* transcodeVideo */);
+}
+
+static void BM_1280x720_Avc10MbpsAac2Avc4Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1280_720_30fps_h264_10Mbps_aac.mp4",
+ "tx_bm_1280_720_30fps_h264_10Mbps_aac_transcoded_h264_4Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 4000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1280x720_Avc10MbpsAac2Avc4MbpsAac(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1280_720_30fps_h264_10Mbps_aac.mp4",
+ "tx_bm_1280_720_30fps_h264_10Mbps_aac_transcoded_h264_4Mbps_aac.mp4",
+ true /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 4000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1280x720_Avc10MbpsAac2AvcPassthrough(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1280_720_30fps_h264_10Mbps_aac.mp4",
+ "tx_bm_1280_720_30fps_h264_10Mbps_aac_passthrough_V.mp4",
+ false /* includeAudio */, false /* transcodeVideo */);
+}
+
+static void BM_1280x720_Avc10MbpsAac2AvcAacPassthrough(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1280_720_30fps_h264_10Mbps_aac.mp4",
+ "tx_bm_1280_720_30fps_h264_10Mbps_aac_passthrough_AV.mp4",
+ true /* includeAudio */, false /* transcodeVideo */);
+}
+
+static void BM_1280x720_Hevc8Mbps2Avc4Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1280_720_30fps_hevc_8Mbps.mp4",
+ "tx_bm_1280_720_30fps_hevc_8Mbps_transcoded_h264_4Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 4000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_1080x1920_Avc15Mbps2Avc8Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_1080_1920_30fps_h264_15Mbps.mp4",
+ "tx_bm_1080_1920_30fps_h264_15Mbps_transcoded_h264_8Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 8000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_720x1280_Avc10Mbps2Avc4Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_720_1280_30fps_h264_10Mbps.mp4",
+ "tx_bm_720_1280_30fps_h264_10Mbps_transcoded_h264_4Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 4000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
+static void BM_3840x2160_Hevc42Mbps2Avc20Mbps(benchmark::State& state) {
+ TranscodeMediaFile(state, "tx_bm_3840_2160_30fps_hevc_42Mbps.mp4",
+ "tx_bm_3840_2160_30fps_hevc_42Mbps_transcoded_h264_4Mbps.mp4",
+ false /* includeAudio */, true /* transcodeVideo */,
+ [mime = "video/avc", bitrate = 20000000](AMediaFormat* dstFormat) {
+ SetMimeBitrate(dstFormat, mime, bitrate);
+ });
+}
+
//-------------------------------- Benchmark Registration ------------------------------------------
// Benchmark registration wrapper for transcoding.
@@ -337,6 +507,30 @@
TRANSCODER_BENCHMARK(BM_TranscodeAudioVideoPassthrough);
TRANSCODER_BENCHMARK(BM_TranscodeVideoPassthrough);
+TRANSCODER_BENCHMARK(BM_1920x1080_Avc22Mbps2Avc12Mbps);
+TRANSCODER_BENCHMARK(BM_1920x1080_Avc15Mbps2Avc8Mbps);
+TRANSCODER_BENCHMARK(BM_1920x1080_Avc15Mbps2AvcPassthrough);
+TRANSCODER_BENCHMARK(BM_1920x1080_Avc15MbpsAac2Avc8Mbps);
+TRANSCODER_BENCHMARK(BM_1920x1080_Avc15MbpsAac2Avc8MbpsAac);
+TRANSCODER_BENCHMARK(BM_1920x1080_Avc15MbpsAac2AvcPassthrough);
+TRANSCODER_BENCHMARK(BM_1920x1080_Avc15MbpsAac2AvcAacPassthrough);
+TRANSCODER_BENCHMARK(BM_1920x1080_Hevc17Mbps2Hevc8Mbps);
+TRANSCODER_BENCHMARK(BM_1920x1080_Hevc17Mbps2Avc12Mbps);
+TRANSCODER_BENCHMARK(BM_1920x1080_60fps_Hevc28Mbps2Avc15Mbps);
+
+TRANSCODER_BENCHMARK(BM_1280x720_Avc10Mbps2Avc4Mbps);
+TRANSCODER_BENCHMARK(BM_1280x720_Avc10Mbps2AvcPassthrough);
+TRANSCODER_BENCHMARK(BM_1280x720_Avc10MbpsAac2Avc4Mbps);
+TRANSCODER_BENCHMARK(BM_1280x720_Avc10MbpsAac2Avc4MbpsAac);
+TRANSCODER_BENCHMARK(BM_1280x720_Avc10MbpsAac2AvcPassthrough);
+TRANSCODER_BENCHMARK(BM_1280x720_Avc10MbpsAac2AvcAacPassthrough);
+TRANSCODER_BENCHMARK(BM_1280x720_Hevc8Mbps2Avc4Mbps);
+
+TRANSCODER_BENCHMARK(BM_1080x1920_Avc15Mbps2Avc8Mbps);
+TRANSCODER_BENCHMARK(BM_720x1280_Avc10Mbps2Avc4Mbps);
+
+TRANSCODER_BENCHMARK(BM_3840x2160_Hevc42Mbps2Avc20Mbps);
+
class CustomCsvReporter : public benchmark::BenchmarkReporter {
public:
CustomCsvReporter() : mPrintedHeader(false) {}
diff --git a/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
index 6d781cd..e40a507 100644
--- a/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
+++ b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
@@ -24,6 +24,7 @@
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp" />
<option name="module-name" value="{MODULE}" />
+ <option name="native-test-timeout" value="10m" />
</test>
</configuration>
diff --git a/media/libstagefright/foundation/ABuffer.cpp b/media/libstagefright/foundation/ABuffer.cpp
index c8965d9..c79384c 100644
--- a/media/libstagefright/foundation/ABuffer.cpp
+++ b/media/libstagefright/foundation/ABuffer.cpp
@@ -67,7 +67,7 @@
void ABuffer::setRange(size_t offset, size_t size) {
CHECK_LE(offset, mCapacity);
- CHECK_LE(offset + size, mCapacity);
+ CHECK_LE(size, mCapacity - offset);
mRangeOffset = offset;
mRangeLength = size;
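
The ABuffer::setRange change above rewrites the bounds check so the addition cannot wrap: once offset <= mCapacity holds, mCapacity - offset is well defined, whereas offset + size can overflow size_t and slip past the old check. A minimal standalone sketch of the difference (plain C++, not the AOSP class):

```cpp
#include <cstddef>
#include <cstdio>

// Old-style check: the sum can wrap around for a huge 'size' and still pass.
static bool rangeOkOverflowing(size_t offset, size_t size, size_t capacity) {
    return offset <= capacity && offset + size <= capacity;
}

// New-style check: no addition, so no wrap-around is possible.
static bool rangeOkSafe(size_t offset, size_t size, size_t capacity) {
    return offset <= capacity && size <= capacity - offset;
}

int main() {
    const size_t capacity = 1024;
    const size_t offset = 16;
    const size_t hugeSize = ~static_cast<size_t>(0) - 8;  // wraps when 16 is added

    printf("overflowing check accepts huge size: %d\n",
           rangeOkOverflowing(offset, hugeSize, capacity));  // 1 (the bug)
    printf("safe check accepts huge size:        %d\n",
           rangeOkSafe(offset, hugeSize, capacity));         // 0
    return 0;
}
```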
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index cccb63a..72a377d 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -28,6 +28,8 @@
#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/foundation/hexdump.h>
+#include <android-base/properties.h>
+
#include <stdint.h>
namespace android {
@@ -513,7 +515,11 @@
void AAVCAssembler::submitAccessUnit() {
CHECK(!mNALUnits.empty());
- ALOGV("Access unit complete (%zu nal units)", mNALUnits.size());
+ if (android::base::GetBoolProperty("debug.stagefright.fps", false)) {
+ ALOGD("Access unit complete (%zu nal units)", mNALUnits.size());
+ } else {
+ ALOGV("Access unit complete (%zu nal units)", mNALUnits.size());
+ }
size_t totalSize = 0;
for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
diff --git a/media/libstagefright/tests/Android.bp b/media/libstagefright/tests/Android.bp
index 5f3f72c..4a505d4 100644
--- a/media/libstagefright/tests/Android.bp
+++ b/media/libstagefright/tests/Android.bp
@@ -9,7 +9,6 @@
"libmedia",
"libstagefright",
"libstagefright_foundation",
- "libstagefright_omx",
"libutils",
"liblog",
],
@@ -17,11 +16,8 @@
include_dirs: [
"frameworks/av/media/libstagefright",
"frameworks/av/media/libstagefright/include",
- "frameworks/native/include/media/openmax",
],
- compile_multilib: "prefer32",
-
cflags: [
"-Werror",
"-Wall",
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index a92848c..becbe6e 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -793,7 +793,7 @@
mtp_file_range mfr;
struct stat sstat;
uint64_t finalsize;
- bool transcode = android::base::GetBoolProperty("sys.fuse.transcode_mtp", true);
+ bool transcode = android::base::GetBoolProperty("sys.fuse.transcode_mtp", false);
ALOGD("Mtp transcode = %d", transcode);
mfr.fd = mDatabase->openFilePath(filePath, transcode);
// Doing this here because we want to update fileLength only for this case and leave the
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 62b8624..e19dd3a 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -559,8 +559,6 @@
int32_t bottom;
} AImageCropRect;
-#if __ANDROID_API__ >= 24
-
/**
* Return the image back to the system and delete the AImage object from memory.
*
@@ -777,10 +775,6 @@
const AImage* image, int planeIdx,
/*out*/uint8_t** data, /*out*/int* dataLength) __INTRODUCED_IN(24);
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 26
-
/**
* Return the image back to the system and delete the AImage object from memory asynchronously.
*
@@ -829,8 +823,6 @@
*/
media_status_t AImage_getHardwareBuffer(const AImage* image, /*out*/AHardwareBuffer** buffer) __INTRODUCED_IN(26);
-#endif /* __ANDROID_API__ >= 26 */
-
__END_DECLS
#endif //_NDK_IMAGE_H
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index 600ffc9..d86f3c7 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -53,8 +53,6 @@
*/
typedef struct AImageReader AImageReader;
-#if __ANDROID_API__ >= 24
-
/**
* Create a new reader for images of the desired size and format.
*
@@ -320,10 +318,6 @@
media_status_t AImageReader_setImageListener(
AImageReader* reader, AImageReader_ImageListener* listener) __INTRODUCED_IN(24);
-#endif /* __ANDROID_API__ >= 24 */
-
-#if __ANDROID_API__ >= 26
-
/**
* AImageReader constructor similar to {@link AImageReader_new} that takes an additional parameter
* for the consumer usage. All other parameters and the return values are identical to those passed
@@ -510,8 +504,6 @@
AImageReader *reader, /* out */native_handle_t **handle);
#endif
-#endif /* __ANDROID_API__ >= 26 */
-
__END_DECLS
#endif //_NDK_IMAGE_READER_H
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index 1f9904a..519148e 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -121,8 +121,6 @@
AMediaCodecOnAsyncError onAsyncError;
} AMediaCodecOnAsyncNotifyCallback;
-#if __ANDROID_API__ >= 21
-
/**
* Create codec by name. Use this if you know the exact codec you want to use.
* When configuring, you will need to specify whether to use the codec as an
@@ -311,8 +309,6 @@
media_status_t AMediaCodec_releaseOutputBufferAtTime(
AMediaCodec *mData, size_t idx, int64_t timestampNs) __INTRODUCED_IN(21);
-#if __ANDROID_API__ >= 26
-
/**
* Creates a Surface that can be used as the input to encoder, in place of input buffers
*
@@ -393,10 +389,6 @@
*/
media_status_t AMediaCodec_signalEndOfInputStream(AMediaCodec *mData) __INTRODUCED_IN(26);
-#endif /* __ANDROID_API__ >= 26 */
-
-#if __ANDROID_API__ >= 28
-
/**
* Get format of the buffer. The specified buffer index must have been previously obtained from
* dequeueOutputBuffer.
@@ -482,8 +474,6 @@
*/
bool AMediaCodecActionCode_isTransient(int32_t actionCode) __INTRODUCED_IN(28);
-#endif /* __ANDROID_API__ >= 28 */
-
typedef enum {
AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
@@ -580,9 +570,6 @@
*/
media_status_t AMediaCodecCryptoInfo_getEncryptedBytes(AMediaCodecCryptoInfo*, size_t *dst) __INTRODUCED_IN(21);
-#endif /* __ANDROID_API__ >= 21 */
-
-#if __ANDROID_API__ >= 31
extern const char* AMEDIACODEC_KEY_HDR10_PLUS_INFO __INTRODUCED_IN(31);
extern const char* AMEDIACODEC_KEY_LOW_LATENCY __INTRODUCED_IN(31);
extern const char* AMEDIACODEC_KEY_OFFSET_TIME __INTRODUCED_IN(31);
@@ -590,7 +577,6 @@
extern const char* AMEDIACODEC_KEY_SUSPEND __INTRODUCED_IN(31);
extern const char* AMEDIACODEC_KEY_SUSPEND_TIME __INTRODUCED_IN(31);
extern const char* AMEDIACODEC_KEY_VIDEO_BITRATE __INTRODUCED_IN(31);
-#endif /* __ANDROID_API__ >= 31 */
__END_DECLS
diff --git a/media/ndk/include/media/NdkMediaCrypto.h b/media/ndk/include/media/NdkMediaCrypto.h
index 3fa07c7..590d51d 100644
--- a/media/ndk/include/media/NdkMediaCrypto.h
+++ b/media/ndk/include/media/NdkMediaCrypto.h
@@ -47,8 +47,6 @@
typedef uint8_t AMediaUUID[16];
-#if __ANDROID_API__ >= 21
-
/**
* Available since API level 21.
*/
@@ -69,8 +67,6 @@
*/
void AMediaCrypto_delete(AMediaCrypto* crypto) __INTRODUCED_IN(21);
-#endif /* __ANDROID_API__ >= 21 */
-
__END_DECLS
#endif // _NDK_MEDIA_CRYPTO_H
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
index 0577df2..4158a97 100644
--- a/media/ndk/include/media/NdkMediaDataSource.h
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -38,8 +38,6 @@
struct AMediaDataSource;
typedef struct AMediaDataSource AMediaDataSource;
-#if __ANDROID_API__ >= 28
-
/*
* AMediaDataSource's callbacks will be invoked on an implementation-defined thread
* or thread pool. No guarantees are provided about which thread(s) will be used for
@@ -93,8 +91,6 @@
*/
AMediaDataSource* AMediaDataSource_new() __INTRODUCED_IN(28);
-#if __ANDROID_API__ >= 29
-
/**
* Called to get an estimate of the number of bytes that can be read from this data source
* starting at |offset| without blocking for I/O.
@@ -124,8 +120,6 @@
int numheaders,
const char * const *key_values) __INTRODUCED_IN(29);
-#endif /*__ANDROID_API__ >= 29 */
-
/**
* Delete a previously created media data source.
*
@@ -185,10 +179,6 @@
AMediaDataSource*,
AMediaDataSourceClose) __INTRODUCED_IN(28);
-#endif /*__ANDROID_API__ >= 28 */
-
-#if __ANDROID_API__ >= 29
-
/**
* Close the data source, unblock reads, and release associated resources.
*
@@ -213,8 +203,6 @@
AMediaDataSource*,
AMediaDataSourceGetAvailableSize) __INTRODUCED_IN(29);
-#endif /*__ANDROID_API__ >= 29 */
-
__END_DECLS
#endif // _NDK_MEDIA_DATASOURCE_H
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 31f5c7d..849a8f9 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -165,8 +165,6 @@
const AMediaDrmSessionId *sessionId, const AMediaDrmKeyStatus *keyStatus,
size_t numKeys, bool hasNewUsableKey);
-#if __ANDROID_API__ >= 21
-
/**
* Query if the given scheme identified by its UUID is supported on this device, and
* whether the drm plugin is able to handle the media container format specified by mimeType.
@@ -576,8 +574,6 @@
const char *macAlgorithm, uint8_t *keyId, const uint8_t *message, size_t messageSize,
const uint8_t *signature, size_t signatureSize) __INTRODUCED_IN(21);
-#endif /* __ANDROID_API__ >= 21 */
-
__END_DECLS
#endif //_NDK_MEDIA_DRM_H
diff --git a/media/ndk/include/media/NdkMediaExtractor.h b/media/ndk/include/media/NdkMediaExtractor.h
index a1cd9e3..e429820 100644
--- a/media/ndk/include/media/NdkMediaExtractor.h
+++ b/media/ndk/include/media/NdkMediaExtractor.h
@@ -50,8 +50,6 @@
struct AMediaExtractor;
typedef struct AMediaExtractor AMediaExtractor;
-#if __ANDROID_API__ >= 21
-
/**
* Create new media extractor.
*
@@ -82,8 +80,6 @@
media_status_t AMediaExtractor_setDataSource(AMediaExtractor*,
const char *location) __INTRODUCED_IN(21);
-#if __ANDROID_API__ >= 28
-
/**
* Set the custom data source implementation from which the extractor will read.
*
@@ -92,8 +88,6 @@
media_status_t AMediaExtractor_setDataSourceCustom(AMediaExtractor*,
AMediaDataSource *src) __INTRODUCED_IN(28);
-#endif /* __ANDROID_API__ >= 28 */
-
/**
* Return the number of tracks in the previously specified media file
*
@@ -211,8 +205,6 @@
AMEDIAEXTRACTOR_SAMPLE_FLAG_ENCRYPTED = 2,
};
-#if __ANDROID_API__ >= 28
-
/**
* Returns the format of the extractor. The caller must free the returned format
* using AMediaFormat_delete(format).
@@ -266,10 +258,6 @@
media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex,
AMediaFormat *fmt) __INTRODUCED_IN(28);
-#endif /* __ANDROID_API__ >= 28 */
-
-#endif /* __ANDROID_API__ >= 21 */
-
__END_DECLS
#endif // _NDK_MEDIA_EXTRACTOR_H
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 18092f0..941e3a1 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -61,8 +61,6 @@
struct AMediaFormat;
typedef struct AMediaFormat AMediaFormat;
-#if __ANDROID_API__ >= 21
-
/**
* Available since API level 21.
*/
@@ -205,9 +203,7 @@
extern const char* AMEDIAFORMAT_KEY_TRACK_ID __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX __INTRODUCED_IN(28);
extern const char* AMEDIAFORMAT_KEY_WIDTH __INTRODUCED_IN(21);
-#endif /* __ANDROID_API__ >= 21 */
-#if __ANDROID_API__ >= 28
/**
* Available since API level 28.
*/
@@ -231,9 +227,7 @@
*/
void AMediaFormat_setRect(AMediaFormat*, const char* name,
int32_t left, int32_t top, int32_t right, int32_t bottom) __INTRODUCED_IN(28);
-#endif /* __ANDROID_API__ >= 28 */
-#if __ANDROID_API__ >= 29
/**
* Remove all key/value pairs from the given AMediaFormat.
*
@@ -307,9 +301,6 @@
extern const char* AMEDIAFORMAT_KEY_VALID_SAMPLES __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_YEAR __INTRODUCED_IN(29);
-#endif /* __ANDROID_API__ >= 29 */
-
-#if __ANDROID_API__ >= 30
/**
* An optional key describing the low latency decoding mode. This is an optional parameter
* that applies only to decoders. If enabled, the decoder doesn't hold input and output
@@ -320,7 +311,6 @@
* Available since API level 30.
*/
extern const char* AMEDIAFORMAT_KEY_LOW_LATENCY __INTRODUCED_IN(30);
-#endif /* __ANDROID_API__ >= 30 */
#if __ANDROID_API__ >= 31
extern const char* AMEDIAFORMAT_KEY_HDR10_PLUS_INFO __INTRODUCED_IN(31);
diff --git a/media/ndk/include/media/NdkMediaMuxer.h b/media/ndk/include/media/NdkMediaMuxer.h
index 9de3fbf..519e249 100644
--- a/media/ndk/include/media/NdkMediaMuxer.h
+++ b/media/ndk/include/media/NdkMediaMuxer.h
@@ -54,8 +54,6 @@
AMEDIAMUXER_OUTPUT_FORMAT_THREE_GPP = 2,
} OutputFormat;
-#if __ANDROID_API__ >= 21
-
/**
* Create new media muxer.
*
@@ -140,8 +138,6 @@
size_t trackIdx, const uint8_t *data,
const AMediaCodecBufferInfo *info) __INTRODUCED_IN(21);
-#endif /* __ANDROID_API__ >= 21 */
-
__END_DECLS
#endif // _NDK_MEDIA_MUXER_H
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 1f5b8d2..eaf0d10 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2961,7 +2961,10 @@
auto backInserter = std::back_inserter(metadata.tracks);
for (const sp<Track> &track : mActiveTracks) {
// No track is invalid as this is called after prepareTrack_l in the same critical section
- track->copyMetadataTo(backInserter);
+ // Do not forward metadata for PatchTrack with unspecified stream type
+ if (track->streamType() != AUDIO_STREAM_PATCH) {
+ track->copyMetadataTo(backInserter);
+ }
}
sendMetadataToBackend_l(metadata);
}
@@ -8101,6 +8104,10 @@
}
StreamInHalInterface::SinkMetadata metadata;
for (const sp<RecordTrack> &track : mActiveTracks) {
+ // Do not forward PatchRecord metadata to audio HAL
+ if (track->isPatchTrack()) {
+ continue;
+ }
// No track is invalid as this is called after prepareTrack_l in the same critical section
record_track_metadata_v7_t trackMetadata;
trackMetadata.base = {
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 0cc3a68..129f6f6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -39,6 +39,7 @@
// TODO(mnaganov): Consider finding an alternative for using HIDL code.
using hardware::Return;
using hardware::Status;
+using hardware::Void;
using utilities::convertTo;
template<typename E, typename C>
@@ -89,7 +90,6 @@
};
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
// No children
};
@@ -106,8 +106,6 @@
static constexpr const char *format = "format";
static constexpr const char *channelMasks = "channelMasks";
};
-
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
};
struct MixPortTraits : public AndroidCollectionTraits<IOProfile, IOProfileCollection>
@@ -125,7 +123,6 @@
static constexpr const char *maxActiveCount = "maxActiveCount";
};
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
// Children: GainTraits
};
@@ -147,7 +144,6 @@
static constexpr const char *encodedFormats = "encodedFormats";
};
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
// Children: GainTraits (optional)
};
@@ -166,8 +162,6 @@
};
typedef HwModule *PtrSerializingCtx;
-
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
};
struct ModuleTraits : public AndroidCollectionTraits<HwModule, HwModuleCollection>
@@ -187,13 +181,14 @@
typedef AudioPolicyConfig *PtrSerializingCtx;
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
// Children: mixPortTraits, devicePortTraits, and routeTraits
// Need to call deserialize on each child
};
struct GlobalConfigTraits
{
+ typedef void Element;
+
static constexpr const char *tag = "globalConfiguration";
struct Attributes
@@ -203,14 +198,16 @@
static constexpr const char *engineLibrarySuffix = "engine_library";
};
- static status_t deserialize(const xmlNode *root, AudioPolicyConfig *config);
+ typedef AudioPolicyConfig *PtrSerializingCtx;
};
struct SurroundSoundTraits
{
+ typedef void Element;
+
static constexpr const char *tag = "surroundSound";
- static status_t deserialize(const xmlNode *root, AudioPolicyConfig *config);
+ typedef AudioPolicyConfig *PtrSerializingCtx;
// Children: SurroundSoundFormatTraits
};
@@ -224,28 +221,30 @@
static constexpr const char *name = "name";
static constexpr const char *subformats = "subformats";
};
-
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
};
class PolicySerializer
{
public:
- PolicySerializer() : mVersion{std::to_string(gMajor) + "." + std::to_string(gMinor)}
- {
- ALOGV("%s: Version=%s Root=%s", __func__, mVersion.c_str(), rootName);
- }
status_t deserialize(const char *configFile, AudioPolicyConfig *config);
+ template <class Trait>
+ status_t deserializeCollection(const xmlNode *cur,
+ typename Trait::Collection *collection,
+ typename Trait::PtrSerializingCtx serializingContext);
+ template <class Trait>
+ Return<typename Trait::Element> deserialize(const xmlNode *cur,
+ typename Trait::PtrSerializingCtx serializingContext);
+
private:
static constexpr const char *rootName = "audioPolicyConfiguration";
static constexpr const char *versionAttribute = "version";
- static constexpr uint32_t gMajor = 1; /**< the major number of the policy xml format version. */
- static constexpr uint32_t gMinor = 0; /**< the minor number of the policy xml format version. */
typedef AudioPolicyConfig Element;
- const std::string mVersion;
+ std::string mChannelMasksSeparator = ",";
+ std::string mSamplingRatesSeparator = ",";
+ std::string mFlagsSeparator = "|";
// Children: ModulesTraits, VolumeTraits, SurroundSoundTraits (optional)
};
@@ -295,7 +294,7 @@
}
template <class Trait>
-status_t deserializeCollection(const xmlNode *cur,
+status_t PolicySerializer::deserializeCollection(const xmlNode *cur,
typename Trait::Collection *collection,
typename Trait::PtrSerializingCtx serializingContext)
{
@@ -308,7 +307,7 @@
}
for (; child != NULL; child = child->next) {
if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(Trait::tag))) {
- auto element = Trait::deserialize(child, serializingContext);
+ auto element = deserialize<Trait>(child, serializingContext);
if (element.isOk()) {
status_t status = Trait::addElementToCollection(element, collection);
if (status != NO_ERROR) {
@@ -328,11 +327,14 @@
return NO_ERROR;
}
-Return<AudioGainTraits::Element> AudioGainTraits::deserialize(const xmlNode *cur,
- PtrSerializingCtx /*serializingContext*/)
+template<>
+Return<AudioGainTraits::Element> PolicySerializer::deserialize<AudioGainTraits>(const xmlNode *cur,
+ AudioGainTraits::PtrSerializingCtx /*serializingContext*/)
{
+ using Attributes = AudioGainTraits::Attributes;
+
static uint32_t index = 0;
- Element gain = new AudioGain(index++, true);
+ AudioGainTraits::Element gain = new AudioGain(index++, true);
std::string mode = getXmlAttribute(cur, Attributes::mode);
if (!mode.empty()) {
@@ -395,16 +397,19 @@
}
}
-Return<AudioProfileTraits::Element> AudioProfileTraits::deserialize(const xmlNode *cur,
- PtrSerializingCtx /*serializingContext*/)
+template<>
+Return<AudioProfileTraits::Element> PolicySerializer::deserialize<AudioProfileTraits>(
+ const xmlNode *cur, AudioProfileTraits::PtrSerializingCtx /*serializingContext*/)
{
+ using Attributes = AudioProfileTraits::Attributes;
+
std::string samplingRates = getXmlAttribute(cur, Attributes::samplingRates);
std::string format = getXmlAttribute(cur, Attributes::format);
std::string channels = getXmlAttribute(cur, Attributes::channelMasks);
- Element profile = new AudioProfile(formatFromString(format, gDynamicFormat),
- channelMasksFromString(channels, ","),
- samplingRatesFromString(samplingRates, ","));
+ AudioProfileTraits::Element profile = new AudioProfile(formatFromString(format, gDynamicFormat),
+ channelMasksFromString(channels, mChannelMasksSeparator.c_str()),
+ samplingRatesFromString(samplingRates, mSamplingRatesSeparator.c_str()));
profile->setDynamicFormat(profile->getFormat() == gDynamicFormat);
profile->setDynamicChannels(profile->getChannels().empty());
@@ -413,15 +418,18 @@
return profile;
}
-Return<MixPortTraits::Element> MixPortTraits::deserialize(const xmlNode *child,
- PtrSerializingCtx /*serializingContext*/)
+template<>
+Return<MixPortTraits::Element> PolicySerializer::deserialize<MixPortTraits>(const xmlNode *child,
+ MixPortTraits::PtrSerializingCtx /*serializingContext*/)
{
+ using Attributes = MixPortTraits::Attributes;
+
std::string name = getXmlAttribute(child, Attributes::name);
if (name.empty()) {
ALOGE("%s: No %s found", __func__, Attributes::name);
return Status::fromStatusT(BAD_VALUE);
}
- ALOGV("%s: %s %s=%s", __func__, tag, Attributes::name, name.c_str());
+ ALOGV("%s: %s %s=%s", __func__, MixPortTraits::tag, Attributes::name, name.c_str());
std::string role = getXmlAttribute(child, Attributes::role);
if (role.empty()) {
ALOGE("%s: No %s found", __func__, Attributes::role);
@@ -431,7 +439,7 @@
audio_port_role_t portRole = (role == Attributes::roleSource) ?
AUDIO_PORT_ROLE_SOURCE : AUDIO_PORT_ROLE_SINK;
- Element mixPort = new IOProfile(name, portRole);
+ MixPortTraits::Element mixPort = new IOProfile(name, portRole);
AudioProfileTraits::Collection profiles;
status_t status = deserializeCollection<AudioProfileTraits>(child, &profiles, NULL);
@@ -450,10 +458,10 @@
if (!flags.empty()) {
// Source role
if (portRole == AUDIO_PORT_ROLE_SOURCE) {
- mixPort->setFlags(OutputFlagConverter::maskFromString(flags));
+ mixPort->setFlags(OutputFlagConverter::maskFromString(flags, mFlagsSeparator.c_str()));
} else {
// Sink role
- mixPort->setFlags(InputFlagConverter::maskFromString(flags));
+ mixPort->setFlags(InputFlagConverter::maskFromString(flags, mFlagsSeparator.c_str()));
}
}
std::string maxOpenCount = getXmlAttribute(child, Attributes::maxOpenCount);
@@ -475,9 +483,13 @@
return mixPort;
}
-Return<DevicePortTraits::Element> DevicePortTraits::deserialize(const xmlNode *cur,
- PtrSerializingCtx /*serializingContext*/)
+template<>
+Return<DevicePortTraits::Element> PolicySerializer::deserialize<DevicePortTraits>(
+ const xmlNode *cur, DevicePortTraits::PtrSerializingCtx /*serializingContext*/)
{
+ using Attributes = DevicePortTraits::Attributes;
+ auto& tag = DevicePortTraits::tag;
+
std::string name = getXmlAttribute(cur, Attributes::tagName);
if (name.empty()) {
ALOGE("%s: No %s found", __func__, Attributes::tagName);
@@ -513,7 +525,8 @@
encodedFormats = formatsFromString(encodedFormatsLiteral, " ");
}
std::string address = getXmlAttribute(cur, Attributes::address);
- Element deviceDesc = new DeviceDescriptor(type, name, address, encodedFormats);
+ DevicePortTraits::Element deviceDesc =
+ new DeviceDescriptor(type, name, address, encodedFormats);
AudioProfileTraits::Collection profiles;
status_t status = deserializeCollection<AudioProfileTraits>(cur, &profiles, NULL);
@@ -538,8 +551,12 @@
return deviceDesc;
}
-Return<RouteTraits::Element> RouteTraits::deserialize(const xmlNode *cur, PtrSerializingCtx ctx)
+template<>
+Return<RouteTraits::Element> PolicySerializer::deserialize<RouteTraits>(
+ const xmlNode *cur, RouteTraits::PtrSerializingCtx ctx)
{
+ using Attributes = RouteTraits::Attributes;
+
std::string type = getXmlAttribute(cur, Attributes::type);
if (type.empty()) {
ALOGE("%s: No %s found", __func__, Attributes::type);
@@ -548,8 +565,8 @@
audio_route_type_t routeType = (type == Attributes::typeMix) ?
AUDIO_ROUTE_MIX : AUDIO_ROUTE_MUX;
- ALOGV("%s: %s %s=%s", __func__, tag, Attributes::type, type.c_str());
- Element route = new AudioRoute(routeType);
+ ALOGV("%s: %s %s=%s", __func__, RouteTraits::tag, Attributes::type, type.c_str());
+ RouteTraits::Element route = new AudioRoute(routeType);
std::string sinkAttr = getXmlAttribute(cur, Attributes::sink);
if (sinkAttr.empty()) {
@@ -595,8 +612,16 @@
return route;
}
-Return<ModuleTraits::Element> ModuleTraits::deserialize(const xmlNode *cur, PtrSerializingCtx ctx)
+template<>
+Return<ModuleTraits::Element> PolicySerializer::deserialize<ModuleTraits>(
+ const xmlNode *cur, ModuleTraits::PtrSerializingCtx ctx)
{
+ using Attributes = ModuleTraits::Attributes;
+ auto& tag = ModuleTraits::tag;
+ auto& childAttachedDevicesTag = ModuleTraits::childAttachedDevicesTag;
+ auto& childAttachedDeviceTag = ModuleTraits::childAttachedDeviceTag;
+ auto& childDefaultOutputDeviceTag = ModuleTraits::childDefaultOutputDeviceTag;
+
std::string name = getXmlAttribute(cur, Attributes::name);
if (name.empty()) {
ALOGE("%s: No %s found", __func__, Attributes::name);
@@ -610,11 +635,11 @@
versionMajor, versionMajor);
}
- ALOGV("%s: %s %s=%s", __func__, tag, Attributes::name, name.c_str());
+ ALOGV("%s: %s %s=%s", __func__, ModuleTraits::tag, Attributes::name, name.c_str());
- Element module = new HwModule(name.c_str(), versionMajor, versionMinor);
+ ModuleTraits::Element module = new HwModule(name.c_str(), versionMajor, versionMinor);
- // Deserialize childrens: Audio Mix Port, Audio Device Ports (Source/Sink), Audio Routes
+ // Deserialize children: Audio Mix Port, Audio Device Ports (Source/Sink), Audio Routes
MixPortTraits::Collection mixPorts;
status_t status = deserializeCollection<MixPortTraits>(cur, &mixPorts, NULL);
if (status != NO_ERROR) {
@@ -677,10 +702,14 @@
return module;
}
-status_t GlobalConfigTraits::deserialize(const xmlNode *root, AudioPolicyConfig *config)
+template<>
+Return<GlobalConfigTraits::Element> PolicySerializer::deserialize<GlobalConfigTraits>(
+ const xmlNode *root, GlobalConfigTraits::PtrSerializingCtx config)
{
+ using Attributes = GlobalConfigTraits::Attributes;
+
for (const xmlNode *cur = root->xmlChildrenNode; cur != NULL; cur = cur->next) {
- if (!xmlStrcmp(cur->name, reinterpret_cast<const xmlChar*>(tag))) {
+ if (!xmlStrcmp(cur->name, reinterpret_cast<const xmlChar*>(GlobalConfigTraits::tag))) {
bool value;
std::string attr = getXmlAttribute(cur, Attributes::speakerDrcEnabled);
if (!attr.empty() &&
@@ -696,33 +725,38 @@
if (!engineLibrarySuffix.empty()) {
config->setEngineLibraryNameSuffix(engineLibrarySuffix);
}
- return NO_ERROR;
+ return Void();
}
}
- return NO_ERROR;
+ return Void();
}
-status_t SurroundSoundTraits::deserialize(const xmlNode *root, AudioPolicyConfig *config)
+template<>
+Return<SurroundSoundTraits::Element> PolicySerializer::deserialize<SurroundSoundTraits>(
+ const xmlNode *root, SurroundSoundTraits::PtrSerializingCtx config)
{
config->setDefaultSurroundFormats();
for (const xmlNode *cur = root->xmlChildrenNode; cur != NULL; cur = cur->next) {
- if (!xmlStrcmp(cur->name, reinterpret_cast<const xmlChar*>(tag))) {
+ if (!xmlStrcmp(cur->name, reinterpret_cast<const xmlChar*>(SurroundSoundTraits::tag))) {
AudioPolicyConfig::SurroundFormats formats;
status_t status = deserializeCollection<SurroundSoundFormatTraits>(
cur, &formats, nullptr);
if (status == NO_ERROR) {
config->setSurroundFormats(formats);
}
- return NO_ERROR;
+ return Void();
}
}
- return NO_ERROR;
+ return Void();
}
-Return<SurroundSoundFormatTraits::Element> SurroundSoundFormatTraits::deserialize(
- const xmlNode *cur, PtrSerializingCtx /*serializingContext*/)
+template<>
+Return<SurroundSoundFormatTraits::Element> PolicySerializer::deserialize<SurroundSoundFormatTraits>(
+ const xmlNode *cur, SurroundSoundFormatTraits::PtrSerializingCtx /*serializingContext*/)
{
+ using Attributes = SurroundSoundFormatTraits::Attributes;
+
std::string formatLiteral = getXmlAttribute(cur, Attributes::name);
if (formatLiteral.empty()) {
ALOGE("%s: No %s found for a surround format", __func__, Attributes::name);
@@ -733,7 +767,8 @@
ALOGE("%s: Unrecognized format %s", __func__, formatLiteral.c_str());
return Status::fromStatusT(BAD_VALUE);
}
- Element pair = std::make_pair(format, Collection::mapped_type{});
+ SurroundSoundFormatTraits::Element pair = std::make_pair(
+ format, SurroundSoundFormatTraits::Collection::mapped_type{});
std::string subformatsLiteral = getXmlAttribute(cur, Attributes::subformats);
if (subformatsLiteral.empty()) return pair;
@@ -775,12 +810,14 @@
ALOGE("%s: No version found in root node %s", __func__, rootName);
return BAD_VALUE;
}
- if (version != mVersion) {
- ALOGE("%s: Version does not match; expect %s got %s", __func__, mVersion.c_str(),
- version.c_str());
+ if (version == "7.0") {
+ mChannelMasksSeparator = mSamplingRatesSeparator = mFlagsSeparator = " ";
+ } else if (version != "1.0") {
+ ALOGE("%s: Version does not match; expected \"1.0\" or \"7.0\" got \"%s\"",
+ __func__, version.c_str());
return BAD_VALUE;
}
- // Lets deserialize children
+ // Let's deserialize children
// Modules
ModuleTraits::Collection modules;
status_t status = deserializeCollection<ModuleTraits>(root, &modules, config);
@@ -790,10 +827,10 @@
config->setHwModules(modules);
// Global Configuration
- GlobalConfigTraits::deserialize(root, config);
+ deserialize<GlobalConfigTraits>(root, config);
// Surround configuration
- SurroundSoundTraits::deserialize(root, config);
+ deserialize<SurroundSoundTraits>(root, config);
return android::OK;
}
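For context, the member separators introduced above reflect that version 7.0 configuration files list attribute values separated by spaces, while 1.0 files use commas (and "|" for flags). A rough, hypothetical illustration of what a configurable separator has to handle, not the actual parsing code used by the serializer:

```cpp
#include <sstream>
#include <string>
#include <vector>

// Hypothetical helper: split one XML attribute value on a single-character separator,
// mirroring how channelMasksFromString()/samplingRatesFromString() are now passed
// mChannelMasksSeparator / mSamplingRatesSeparator instead of a hard-coded ",".
std::vector<std::string> splitAttribute(const std::string& value, char separator) {
    std::vector<std::string> tokens;
    std::istringstream stream(value);
    for (std::string token; std::getline(stream, token, separator);) {
        if (!token.empty()) tokens.push_back(token);
    }
    return tokens;
}

// splitAttribute("8000,16000,48000", ',') -> {"8000", "16000", "48000"}   (version 1.0)
// splitAttribute("8000 16000 48000", ' ') -> {"8000", "16000", "48000"}   (version 7.0)
```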
diff --git a/services/audiopolicy/config/audio_policy_configuration_7_0.xml b/services/audiopolicy/config/audio_policy_configuration_7_0.xml
index 31c8954..9961a00 100644
--- a/services/audiopolicy/config/audio_policy_configuration_7_0.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_7_0.xml
@@ -14,7 +14,7 @@
limitations under the License.
-->
-<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+<audioPolicyConfiguration version="7.0" xmlns:xi="http://www.w3.org/2001/XInclude">
<!-- version section contains a “version” tag in the form “major.minor” e.g. version=”1.0” -->
<!-- Global configuration Declaration -->
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
index 7238317..98415b7 100644
--- a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
@@ -12,7 +12,13 @@
</mixPort>
<!-- Le Audio Audio Ports -->
<mixPort name="le audio output" role="source">
- <profile name="" format="AUDIO_FORMAT_PCM_16_BIT,AUDIO_FORMAT_PCM_24_BIT,AUDIO_FORMAT_PCM_32_BIT"
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+ samplingRates="8000,16000,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
samplingRates="8000,16000,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_OUT_MONO,AUDIO_CHANNEL_OUT_STEREO"/>
</mixPort>
@@ -37,6 +43,10 @@
<!-- Hearing AIDs Audio Ports -->
<devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
<!-- BLE Audio Ports -->
+ <!-- Note that these device types are not valid in HAL versions < 7. Any device
+ running a pre-V7 HAL and using this file will not pass VTS; use
+ bluetooth_audio_policy_configuration_7_0.xml instead.
+ -->
<devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
<devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
</devicePorts>
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
index 2dffe02..fbe7571 100644
--- a/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
@@ -10,6 +10,18 @@
samplingRates="24000 16000"
channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
</mixPort>
+ <!-- Le Audio Audio Ports -->
+ <mixPort name="le audio output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_24_BIT_PACKED"
+ samplingRates="8000 16000 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_PCM_32_BIT"
+ samplingRates="8000 16000 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
</mixPorts>
<devicePorts>
<!-- A2DP Audio Ports -->
@@ -30,6 +42,9 @@
</devicePort>
<!-- Hearing AIDs Audio Ports -->
<devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+ <!-- BLE Audio Ports -->
+ <devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
+ <devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
</devicePorts>
<routes>
<route type="mix" sink="BT A2DP Out"
@@ -40,5 +55,9 @@
sources="a2dp output"/>
<route type="mix" sink="BT Hearing Aid Out"
sources="hearing aid output"/>
+ <route type="mix" sink="BLE Headset Out"
+ sources="le audio output"/>
+ <route type="mix" sink="BLE Speaker Out"
+ sources="le audio output"/>
</routes>
</module>
diff --git a/services/audiopolicy/fuzzer/Android.bp b/services/audiopolicy/fuzzer/Android.bp
new file mode 100644
index 0000000..21f6515
--- /dev/null
+++ b/services/audiopolicy/fuzzer/Android.bp
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ ******************************************************************************/
+
+cc_fuzz {
+ name: "audiopolicy_fuzzer",
+ srcs: [
+ "audiopolicy_fuzzer.cpp",
+ ],
+ include_dirs: [
+ "frameworks/av/services/audiopolicy",
+ ],
+ shared_libs: [
+ "android.hardware.audio.common-util",
+ "capture_state_listener-aidl-cpp",
+ "libaudioclient",
+ "libaudiofoundation",
+ "libbase",
+ "libcutils",
+ "libhidlbase",
+ "libdl",
+ "liblog",
+ "libmedia_helper",
+ "libmediametrics",
+ "libutils",
+ "libxml2",
+ "libbinder",
+ "libaudiopolicy",
+ "libaudiopolicymanagerdefault",
+ ],
+ static_libs: [
+ "android.hardware.audio.common@7.0-enums",
+ "libaudiopolicycomponents",
+ ],
+ header_libs: [
+ "libaudiopolicycommon",
+ "libaudiopolicyengine_interface_headers",
+ "libaudiopolicymanager_interface_headers",
+ ],
+ data: [":audiopolicyfuzzer_configuration_files"],
+}
diff --git a/services/audiopolicy/fuzzer/README.md b/services/audiopolicy/fuzzer/README.md
new file mode 100644
index 0000000..08d7213
--- /dev/null
+++ b/services/audiopolicy/fuzzer/README.md
@@ -0,0 +1,63 @@
+# Fuzzer for libaudiopolicy
+
+## Plugin Design Considerations
+The fuzzer plugin for libaudiopolicy is designed based on the
+understanding of the service and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+AudioPolicy APIs contain the following parameters:
+1. AudioFormats
+2. AudioChannelMasks
+3. AudioOutputFlags
+4. AudioDevices
+5. MixTypes
+6. MixRouteFlags
+7. SampleRates
+8. AudioUsages
+9. AudioContentTypes
+10. AudioSources
+11. AudioFlagMasks
+12. AudioPolicyDeviceStates
+
+| Parameter| Valid Input Values| Configured Value|
+|------------- |-------------| ----- |
+| `AudioFormat` | 77 values of type `audio_format_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `AudioChannelMask` | 83 values of type `audio_channel_mask_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `AudioOutputFlag` | 16 values of type `audio_output_flags_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `AudioDevice` | `AUDIO_DEVICE_OUT_AUX_DIGITAL`, `AUDIO_DEVICE_OUT_STUB`, `AUDIO_DEVICE_IN_VOICE_CALL`, `AUDIO_DEVICE_IN_AUX_DIGITAL`, `AUDIO_DEVICE_IN_STUB` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `MixType` | `MIX_TYPE_PLAYERS`, `MIX_TYPE_RECORDERS` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `MixRouteFlag` | `MIX_ROUTE_FLAG_RENDER`, `MIX_ROUTE_FLAG_LOOP_BACK`, `MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER`, `MIX_ROUTE_FLAG_ALL` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `SampleRate` | `0` to `UINT32_MAX` | Value obtained from FuzzedDataProvider |
+| `AudioUsage` | `AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST`, `AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT`, `AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED`, `AUDIO_USAGE_NOTIFICATION_EVENT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `AudioContentType` | `AUDIO_CONTENT_TYPE_UNKNOWN`, `AUDIO_CONTENT_TYPE_SPEECH`, `AUDIO_CONTENT_TYPE_MUSIC`, `AUDIO_CONTENT_TYPE_MOVIE`, `AUDIO_CONTENT_TYPE_SONIFICATION` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `AudioSource` | 14 values of type `audio_source_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `AudioFlagMask` | 15 values of type `audio_flags_mask_t` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+| `AudioPolicyDeviceStates` | `AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE`, `AUDIO_POLICY_DEVICE_STATE_AVAILABLE`, `AUDIO_POLICY_DEVICE_STATE_CNT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
+
+This also ensures that the plugin is always deterministic for any given input.
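As a rough sketch of the selection scheme described above (simplified and illustrative; the actual fuzzer builds its valid-value lists from the audio policy HAL enums and uses an equivalent helper):

```cpp
#include <cstdint>
#include <vector>
#include <fuzzer/FuzzedDataProvider.h>

// Either pick a known-valid value, steered by an index taken from the fuzzed input,
// or return a completely arbitrary value so that invalid inputs are exercised too.
template <typename T>
T pickValue(FuzzedDataProvider* fdp, const std::vector<T>& validValues) {
    if (fdp->ConsumeBool()) {
        return validValues[fdp->ConsumeIntegralInRange<size_t>(0, validValues.size() - 1)];
    }
    return static_cast<T>(fdp->ConsumeIntegral<uint32_t>());
}
```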
+
+## Build
+
+This describes steps to build the audiopolicy_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+ $ mm -j$(nproc) audiopolicy_fuzzer
+```
+
+#### Steps to run
+To run on device
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/arm64/audiopolicy_fuzzer/audiopolicy_fuzzer
+```
+
+## References
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
new file mode 100644
index 0000000..c1f2aa8
--- /dev/null
+++ b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
@@ -0,0 +1,972 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ ******************************************************************************/
+#include <stdint.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <Serializer.h>
+#include <android-base/file.h>
+#include <libxml/parser.h>
+#include <libxml/xinclude.h>
+#include <media/AudioPolicy.h>
+#include <media/PatchBuilder.h>
+#include <media/RecordingActivityTracker.h>
+
+#include <AudioPolicyInterface.h>
+#include <android_audio_policy_configuration_V7_0-enums.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <tests/AudioPolicyManagerTestClient.h>
+#include <tests/AudioPolicyTestClient.h>
+#include <tests/AudioPolicyTestManager.h>
+#include <xsdc/XsdcSupport.h>
+
+using namespace android;
+
+namespace xsd {
+using namespace ::android::audio::policy::configuration::V7_0;
+}
+
+static const std::vector<audio_format_t> kAudioFormats = [] {
+ std::vector<audio_format_t> result;
+ for (const auto enumVal : xsdc_enum_range<xsd::AudioFormat>{}) {
+ audio_format_t audioFormatHal;
+ std::string audioFormat = toString(enumVal);
+ if (audio_format_from_string(audioFormat.c_str(), &audioFormatHal)) {
+ result.push_back(audioFormatHal);
+ }
+ }
+ return result;
+}();
+
+static const std::vector<audio_channel_mask_t> kAudioChannelOutMasks = [] {
+ std::vector<audio_channel_mask_t> result;
+ for (const auto enumVal : xsdc_enum_range<xsd::AudioChannelMask>{}) {
+ audio_channel_mask_t audioChannelMaskHal;
+ std::string audioChannelMask = toString(enumVal);
+ if (enumVal != xsd::AudioChannelMask::AUDIO_CHANNEL_NONE &&
+ audioChannelMask.find("_IN_") == std::string::npos &&
+ audio_channel_mask_from_string(audioChannelMask.c_str(), &audioChannelMaskHal)) {
+ result.push_back(audioChannelMaskHal);
+ }
+ }
+ return result;
+}();
+
+static const std::vector<audio_channel_mask_t> kAudioChannelInMasks = [] {
+ std::vector<audio_channel_mask_t> result;
+ for (const auto enumVal : xsdc_enum_range<xsd::AudioChannelMask>{}) {
+ audio_channel_mask_t audioChannelMaskHal;
+ std::string audioChannelMask = toString(enumVal);
+ if (enumVal != xsd::AudioChannelMask::AUDIO_CHANNEL_NONE &&
+ audioChannelMask.find("_OUT_") == std::string::npos &&
+ audio_channel_mask_from_string(audioChannelMask.c_str(), &audioChannelMaskHal)) {
+ result.push_back(audioChannelMaskHal);
+ }
+ }
+ return result;
+}();
+
+static const std::vector<audio_output_flags_t> kAudioOutputFlags = [] {
+ std::vector<audio_output_flags_t> result;
+ for (const auto enumVal : xsdc_enum_range<xsd::AudioInOutFlag>{}) {
+ audio_output_flags_t audioOutputFlagHal;
+ std::string audioOutputFlag = toString(enumVal);
+ if (audioOutputFlag.find("_OUTPUT_") != std::string::npos &&
+ audio_output_flag_from_string(audioOutputFlag.c_str(), &audioOutputFlagHal)) {
+ result.push_back(audioOutputFlagHal);
+ }
+ }
+ return result;
+}();
+
+static const std::vector<audio_devices_t> kAudioDevices = [] {
+ std::vector<audio_devices_t> result;
+ for (const auto enumVal : xsdc_enum_range<xsd::AudioDevice>{}) {
+ audio_devices_t audioDeviceHal;
+ std::string audioDevice = toString(enumVal);
+ if (audio_device_from_string(audioDevice.c_str(), &audioDeviceHal)) {
+ result.push_back(audioDeviceHal);
+ }
+ }
+ return result;
+}();
+
+static const std::vector<audio_usage_t> kAudioUsages = [] {
+ std::vector<audio_usage_t> result;
+ for (const auto enumVal : xsdc_enum_range<xsd::AudioUsage>{}) {
+ audio_usage_t audioUsageHal;
+ std::string audioUsage = toString(enumVal);
+ if (audio_usage_from_string(audioUsage.c_str(), &audioUsageHal)) {
+ result.push_back(audioUsageHal);
+ }
+ }
+ return result;
+}();
+
+static const std::vector<audio_source_t> kAudioSources = [] {
+ std::vector<audio_source_t> result;
+ for (const auto enumVal : xsdc_enum_range<xsd::AudioSource>{}) {
+ audio_source_t audioSourceHal;
+ std::string audioSource = toString(enumVal);
+ if (audio_source_from_string(audioSource.c_str(), &audioSourceHal)) {
+ result.push_back(audioSourceHal);
+ }
+ }
+ return result;
+}();
+
+static const std::vector<audio_content_type_t> kAudioContentTypes = [] {
+ std::vector<audio_content_type_t> result;
+ for (const auto enumVal : xsdc_enum_range<xsd::AudioContentType>{}) {
+ audio_content_type_t audioContentTypeHal;
+ std::string audioContentType = toString(enumVal);
+ if (audio_content_type_from_string(audioContentType.c_str(), &audioContentTypeHal)) {
+ result.push_back(audioContentTypeHal);
+ }
+ }
+ return result;
+}();
+
+std::vector<int> kMixTypes = {MIX_TYPE_PLAYERS, MIX_TYPE_RECORDERS};
+
+std::vector<int> kMixRouteFlags = {MIX_ROUTE_FLAG_RENDER, MIX_ROUTE_FLAG_LOOP_BACK,
+ MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER, MIX_ROUTE_FLAG_ALL};
+
+std::vector<audio_flags_mask_t> kAudioFlagMasks = {
+ AUDIO_FLAG_NONE, AUDIO_FLAG_AUDIBILITY_ENFORCED,
+ AUDIO_FLAG_SECURE, AUDIO_FLAG_SCO,
+ AUDIO_FLAG_BEACON, AUDIO_FLAG_HW_AV_SYNC,
+ AUDIO_FLAG_HW_HOTWORD, AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY,
+ AUDIO_FLAG_BYPASS_MUTE, AUDIO_FLAG_LOW_LATENCY,
+ AUDIO_FLAG_DEEP_BUFFER, AUDIO_FLAG_NO_MEDIA_PROJECTION,
+ AUDIO_FLAG_MUTE_HAPTIC, AUDIO_FLAG_NO_SYSTEM_CAPTURE,
+ AUDIO_FLAG_CAPTURE_PRIVATE};
+
+std::vector<audio_policy_dev_state_t> kAudioPolicyDeviceStates = {
+ AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ AUDIO_POLICY_DEVICE_STATE_CNT,
+};
+
+std::vector<uint32_t> kSamplingRates = {8000, 16000, 44100, 48000, 88200, 96000};
+
+template <typename T>
+T getValueFromVector(FuzzedDataProvider *fdp, std::vector<T> arr) {
+ if (fdp->ConsumeBool()) {
+ return arr[fdp->ConsumeIntegralInRange<int32_t>(0, arr.size() - 1)];
+ } else {
+ return (T)fdp->ConsumeIntegral<uint32_t>();
+ }
+}
+
+class AudioPolicyManagerFuzzer {
+ public:
+ explicit AudioPolicyManagerFuzzer(FuzzedDataProvider *fdp);
+ virtual ~AudioPolicyManagerFuzzer() = default;
+ virtual bool initialize();
+ virtual void SetUpManagerConfig();
+ bool getOutputForAttr(audio_port_handle_t *selectedDeviceId, audio_format_t format,
+ audio_channel_mask_t channelMask, int sampleRate,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ audio_io_handle_t *output = nullptr,
+ audio_port_handle_t *portId = nullptr, audio_attributes_t attr = {});
+ bool getInputForAttr(const audio_attributes_t &attr, audio_unique_id_t riid,
+ audio_port_handle_t *selectedDeviceId, audio_format_t format,
+ audio_channel_mask_t channelMask, int sampleRate,
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+ audio_port_handle_t *portId = nullptr);
+ bool findDevicePort(audio_port_role_t role, audio_devices_t deviceType,
+ const std::string &address, audio_port_v7 *foundPort);
+ static audio_port_handle_t getDeviceIdFromPatch(const struct audio_patch *patch);
+ audio_patch createFuzzedPatch();
+ void fuzzPatchCreation();
+ virtual void process();
+
+ protected:
+ std::unique_ptr<AudioPolicyManagerTestClient> mClient{new AudioPolicyManagerTestClient};
+ std::unique_ptr<AudioPolicyTestManager> mManager{new AudioPolicyTestManager(mClient.get())};
+ FuzzedDataProvider *mFdp;
+};
+
+AudioPolicyManagerFuzzer::AudioPolicyManagerFuzzer(FuzzedDataProvider *fdp)
+ : mFdp(fdp) {}
+
+bool AudioPolicyManagerFuzzer::initialize() {
+ if (mFdp->remaining_bytes() < 1) {
+ return false;
+ }
+ // init code
+ SetUpManagerConfig();
+
+ if (mManager->initialize() != NO_ERROR) {
+ return false;
+ }
+ if (mManager->initCheck() != NO_ERROR) {
+ return false;
+ }
+ return true;
+}
+
+void AudioPolicyManagerFuzzer::SetUpManagerConfig() { mManager->getConfig().setDefault(); }
+
+bool AudioPolicyManagerFuzzer::getOutputForAttr(
+ audio_port_handle_t *selectedDeviceId, audio_format_t format, audio_channel_mask_t channelMask,
+ int sampleRate, audio_output_flags_t flags, audio_io_handle_t *output,
+ audio_port_handle_t *portId, audio_attributes_t attr) {
+ audio_io_handle_t localOutput;
+ if (!output) output = &localOutput;
+ *output = AUDIO_IO_HANDLE_NONE;
+ audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ config.sample_rate = sampleRate;
+ config.channel_mask = channelMask;
+ config.format = format;
+ audio_port_handle_t localPortId;
+ if (!portId) portId = &localPortId;
+ *portId = AUDIO_PORT_HANDLE_NONE;
+ AudioPolicyInterface::output_type_t outputType;
+
+ if (mManager->getOutputForAttr(&attr, output, AUDIO_SESSION_NONE, &stream, 0 /*uid*/, &config,
+ &flags, selectedDeviceId, portId, {}, &outputType) != OK) {
+ return false;
+ }
+ if (*output == AUDIO_IO_HANDLE_NONE || *portId == AUDIO_PORT_HANDLE_NONE) {
+ return false;
+ }
+ return true;
+}
+
+bool AudioPolicyManagerFuzzer::getInputForAttr(
+ const audio_attributes_t &attr, audio_unique_id_t riid, audio_port_handle_t *selectedDeviceId,
+ audio_format_t format, audio_channel_mask_t channelMask, int sampleRate,
+ audio_input_flags_t flags, audio_port_handle_t *portId) {
+ audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ config.sample_rate = sampleRate;
+ config.channel_mask = channelMask;
+ config.format = format;
+ audio_port_handle_t localPortId;
+ if (!portId) portId = &localPortId;
+ *portId = AUDIO_PORT_HANDLE_NONE;
+ AudioPolicyInterface::input_type_t inputType;
+
+ if (mManager->getInputForAttr(&attr, &input, riid, AUDIO_SESSION_NONE, 0 /*uid*/, &config,
+ flags, selectedDeviceId, &inputType, portId) != OK) {
+ return false;
+ }
+ if (*portId == AUDIO_PORT_HANDLE_NONE || input == AUDIO_IO_HANDLE_NONE) {
+ return false;
+ }
+ return true;
+}
+
+bool AudioPolicyManagerFuzzer::findDevicePort(audio_port_role_t role, audio_devices_t deviceType,
+ const std::string &address,
+ audio_port_v7 *foundPort) {
+ uint32_t numPorts = 0;
+ uint32_t generation1;
+ status_t ret;
+
+ ret = mManager->listAudioPorts(role, AUDIO_PORT_TYPE_DEVICE, &numPorts, nullptr, &generation1);
+ if (ret != NO_ERROR) {
+ return false;
+ }
+
+ uint32_t generation2;
+ struct audio_port_v7 ports[numPorts];
+ ret = mManager->listAudioPorts(role, AUDIO_PORT_TYPE_DEVICE, &numPorts, ports, &generation2);
+ if (ret != NO_ERROR) {
+ return false;
+ }
+
+ for (const auto &port : ports) {
+ if (port.role == role && port.ext.device.type == deviceType &&
+ (strncmp(port.ext.device.address, address.c_str(), AUDIO_DEVICE_MAX_ADDRESS_LEN) ==
+ 0)) {
+ if (foundPort) *foundPort = port;
+ return true;
+ }
+ }
+ return false;
+}
+
+audio_port_handle_t AudioPolicyManagerFuzzer::getDeviceIdFromPatch(
+ const struct audio_patch *patch) {
+ if (patch->num_sources != 0 && patch->num_sinks != 0) {
+ if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
+ return patch->sinks[0].id;
+ } else {
+ return patch->sources[0].id;
+ }
+ }
+ return AUDIO_PORT_HANDLE_NONE;
+}
+
+audio_patch AudioPolicyManagerFuzzer::createFuzzedPatch() {
+ audio_patch patch{};
+ patch.id = mFdp->ConsumeIntegral<uint32_t>();
+ patch.num_sources = mFdp->ConsumeIntegralInRange(0, AUDIO_PATCH_PORTS_MAX);
+ for (int i = 0; i < patch.num_sources; ++i) {
+ audio_port_config config{};
+ std::vector<uint8_t> bytes = mFdp->ConsumeBytes<uint8_t>(sizeof(config));
+ memcpy(reinterpret_cast<uint8_t *>(&config), &bytes[0], bytes.size());
+ patch.sources[i] = config;
+ }
+ patch.num_sinks = mFdp->ConsumeIntegralInRange(0, AUDIO_PATCH_PORTS_MAX);
+ for (int i = 0; i < patch.num_sinks; ++i) {
+ audio_port_config config{};
+ std::vector<uint8_t> bytes = mFdp->ConsumeBytes<uint8_t>(sizeof(config));
+ memcpy(reinterpret_cast<uint8_t *>(&config), &bytes[0], bytes.size());
+ patch.sinks[i] = config;
+ }
+ return patch;
+}
+
+void AudioPolicyManagerFuzzer::fuzzPatchCreation() {
+ if (mFdp->remaining_bytes()) {
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
+ uid_t uid = mFdp->ConsumeIntegral<uint32_t>();
+
+ // create a fuzzed patch
+ handle = AUDIO_PATCH_HANDLE_NONE;
+ audio_patch patch = createFuzzedPatch();
+ uid = mFdp->ConsumeIntegral<uint32_t>();
+ if (mManager->createAudioPatch(&patch, &handle, uid) == NO_ERROR) {
+ mManager->releaseAudioPatch(handle, uid);
+ }
+ }
+}
+
+void AudioPolicyManagerFuzzer::process() {
+ if (initialize()) {
+ fuzzPatchCreation();
+ }
+}
+
+class AudioPolicyManagerFuzzerWithConfigurationFile : public AudioPolicyManagerFuzzer {
+ public:
+ explicit AudioPolicyManagerFuzzerWithConfigurationFile(FuzzedDataProvider *fdp)
+ : AudioPolicyManagerFuzzer(fdp){};
+
+ protected:
+ void SetUpManagerConfig() override;
+ virtual std::string getConfigFile();
+ void traverseAndFuzzXML(xmlDocPtr pDoc, xmlNodePtr curr);
+ std::string fuzzXML(std::string xmlPath);
+
+ static inline const std::string sExecutableDir = base::GetExecutableDirectory() + "/";
+ static inline const std::string sDefaultConfig =
+ sExecutableDir + "data/test_audio_policy_configuration.xml";
+ static inline const std::string sFuzzedConfig = sExecutableDir + "fuzzed.xml";
+};
+
+std::string AudioPolicyManagerFuzzerWithConfigurationFile::getConfigFile() {
+ return fuzzXML(sDefaultConfig);
+}
+
+void AudioPolicyManagerFuzzerWithConfigurationFile::SetUpManagerConfig() {
+ deserializeAudioPolicyFile(getConfigFile().c_str(), &mManager->getConfig());
+}
+
+void AudioPolicyManagerFuzzerWithConfigurationFile::traverseAndFuzzXML(xmlDocPtr pDoc,
+ xmlNodePtr curr) {
+ if (curr == nullptr) {
+ return;
+ }
+
+ xmlAttr *attribute = curr->properties;
+ while (attribute) {
+ if (!xmlStrcmp(attribute->name, reinterpret_cast<const xmlChar *>("format"))) {
+ const char *newFormat =
+ audio_format_to_string(getValueFromVector<audio_format_t>(mFdp, kAudioFormats));
+ xmlSetProp(curr, attribute->name, reinterpret_cast<const xmlChar *>(newFormat));
+ }
+ if (!xmlStrcmp(attribute->name, reinterpret_cast<const xmlChar *>("flags"))) {
+ std::string newFlag = "";
+ uint16_t numFlags = std::max((uint16_t)1, mFdp->ConsumeIntegral<uint16_t>());
+ for (uint16_t i = 0; i < numFlags; ++i) {
+ newFlag += std::string(audio_output_flag_to_string(
+ getValueFromVector<audio_output_flags_t>(mFdp, kAudioOutputFlags)));
+ if (i != (numFlags - 1)) {
+ newFlag += std::string("|");
+ }
+ }
+ xmlSetProp(curr, attribute->name, reinterpret_cast<const xmlChar *>(newFlag.c_str()));
+ }
+ if (!xmlStrcmp(attribute->name, reinterpret_cast<const xmlChar *>("samplingRates"))) {
+ std::string newRate = "";
+ uint16_t numRates = std::max((uint16_t)1, mFdp->ConsumeIntegral<uint16_t>());
+ for (uint16_t i = 0; i < numRates; ++i) {
+ newRate += std::to_string(getValueFromVector<uint32_t>(mFdp, kSamplingRates));
+ if (i != (numRates - 1)) {
+ newRate += std::string(",");
+ }
+ }
+ xmlSetProp(curr, attribute->name, reinterpret_cast<const xmlChar *>(newRate.c_str()));
+ }
+ if (!xmlStrcmp(attribute->name, reinterpret_cast<const xmlChar *>("channelMasks"))) {
+ int isOutMask = -1;
+ char *value =
+ reinterpret_cast<char *>(xmlNodeListGetString(pDoc, attribute->children, 1));
+ if (std::string(value).find(std::string("_OUT_")) != std::string::npos) {
+ // OUT mask
+ isOutMask = 1;
+ } else if (std::string(value).find(std::string("_IN_")) != std::string::npos) {
+ // IN mask
+ isOutMask = 0;
+ }
+ if (isOutMask != -1) {
+ std::string newMask = "";
+ uint16_t numMasks = std::max((uint16_t)1, mFdp->ConsumeIntegral<uint16_t>());
+ for (uint16_t i = 0; i < numMasks; ++i) {
+ if (isOutMask) {
+ newMask += std::string(audio_channel_out_mask_to_string(
+ getValueFromVector<audio_channel_mask_t>(mFdp, kAudioChannelOutMasks)));
+ } else {
+ newMask += std::string(audio_channel_in_mask_to_string(
+ getValueFromVector<audio_channel_mask_t>(mFdp, kAudioChannelInMasks)));
+ }
+ if (i != (numMasks - 1)) {
+ newMask += std::string(",");
+ }
+ }
+ xmlSetProp(curr, attribute->name,
+ reinterpret_cast<const xmlChar *>(newMask.c_str()));
+ }
+ xmlFree(value);
+ }
+ attribute = attribute->next;
+ }
+
+ curr = curr->xmlChildrenNode;
+ while (curr != nullptr) {
+ traverseAndFuzzXML(pDoc, curr);
+ curr = curr->next;
+ }
+}
+
+std::string AudioPolicyManagerFuzzerWithConfigurationFile::fuzzXML(std::string xmlPath) {
+ std::string outPath = sFuzzedConfig;
+
+ // Load in the xml file from disk
+ xmlDocPtr pDoc = xmlParseFile(xmlPath.c_str());
+ xmlNodePtr root = xmlDocGetRootElement(pDoc);
+
+ traverseAndFuzzXML(pDoc, root);
+
+ // Save the document back out to disk.
+ xmlSaveFileEnc(outPath.c_str(), pDoc, "UTF-8");
+ xmlFreeDoc(pDoc);
+
+ return outPath;
+}
+
+class AudioPolicyManagerFuzzerMsd : public AudioPolicyManagerFuzzerWithConfigurationFile {
+ public:
+ explicit AudioPolicyManagerFuzzerMsd(FuzzedDataProvider *fdp)
+ : AudioPolicyManagerFuzzerWithConfigurationFile(fdp) {}
+
+ protected:
+ std::string getConfigFile() override;
+
+ static inline const std::string sMsdConfig =
+ sExecutableDir + "data/test_audio_policy_msd_configuration.xml";
+};
+
+std::string AudioPolicyManagerFuzzerMsd::getConfigFile() { return fuzzXML(sMsdConfig); }
+
+using PolicyMixTuple = std::tuple<audio_usage_t, audio_source_t, uint32_t>;
+
+class AudioPolicyManagerFuzzerDynamicPolicy : public AudioPolicyManagerFuzzerWithConfigurationFile {
+ public:
+ explicit AudioPolicyManagerFuzzerDynamicPolicy(FuzzedDataProvider *fdp)
+ : AudioPolicyManagerFuzzerWithConfigurationFile(fdp){};
+ ~AudioPolicyManagerFuzzerDynamicPolicy() override;
+ void process() override;
+
+ protected:
+ status_t addPolicyMix(int mixType, int mixFlag, audio_devices_t deviceType,
+ std::string mixAddress, const audio_config_t &audioConfig,
+ const std::vector<PolicyMixTuple> &rules);
+ void clearPolicyMix();
+ void registerPolicyMixes();
+ void unregisterPolicyMixes();
+
+ Vector<AudioMix> mAudioMixes;
+ const std::string mMixAddress = "remote_submix_media";
+};
+
+AudioPolicyManagerFuzzerDynamicPolicy::~AudioPolicyManagerFuzzerDynamicPolicy() {
+ clearPolicyMix();
+}
+
+status_t AudioPolicyManagerFuzzerDynamicPolicy::addPolicyMix(
+ int mixType, int mixFlag, audio_devices_t deviceType, std::string mixAddress,
+ const audio_config_t &audioConfig, const std::vector<PolicyMixTuple> &rules) {
+ Vector<AudioMixMatchCriterion> myMixMatchCriteria;
+
+ for (const auto &rule : rules) {
+ myMixMatchCriteria.add(
+ AudioMixMatchCriterion(std::get<0>(rule), std::get<1>(rule), std::get<2>(rule)));
+ }
+
+ AudioMix myAudioMix(myMixMatchCriteria, mixType, audioConfig, mixFlag,
+ String8(mixAddress.c_str()), 0);
+ myAudioMix.mDeviceType = deviceType;
+ // Clear mAudioMixes before adding a new one to make sure we don't add already existing mixes.
+ mAudioMixes.clear();
+ mAudioMixes.add(myAudioMix);
+
+ // As the policy mix registration may fail in some cases,
+ // the caller needs to check the returned status.
+ status_t ret = mManager->registerPolicyMixes(mAudioMixes);
+ return ret;
+}
+
+void AudioPolicyManagerFuzzerDynamicPolicy::clearPolicyMix() {
+ if (mManager != nullptr) {
+ mManager->unregisterPolicyMixes(mAudioMixes);
+ }
+ mAudioMixes.clear();
+}
+
+void AudioPolicyManagerFuzzerDynamicPolicy::registerPolicyMixes() {
+ const uint32_t numPolicies = mFdp->ConsumeIntegralInRange<uint32_t>(1, MAX_MIXES_PER_POLICY);
+
+ for (int i = 0; i < numPolicies; ++i) {
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = getValueFromVector<audio_channel_mask_t>(
+ mFdp, mFdp->ConsumeBool() ? kAudioChannelInMasks : kAudioChannelOutMasks);
+ audioConfig.format = getValueFromVector<audio_format_t>(mFdp, kAudioFormats);
+ audioConfig.sample_rate = getValueFromVector<uint32_t>(mFdp, kSamplingRates);
+ addPolicyMix(getValueFromVector<int>(mFdp, kMixTypes),
+ getValueFromVector<int>(mFdp, kMixRouteFlags),
+ getValueFromVector<audio_devices_t>(mFdp, kAudioDevices), "", audioConfig,
+ std::vector<PolicyMixTuple>());
+ }
+}
+
+void AudioPolicyManagerFuzzerDynamicPolicy::unregisterPolicyMixes() {
+ mManager->unregisterPolicyMixes(mAudioMixes);
+}
+
+void AudioPolicyManagerFuzzerDynamicPolicy::process() {
+ if (initialize()) {
+ registerPolicyMixes();
+ fuzzPatchCreation();
+ unregisterPolicyMixes();
+ }
+}
+
+class AudioPolicyManagerFuzzerDPNoRemoteSubmixModule
+ : public AudioPolicyManagerFuzzerDynamicPolicy {
+ public:
+ explicit AudioPolicyManagerFuzzerDPNoRemoteSubmixModule(FuzzedDataProvider *fdp)
+ : AudioPolicyManagerFuzzerDynamicPolicy(fdp){};
+
+ protected:
+ std::string getConfigFile() override;
+
+ static inline const std::string sPrimaryOnlyConfig =
+ sExecutableDir + "data/test_audio_policy_primary_only_configuration.xml";
+};
+
+std::string AudioPolicyManagerFuzzerDPNoRemoteSubmixModule::getConfigFile() {
+ return fuzzXML(sPrimaryOnlyConfig);
+}
+
+class AudioPolicyManagerFuzzerDPPlaybackReRouting : public AudioPolicyManagerFuzzerDynamicPolicy {
+ public:
+ explicit AudioPolicyManagerFuzzerDPPlaybackReRouting(FuzzedDataProvider *fdp);
+ ~AudioPolicyManagerFuzzerDPPlaybackReRouting() override;
+ void process() override;
+
+ protected:
+ bool initialize() override;
+ void playBackReRouting();
+
+ std::unique_ptr<RecordingActivityTracker> mTracker;
+
+ std::vector<PolicyMixTuple> mUsageRules = {
+ {AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, RULE_MATCH_ATTRIBUTE_USAGE},
+ {AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT, RULE_MATCH_ATTRIBUTE_USAGE}};
+
+ struct audio_port_v7 mInjectionPort;
+ audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
+ audio_config_t mAudioConfig;
+};
+
+AudioPolicyManagerFuzzerDPPlaybackReRouting::AudioPolicyManagerFuzzerDPPlaybackReRouting(
+ FuzzedDataProvider *fdp)
+ : AudioPolicyManagerFuzzerDynamicPolicy(fdp) {
+ const uint32_t numRules = mFdp->ConsumeIntegralInRange<uint32_t>(1, 10);
+ for (int i = 0; i < numRules; ++i) {
+ PolicyMixTuple rule = {getValueFromVector<audio_usage_t>(mFdp, kAudioUsages),
+ getValueFromVector<audio_source_t>(mFdp, kAudioSources),
+ RULE_MATCH_ATTRIBUTE_USAGE};
+ mUsageRules.push_back(rule);
+ }
+}
+
+AudioPolicyManagerFuzzerDPPlaybackReRouting::~AudioPolicyManagerFuzzerDPPlaybackReRouting() {
+ mManager->stopInput(mPortId);
+}
+
+bool AudioPolicyManagerFuzzerDPPlaybackReRouting::initialize() {
+ if (!AudioPolicyManagerFuzzerDynamicPolicy::initialize()) {
+ return false;
+ }
+ mTracker.reset(new RecordingActivityTracker());
+
+ mAudioConfig = AUDIO_CONFIG_INITIALIZER;
+ mAudioConfig.channel_mask =
+ getValueFromVector<audio_channel_mask_t>(mFdp, kAudioChannelOutMasks);
+ mAudioConfig.format = getValueFromVector<audio_format_t>(mFdp, kAudioFormats);
+ mAudioConfig.sample_rate = getValueFromVector<uint32_t>(mFdp, kSamplingRates);
+ status_t ret = addPolicyMix(getValueFromVector<int>(mFdp, kMixTypes),
+ getValueFromVector<int>(mFdp, kMixRouteFlags),
+ getValueFromVector<audio_devices_t>(mFdp, kAudioDevices),
+ mMixAddress, mAudioConfig, mUsageRules);
+ if (ret != NO_ERROR) {
+ return false;
+ }
+
+ struct audio_port_v7 extractionPort;
+ findDevicePort(AUDIO_PORT_ROLE_SOURCE, getValueFromVector<audio_devices_t>(mFdp, kAudioDevices),
+ mMixAddress, &extractionPort);
+
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_source_t source = getValueFromVector<audio_source_t>(mFdp, kAudioSources);
+ audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, source,
+ AUDIO_FLAG_NONE, ""};
+ std::string tags = "addr=" + mMixAddress;
+ strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+ getInputForAttr(attr, mTracker->getRiid(), &selectedDeviceId, mAudioConfig.format,
+ mAudioConfig.channel_mask, mAudioConfig.sample_rate, AUDIO_INPUT_FLAG_NONE,
+ &mPortId);
+
+ ret = mManager->startInput(mPortId);
+ if (ret != NO_ERROR) {
+ return false;
+ }
+ if (!findDevicePort(AUDIO_PORT_ROLE_SINK,
+ getValueFromVector<audio_devices_t>(mFdp, kAudioDevices), mMixAddress,
+ &mInjectionPort)) {
+ return false;
+ }
+
+ return true;
+}
+
+void AudioPolicyManagerFuzzerDPPlaybackReRouting::playBackReRouting() {
+ const uint32_t numTestCases = mFdp->ConsumeIntegralInRange<uint32_t>(1, 10);
+ for (int i = 0; i < numTestCases; ++i) {
+ audio_attributes_t attr;
+ attr.content_type = getValueFromVector<audio_content_type_t>(mFdp, kAudioContentTypes);
+ attr.usage = getValueFromVector<audio_usage_t>(mFdp, kAudioUsages);
+ attr.source = getValueFromVector<audio_source_t>(mFdp, kAudioSources);
+ attr.flags = getValueFromVector<audio_flags_mask_t>(mFdp, kAudioFlagMasks);
+ std::string tags(mFdp->ConsumeBool() ? "" : "addr=remote_submix_media");
+ strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+
+ audio_port_handle_t playbackRoutedPortId = AUDIO_PORT_HANDLE_NONE;
+ getOutputForAttr(&playbackRoutedPortId, mAudioConfig.format, mAudioConfig.channel_mask,
+ mAudioConfig.sample_rate, AUDIO_OUTPUT_FLAG_NONE, nullptr /*output*/,
+ nullptr /*portId*/, attr);
+ }
+}
+
+void AudioPolicyManagerFuzzerDPPlaybackReRouting::process() {
+ if (initialize()) {
+ playBackReRouting();
+ registerPolicyMixes();
+ fuzzPatchCreation();
+ unregisterPolicyMixes();
+ }
+}
+
+class AudioPolicyManagerFuzzerDPMixRecordInjection : public AudioPolicyManagerFuzzerDynamicPolicy {
+ public:
+ explicit AudioPolicyManagerFuzzerDPMixRecordInjection(FuzzedDataProvider *fdp);
+ ~AudioPolicyManagerFuzzerDPMixRecordInjection() override;
+ void process() override;
+
+ protected:
+ bool initialize() override;
+ void recordingInjection();
+
+ std::unique_ptr<RecordingActivityTracker> mTracker;
+
+ std::vector<PolicyMixTuple> mSourceRules = {
+ {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_CAMCORDER, RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET},
+ {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_MIC, RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET},
+ {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_VOICE_COMMUNICATION,
+ RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET}};
+
+ struct audio_port_v7 mExtractionPort;
+ audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
+ audio_config_t mAudioConfig;
+};
+
+AudioPolicyManagerFuzzerDPMixRecordInjection::AudioPolicyManagerFuzzerDPMixRecordInjection(
+ FuzzedDataProvider *fdp)
+ : AudioPolicyManagerFuzzerDynamicPolicy(fdp) {
+ const uint32_t numRules = mFdp->ConsumeIntegralInRange<uint32_t>(1, 10);
+ for (int i = 0; i < numRules; ++i) {
+ PolicyMixTuple rule = {getValueFromVector<audio_usage_t>(mFdp, kAudioUsages),
+ getValueFromVector<audio_source_t>(mFdp, kAudioSources),
+ RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET};
+ mSourceRules.push_back(rule);
+ }
+}
+
+AudioPolicyManagerFuzzerDPMixRecordInjection::~AudioPolicyManagerFuzzerDPMixRecordInjection() {
+ mManager->stopOutput(mPortId);
+}
+
+bool AudioPolicyManagerFuzzerDPMixRecordInjection::initialize() {
+ if (!AudioPolicyManagerFuzzerDynamicPolicy::initialize()) {
+ return false;
+ }
+
+ mTracker.reset(new RecordingActivityTracker());
+
+ mAudioConfig = AUDIO_CONFIG_INITIALIZER;
+ mAudioConfig.channel_mask =
+ getValueFromVector<audio_channel_mask_t>(mFdp, kAudioChannelInMasks);
+ mAudioConfig.format = getValueFromVector<audio_format_t>(mFdp, kAudioFormats);
+ mAudioConfig.sample_rate = getValueFromVector<uint32_t>(mFdp, kSamplingRates);
+ status_t ret = addPolicyMix(getValueFromVector<int>(mFdp, kMixTypes),
+ getValueFromVector<int>(mFdp, kMixRouteFlags),
+ getValueFromVector<audio_devices_t>(mFdp, kAudioDevices),
+ mMixAddress, mAudioConfig, mSourceRules);
+ if (ret != NO_ERROR) {
+ return false;
+ }
+
+ struct audio_port_v7 injectionPort;
+ findDevicePort(AUDIO_PORT_ROLE_SINK, getValueFromVector<audio_devices_t>(mFdp, kAudioDevices),
+ mMixAddress, &injectionPort);
+
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_usage_t usage = getValueFromVector<audio_usage_t>(mFdp, kAudioUsages);
+ audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, usage, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""};
+ std::string tags = std::string("addr=") + mMixAddress;
+ strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+ getOutputForAttr(&selectedDeviceId, mAudioConfig.format, mAudioConfig.channel_mask,
+ mAudioConfig.sample_rate /*sampleRate*/, AUDIO_OUTPUT_FLAG_NONE,
+ nullptr /*output*/, &mPortId, attr);
+ ret = mManager->startOutput(mPortId);
+ if (ret != NO_ERROR) {
+ return false;
+ }
+ getDeviceIdFromPatch(mClient->getLastAddedPatch());
+ if (!findDevicePort(AUDIO_PORT_ROLE_SOURCE,
+ getValueFromVector<audio_devices_t>(mFdp, kAudioDevices), mMixAddress,
+ &mExtractionPort)) {
+ return false;
+ }
+
+ return true;
+}
+
+void AudioPolicyManagerFuzzerDPMixRecordInjection::recordingInjection() {
+ const uint32_t numTestCases = mFdp->ConsumeIntegralInRange<uint32_t>(1, 10);
+ for (int i = 0; i < numTestCases; ++i) {
+ audio_attributes_t attr;
+ attr.content_type = getValueFromVector<audio_content_type_t>(mFdp, kAudioContentTypes);
+ attr.usage = getValueFromVector<audio_usage_t>(mFdp, kAudioUsages);
+ attr.source = getValueFromVector<audio_source_t>(mFdp, kAudioSources);
+ attr.flags = getValueFromVector<audio_flags_mask_t>(mFdp, kAudioFlagMasks);
+ std::string tags(mFdp->ConsumeBool() ? "" : "addr=remote_submix_media");
+ strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+
+ audio_port_handle_t captureRoutedPortId = AUDIO_PORT_HANDLE_NONE;
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+ getInputForAttr(attr, mTracker->getRiid(), &captureRoutedPortId, mAudioConfig.format,
+ mAudioConfig.channel_mask, mAudioConfig.sample_rate, AUDIO_INPUT_FLAG_NONE,
+ &portId);
+ }
+}
+
+void AudioPolicyManagerFuzzerDPMixRecordInjection::process() {
+ if (initialize()) {
+ recordingInjection();
+ registerPolicyMixes();
+ fuzzPatchCreation();
+ unregisterPolicyMixes();
+ }
+}
+
+using DeviceConnectionTestParams =
+ std::tuple<audio_devices_t /*type*/, std::string /*name*/, std::string /*address*/>;
+
+class AudioPolicyManagerFuzzerDeviceConnection
+ : public AudioPolicyManagerFuzzerWithConfigurationFile {
+ public:
+ explicit AudioPolicyManagerFuzzerDeviceConnection(FuzzedDataProvider *fdp)
+ : AudioPolicyManagerFuzzerWithConfigurationFile(fdp){};
+ void process() override;
+
+ protected:
+ void setDeviceConnectionState();
+ void explicitlyRoutingAfterConnection();
+};
+
+void AudioPolicyManagerFuzzerDeviceConnection::setDeviceConnectionState() {
+ const uint32_t numTestCases = mFdp->ConsumeIntegralInRange<uint32_t>(1, 10);
+ for (int i = 0; i < numTestCases; ++i) {
+ const audio_devices_t type = getValueFromVector<audio_devices_t>(mFdp, kAudioDevices);
+ const std::string name = mFdp->ConsumeRandomLengthString();
+ const std::string address = mFdp->ConsumeRandomLengthString();
+ mManager->setDeviceConnectionState(
+ type, getValueFromVector<audio_policy_dev_state_t>(mFdp, kAudioPolicyDeviceStates),
+ address.c_str(), name.c_str(), getValueFromVector<audio_format_t>(mFdp, kAudioFormats));
+ }
+}
+
+void AudioPolicyManagerFuzzerDeviceConnection::explicitlyRoutingAfterConnection() {
+ const uint32_t numTestCases = mFdp->ConsumeIntegralInRange<uint32_t>(1, 10);
+ for (int i = 0; i < numTestCases; ++i) {
+ const audio_devices_t type = getValueFromVector<audio_devices_t>(mFdp, kAudioDevices);
+ const std::string name = mFdp->ConsumeRandomLengthString();
+ const std::string address = mFdp->ConsumeRandomLengthString();
+ mManager->setDeviceConnectionState(
+ type, getValueFromVector<audio_policy_dev_state_t>(mFdp, kAudioPolicyDeviceStates),
+ address.c_str(), name.c_str(), getValueFromVector<audio_format_t>(mFdp, kAudioFormats));
+
+ audio_port_v7 devicePort;
+ const audio_port_role_t role =
+ audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
+ findDevicePort(role, type, address, &devicePort);
+
+ audio_port_handle_t routedPortId = devicePort.id;
+ // Try starting input or output according to the device type
+ if (audio_is_output_devices(type)) {
+ getOutputForAttr(&routedPortId, getValueFromVector<audio_format_t>(mFdp, kAudioFormats),
+ getValueFromVector<audio_channel_mask_t>(mFdp, kAudioChannelOutMasks),
+ getValueFromVector<uint32_t>(mFdp, kSamplingRates),
+ AUDIO_OUTPUT_FLAG_NONE);
+ } else if (audio_is_input_device(type)) {
+ RecordingActivityTracker tracker;
+ getInputForAttr({}, tracker.getRiid(), &routedPortId,
+ getValueFromVector<audio_format_t>(mFdp, kAudioFormats),
+ getValueFromVector<audio_channel_mask_t>(mFdp, kAudioChannelInMasks),
+ getValueFromVector<uint32_t>(mFdp, kSamplingRates),
+ AUDIO_INPUT_FLAG_NONE);
+ }
+ }
+}
+
+void AudioPolicyManagerFuzzerDeviceConnection::process() {
+ if (initialize()) {
+ setDeviceConnectionState();
+ explicitlyRoutingAfterConnection();
+ fuzzPatchCreation();
+ }
+}
+
+class AudioPolicyManagerTVFuzzer : public AudioPolicyManagerFuzzerWithConfigurationFile {
+ public:
+ explicit AudioPolicyManagerTVFuzzer(FuzzedDataProvider *fdp)
+ : AudioPolicyManagerFuzzerWithConfigurationFile(fdp){};
+ void process() override;
+
+ protected:
+ std::string getConfigFile() override;
+ void testHDMIPortSelection(audio_output_flags_t flags);
+
+ static inline const std::string sTvConfig =
+ AudioPolicyManagerTVFuzzer::sExecutableDir + "data/test_tv_apm_configuration.xml";
+};
+
+std::string AudioPolicyManagerTVFuzzer::getConfigFile() { return fuzzXML(sTvConfig); }
+
+void AudioPolicyManagerTVFuzzer::testHDMIPortSelection(audio_output_flags_t flags) {
+ audio_devices_t audioDevice = getValueFromVector<audio_devices_t>(mFdp, kAudioDevices);
+ audio_format_t audioFormat = getValueFromVector<audio_format_t>(mFdp, kAudioFormats);
+ status_t ret = mManager->setDeviceConnectionState(
+ audioDevice, AUDIO_POLICY_DEVICE_STATE_AVAILABLE, "" /*address*/, "" /*name*/, audioFormat);
+ if (ret != NO_ERROR) {
+ return;
+ }
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ getOutputForAttr(&selectedDeviceId, getValueFromVector<audio_format_t>(mFdp, kAudioFormats),
+ getValueFromVector<audio_channel_mask_t>(mFdp, kAudioChannelOutMasks),
+ getValueFromVector<uint32_t>(mFdp, kSamplingRates), flags, &output, &portId);
+ sp<SwAudioOutputDescriptor> outDesc = mManager->getOutputs().valueFor(output);
+ if (outDesc.get() == nullptr) {
+ return;
+ }
+ audio_port_v7 port = {};
+ outDesc->toAudioPort(&port);
+ mManager->releaseOutput(portId);
+ mManager->setDeviceConnectionState(audioDevice, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ "" /*address*/, "" /*name*/, audioFormat);
+}
+
+void AudioPolicyManagerTVFuzzer::process() {
+ if (initialize()) {
+ testHDMIPortSelection(getValueFromVector<audio_output_flags_t>(mFdp, kAudioOutputFlags));
+ fuzzPatchCreation();
+ }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+ if (size < 1) {
+ return 0;
+ }
+ FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+ while (fdp.remaining_bytes() > 0) {
+ AudioPolicyManagerFuzzer audioPolicyManagerFuzzer(&fdp);
+ audioPolicyManagerFuzzer.process();
+
+ AudioPolicyManagerFuzzerMsd audioPolicyManagerFuzzerMsd(&fdp);
+ audioPolicyManagerFuzzerMsd.process();
+
+ AudioPolicyManagerFuzzerWithConfigurationFile audioPolicyManagerFuzzerWithConfigurationFile(
+ &fdp);
+ audioPolicyManagerFuzzerWithConfigurationFile.process();
+
+ AudioPolicyManagerFuzzerDynamicPolicy audioPolicyManagerFuzzerDynamicPolicy(&fdp);
+ audioPolicyManagerFuzzerDynamicPolicy.process();
+
+ AudioPolicyManagerFuzzerDPNoRemoteSubmixModule
+ audioPolicyManagerFuzzerDPNoRemoteSubmixModule(&fdp);
+ audioPolicyManagerFuzzerDPNoRemoteSubmixModule.process();
+
+ AudioPolicyManagerFuzzerDPPlaybackReRouting audioPolicyManagerFuzzerDPPlaybackReRouting(
+ &fdp);
+ audioPolicyManagerFuzzerDPPlaybackReRouting.process();
+
+ AudioPolicyManagerFuzzerDPMixRecordInjection audioPolicyManagerFuzzerDPMixRecordInjection(
+ &fdp);
+ audioPolicyManagerFuzzerDPMixRecordInjection.process();
+
+ AudioPolicyManagerFuzzerDeviceConnection audioPolicyManagerFuzzerDeviceConnection(&fdp);
+ audioPolicyManagerFuzzerDeviceConnection.process();
+
+ AudioPolicyManagerTVFuzzer audioPolicyManagerTVFuzzer(&fdp);
+ audioPolicyManagerTVFuzzer.process();
+ }
+ return 0;
+}
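
Each scenario above follows the same pattern: the constructor takes a FuzzedDataProvider, process() gates all fuzzing on initialize(), and subclasses only override getConfigFile() or initialize() to change the configuration under test. As a rough sketch (not part of this change; the class name and config path below are hypothetical), a further scenario could be wired in like this and instantiated in LLVMFuzzerTestOneInput alongside the others:

class AudioPolicyManagerFuzzerCustomConfig
    : public AudioPolicyManagerFuzzerWithConfigurationFile {
  public:
    explicit AudioPolicyManagerFuzzerCustomConfig(FuzzedDataProvider *fdp)
        : AudioPolicyManagerFuzzerWithConfigurationFile(fdp) {}
    void process() override {
        if (initialize()) {       // base class builds the APM from the fuzzed XML
            fuzzPatchCreation();  // reuse the shared fuzzing helpers
        }
    }

  protected:
    std::string getConfigFile() override { return fuzzXML(sCustomConfig); }
    // Hypothetical resource file; any config shipped next to the binary works.
    static inline const std::string sCustomConfig =
        sExecutableDir + "data/test_custom_configuration.xml";
};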
diff --git a/services/audiopolicy/fuzzer/resources/Android.bp b/services/audiopolicy/fuzzer/resources/Android.bp
new file mode 100644
index 0000000..f1e3a51
--- /dev/null
+++ b/services/audiopolicy/fuzzer/resources/Android.bp
@@ -0,0 +1,27 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ ******************************************************************************/
+
+filegroup {
+ name: "audiopolicyfuzzer_configuration_files",
+ srcs: [
+ "test_audio_policy_configuration.xml",
+ "test_audio_policy_msd_configuration.xml",
+ "test_audio_policy_primary_only_configuration.xml",
+ "test_tv_apm_configuration.xml",
+ ],
+}
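
The filegroup bundles the four test configurations so they can be installed next to the fuzzer binary; presumably the fuzzer's own build rule pulls them in as test data (for example via a `data: [":audiopolicyfuzzer_configuration_files"]` entry), which is what makes the `sExecutableDir + "data/..."` paths used by the scenario classes resolve at runtime.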
diff --git a/services/audiopolicy/fuzzer/resources/test_audio_policy_configuration.xml b/services/audiopolicy/fuzzer/resources/test_audio_policy_configuration.xml
new file mode 100644
index 0000000..7e26c33
--- /dev/null
+++ b/services/audiopolicy/fuzzer/resources/test_audio_policy_configuration.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <globalConfiguration speaker_drc_enabled="true"/>
+
+ <modules>
+ <!-- Primary module -->
+ <module name="primary" halVersion="2.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000"
+ channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ <mixPort name="mixport_bt_hfp_output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="mixport_bt_hfp_input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,16000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_MONO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
+ </devicePort>
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ </devicePort>
+ <devicePort tagName="Hdmi" type="AUDIO_DEVICE_OUT_HDMI" role="sink">
+ </devicePort>
+ <devicePort tagName="Hdmi-In Mic" type="AUDIO_DEVICE_IN_HDMI" role="source">
+ </devicePort>
+ <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO"
+ role="sink" address="hfp_client_out">
+ </devicePort>
+ <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET"
+ role="source" address="hfp_client_in">
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Speaker"
+ sources="primary output"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic,Hdmi-In Mic"/>
+ <route type="mix" sink="Hdmi"
+ sources="primary output"/>
+ <route type="mix" sink="BT SCO"
+ sources="mixport_bt_hfp_output"/>
+ <route type="mix" sink="mixport_bt_hfp_input"
+ sources="BT SCO Headset Mic"/>
+ </routes>
+ </module>
+
+ <!-- Remote Submix module -->
+ <module name="r_submix" halVersion="2.0">
+ <attachedDevices>
+ <item>Remote Submix In</item>
+ </attachedDevices>
+ <mixPorts>
+ <mixPort name="r_submix output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="r_submix input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Remote Submix Out" type="AUDIO_DEVICE_OUT_REMOTE_SUBMIX" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="Remote Submix In" type="AUDIO_DEVICE_IN_REMOTE_SUBMIX" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Remote Submix Out"
+ sources="r_submix output"/>
+ <route type="mix" sink="r_submix input"
+ sources="Remote Submix In"/>
+ </routes>
+ </module>
+ </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/fuzzer/resources/test_audio_policy_msd_configuration.xml b/services/audiopolicy/fuzzer/resources/test_audio_policy_msd_configuration.xml
new file mode 100644
index 0000000..5248d79
--- /dev/null
+++ b/services/audiopolicy/fuzzer/resources/test_audio_policy_msd_configuration.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <globalConfiguration speaker_drc_enabled="true"/>
+
+ <modules>
+ <module name="msd" halVersion="2.0">
+ <attachedDevices>
+ <item>MS12 Input</item>
+ <item>MS12 Output</item>
+ </attachedDevices>
+ <mixPorts>
+ <mixPort name="ms12 input" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="ms12 compressed input" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO|AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO|AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_5POINT1|AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3_JOC"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO|AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_5POINT1|AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_AC4"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO|AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_5POINT1|AUDIO_CHANNEL_OUT_7POINT1"/>
+ </mixPort>
+ <!-- The HW AV Sync flag is not required, but is recommended -->
+ <mixPort name="ms12 output" role="sink" flags="AUDIO_INPUT_FLAG_HW_AV_SYNC|AUDIO_INPUT_FLAG_DIRECT">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="MS12 Input" type="AUDIO_DEVICE_OUT_BUS" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO|AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO|AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_5POINT1|AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3_JOC"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO|AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_5POINT1|AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_AC4"
+ samplingRates="32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO|AUDIO_CHANNEL_OUT_STEREO|AUDIO_CHANNEL_OUT_5POINT1|AUDIO_CHANNEL_OUT_7POINT1"/>
+ </devicePort>
+ <devicePort tagName="MS12 Output" type="AUDIO_DEVICE_IN_BUS" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="MS12 Input" sources="ms12 input,ms12 compressed input"/>
+ <route type="mix" sink="ms12 output" sources="MS12 Output"/>
+ </routes>
+ </module>
+ </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/fuzzer/resources/test_audio_policy_primary_only_configuration.xml b/services/audiopolicy/fuzzer/resources/test_audio_policy_primary_only_configuration.xml
new file mode 100644
index 0000000..15e3773
--- /dev/null
+++ b/services/audiopolicy/fuzzer/resources/test_audio_policy_primary_only_configuration.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <globalConfiguration speaker_drc_enabled="true"/>
+
+ <modules>
+ <!-- Primary module -->
+ <module name="primary" halVersion="2.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000"
+ channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
+ </devicePort>
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Speaker"
+ sources="primary output"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic"/>
+ </routes>
+ </module>
+ </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/fuzzer/resources/test_tv_apm_configuration.xml b/services/audiopolicy/fuzzer/resources/test_tv_apm_configuration.xml
new file mode 100644
index 0000000..658d3ce
--- /dev/null
+++ b/services/audiopolicy/fuzzer/resources/test_tv_apm_configuration.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <globalConfiguration speaker_drc_enabled="false"/>
+ <modules>
+ <module name="primary" halVersion="2.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <!-- Profiles on the HDMI port are explicit for simplicity. In reality they are dynamic -->
+ <!-- Note: ports are intentionally arranged from more specific to less
+ specific in order to test b/140447125 for HW AV Sync, and similar "explicit matches" -->
+ <mixPort name="tunnel" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="low latency" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_MMAP_NOIRQ">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="direct" role="source" flags="AUDIO_OUTPUT_FLAG_DIRECT">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink" />
+ <devicePort tagName="Out Aux Digital" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink" />
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Speaker" sources="primary output"/>
+ <route type="mix" sink="Out Aux Digital" sources="primary output,tunnel,direct,low latency"/>
+ </routes>
+ </module>
+ </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index c0a8f9d..e721a78 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1064,8 +1064,7 @@
*output = AUDIO_IO_HANDLE_NONE;
if (!msdDevices.isEmpty()) {
*output = getOutputForDevices(msdDevices, session, *stream, config, flags);
- sp<DeviceDescriptor> device = outputDevices.isEmpty() ? nullptr : outputDevices.itemAt(0);
- if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(device) == NO_ERROR) {
+ if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatches(&outputDevices) == NO_ERROR) {
ALOGV("%s() Using MSD devices %s instead of devices %s",
__func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
} else {
@@ -1081,6 +1080,12 @@
}
*selectedDeviceId = getFirstDeviceId(outputDevices);
+ for (auto &outputDevice : outputDevices) {
+ if (outputDevice->getId() == getConfig().getDefaultOutputDevice()->getId()) {
+ *selectedDeviceId = outputDevice->getId();
+ break;
+ }
+ }
if (outputDevices.onlyContainsDevicesWithType(AUDIO_DEVICE_OUT_TELEPHONY_TX)) {
*outputType = API_OUTPUT_TELEPHONY_TX;
@@ -1223,24 +1228,9 @@
sp<SwAudioOutputDescriptor> outputDesc =
new SwAudioOutputDescriptor(profile, mpClientInterface);
- String8 address = getFirstDeviceAddress(devices);
-
- // MSD patch may be using the only output stream that can service this request. Release
- // MSD patch to prioritize this request over any active output on MSD.
- AudioPatchCollection msdPatches = getMsdPatches();
- for (size_t i = 0; i < msdPatches.size(); i++) {
- const auto& patch = msdPatches[i];
- for (size_t j = 0; j < patch->mPatch.num_sinks; ++j) {
- const struct audio_port_config *sink = &patch->mPatch.sinks[j];
- if (sink->type == AUDIO_PORT_TYPE_DEVICE &&
- devices.containsDeviceWithType(sink->ext.device.type) &&
- (address.isEmpty() || strncmp(sink->ext.device.address, address.string(),
- AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
- releaseAudioPatch(patch->getHandle(), mUidCached);
- break;
- }
- }
- }
+ // An MSD patch may be using the only output stream that can service this request. Release
+ // all MSD patches to prioritize this request over any active output on MSD.
+ releaseMsdPatches(devices);
status_t status = outputDesc->open(config, devices, stream, flags, output);
@@ -1414,7 +1404,8 @@
}
AudioProfileVector deviceProfiles;
for (const auto &outProfile : outputProfiles) {
- if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0)) {
+ if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) &&
+ outProfile->supportsDevice(outputDevice)) {
appendAudioProfiles(deviceProfiles, outProfile->getAudioProfiles());
}
}
@@ -1482,40 +1473,85 @@
return patchBuilder;
}
-status_t AudioPolicyManager::setMsdPatch(const sp<DeviceDescriptor> &outputDevice) {
- sp<DeviceDescriptor> device = outputDevice;
- if (device == nullptr) {
+status_t AudioPolicyManager::setMsdPatches(const DeviceVector *outputDevices) {
+ DeviceVector devices;
+ if (outputDevices != nullptr && outputDevices->size() > 0) {
+ devices.add(*outputDevices);
+ } else {
// Use media strategy for unspecified output device. This should only
// occur on checkForDeviceAndOutputChanges(). Device connection events may
// therefore invalidate explicit routing requests.
- DeviceVector devices = mEngine->getOutputDevicesForAttributes(
+ devices = mEngine->getOutputDevicesForAttributes(
attributes_initializer(AUDIO_USAGE_MEDIA), nullptr, false /*fromCache*/);
- LOG_ALWAYS_FATAL_IF(devices.isEmpty(), "no outpudevice to set Msd Patch");
- device = devices.itemAt(0);
+ LOG_ALWAYS_FATAL_IF(devices.isEmpty(), "no output device to set MSD patch");
}
- ALOGV("%s() for device %s", __func__, device->toString().c_str());
- PatchBuilder patchBuilder = buildMsdPatch(device);
- const struct audio_patch* patch = patchBuilder.patch();
- const AudioPatchCollection msdPatches = getMsdPatches();
- if (!msdPatches.isEmpty()) {
- LOG_ALWAYS_FATAL_IF(msdPatches.size() > 1,
- "The current MSD prototype only supports one output patch");
- sp<AudioPatch> currentPatch = msdPatches.valueAt(0);
- if (audio_patches_are_equal(¤tPatch->mPatch, patch)) {
- return NO_ERROR;
+ std::vector<PatchBuilder> patchesToCreate;
+ for (auto i = 0u; i < devices.size(); ++i) {
+ ALOGV("%s() for device %s", __func__, devices[i]->toString().c_str());
+ patchesToCreate.push_back(buildMsdPatch(devices[i]));
+ }
+ // Retain only the MSD patches associated with the requested output devices.
+ // Tear down the others, and create new ones as needed.
+ AudioPatchCollection patchesToRemove = getMsdPatches();
+ for (auto it = patchesToCreate.begin(); it != patchesToCreate.end(); ) {
+ auto retainedPatch = false;
+ for (auto i = 0u; i < patchesToRemove.size(); ++i) {
+ if (audio_patches_are_equal(it->patch(), &patchesToRemove[i]->mPatch)) {
+ patchesToRemove.removeItemsAt(i);
+ retainedPatch = true;
+ break;
+ }
}
+ if (retainedPatch) {
+ it = patchesToCreate.erase(it);
+ continue;
+ }
+ ++it;
+ }
+ if (patchesToCreate.size() == 0 && patchesToRemove.size() == 0) {
+ return NO_ERROR;
+ }
+ for (auto i = 0u; i < patchesToRemove.size(); ++i) {
+ auto ¤tPatch = patchesToRemove.valueAt(i);
releaseAudioPatch(currentPatch->getHandle(), mUidCached);
}
- status_t status = installPatch(__func__, -1 /*index*/, nullptr /*patchHandle*/,
- patch, 0 /*delayMs*/, mUidCached, nullptr /*patchDescPtr*/);
- ALOGE_IF(status != NO_ERROR, "%s() error %d creating MSD audio patch", __func__, status);
- ALOGI_IF(status == NO_ERROR, "%s() Patch created from MSD_IN to "
- "device:%s (format:%#x channels:%#x samplerate:%d)", __func__,
- device->toString().c_str(), patch->sources[0].format,
- patch->sources[0].channel_mask, patch->sources[0].sample_rate);
+ status_t status = NO_ERROR;
+ for (const auto &p : patchesToCreate) {
+ auto currStatus = installPatch(__func__, -1 /*index*/, nullptr /*patchHandle*/,
+ p.patch(), 0 /*delayMs*/, mUidCached, nullptr /*patchDescPtr*/);
+ char message[256];
+ snprintf(message, sizeof(message), "%s() %s: creating MSD patch from device:IN_BUS to "
+ "device:%#x (format:%#x channels:%#x samplerate:%d)", __func__,
+ currStatus == NO_ERROR ? "Success" : "Error",
+ p.patch()->sinks[0].ext.device.type, p.patch()->sources[0].format,
+ p.patch()->sources[0].channel_mask, p.patch()->sources[0].sample_rate);
+ if (currStatus == NO_ERROR) {
+ ALOGD("%s", message);
+ } else {
+ ALOGE("%s", message);
+ if (status == NO_ERROR) {
+ status = currStatus;
+ }
+ }
+ }
return status;
}
+void AudioPolicyManager::releaseMsdPatches(const DeviceVector& devices) {
+ AudioPatchCollection msdPatches = getMsdPatches();
+ for (size_t i = 0; i < msdPatches.size(); i++) {
+ const auto& patch = msdPatches[i];
+ for (size_t j = 0; j < patch->mPatch.num_sinks; ++j) {
+ const struct audio_port_config *sink = &patch->mPatch.sinks[j];
+ if (sink->type == AUDIO_PORT_TYPE_DEVICE && devices.getDevice(sink->ext.device.type,
+ String8(sink->ext.device.address), AUDIO_FORMAT_DEFAULT) != nullptr) {
+ releaseAudioPatch(patch->getHandle(), mUidCached);
+ break;
+ }
+ }
+ }
+}
+
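Concretely, the retain/tear-down/create logic works like a small set reconciliation: if the request is for, say, {HDMI, SPDIF} and an MSD patch to HDMI already exists, that patch is matched by audio_patches_are_equal() and kept (dropped from both patchesToRemove and patchesToCreate), the missing SPDIF patch is installed, and any leftover patch to a device that is no longer requested is released. (The device names here are only an illustration.)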
audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
audio_output_flags_t flags,
audio_format_t format,
@@ -5319,8 +5355,13 @@
}
}
if (!directOutputOpen) {
- ALOGV("no direct outputs open, reset MSD patch");
- setMsdPatch();
+ ALOGV("no direct outputs open, reset MSD patches");
+ // TODO: The MSD patches to be established here may differ from the current MSD patches due to
+ // how output devices for patching are resolved. Avoid this by caching and reusing the
+ // arguments to mEngine->getOutputDevicesForAttributes() when resolving which output
+ // devices to patch to. This may be complicated by the fact that devices may become
+ // unavailable.
+ setMsdPatches();
}
}
}
@@ -5387,7 +5428,13 @@
if (onOutputsChecked != nullptr && onOutputsChecked()) checkA2dpSuspend();
updateDevicesAndOutputs();
if (mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD) != 0) {
- setMsdPatch();
+ // TODO: The MSD patches to be established here may differ from the current MSD patches due to how
+ // output devices for patching are resolved. Nevertheless, AudioTracks affected by device
+ // configuration changes will ultimately be rerouted correctly. We can still avoid
+ // unnecessary rerouting by caching and reusing the arguments to
+ // mEngine->getOutputDevicesForAttributes() when resolving which output devices to patch to.
+ // This may be complicated by the fact that devices may become unavailable.
+ setMsdPatches();
}
// an event that changed routing likely occurred, inform upper layers
mpClientInterface->onRoutingUpdated();
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 3c55b63..d3ceb1b 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -848,13 +848,6 @@
// end point.
audio_port_handle_t mCallRxSourceClientPort = AUDIO_PORT_HANDLE_NONE;
-private:
- void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
-
- // Add or remove AC3 DTS encodings based on user preferences.
- void modifySurroundFormats(const sp<DeviceDescriptor>& devDesc, FormatVector *formatsPtr);
- void modifySurroundChannelMasks(ChannelMaskSet *channelMasksPtr);
-
// Support for Multi-Stream Decoder (MSD) module
sp<DeviceDescriptor> getMsdAudioInDevice() const;
DeviceVector getMsdAudioOutDevices() const;
@@ -864,7 +857,14 @@
audio_port_config *sourceConfig,
audio_port_config *sinkConfig) const;
PatchBuilder buildMsdPatch(const sp<DeviceDescriptor> &outputDevice) const;
- status_t setMsdPatch(const sp<DeviceDescriptor> &outputDevice = nullptr);
+ status_t setMsdPatches(const DeviceVector *outputDevices = nullptr);
+ void releaseMsdPatches(const DeviceVector& devices);
+private:
+ void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
+
+ // Add or remove AC3 DTS encodings based on user preferences.
+ void modifySurroundFormats(const sp<DeviceDescriptor>& devDesc, FormatVector *formatsPtr);
+ void modifySurroundChannelMasks(ChannelMaskSet *channelMasksPtr);
// If any, resolve any "dynamic" fields of an Audio Profiles collection
void updateAudioProfiles(const sp<DeviceDescriptor>& devDesc, audio_io_handle_t ioHandle,
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index fed88a4..caf7309 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -280,6 +280,7 @@
int32_t uidAidl,
const media::AudioConfig& configAidl,
int32_t flagsAidl,
+ int32_t selectedDeviceIdAidl,
media::GetOutputForAttrResponse* _aidl_return)
{
audio_attributes_t attr = VALUE_OR_RETURN_BINDER_STATUS(
@@ -293,8 +294,10 @@
aidl2legacy_AudioConfig_audio_config_t(configAidl));
audio_output_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_output_flags_t_mask(flagsAidl));
+ audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
+ aidl2legacy_int32_t_audio_port_handle_t(selectedDeviceIdAidl));
+
audio_io_handle_t output;
- audio_port_handle_t selectedDeviceId;
audio_port_handle_t portId;
std::vector<audio_io_handle_t> secondaryOutputs;
@@ -504,6 +507,7 @@
const std::string& opPackageNameAidl,
const media::AudioConfigBase& configAidl,
int32_t flagsAidl,
+ int32_t selectedDeviceIdAidl,
media::GetInputForAttrResponse* _aidl_return) {
audio_attributes_t attr = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl));
@@ -521,7 +525,9 @@
aidl2legacy_AudioConfigBase_audio_config_base_t(configAidl));
audio_input_flags_t flags = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_input_flags_t_mask(flagsAidl));
- audio_port_handle_t selectedDeviceId;
+ audio_port_handle_t selectedDeviceId = VALUE_OR_RETURN_BINDER_STATUS(
+ aidl2legacy_int32_t_audio_port_handle_t(selectedDeviceIdAidl));
+
audio_port_handle_t portId;
if (mAudioPolicyManager == NULL) {
@@ -590,7 +596,14 @@
bool canCaptureHotword = captureHotwordAllowed(opPackageName, pid, uid);
if ((inputSource == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
- return binderStatusFromStatusT(BAD_VALUE);
+ return binderStatusFromStatusT(PERMISSION_DENIED);
+ }
+
+ if (((flags & AUDIO_INPUT_FLAG_HW_HOTWORD) != 0)
+ && !canCaptureHotword) {
+ ALOGE("%s: permission denied: hotword mode not allowed"
+ " for uid %d pid %d", __func__, uid, pid);
+ return binderStatusFromStatusT(PERMISSION_DENIED);
}
sp<AudioPolicyEffects>audioPolicyEffects;
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index c22ed9b..72d8f28 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -81,7 +81,7 @@
binder::Status getOutput(media::AudioStreamType stream, int32_t* _aidl_return) override;
binder::Status getOutputForAttr(const media::AudioAttributesInternal& attr, int32_t session,
int32_t pid, int32_t uid, const media::AudioConfig& config,
- int32_t flags,
+ int32_t flags, int32_t selectedDeviceId,
media::GetOutputForAttrResponse* _aidl_return) override;
binder::Status startOutput(int32_t portId) override;
binder::Status stopOutput(int32_t portId) override;
@@ -90,6 +90,7 @@
int32_t riid, int32_t session, int32_t pid, int32_t uid,
const std::string& opPackageName,
const media::AudioConfigBase& config, int32_t flags,
+ int32_t selectedDeviceId,
media::GetInputForAttrResponse* _aidl_return) override;
binder::Status startInput(int32_t portId) override;
binder::Status stopInput(int32_t portId) override;
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index be860e5..ea95364 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -29,6 +29,8 @@
using AudioPolicyManager::getOutputs;
using AudioPolicyManager::getAvailableOutputDevices;
using AudioPolicyManager::getAvailableInputDevices;
+ using AudioPolicyManager::releaseMsdPatches;
+ using AudioPolicyManager::setMsdPatches;
using AudioPolicyManager::setSurroundFormatEnabled;
uint32_t getAudioPortGeneration() const { return mAudioPortGeneration; }
};
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 3032589..684358f 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -349,7 +349,17 @@
// TODO: Add patch creation tests that involve already existing patch
-class AudioPolicyManagerTestMsd : public AudioPolicyManagerTest {
+enum
+{
+ MSD_AUDIO_PATCH_COUNT_NUM_AUDIO_PATCHES_INDEX = 0,
+ MSD_AUDIO_PATCH_COUNT_NAME_INDEX = 1
+};
+using MsdAudioPatchCountSpecification = std::tuple<size_t, std::string>;
+
+class AudioPolicyManagerTestMsd : public AudioPolicyManagerTest,
+ public ::testing::WithParamInterface<MsdAudioPatchCountSpecification> {
+ public:
+ AudioPolicyManagerTestMsd();
protected:
void SetUpManagerConfig() override;
void TearDown() override;
@@ -357,8 +367,26 @@
sp<DeviceDescriptor> mMsdOutputDevice;
sp<DeviceDescriptor> mMsdInputDevice;
sp<DeviceDescriptor> mDefaultOutputDevice;
+
+ const size_t mExpectedAudioPatchCount;
+ sp<DeviceDescriptor> mSpdifDevice;
};
+AudioPolicyManagerTestMsd::AudioPolicyManagerTestMsd()
+ : mExpectedAudioPatchCount(std::get<MSD_AUDIO_PATCH_COUNT_NUM_AUDIO_PATCHES_INDEX>(
+ GetParam())) {}
+
+INSTANTIATE_TEST_CASE_P(
+ MsdAudioPatchCount,
+ AudioPolicyManagerTestMsd,
+ ::testing::Values(
+ MsdAudioPatchCountSpecification(1u, "single"),
+ MsdAudioPatchCountSpecification(2u, "dual")
+ ),
+ [](const ::testing::TestParamInfo<MsdAudioPatchCountSpecification> &info) {
+ return std::get<MSD_AUDIO_PATCH_COUNT_NAME_INDEX>(info.param); }
+);
+
void AudioPolicyManagerTestMsd::SetUpManagerConfig() {
// TODO: Consider using Serializer to load part of the config from a string.
AudioPolicyManagerTest::SetUpManagerConfig();
@@ -378,6 +406,19 @@
config.addDevice(mMsdOutputDevice);
config.addDevice(mMsdInputDevice);
+ if (mExpectedAudioPatchCount == 2) {
+ // Add SPDIF device with PCM output profile as a second device for dual MSD audio patching.
+ mSpdifDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPDIF);
+ mSpdifDevice->addAudioProfile(pcmOutputProfile);
+ config.addDevice(mSpdifDevice);
+
+ sp<OutputProfile> spdifOutputProfile = new OutputProfile("spdif output");
+ spdifOutputProfile->addAudioProfile(pcmOutputProfile);
+ spdifOutputProfile->addSupportedDevice(mSpdifDevice);
+ config.getHwModules().getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY)->
+ addOutputProfile(spdifOutputProfile);
+ }
+
sp<HwModule> msdModule = new HwModule(AUDIO_HARDWARE_MODULE_ID_MSD, 2 /*halVersionMajor*/);
HwModuleCollection modules = config.getHwModules();
modules.add(msdModule);
@@ -413,64 +454,88 @@
addOutputProfile(primaryEncodedOutputProfile);
mDefaultOutputDevice = config.getDefaultOutputDevice();
+ if (mExpectedAudioPatchCount == 2) {
+ mSpdifDevice->addAudioProfile(dtsOutputProfile);
+ primaryEncodedOutputProfile->addSupportedDevice(mSpdifDevice);
+ }
}
void AudioPolicyManagerTestMsd::TearDown() {
mMsdOutputDevice.clear();
mMsdInputDevice.clear();
mDefaultOutputDevice.clear();
+ mSpdifDevice.clear();
AudioPolicyManagerTest::TearDown();
}
-TEST_F(AudioPolicyManagerTestMsd, InitSuccess) {
+TEST_P(AudioPolicyManagerTestMsd, InitSuccess) {
ASSERT_TRUE(mMsdOutputDevice);
ASSERT_TRUE(mMsdInputDevice);
ASSERT_TRUE(mDefaultOutputDevice);
}
-TEST_F(AudioPolicyManagerTestMsd, Dump) {
+TEST_P(AudioPolicyManagerTestMsd, Dump) {
dumpToLog();
}
-TEST_F(AudioPolicyManagerTestMsd, PatchCreationOnSetForceUse) {
+TEST_P(AudioPolicyManagerTestMsd, PatchCreationOnSetForceUse) {
const PatchCountCheck patchCount = snapshotPatchCount();
mManager->setForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND,
AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
- ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+ ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
}
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedRoutesToMsd) {
+TEST_P(AudioPolicyManagerTestMsd, PatchCreationSetReleaseMsdPatches) {
+ const PatchCountCheck patchCount = snapshotPatchCount();
+ DeviceVector devices = mManager->getAvailableOutputDevices();
+ // Remove MSD output device to avoid patching to itself
+ devices.remove(mMsdOutputDevice);
+ ASSERT_EQ(mExpectedAudioPatchCount, devices.size());
+ mManager->setMsdPatches(&devices);
+ ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
+ // Dual patch: exercise creating one new audio patch and reusing another existing audio patch.
+ DeviceVector singleDevice(devices[0]);
+ mManager->releaseMsdPatches(singleDevice);
+ ASSERT_EQ(mExpectedAudioPatchCount - 1, patchCount.deltaFromSnapshot());
+ mManager->setMsdPatches(&devices);
+ ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
+ mManager->releaseMsdPatches(devices);
+ ASSERT_EQ(0, patchCount.deltaFromSnapshot());
+}
+
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedRoutesToMsd) {
const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
- ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+ ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
}
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrPcmRoutesToMsd) {
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrPcmRoutesToMsd) {
const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
- ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+ ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
}
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedPlusPcmRoutesToMsd) {
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedPlusPcmRoutesToMsd) {
const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
- ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+ ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
+ selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
- ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+ ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
}
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrUnsupportedFormatBypassesMsd) {
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrUnsupportedFormatBypassesMsd) {
const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
@@ -479,7 +544,7 @@
ASSERT_EQ(0, patchCount.deltaFromSnapshot());
}
-TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrFormatSwitching) {
+TEST_P(AudioPolicyManagerTestMsd, GetOutputForAttrFormatSwitching) {
// Switch between formats that are supported and not supported by MSD.
{
const PatchCountCheck patchCount = snapshotPatchCount();
@@ -489,9 +554,9 @@
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
nullptr /*output*/, &portId);
ASSERT_EQ(selectedDeviceId, mDefaultOutputDevice->getId());
- ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+ ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
mManager->releaseOutput(portId);
- ASSERT_EQ(1, patchCount.deltaFromSnapshot());
+ ASSERT_EQ(mExpectedAudioPatchCount, patchCount.deltaFromSnapshot());
}
{
const PatchCountCheck patchCount = snapshotPatchCount();
@@ -501,7 +566,7 @@
AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
nullptr /*output*/, &portId);
ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
- ASSERT_EQ(-1, patchCount.deltaFromSnapshot());
+ ASSERT_EQ(-static_cast<int>(mExpectedAudioPatchCount), patchCount.deltaFromSnapshot());
mManager->releaseOutput(portId);
ASSERT_EQ(0, patchCount.deltaFromSnapshot());
}
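
With the new parameterization, each MSD test runs once per MsdAudioPatchCountSpecification, and gtest derives the test names from the instantiation prefix and the name generator, e.g. MsdAudioPatchCount/AudioPolicyManagerTestMsd.PatchCreationSetReleaseMsdPatches/dual. A single variant can therefore be selected when debugging with a filter such as --gtest_filter='MsdAudioPatchCount/AudioPolicyManagerTestMsd.*/single' (the exact test binary name is not shown here).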
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index b4c0da3..641e463 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -237,6 +237,10 @@
VendorTagDescriptor::clearGlobalVendorTagDescriptor();
mUidPolicy->unregisterSelf();
mSensorPrivacyPolicy->unregisterSelf();
+
+ for (auto const& [_, policy] : mCameraSensorPrivacyPolicies) {
+ policy->unregisterSelf();
+ }
}
void CameraService::onNewProviderRegistered() {
@@ -1686,6 +1690,12 @@
client->setRotateAndCropOverride(mOverrideRotateAndCropMode);
}
+ // Set camera muting behavior
+ if (client->supportsCameraMute()) {
+ client->setCameraMute(mOverrideCameraMuteMode ||
+ isUserSensorPrivacyEnabledForUid(clientUid));
+ }
+
if (shimUpdateOnly) {
// If only updating legacy shim parameters, immediately disconnect client
mServiceLock.unlock();
@@ -3212,6 +3222,39 @@
}
}
+status_t CameraService::SensorPrivacyPolicy::registerSelfForIndividual(int userId) {
+ Mutex::Autolock _l(mSensorPrivacyLock);
+ if (mRegistered) {
+ return OK;
+ }
+
+ status_t res = mSpm.addIndividualSensorPrivacyListener(userId,
+ SensorPrivacyManager::INDIVIDUAL_SENSOR_CAMERA, this);
+ if (res != OK) {
+ ALOGE("Unable to register camera privacy listener: %s (%d)", strerror(-res), res);
+ return res;
+ }
+
+ res = mSpm.isIndividualSensorPrivacyEnabled(userId,
+ SensorPrivacyManager::INDIVIDUAL_SENSOR_CAMERA, mSensorPrivacyEnabled);
+ if (res != OK) {
+ ALOGE("Unable to check camera privacy: %s (%d)", strerror(-res), res);
+ return res;
+ }
+
+ res = mSpm.linkToDeath(this);
+ if (res != OK) {
+ ALOGE("Register link to death failed for sensor privacy: %s (%d)", strerror(-res), res);
+ return res;
+ }
+
+ mRegistered = true;
+ mIsIndividual = true;
+ mUserId = userId;
+ ALOGV("SensorPrivacyPolicy: Registered with SensorPrivacyManager");
+ return OK;
+}
+
void CameraService::SensorPrivacyPolicy::unregisterSelf() {
Mutex::Autolock _l(mSensorPrivacyLock);
mSpm.removeSensorPrivacyListener(this);
@@ -3231,10 +3274,14 @@
mSensorPrivacyEnabled = enabled;
}
// if sensor privacy is enabled then block all clients from accessing the camera
- if (enabled) {
- sp<CameraService> service = mService.promote();
- if (service != nullptr) {
- service->blockAllClients();
+ sp<CameraService> service = mService.promote();
+ if (service != nullptr) {
+ if (mIsIndividual) {
+ service->setMuteForAllClients(mUserId, enabled);
+ } else {
+ if (enabled) {
+ service->blockAllClients();
+ }
}
}
return binder::Status::ok();
@@ -3865,6 +3912,19 @@
}
}
+void CameraService::setMuteForAllClients(userid_t userId, bool enabled) {
+ const auto clients = mActiveClientManager.getAll();
+ for (auto& current : clients) {
+ if (current != nullptr) {
+ const auto basicClient = current->getValue();
+ if (basicClient.get() != nullptr
+ && multiuser_get_user_id(basicClient->getClientUid()) == userId) {
+ basicClient->setCameraMute(enabled);
+ }
+ }
+ }
+}
+
// NOTE: This is a remote API - make sure all args are validated
status_t CameraService::shellCommand(int in, int out, int err, const Vector<String16>& args) {
if (!checkCallingPermission(sManageCameraPermission, nullptr, nullptr)) {
@@ -3887,6 +3947,8 @@
return handleSetImageDumpMask(args);
} else if (args.size() >= 1 && args[0] == String16("get-image-dump-mask")) {
return handleGetImageDumpMask(out);
+ } else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) {
+ return handleSetCameraMute(args);
} else if (args.size() == 1 && args[0] == String16("help")) {
printHelp(out);
return NO_ERROR;
@@ -4009,6 +4071,29 @@
return dprintf(out, "Image dump mask: %d\n", mImageDumpMask);
}
+status_t CameraService::handleSetCameraMute(const Vector<String16>& args) {
+ errno = 0;
+ int muteValue = strtol(String8(args[1]), nullptr, 10);
+ if (errno != 0) return BAD_VALUE;
+
+ if (muteValue < 0 || muteValue > 1) return BAD_VALUE;
+ Mutex::Autolock lock(mServiceLock);
+
+ mOverrideCameraMuteMode = (muteValue == 1);
+
+ const auto clients = mActiveClientManager.getAll();
+ for (auto& current : clients) {
+ if (current != nullptr) {
+ const auto basicClient = current->getValue();
+ if (basicClient.get() != nullptr) {
+ if (basicClient->supportsCameraMute()) {
+ basicClient->setCameraMute(mOverrideCameraMuteMode);
+ }
+ }
+ }
+ }
+
+ return OK;
+}
status_t CameraService::printHelp(int out) {
return dprintf(out, "Camera service commands:\n"
@@ -4021,6 +4106,7 @@
" set-image-dump-mask <MASK> specifies the formats to be saved to disk\n"
" Valid values 0=OFF, 1=ON for JPEG\n"
" get-image-dump-mask returns the current image-dump-mask value\n"
+ " set-camera-mute <0/1> enable or disable camera muting\n"
" help print this message\n");
}
@@ -4045,4 +4131,16 @@
return mode;
}
+bool CameraService::isUserSensorPrivacyEnabledForUid(uid_t uid) {
+ userid_t userId = multiuser_get_user_id(uid);
+ if (mCameraSensorPrivacyPolicies.find(userId) == mCameraSensorPrivacyPolicies.end()) {
+ sp<SensorPrivacyPolicy> userPolicy = new SensorPrivacyPolicy(this);
+ if (userPolicy->registerSelfForIndividual(userId) != OK) {
+ return false;
+ }
+ mCameraSensorPrivacyPolicies[userId] = userPolicy;
+ }
+ return mCameraSensorPrivacyPolicies[userId]->isSensorPrivacyEnabled();
+}
+
}; // namespace android
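For context: like the other camera service shell commands above, the new mute override is expected to be driven through the service's shell interface (e.g. adb shell cmd media.camera set-camera-mute 1). The following is a standalone sketch, not service code, of the 0/1 argument validation handleSetCameraMute performs; resetting errno before strtol is what makes the errno check reliable.

    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>

    // Accept only "0" or "1"; reject anything else, including overflow and non-numeric input.
    static bool parseBoolArg(const char* arg, bool* out) {
        errno = 0;
        char* end = nullptr;
        const long v = strtol(arg, &end, 10);
        if (errno != 0 || end == arg || (v != 0 && v != 1)) return false;
        *out = (v == 1);
        return true;
    }

    int main(int argc, char** argv) {
        bool mute = false;
        if (argc < 2 || !parseBoolArg(argv[1], &mute)) {
            fprintf(stderr, "usage: %s <0|1>\n", argv[0]);
            return 1;
        }
        printf("camera mute override: %s\n", mute ? "on" : "off");
        return 0;
    }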
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 43b03e6..dbfc6c3 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -278,6 +278,12 @@
// Override rotate-and-crop AUTO behavior
virtual status_t setRotateAndCropOverride(uint8_t rotateAndCrop) = 0;
+ // Whether the client supports camera muting (black only output)
+ virtual bool supportsCameraMute() = 0;
+
+ // Set/reset camera mute
+ virtual status_t setCameraMute(bool enabled) = 0;
+
protected:
BasicClient(const sp<CameraService>& cameraService,
const sp<IBinder>& remoteCallback,
@@ -631,9 +637,11 @@
public virtual IBinder::DeathRecipient {
public:
explicit SensorPrivacyPolicy(wp<CameraService> service)
- : mService(service), mSensorPrivacyEnabled(false), mRegistered(false) {}
+ : mService(service), mSensorPrivacyEnabled(false), mRegistered(false),
+ mIsIndividual(false), mUserId(0) {}
void registerSelf();
+ status_t registerSelfForIndividual(int userId);
void unregisterSelf();
bool isSensorPrivacyEnabled();
@@ -649,6 +657,8 @@
Mutex mSensorPrivacyLock;
bool mSensorPrivacyEnabled;
bool mRegistered;
+ bool mIsIndividual;
+ userid_t mUserId;
};
sp<UidPolicy> mUidPolicy;
@@ -1023,6 +1033,9 @@
// Blocks all active clients.
void blockAllClients();
+ // Mutes all active clients for a user.
+ void setMuteForAllClients(userid_t userId, bool enabled);
+
// Overrides the UID state as if it is idle
status_t handleSetUidState(const Vector<String16>& args, int err);
@@ -1044,6 +1057,9 @@
// Get the mask for image dump to disk
status_t handleGetImageDumpMask(int out);
+ // Set the camera mute state
+ status_t handleSetCameraMute(const Vector<String16>& args);
+
// Prints the shell command help
status_t printHelp(int out);
@@ -1088,6 +1104,15 @@
// Current image dump mask
uint8_t mImageDumpMask = 0;
+
+ // Current camera mute mode
+ bool mOverrideCameraMuteMode = false;
+
+ // Map from user to sensor privacy policy
+ std::map<userid_t, sp<SensorPrivacyPolicy>> mCameraSensorPrivacyPolicies;
+
+ // Checks if the sensor privacy is enabled for the uid
+ bool isUserSensorPrivacyEnabledForUid(uid_t uid);
};
} // namespace android
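A minimal standalone sketch of the lazy per-user registration that isUserSensorPrivacyEnabledForUid performs against mCameraSensorPrivacyPolicies: create and register a policy on first use for a user, cache it, and treat a registration failure as "privacy not enabled" so the lookup can be retried later. The Policy type below is a stand-in, not the real SensorPrivacyPolicy.

    #include <map>
    #include <memory>

    struct Policy {
        bool enabled = false;
        bool registerSelfForIndividual(int /*userId*/) { return true; }  // stand-in for the binder call
        bool isSensorPrivacyEnabled() const { return enabled; }
    };

    static std::map<int, std::shared_ptr<Policy>> gPolicies;  // userId -> registered policy

    bool isPrivacyEnabledForUser(int userId) {
        auto it = gPolicies.find(userId);
        if (it == gPolicies.end()) {
            auto policy = std::make_shared<Policy>();
            if (!policy->registerSelfForIndividual(userId)) {
                return false;  // registration failed; report "not enabled" and retry next call
            }
            it = gPolicies.emplace(userId, std::move(policy)).first;
        }
        return it->second->isSensorPrivacyEnabled();
    }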
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 2494302..31cfed6 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -935,6 +935,7 @@
return res;
}
+ mCallbackProcessor->unpauseCallback();
params.state = Parameters::PREVIEW;
return OK;
}
@@ -969,6 +970,7 @@
FALLTHROUGH_INTENDED;
case Parameters::RECORD:
case Parameters::PREVIEW:
+ mCallbackProcessor->pauseCallback();
syncWithDevice();
// Due to flush a camera device sync is not a sufficient
// guarantee that the current client parameters are
@@ -2296,6 +2298,14 @@
static_cast<camera_metadata_enum_android_scaler_rotate_and_crop_t>(rotateAndCrop));
}
+bool Camera2Client::supportsCameraMute() {
+ return mDevice->supportsCameraMute();
+}
+
+status_t Camera2Client::setCameraMute(bool enabled) {
+ return mDevice->setCameraMute(enabled);
+}
+
status_t Camera2Client::waitUntilCurrentRequestIdLocked() {
int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
if (activeRequestId != 0) {
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index f8da0b6..4d667e3 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -87,6 +87,9 @@
virtual int32_t getGlobalAudioRestriction();
virtual status_t setRotateAndCropOverride(uint8_t rotateAndCrop);
+ virtual bool supportsCameraMute();
+ virtual status_t setCameraMute(bool enabled);
+
/**
* Interface used by CameraService
*/
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index 744aaee..4c3ded6 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -39,6 +39,7 @@
mDevice(client->getCameraDevice()),
mId(client->getCameraId()),
mCallbackAvailable(false),
+ mCallbackPaused(true),
mCallbackToApp(false),
mCallbackStreamId(NO_STREAM) {
}
@@ -216,6 +217,14 @@
return mCallbackStreamId;
}
+void CallbackProcessor::unpauseCallback() {
+ mCallbackPaused = false;
+}
+
+void CallbackProcessor::pauseCallback() {
+ mCallbackPaused = true;
+}
+
void CallbackProcessor::dump(int /*fd*/, const Vector<String16>& /*args*/) const {
}
@@ -234,7 +243,7 @@
do {
sp<Camera2Client> client = mClient.promote();
- if (client == 0) {
+ if (client == 0 || mCallbackPaused) {
res = discardNewCallback();
} else {
res = processNewCallback(client);
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
index 5231688..a336326 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CALLBACKPROCESSOR_H
#define ANDROID_SERVERS_CAMERA_CAMERA2_CALLBACKPROCESSOR_H
+#include <atomic>
+
#include <utils/Thread.h>
#include <utils/String16.h>
#include <utils/Vector.h>
@@ -52,6 +54,9 @@
status_t deleteStream();
int getStreamId() const;
+ void unpauseCallback();
+ void pauseCallback();
+
void dump(int fd, const Vector<String16>& args) const;
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
@@ -67,6 +72,8 @@
NO_STREAM = -1
};
+ std::atomic<bool> mCallbackPaused;
+
// True if mCallbackWindow is a remote consumer, false if just the local
// mCallbackConsumer
bool mCallbackToApp;
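The pause/unpause pair added to CallbackProcessor amounts to an atomic flag that the callback thread checks before processing: while paused, buffers are still drained but dropped. A standalone sketch of that gating (names are illustrative):

    #include <atomic>
    #include <cstdio>

    class CallbackGate {
    public:
        void pause()   { mPaused = true; }
        void unpause() { mPaused = false; }

        // Called on the processing thread for every new buffer.
        void onNewBuffer(int buffer) {
            if (mPaused) {
                printf("discarding buffer %d\n", buffer);  // analogous to discardNewCallback()
                return;
            }
            printf("processing buffer %d\n", buffer);
        }

    private:
        std::atomic<bool> mPaused{true};  // starts paused, like mCallbackPaused
    };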
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 6e1aba9..d47014e 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1532,6 +1532,14 @@
static_cast<camera_metadata_enum_android_scaler_rotate_and_crop_t>(rotateAndCrop));
}
+bool CameraDeviceClient::supportsCameraMute() {
+ return mDevice->supportsCameraMute();
+}
+
+status_t CameraDeviceClient::setCameraMute(bool enabled) {
+ return mDevice->setCameraMute(enabled);
+}
+
binder::Status CameraDeviceClient::switchToOffline(
const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
const std::vector<int>& offlineOutputIds,
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 57688a0..5588285 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -189,6 +189,9 @@
virtual status_t setRotateAndCropOverride(uint8_t rotateAndCrop) override;
+ virtual bool supportsCameraMute();
+ virtual status_t setCameraMute(bool enabled);
+
virtual status_t dump(int fd, const Vector<String16>& args);
virtual status_t dumpClient(int fd, const Vector<String16>& args);
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index 62b5479..6765c3b 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -72,6 +72,16 @@
return OK;
}
+bool CameraOfflineSessionClient::supportsCameraMute() {
+ // Offline mode doesn't support muting
+ return false;
+}
+
+status_t CameraOfflineSessionClient::setCameraMute(bool) {
+ return INVALID_OPERATION;
+}
+
+
status_t CameraOfflineSessionClient::dump(int fd, const Vector<String16>& args) {
return BasicClient::dump(fd, args);
}
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index 839c435..5c5fcda 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -76,6 +76,9 @@
status_t setRotateAndCropOverride(uint8_t rotateAndCrop) override;
+ bool supportsCameraMute() override;
+ status_t setCameraMute(bool enabled) override;
+
// permissions management
status_t startCameraOps() override;
status_t finishCameraOps() override;
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 5e46f08..1be46d6 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -399,6 +399,21 @@
camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue) = 0;
/**
+ * Whether camera muting (producing black-only output) is supported.
+ *
+ * Calling setCameraMute(true) when this returns false will return an
+ * INVALID_OPERATION error.
+ */
+ virtual bool supportsCameraMute() = 0;
+
+ /**
+ * Mute the camera.
+ *
+ * When muted, black image data is output on all output streams.
+ */
+ virtual status_t setCameraMute(bool enabled) = 0;
+
+ /**
* Get the status tracker of the camera device
*/
virtual wp<camera3::StatusTracker> getStatusTracker() = 0;
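A standalone sketch of the contract documented above: callers probe supportsCameraMute() first, and setCameraMute(true) on a device that does not support muting fails with INVALID_OPERATION. The Device struct is a stand-in for CameraDeviceBase; the error value is shown only for illustration.

    #include <cstdint>

    using status_t = int32_t;
    static constexpr status_t OK = 0;
    static constexpr status_t INVALID_OPERATION = -38;  // -ENOSYS; illustrative value

    struct Device {
        bool muteSupported = false;
        bool muted = false;

        bool supportsCameraMute() const { return muteSupported; }

        status_t setCameraMute(bool enabled) {
            if (!muteSupported) return INVALID_OPERATION;
            muted = enabled;
            return OK;
        }
    };

    // Mirrors the supportsCameraMute() guard applied before calling setCameraMute().
    status_t muteIfSupported(Device& dev, bool enabled) {
        return dev.supportsCameraMute() ? dev.setCameraMute(enabled) : OK;
    }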
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 385bfd6..7606d7d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -357,6 +357,15 @@
mRotateAndCropMappers.emplace(mId.c_str(), &mDeviceInfo);
}
+ camera_metadata_entry_t availableTestPatternModes = mDeviceInfo.find(
+ ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES);
+ for (size_t i = 0; i < availableTestPatternModes.count; i++) {
+ if (availableTestPatternModes.data.i32[i] == ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR) {
+ mSupportCameraMute = true;
+ break;
+ }
+ }
+
return OK;
}
@@ -2388,6 +2397,26 @@
newRequest->mZoomRatioIs1x = false;
}
+ if (mSupportCameraMute) {
+ auto testPatternModeEntry =
+ newRequest->mSettingsList.begin()->metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
+ newRequest->mOriginalTestPatternMode = testPatternModeEntry.count > 0 ?
+ testPatternModeEntry.data.i32[0] :
+ ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+
+ auto testPatternDataEntry =
+ newRequest->mSettingsList.begin()->metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
+ if (testPatternDataEntry.count > 0) {
+ memcpy(newRequest->mOriginalTestPatternData, testPatternDataEntry.data.i32,
+ sizeof(newRequest->mOriginalTestPatternData));
+ } else {
+ newRequest->mOriginalTestPatternData[0] = 0;
+ newRequest->mOriginalTestPatternData[1] = 0;
+ newRequest->mOriginalTestPatternData[2] = 0;
+ newRequest->mOriginalTestPatternData[3] = 0;
+ }
+ }
+
return newRequest;
}
@@ -3860,6 +3889,8 @@
mCurrentAfTriggerId(0),
mCurrentPreCaptureTriggerId(0),
mRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE),
+ mCameraMute(false),
+ mCameraMuteChanged(false),
mRepeatingLastFrameNumber(
hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES),
mPrepareVideoStream(false),
@@ -4484,10 +4515,13 @@
mPrevTriggers = triggerCount;
bool rotateAndCropChanged = overrideAutoRotateAndCrop(captureRequest);
+ bool testPatternChanged = overrideTestPattern(captureRequest);
- // If the request is the same as last, or we had triggers last time
+ // Treat the request as new if it differs from the last one, if we had triggers now or
+ // last time, or if an override changed this time
bool newRequest =
- (mPrevRequest != captureRequest || triggersMixedIn || rotateAndCropChanged) &&
+ (mPrevRequest != captureRequest || triggersMixedIn ||
+ rotateAndCropChanged || testPatternChanged) &&
// Request settings are all the same within one batch, so only treat the first
// request in a batch as new
!(batchedRequest && i > 0);
@@ -4952,6 +4986,16 @@
return OK;
}
+status_t Camera3Device::RequestThread::setCameraMute(bool enabled) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mTriggerMutex);
+ if (enabled != mCameraMute) {
+ mCameraMute = enabled;
+ mCameraMuteChanged = true;
+ }
+ return OK;
+}
+
nsecs_t Camera3Device::getExpectedInFlightDuration() {
ATRACE_CALL();
std::lock_guard<std::mutex> l(mInFlightLock);
@@ -5504,6 +5548,61 @@
return false;
}
+bool Camera3Device::RequestThread::overrideTestPattern(
+ const sp<CaptureRequest> &request) {
+ ATRACE_CALL();
+
+ Mutex::Autolock l(mTriggerMutex);
+
+ bool changed = false;
+
+ int32_t testPatternMode = request->mOriginalTestPatternMode;
+ int32_t testPatternData[4] = {
+ request->mOriginalTestPatternData[0],
+ request->mOriginalTestPatternData[1],
+ request->mOriginalTestPatternData[2],
+ request->mOriginalTestPatternData[3]
+ };
+
+ if (mCameraMute) {
+ testPatternMode = ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR;
+ testPatternData[0] = 0;
+ testPatternData[1] = 0;
+ testPatternData[2] = 0;
+ testPatternData[3] = 0;
+ }
+
+ CameraMetadata &metadata = request->mSettingsList.begin()->metadata;
+
+ auto testPatternEntry = metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
+ if (testPatternEntry.count > 0) {
+ if (testPatternEntry.data.i32[0] != testPatternMode) {
+ testPatternEntry.data.i32[0] = testPatternMode;
+ changed = true;
+ }
+ } else {
+ metadata.update(ANDROID_SENSOR_TEST_PATTERN_MODE,
+ &testPatternMode, 1);
+ changed = true;
+ }
+
+ auto testPatternColor = metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
+ if (testPatternColor.count > 0) {
+ for (size_t i = 0; i < 4; i++) {
+ if (testPatternColor.data.i32[i] != (int32_t)testPatternData[i]) {
+ testPatternColor.data.i32[i] = testPatternData[i];
+ changed = true;
+ }
+ }
+ } else {
+ metadata.update(ANDROID_SENSOR_TEST_PATTERN_DATA,
+ (int32_t*)testPatternData, 4);
+ changed = true;
+ }
+
+ return changed;
+}
+
/**
* PreparerThread inner class methods
*/
@@ -6129,4 +6228,22 @@
return mRequestThread->setRotateAndCropAutoBehavior(rotateAndCropValue);
}
+bool Camera3Device::supportsCameraMute() {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ return mSupportCameraMute;
+}
+
+status_t Camera3Device::setCameraMute(bool enabled) {
+ ATRACE_CALL();
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ if (mRequestThread == nullptr || !mSupportCameraMute) {
+ return INVALID_OPERATION;
+ }
+ return mRequestThread->setCameraMute(enabled);
+}
+
}; // namespace android
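The muting path above works by rewriting each request's test-pattern controls: when muted, force the SOLID_COLOR test pattern with all-zero channel data so every output stream produces black frames; when unmuted, restore the mode and data that createCaptureRequest stashed from the original request. A standalone sketch of that mapping (types and enum values are illustrative, not the camera_metadata definitions):

    #include <array>
    #include <cstdint>

    enum TestPatternMode : int32_t {
        TEST_PATTERN_MODE_OFF = 0,
        TEST_PATTERN_MODE_SOLID_COLOR = 1,
    };

    struct TestPattern {
        int32_t mode;
        std::array<int32_t, 4> data;  // raw R, Gr, Gb, B values used by SOLID_COLOR
    };

    // Pattern to apply to the outgoing request for the current mute state.
    TestPattern effectivePattern(bool muted, const TestPattern& original) {
        if (!muted) {
            return original;  // restore what the app requested
        }
        return TestPattern{TEST_PATTERN_MODE_SOLID_COLOR, {{0, 0, 0, 0}}};  // all-black output
    }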
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index b06ce45..567b3ad 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -238,6 +238,21 @@
status_t setRotateAndCropAutoBehavior(
camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue);
+ /**
+ * Whether camera muting (producing black-only output) is supported.
+ *
+ * Calling setCameraMute(true) when this returns false will return an
+ * INVALID_OPERATION error.
+ */
+ bool supportsCameraMute();
+
+ /**
+ * Mute the camera.
+ *
+ * When muted, black image data is output on all output streams.
+ */
+ status_t setCameraMute(bool enabled);
+
// Get the status tracker for the camera device
wp<camera3::StatusTracker> getStatusTracker() { return mStatusTracker; }
@@ -525,6 +540,11 @@
// overriding of ROTATE_AND_CROP value and adjustment of coordinates
// in several other controls in both the request and the result
bool mRotateAndCropAuto;
+ // Original value of TEST_PATTERN_MODE and DATA so that they can be
+ // restored when sensor muting is turned off
+ int32_t mOriginalTestPatternMode;
+ int32_t mOriginalTestPatternData[4];
+
// Whether this capture request has its zoom ratio set to 1.0x before
// the framework overrides it for camera HAL consumption.
bool mZoomRatioIs1x;
@@ -868,6 +888,7 @@
status_t setRotateAndCropAutoBehavior(
camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue);
+ status_t setCameraMute(bool enabled);
protected:
virtual bool threadLoop();
@@ -889,6 +910,10 @@
// Override rotate_and_crop control if needed; returns true if the current value was changed
bool overrideAutoRotateAndCrop(const sp<CaptureRequest> &request);
+ // Override test_pattern control if needed for camera mute; returns true
+ // if the current value was changed
+ bool overrideTestPattern(const sp<CaptureRequest> &request);
+
static const nsecs_t kRequestTimeout = 50e6; // 50 ms
// TODO: does this need to be adjusted for long exposure requests?
@@ -1011,6 +1036,8 @@
uint32_t mCurrentAfTriggerId;
uint32_t mCurrentPreCaptureTriggerId;
camera_metadata_enum_android_scaler_rotate_and_crop_t mRotateAndCropOverride;
+ bool mCameraMute;
+ bool mCameraMuteChanged;
int64_t mRepeatingLastFrameNumber;
@@ -1276,6 +1303,10 @@
// Whether HAL supports offline processing capability.
bool mSupportOfflineProcessing = false;
+
+ // Whether the HAL supports camera muting via test pattern
+ bool mSupportCameraMute = false;
+
}; // class Camera3Device
}; // namespace android
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 3bb70f1..b64f726 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -100,6 +100,7 @@
"libmediametricsservice",
"libmediautils",
"libutils",
+ "mediametricsservice-aidl-unstable-cpp",
],
header_libs: [
"libaudioutils_headers",
@@ -142,6 +143,7 @@
},
shared_libs: [
+ "mediametricsservice-aidl-unstable-cpp",
"libbase", // android logging
"libbinder",
"libcutils",
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index d78d1e3..3b2de76 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -136,6 +136,25 @@
"connection_count",
};
+// static constexpr const char * const AAudioStreamFields[] {
+// "mediametrics_aaudiostream_reported",
+// "caller_name",
+// "path",
+// "direction",
+// "frames_per_burst",
+// "buffer_size",
+// "buffer_capacity",
+// "channel_count",
+// "total_frames_transferred",
+// "perf_mode_requested",
+// "perf_mode_actual",
+// "sharing",
+// "xrun_count",
+// "device_type",
+// "format_app",
+// "format_device",
+// };
+
/**
* sendToStatsd is a helper method that sends the arguments to statsd
* and returns a pair { result, summary_string }.
@@ -192,6 +211,24 @@
});
}));
+ // Handle legacy aaudio stream statistics
+ mActions.addAction(
+ AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK "*." AMEDIAMETRICS_PROP_EVENT,
+ std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM),
+ std::make_shared<AnalyticsActions::Function>(
+ [this](const std::shared_ptr<const android::mediametrics::Item> &item) {
+ mAAudioStreamInfo.endAAudioStream(item, AAudioStreamInfo::CALLER_PATH_LEGACY);
+ }));
+
+ // Handle mmap aaudio stream statistics
+ mActions.addAction(
+ AMEDIAMETRICS_KEY_PREFIX_AUDIO_STREAM "*." AMEDIAMETRICS_PROP_EVENT,
+ std::string(AMEDIAMETRICS_PROP_EVENT_VALUE_ENDAAUDIOSTREAM),
+ std::make_shared<AnalyticsActions::Function>(
+ [this](const std::shared_ptr<const android::mediametrics::Item> &item) {
+ mAAudioStreamInfo.endAAudioStream(item, AAudioStreamInfo::CALLER_PATH_MMAP);
+ }));
+
// Handle device use record statistics
mActions.addAction(
AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD "*." AMEDIAMETRICS_PROP_EVENT,
@@ -843,4 +880,109 @@
}
}
+void AudioAnalytics::AAudioStreamInfo::endAAudioStream(
+ const std::shared_ptr<const android::mediametrics::Item> &item, CallerPath path) const {
+ const std::string& key = item->getKey();
+
+ std::string callerNameStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_CALLERNAME, &callerNameStr);
+
+ const auto callerName = types::lookup<types::CALLER_NAME, int32_t>(callerNameStr);
+
+ std::string directionStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_DIRECTION, &directionStr);
+ const auto direction = types::lookup<types::AAUDIO_DIRECTION, int32_t>(directionStr);
+
+ int32_t framesPerBurst = -1;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_BURSTFRAMES, &framesPerBurst);
+
+ int32_t bufferSizeInFrames = -1;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_BUFFERSIZEFRAMES, &bufferSizeInFrames);
+
+ int32_t bufferCapacityInFrames = -1;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_BUFFERCAPACITYFRAMES, &bufferCapacityInFrames);
+
+ int32_t channelCount = -1;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_CHANNELCOUNT, &channelCount);
+
+ int64_t totalFramesTransferred = -1;
+ // TODO: log and get total frames transferred
+
+ std::string perfModeRequestedStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_PERFORMANCEMODE, &perfModeRequestedStr);
+ const auto perfModeRequested =
+ types::lookup<types::AAUDIO_PERFORMANCE_MODE, int32_t>(perfModeRequestedStr);
+
+ int32_t perfModeActual = 0;
+ // TODO: log and get actual performance mode
+
+ std::string sharingModeStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_SHARINGMODE, &sharingModeStr);
+ const auto sharingMode = types::lookup<types::AAUDIO_SHARING_MODE, int32_t>(sharingModeStr);
+
+ int32_t xrunCount = -1;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_UNDERRUN, &xrunCount);
+
+ std::string deviceType;
+ // TODO: only routed device id is logged, but no device type
+
+ int32_t formatApp = 0;
+ // TODO: log format from app
+
+ std::string formatDeviceStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_ENCODING, &formatDeviceStr);
+ const auto formatDevice = types::lookup<types::ENCODING, int32_t>(formatDeviceStr);
+
+ LOG(LOG_LEVEL) << "key:" << key
+ << " caller_name:" << callerName << "(" << callerNameStr << ")"
+ << " path:" << path
+ << " direction:" << direction << "(" << directionStr << ")"
+ << " frames_per_burst:" << framesPerBurst
+ << " buffer_size:" << bufferSizeInFrames
+ << " buffer_capacity:" << bufferCapacityInFrames
+ << " channel_count:" << channelCount
+ << " total_frames_transferred:" << totalFramesTransferred
+ << " perf_mode_requested:" << perfModeRequested << "(" << perfModeRequestedStr << ")"
+ << " perf_mode_actual:" << perfModeActual
+ << " sharing:" << sharingMode << "(" << sharingModeStr << ")"
+ << " xrun_count:" << xrunCount
+ << " device_type:" << deviceType
+ << " format_app:" << formatApp
+ << " format_device: " << formatDevice << "(" << formatDeviceStr << ")";
+
+ // TODO: send the metric to statsd when the proto is ready
+ // if (mAudioAnalytics.mDeliverStatistics) {
+ // const auto [ result, str ] = sendToStatsd(AAudioStreamFields,
+ // CONDITION(android::util::MEDIAMETRICS_AAUDIOSTREAM_REPORTED)
+ // , callerName
+ // , path
+ // , direction
+ // , framesPerBurst
+ // , bufferSizeInFrames
+ // , bufferCapacityInFrames
+ // , channelCount
+ // , totalFramesTransferred
+ // , perfModeRequested
+ // , perfModeActual
+ // , sharingMode
+ // , xrunCount
+ // , deviceType.c_str()
+ // , formatApp
+ // , formatDevice
+ // );
+ // ALOGV("%s: statsd %s", __func__, str.c_str());
+ // mAudioAnalytics.mStatsdLog.log("%s", str.c_str());
+ // }
+}
+
} // namespace android::mediametrics
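The two addAction calls above follow the mediametrics convention of keying an analytics callback on a wildcard item-key prefix plus an event value. A heavily simplified standalone sketch of that dispatch (the real matching lives in AnalyticsActions; all names here are stand-ins):

    #include <functional>
    #include <memory>
    #include <string>
    #include <tuple>
    #include <vector>

    struct Item {
        std::string key;    // e.g. "audio.stream.13"
        std::string event;  // e.g. "endAAudioStream"
    };

    using Action = std::function<void(const std::shared_ptr<const Item>&)>;

    class Actions {
    public:
        // The prefix may contain a trailing "*", which matches any key with that stem.
        void addAction(std::string prefix, std::string event, Action action) {
            mEntries.emplace_back(std::move(prefix), std::move(event), std::move(action));
        }

        void dispatch(const std::shared_ptr<const Item>& item) {
            for (const auto& [prefix, event, action] : mEntries) {
                const std::string stem = prefix.substr(0, prefix.find('*'));
                if (item->key.compare(0, stem.size(), stem) == 0 && item->event == event) {
                    action(item);
                }
            }
        }

    private:
        std::vector<std::tuple<std::string, std::string, Action>> mEntries;
    };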
diff --git a/services/mediametrics/AudioAnalytics.h b/services/mediametrics/AudioAnalytics.h
index df097b1..07872ef 100644
--- a/services/mediametrics/AudioAnalytics.h
+++ b/services/mediametrics/AudioAnalytics.h
@@ -189,6 +189,29 @@
int32_t mA2dpConnectionUnknowns GUARDED_BY(mLock) = 0;
} mDeviceConnection{*this};
+ // AAudioStreamInfo is a nested class that collects AAudio stream info from both the
+ // client and server side.
+ class AAudioStreamInfo {
+ public:
+ // All the enum here must be kept the same as the ones defined in atoms.proto
+ enum CallerPath {
+ CALLER_PATH_UNKNOWN = 0,
+ CALLER_PATH_LEGACY = 1,
+ CALLER_PATH_MMAP = 2,
+ };
+
+ explicit AAudioStreamInfo(AudioAnalytics &audioAnalytics)
+ : mAudioAnalytics(audioAnalytics) {}
+
+ void endAAudioStream(
+ const std::shared_ptr<const android::mediametrics::Item> &item,
+ CallerPath path) const;
+
+ private:
+
+ AudioAnalytics &mAudioAnalytics;
+ } mAAudioStreamInfo{*this};
+
AudioPowerUsage mAudioPowerUsage{this};
};
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index 5d044bb..44e96ec 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -154,6 +154,40 @@
return map;
}
+const std::unordered_map<std::string, int32_t>& getAAudioDirection() {
+ // DO NOT MODIFY VALUES(OK to add new ones).
+ // This may be found in frameworks/av/media/libaaudio/include/aaudio/AAudio.h
+ static std::unordered_map<std::string, int32_t> map {
+ // UNKNOWN is -1
+ {"AAUDIO_DIRECTION_OUTPUT", 0},
+ {"AAUDIO_DIRECTION_INPUT", 1},
+ };
+ return map;
+}
+
+const std::unordered_map<std::string, int32_t>& getAAudioPerformanceMode() {
+ // DO NOT MODIFY VALUES(OK to add new ones).
+ // This may be found in frameworks/av/media/libaaudio/include/aaudio/AAudio.h
+ static std::unordered_map<std::string, int32_t> map {
+ // UNKNOWN is -1
+ {"AAUDIO_PERFORMANCE_MODE_NONE", 10},
+ {"AAUDIO_PERFORMANCE_MODE_POWER_SAVING", 11},
+ {"AAUDIO_PERFORMANCE_MODE_LOW_LATENCY", 12},
+ };
+ return map;
+}
+
+const std::unordered_map<std::string, int32_t>& getAAudioSharingMode() {
+ // DO NOT MODIFY VALUES(OK to add new ones).
+ // This may be found in frameworks/av/media/libaaudio/include/aaudio/AAudio.h
+ static std::unordered_map<std::string, int32_t> map {
+ // UNKNOWN is -1
+ {"AAUDIO_SHARING_MODE_EXCLUSIVE", 0},
+ {"AAUDIO_SHARING_MODE_SHARED", 1},
+ };
+ return map;
+}
+
// Helper: Create the corresponding int32 from string flags split with '|'.
template <typename Traits>
int32_t int32FromFlags(const std::string &flags)
@@ -433,4 +467,70 @@
return flagsFromMap(traits, getAudioTrackTraitsMap());
}
+template <>
+std::string lookup<AAUDIO_DIRECTION>(const std::string &direction)
+{
+ auto& map = getAAudioDirection();
+ auto it = map.find(direction);
+ if (it == map.end()) {
+ return "";
+ }
+ return direction;
+}
+
+template <>
+int32_t lookup<AAUDIO_DIRECTION>(const std::string &direction)
+{
+ auto& map = getAAudioDirection();
+ auto it = map.find(direction);
+ if (it == map.end()) {
+ return -1; // return unknown
+ }
+ return it->second;
+}
+
+template <>
+std::string lookup<AAUDIO_PERFORMANCE_MODE>(const std::string &performanceMode)
+{
+ auto& map = getAAudioPerformanceMode();
+ auto it = map.find(performanceMode);
+ if (it == map.end()) {
+ return "";
+ }
+ return performanceMode;
+}
+
+template <>
+int32_t lookup<AAUDIO_PERFORMANCE_MODE>(const std::string &performanceMode)
+{
+ auto& map = getAAudioPerformanceMode();
+ auto it = map.find(performanceMode);
+ if (it == map.end()) {
+ return -1; // return unknown
+ }
+ return it->second;
+}
+
+template <>
+std::string lookup<AAUDIO_SHARING_MODE>(const std::string &sharingMode)
+{
+ auto& map = getAAudioSharingMode();
+ auto it = map.find(sharingMode);
+ if (it == map.end()) {
+ return "";
+ }
+ return sharingMode;
+}
+
+template <>
+int32_t lookup<AAUDIO_SHARING_MODE>(const std::string &sharingMode)
+{
+ auto& map = getAAudioSharingMode();
+ auto it = map.find(sharingMode);
+ if (it == map.end()) {
+ return -1; // return unknown
+ }
+ return it->second;
+}
+
} // namespace android::mediametrics::types
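The new AAudio lookup specializations follow the existing AudioTypes convention: the int32_t form returns -1 for an unknown token and the string form returns an empty string, so the logging site can report both the mapped code and the raw value. A standalone sketch of that convention for the direction table (values mirror the map above):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    static const std::unordered_map<std::string, int32_t>& directionMap() {
        static const std::unordered_map<std::string, int32_t> map{
            {"AAUDIO_DIRECTION_OUTPUT", 0},
            {"AAUDIO_DIRECTION_INPUT", 1},
        };
        return map;
    }

    int32_t directionToInt(const std::string& s) {
        const auto it = directionMap().find(s);
        return it == directionMap().end() ? -1 : it->second;  // -1 means unknown
    }

    std::string directionToValidString(const std::string& s) {
        return directionMap().count(s) ? s : std::string();   // "" means unknown
    }

So directionToInt("AAUDIO_DIRECTION_OUTPUT") yields 0, while an unrecognized token yields -1.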
diff --git a/services/mediametrics/AudioTypes.h b/services/mediametrics/AudioTypes.h
index e1deeb1..4394d79 100644
--- a/services/mediametrics/AudioTypes.h
+++ b/services/mediametrics/AudioTypes.h
@@ -40,6 +40,9 @@
// Enumeration for all the string translations to integers (generally int32_t) unless noted.
enum AudioEnumCategory {
+ AAUDIO_DIRECTION,
+ AAUDIO_PERFORMANCE_MODE,
+ AAUDIO_SHARING_MODE,
CALLER_NAME,
CONTENT_TYPE,
ENCODING,
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index bf6e428..9d380ec 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -25,6 +25,7 @@
#include <android/content/pm/IPackageManagerNative.h> // package info
#include <audio_utils/clock.h> // clock conversions
#include <binder/IPCThreadState.h> // get calling uid
+#include <binder/IServiceManager.h> // checkCallingPermission
#include <cutils/properties.h> // for property_get
#include <mediautils/MemoryLeakTrackUtil.h>
#include <memunreachable/memunreachable.h>
diff --git a/services/mediametrics/MediaMetricsService.h b/services/mediametrics/MediaMetricsService.h
index 792b7f0..bcae397 100644
--- a/services/mediametrics/MediaMetricsService.h
+++ b/services/mediametrics/MediaMetricsService.h
@@ -24,7 +24,7 @@
// IMediaMetricsService must include Vector, String16, Errors
#include <android-base/thread_annotations.h>
-#include <media/IMediaMetricsService.h>
+#include <android/media/BnMediaMetricsService.h>
#include <mediautils/ServiceUtilities.h>
#include <utils/String8.h>
@@ -32,12 +32,18 @@
namespace android {
-class MediaMetricsService : public BnMediaMetricsService
+class MediaMetricsService : public media::BnMediaMetricsService
{
public:
MediaMetricsService();
~MediaMetricsService() override;
+ // AIDL interface
+ binder::Status submitBuffer(const std::vector<uint8_t>& buffer) override {
+ status_t status = submitBuffer((char *)buffer.data(), buffer.size());
+ return binder::Status::fromStatusT(status);
+ }
+
/**
* Submits the indicated record to the mediaanalytics service.
*
@@ -45,11 +51,11 @@
* \return status failure, which is negative on binder transaction failure.
* As the transaction is one-way, remote failures will not be reported.
*/
- status_t submit(mediametrics::Item *item) override {
+ status_t submit(mediametrics::Item *item) {
return submitInternal(item, false /* release */);
}
- status_t submitBuffer(const char *buffer, size_t length) override {
+ status_t submitBuffer(const char *buffer, size_t length) {
mediametrics::Item *item = new mediametrics::Item();
return item->readFromByteString(buffer, length)
?: submitInternal(item, true /* release */);
@@ -81,7 +87,7 @@
// Internal call where release is true if ownership of item is transferred
// to the service (that is, the service will eventually delete the item).
- status_t submitInternal(mediametrics::Item *item, bool release) override;
+ status_t submitInternal(mediametrics::Item *item, bool release);
private:
void processExpirations();
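The interface migration above keeps the legacy (const char*, size_t) submit path and layers the generated AIDL entry point on top of it, converting the status_t result into a binder Status. A standalone sketch of that adapter shape, using stand-in types for the binder classes:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using status_t = int32_t;
    static constexpr status_t OK = 0;

    struct Status {  // stand-in for binder::Status
        static Status fromStatusT(status_t s) { return Status{s}; }
        status_t code;
    };

    struct MetricsService {
        // Legacy byte-string path, unchanged by the migration.
        status_t submitBuffer(const char* buffer, size_t length) {
            printf("parsing %zu bytes at %p\n", length, static_cast<const void*>(buffer));
            return OK;
        }

        // AIDL-shaped entry point delegating to the legacy overload.
        Status submitBuffer(const std::vector<uint8_t>& buffer) {
            return Status::fromStatusT(
                    submitBuffer(reinterpret_cast<const char*>(buffer.data()), buffer.size()));
        }
    };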
diff --git a/services/mediametrics/fuzzer/Android.bp b/services/mediametrics/fuzzer/Android.bp
index df4c867..6ac9d20 100644
--- a/services/mediametrics/fuzzer/Android.bp
+++ b/services/mediametrics/fuzzer/Android.bp
@@ -43,6 +43,7 @@
"libstagefright",
"libstatslog",
"libutils",
+ "mediametricsservice-aidl-unstable-cpp",
],
include_dirs: [
diff --git a/services/mediametrics/tests/Android.bp b/services/mediametrics/tests/Android.bp
index c2e0759..94112b0 100644
--- a/services/mediametrics/tests/Android.bp
+++ b/services/mediametrics/tests/Android.bp
@@ -19,6 +19,7 @@
"libmediametricsservice",
"libmediautils",
"libutils",
+ "mediametricsservice-aidl-unstable-cpp",
],
header_libs: [
diff --git a/services/tuner/TunerDemux.cpp b/services/tuner/TunerDemux.cpp
index 0e0cd3b..8346992 100644
--- a/services/tuner/TunerDemux.cpp
+++ b/services/tuner/TunerDemux.cpp
@@ -18,6 +18,7 @@
#include "TunerDvr.h"
#include "TunerDemux.h"
+#include "TunerTimeFilter.h"
using ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterType;
using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
@@ -97,7 +98,70 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
- *_aidl_return = ::ndk::SharedRefBase::make<TunerFilter>(filterSp, cbSp);
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerFilter>(filterSp, cbSp, type, subType);
+ return Status::ok();
+}
+
+Status TunerDemux::openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result status;
+ sp<ITimeFilter> filterSp;
+ mDemux->openTimeFilter([&](Result r, const sp<ITimeFilter>& filter) {
+ filterSp = filter;
+ status = r;
+ });
+ if (status != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+ }
+
+ *_aidl_return = ::ndk::SharedRefBase::make<TunerTimeFilter>(filterSp);
+ return Status::ok();
+}
+
+Status TunerDemux::getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter, int* _aidl_return) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ uint32_t avSyncHwId;
+ Result res;
+ sp<IFilter> halFilter = static_cast<TunerFilter*>(tunerFilter.get())->getHalFilter();
+ mDemux->getAvSyncHwId(halFilter,
+ [&](Result r, uint32_t id) {
+ res = r;
+ avSyncHwId = id;
+ });
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ *_aidl_return = (int)avSyncHwId;
+ return Status::ok();
+}
+
+Status TunerDemux::getAvSyncTime(int avSyncHwId, int64_t* _aidl_return) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ uint64_t time;
+ Result res;
+ mDemux->getAvSyncTime(static_cast<uint32_t>(avSyncHwId),
+ [&](Result r, uint64_t ts) {
+ res = r;
+ time = ts;
+ });
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ *_aidl_return = (int64_t)time;
return Status::ok();
}
@@ -124,4 +188,43 @@
*_aidl_return = ::ndk::SharedRefBase::make<TunerDvr>(hidlDvr, dvrType);
return Status::ok();
}
+
+Status TunerDemux::connectCiCam(int ciCamId) {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result res = mDemux->connectCiCam(static_cast<uint32_t>(ciCamId));
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
+}
+
+Status TunerDemux::disconnectCiCam() {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result res = mDemux->disconnectCiCam();
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
+}
+
+Status TunerDemux::close() {
+ if (mDemux == nullptr) {
+ ALOGE("IDemux is not initialized.");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result res = mDemux->close();
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
+}
} // namespace android
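Most of the new TunerDemux methods repeat one pattern: a HIDL call that reports (Result, value) through a synchronous lambda, with the wrapper turning any non-SUCCESS Result into a service-specific AIDL error. A standalone sketch of that shape (the Status type and the HIDL stand-in are illustrative):

    #include <cstdint>
    #include <functional>

    enum class Result : int32_t { SUCCESS = 0, UNAVAILABLE = 1 };

    struct Status {  // stand-in for the AIDL Status helpers used above
        static Status ok() { return Status{0}; }
        static Status fromServiceSpecificError(int32_t e) { return Status{e}; }
        int32_t error;
    };

    // Stand-in for a HIDL method that reports its result through a callback.
    void getAvSyncTimeHidl(uint32_t /*avSyncHwId*/,
                           const std::function<void(Result, uint64_t)>& cb) {
        cb(Result::SUCCESS, 12345);
    }

    Status getAvSyncTime(int avSyncHwId, int64_t* out) {
        Result res = Result::UNAVAILABLE;
        uint64_t time = 0;
        getAvSyncTimeHidl(static_cast<uint32_t>(avSyncHwId), [&](Result r, uint64_t ts) {
            res = r;
            time = ts;
        });
        if (res != Result::SUCCESS) {
            return Status::fromServiceSpecificError(static_cast<int32_t>(res));
        }
        *out = static_cast<int64_t>(time);
        return Status::ok();
    }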
diff --git a/services/tuner/TunerDemux.h b/services/tuner/TunerDemux.h
index 675bb7c..9f294c6 100644
--- a/services/tuner/TunerDemux.h
+++ b/services/tuner/TunerDemux.h
@@ -27,9 +27,11 @@
using ::aidl::android::media::tv::tuner::ITunerFilter;
using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
using ::aidl::android::media::tv::tuner::ITunerFrontend;
+using ::aidl::android::media::tv::tuner::ITunerTimeFilter;
using ::android::hardware::tv::tuner::V1_0::IDemux;
using ::android::hardware::tv::tuner::V1_0::IDvr;
using ::android::hardware::tv::tuner::V1_0::IDvrCallback;
+using ::android::hardware::tv::tuner::V1_0::ITimeFilter;
using namespace std;
@@ -43,10 +45,16 @@
Status setFrontendDataSource(const shared_ptr<ITunerFrontend>& frontend) override;
Status openFilter(
int mainType, int subtype, int bufferSize, const shared_ptr<ITunerFilterCallback>& cb,
- shared_ptr<ITunerFilter>* _aidl_return);
+ shared_ptr<ITunerFilter>* _aidl_return) override;
+ Status openTimeFilter(shared_ptr<ITunerTimeFilter>* _aidl_return) override;
+ Status getAvSyncHwId(const shared_ptr<ITunerFilter>& tunerFilter, int* _aidl_return) override;
+ Status getAvSyncTime(int avSyncHwId, int64_t* _aidl_return) override;
Status openDvr(
int dvbType, int bufferSize, const shared_ptr<ITunerDvrCallback>& cb,
shared_ptr<ITunerDvr>* _aidl_return) override;
+ Status connectCiCam(int ciCamId) override;
+ Status disconnectCiCam() override;
+ Status close() override;
private:
sp<IDemux> mDemux;
diff --git a/services/tuner/TunerFilter.cpp b/services/tuner/TunerFilter.cpp
index 722d36d..af5a600 100644
--- a/services/tuner/TunerFilter.cpp
+++ b/services/tuner/TunerFilter.cpp
@@ -16,19 +16,35 @@
#define LOG_TAG "TunerFilter"
-#include <aidlcommonsupport/NativeHandle.h>
#include "TunerFilter.h"
-using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
-using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
+using ::aidl::android::media::tv::tuner::TunerFilterSectionCondition;
+
+using ::android::hardware::hidl_handle;
+using ::android::hardware::tv::tuner::V1_0::DemuxAlpLengthType;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
+using ::android::hardware::tv::tuner::V1_0::DemuxIpAddress;
+using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterType;
+using ::android::hardware::tv::tuner::V1_0::DemuxMmtpPid;
+using ::android::hardware::tv::tuner::V1_0::DemuxRecordScIndexType;
+using ::android::hardware::tv::tuner::V1_0::DemuxStreamId;
+using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterType;
using ::android::hardware::tv::tuner::V1_0::Result;
+using ::android::hardware::tv::tuner::V1_1::AudioStreamType;
+using ::android::hardware::tv::tuner::V1_1::Constant;
+using ::android::hardware::tv::tuner::V1_1::VideoStreamType;
namespace android {
-TunerFilter::TunerFilter(sp<IFilter> filter, sp<IFilterCallback> callback) {
+using namespace std;
+
+TunerFilter::TunerFilter(
+ sp<IFilter> filter, sp<IFilterCallback> callback, int mainType, int subType) {
mFilter = filter;
mFilter_1_1 = ::android::hardware::tv::tuner::V1_1::IFilter::castFrom(filter);
mFilterCallback = callback;
+ mMainType = mainType;
+ mSubType = subType;
}
TunerFilter::~TunerFilter() {
@@ -37,11 +53,27 @@
mFilterCallback = nullptr;
}
-DemuxFilterAvSettings TunerFilter::getAvSettings(const TunerFilterSettings& settings) {
- DemuxFilterAvSettings av {
- .isPassthrough = settings.get<TunerFilterSettings::av>().isPassthrough,
- };
- return av;
+Status TunerFilter::getQueueDesc(AidlMQDesc* _aidl_return) {
+ if (mFilter == NULL) {
+ ALOGE("IFilter is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ MQDesc dvrMQDesc;
+ Result res;
+ mFilter->getQueueDesc([&](Result r, const MQDesc& desc) {
+ dvrMQDesc = desc;
+ res = r;
+ });
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ AidlMQDesc aidlMQDesc;
+ unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(
+ dvrMQDesc, &aidlMQDesc);
+ *_aidl_return = move(aidlMQDesc);
+ return Status::ok();
}
Status TunerFilter::getId(int32_t* _aidl_return) {
@@ -86,33 +118,387 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
}
- // TODO: more filter types.
- TunerFilterSettings tunerSettings;
- DemuxFilterSettings halSettings;
+ DemuxFilterSettings settings;
switch (config.getTag()) {
case TunerFilterConfiguration::ts: {
- uint16_t tpid = static_cast<uint16_t>(config.get<TunerFilterConfiguration::ts>().tpid);
- tunerSettings = config.get<TunerFilterConfiguration::ts>().filterSettings;
- DemuxTsFilterSettings ts {
- .tpid = tpid,
- };
-
- switch (tunerSettings.getTag()) {
- case TunerFilterSettings::av: {
- ts.filterSettings.av(getAvSettings(tunerSettings));
- break;
- }
- }
+ getHidlTsSettings(config, settings);
+ break;
+ }
+ case TunerFilterConfiguration::mmtp: {
+ getHidlMmtpSettings(config, settings);
+ break;
+ }
+ case TunerFilterConfiguration::ip: {
+ getHidlIpSettings(config, settings);
+ break;
+ }
+ case TunerFilterConfiguration::tlv: {
+ getHidlTlvSettings(config, settings);
+ break;
+ }
+ case TunerFilterConfiguration::alp: {
+ getHidlAlpSettings(config, settings);
break;
}
}
- Result res = mFilter->configure(halSettings);
+
+ Result res = mFilter->configure(settings);
if (res != Result::SUCCESS) {
return Status::fromServiceSpecificError(static_cast<int32_t>(res));
}
return Status::ok();
}
+Status TunerFilter::configureMonitorEvent(int monitorEventType) {
+ if (mFilter_1_1 == nullptr) {
+ ALOGE("IFilter_1_1 is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result res = mFilter_1_1->configureMonitorEvent(monitorEventType);
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
+}
+
+Status TunerFilter::configureIpFilterContextId(int cid) {
+ if (mFilter_1_1 == nullptr) {
+ ALOGE("IFilter_1_1 is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result res = mFilter_1_1->configureIpCid(cid);
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
+}
+
+Status TunerFilter::configureAvStreamType(int avStreamType) {
+ if (mFilter_1_1 == nullptr) {
+ ALOGE("IFilter_1_1 is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ AvStreamType type;
+ if (!getHidlAvStreamType(avStreamType, type)) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::INVALID_STATE));
+ }
+
+ Result res = mFilter_1_1->configureAvStreamType(type);
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
+}
+
+Status TunerFilter::setDataSource(const shared_ptr<ITunerFilter>& filter) {
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ ITunerFilter* tunerFilter = filter.get();
+ sp<IFilter> hidlFilter = static_cast<TunerFilter*>(tunerFilter)->getHalFilter();
+ Result res = mFilter->setDataSource(hidlFilter);
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
+}
+
+void TunerFilter::getHidlTsSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
+ auto tsConf = config.get<TunerFilterConfiguration::ts>();
+ DemuxTsFilterSettings ts{
+ .tpid = static_cast<uint16_t>(tsConf.tpid),
+ };
+
+ TunerFilterSettings tunerSettings = tsConf.filterSettings;
+ switch (tunerSettings.getTag()) {
+ case TunerFilterSettings::av: {
+ ts.filterSettings.av(getAvSettings(tunerSettings));
+ break;
+ }
+ case TunerFilterSettings::section: {
+ ts.filterSettings.section(getSectionSettings(tunerSettings));
+ break;
+ }
+ case TunerFilterSettings::pesData: {
+ ts.filterSettings.pesData(getPesDataSettings(tunerSettings));
+ break;
+ }
+ case TunerFilterSettings::record: {
+ ts.filterSettings.record(getRecordSettings(tunerSettings));
+ break;
+ }
+ default: {
+ ts.filterSettings.noinit();
+ break;
+ }
+ }
+ settings.ts(ts);
+}
+
+void TunerFilter::getHidlMmtpSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
+ auto mmtpConf = config.get<TunerFilterConfiguration::mmtp>();
+ DemuxMmtpFilterSettings mmtp{
+ .mmtpPid = static_cast<DemuxMmtpPid>(mmtpConf.mmtpPid),
+ };
+
+ TunerFilterSettings tunerSettings = mmtpConf.filterSettings;
+ switch (tunerSettings.getTag()) {
+ case TunerFilterSettings::av: {
+ mmtp.filterSettings.av(getAvSettings(tunerSettings));
+ break;
+ }
+ case TunerFilterSettings::section: {
+ mmtp.filterSettings.section(getSectionSettings(tunerSettings));
+ break;
+ }
+ case TunerFilterSettings::pesData: {
+ mmtp.filterSettings.pesData(getPesDataSettings(tunerSettings));
+ break;
+ }
+ case TunerFilterSettings::record: {
+ mmtp.filterSettings.record(getRecordSettings(tunerSettings));
+ break;
+ }
+ case TunerFilterSettings::download: {
+ mmtp.filterSettings.download(getDownloadSettings(tunerSettings));
+ break;
+ }
+ default: {
+ mmtp.filterSettings.noinit();
+ break;
+ }
+ }
+ settings.mmtp(mmtp);
+}
+
+void TunerFilter::getHidlIpSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
+ auto ipConf = config.get<TunerFilterConfiguration::ip>();
+ DemuxIpAddress ipAddr{
+ .srcPort = static_cast<uint16_t>(ipConf.ipAddr.srcPort),
+ .dstPort = static_cast<uint16_t>(ipConf.ipAddr.dstPort),
+ };
+ ipConf.ipAddr.srcIpAddress.isIpV6
+ ? ipAddr.srcIpAddress.v6(getIpV6Address(ipConf.ipAddr.srcIpAddress))
+ : ipAddr.srcIpAddress.v4(getIpV4Address(ipConf.ipAddr.srcIpAddress));
+ ipConf.ipAddr.dstIpAddress.isIpV6
+ ? ipAddr.dstIpAddress.v6(getIpV6Address(ipConf.ipAddr.dstIpAddress))
+ : ipAddr.dstIpAddress.v4(getIpV4Address(ipConf.ipAddr.dstIpAddress));
+ DemuxIpFilterSettings ip{
+ .ipAddr = ipAddr,
+ };
+
+ TunerFilterSettings tunerSettings = ipConf.filterSettings;
+ switch (tunerSettings.getTag()) {
+ case TunerFilterSettings::section: {
+ ip.filterSettings.section(getSectionSettings(tunerSettings));
+ break;
+ }
+ case TunerFilterSettings::isPassthrough: {
+ ip.filterSettings.bPassthrough(tunerSettings.isPassthrough);
+ break;
+ }
+ default: {
+ ip.filterSettings.noinit();
+ break;
+ }
+ }
+ settings.ip(ip);
+}
+
+hidl_array<uint8_t, IP_V6_LENGTH> TunerFilter::getIpV6Address(TunerDemuxIpAddress addr) {
+ hidl_array<uint8_t, IP_V6_LENGTH> ip = {0};
+ if (addr.addr.size() != IP_V6_LENGTH) {
+ return ip;
+ }
+ copy(addr.addr.begin(), addr.addr.end(), ip.data());
+ return ip;
+}
+
+hidl_array<uint8_t, IP_V4_LENGTH> TunerFilter::getIpV4Address(TunerDemuxIpAddress addr) {
+ hidl_array<uint8_t, IP_V4_LENGTH> ip = {0};
+ if (addr.addr.size() != IP_V4_LENGTH) {
+ return ip;
+ }
+ copy(addr.addr.begin(), addr.addr.end(), ip.data());
+ return ip;
+}
+
+void TunerFilter::getHidlTlvSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
+ auto tlvConf = config.get<TunerFilterConfiguration::tlv>();
+ DemuxTlvFilterSettings tlv{
+ .packetType = static_cast<uint8_t>(tlvConf.packetType),
+ .isCompressedIpPacket = tlvConf.isCompressedIpPacket,
+ };
+
+ TunerFilterSettings tunerSettings = tlvConf.filterSettings;
+ switch (tunerSettings.getTag()) {
+ case TunerFilterSettings::section: {
+ tlv.filterSettings.section(getSectionSettings(tunerSettings));
+ break;
+ }
+ case TunerFilterSettings::isPassthrough: {
+ tlv.filterSettings.bPassthrough(tunerSettings.isPassthrough);
+ break;
+ }
+ default: {
+ tlv.filterSettings.noinit();
+ break;
+ }
+ }
+ settings.tlv(tlv);
+}
+
+void TunerFilter::getHidlAlpSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings) {
+ auto alpConf = config.get<TunerFilterConfiguration::alp>();
+ DemuxAlpFilterSettings alp{
+ .packetType = static_cast<uint8_t>(alpConf.packetType),
+ .lengthType = static_cast<DemuxAlpLengthType>(alpConf.lengthType),
+ };
+
+ TunerFilterSettings tunerSettings = alpConf.filterSettings;
+ switch (tunerSettings.getTag()) {
+ case TunerFilterSettings::section: {
+ alp.filterSettings.section(getSectionSettings(tunerSettings));
+ break;
+ }
+ default: {
+ alp.filterSettings.noinit();
+ break;
+ }
+ }
+ settings.alp(alp);
+}
+
+DemuxFilterAvSettings TunerFilter::getAvSettings(const TunerFilterSettings& settings) {
+ DemuxFilterAvSettings av {
+ .isPassthrough = settings.get<TunerFilterSettings::av>().isPassthrough,
+ };
+ return av;
+}
+
+DemuxFilterSectionSettings TunerFilter::getSectionSettings(const TunerFilterSettings& settings) {
+ auto s = settings.get<TunerFilterSettings::section>();
+ DemuxFilterSectionSettings section{
+ .isCheckCrc = s.isCheckCrc,
+ .isRepeat = s.isRepeat,
+ .isRaw = s.isRaw,
+ };
+
+ switch (s.condition.getTag()) {
+ case TunerFilterSectionCondition::sectionBits: {
+ auto sectionBits = s.condition.get<TunerFilterSectionCondition::sectionBits>();
+ vector<uint8_t> filter(sectionBits.filter.begin(), sectionBits.filter.end());
+ vector<uint8_t> mask(sectionBits.mask.begin(), sectionBits.mask.end());
+ vector<uint8_t> mode(sectionBits.mode.begin(), sectionBits.mode.end());
+ section.condition.sectionBits({
+ .filter = filter,
+ .mask = mask,
+ .mode = mode,
+ });
+ break;
+ }
+ case TunerFilterSectionCondition::tableInfo: {
+ auto tableInfo = s.condition.get<TunerFilterSectionCondition::tableInfo>();
+ section.condition.tableInfo({
+ .tableId = static_cast<uint16_t>(tableInfo.tableId),
+ .version = static_cast<uint16_t>(tableInfo.version),
+ });
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ return section;
+}
+
+DemuxFilterPesDataSettings TunerFilter::getPesDataSettings(const TunerFilterSettings& settings) {
+ DemuxFilterPesDataSettings pes{
+ .streamId = static_cast<DemuxStreamId>(
+ settings.get<TunerFilterSettings::pesData>().streamId),
+ .isRaw = settings.get<TunerFilterSettings::pesData>().isRaw,
+ };
+ return pes;
+}
+
+DemuxFilterRecordSettings TunerFilter::getRecordSettings(const TunerFilterSettings& settings) {
+ auto r = settings.get<TunerFilterSettings::record>();
+ DemuxFilterRecordSettings record{
+ .tsIndexMask = static_cast<uint32_t>(r.tsIndexMask),
+ .scIndexType = static_cast<DemuxRecordScIndexType>(r.scIndexType),
+ };
+
+ switch (r.scIndexMask.getTag()) {
+ case TunerFilterScIndexMask::sc: {
+ record.scIndexMask.sc(static_cast<uint32_t>(
+ r.scIndexMask.get<TunerFilterScIndexMask::sc>()));
+ break;
+ }
+ case TunerFilterScIndexMask::scHevc: {
+ record.scIndexMask.scHevc(static_cast<uint32_t>(
+ r.scIndexMask.get<TunerFilterScIndexMask::scHevc>()));
+ break;
+ }
+ }
+ return record;
+}
+
+DemuxFilterDownloadSettings TunerFilter::getDownloadSettings(const TunerFilterSettings& settings) {
+ DemuxFilterDownloadSettings download {
+ .downloadId = static_cast<uint32_t>(
+ settings.get<TunerFilterSettings::download>().downloadId),
+ };
+ return download;
+}
+
+Status TunerFilter::getAvSharedHandleInfo(TunerFilterSharedHandleInfo* _aidl_return) {
+ if (mFilter_1_1 == nullptr) {
+ ALOGE("IFilter_1_1 is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result res;
+ mFilter_1_1->getAvSharedHandle([&](Result r, hidl_handle avMemory, uint64_t avMemSize) {
+ res = r;
+ if (res == Result::SUCCESS) {
+ TunerFilterSharedHandleInfo info{
+ .handle = dupToAidl(hidl_handle(avMemory.getNativeHandle())),
+ .size = static_cast<int64_t>(avMemSize),
+ };
+ *_aidl_return = move(info);
+ } else {
+ _aidl_return = NULL;
+ }
+ });
+
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+}
+
+Status TunerFilter::releaseAvHandle(
+ const ::aidl::android::hardware::common::NativeHandle& handle, int64_t avDataId) {
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result res = mFilter->releaseAvHandle(hidl_handle(makeFromAidl(handle)), avDataId);
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
+}
+
+
Status TunerFilter::start() {
if (mFilter == nullptr) {
ALOGE("IFilter is not initialized");
@@ -149,53 +535,95 @@
return Status::ok();
}
+Status TunerFilter::close() {
+ if (mFilter == nullptr) {
+ ALOGE("IFilter is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+ Result res = mFilter->close();
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+ return Status::ok();
+}
+
sp<IFilter> TunerFilter::getHalFilter() {
return mFilter;
}
-/////////////// FilterCallback ///////////////////////
-
-void TunerFilter::FilterCallback::getMediaEvent(
- std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res) {
- for (DemuxFilterEvent::Event e : events) {
- DemuxFilterMediaEvent mediaEvent = e.media();
- TunerFilterMediaEvent tunerMedia;
-
- tunerMedia.streamId = static_cast<int>(mediaEvent.streamId);
- tunerMedia.isPtsPresent = mediaEvent.isPtsPresent;
- tunerMedia.pts = static_cast<long>(mediaEvent.pts);
- tunerMedia.dataLength = static_cast<long>(mediaEvent.dataLength);
- tunerMedia.offset = static_cast<long>(mediaEvent.offset);
- tunerMedia.isSecureMemory = mediaEvent.isSecureMemory;
- tunerMedia.avDataId = static_cast<long>(mediaEvent.avDataId);
- tunerMedia.mpuSequenceNumber = static_cast<int>(mediaEvent.mpuSequenceNumber);
- tunerMedia.isPesPrivateData = mediaEvent.isPesPrivateData;
-
- if (mediaEvent.avMemory.getNativeHandle() != nullptr) {
- tunerMedia.avMemory = dupToAidl(mediaEvent.avMemory.getNativeHandle());
- }
-
- TunerFilterEvent tunerEvent;
- tunerEvent.set<TunerFilterEvent::media>(std::move(tunerMedia));
- res.push_back(std::move(tunerEvent));
- }
+bool TunerFilter::isAudioFilter() {
+ return (mMainType == (int)DemuxFilterMainType::TS
+ && mSubType == (int)DemuxTsFilterType::AUDIO)
+ || (mMainType == (int)DemuxFilterMainType::MMTP
+ && mSubType == (int)DemuxMmtpFilterType::AUDIO);
}
+bool TunerFilter::isVideoFilter() {
+ return (mMainType == (int)DemuxFilterMainType::TS
+ && mSubType == (int)DemuxTsFilterType::VIDEO)
+ || (mMainType == (int)DemuxFilterMainType::MMTP
+ && mSubType == (int)DemuxMmtpFilterType::VIDEO);
+}
+
+bool TunerFilter::getHidlAvStreamType(int avStreamType, AvStreamType& type) {
+ if (isAudioFilter()) {
+ type.audio(static_cast<AudioStreamType>(avStreamType));
+ return true;
+ }
+
+ if (isVideoFilter()) {
+ type.video(static_cast<VideoStreamType>(avStreamType));
+ return true;
+ }
+
+ return false;
+}
+
+/////////////// FilterCallback ///////////////////////
+
Return<void> TunerFilter::FilterCallback::onFilterStatus(DemuxFilterStatus status) {
- mTunerFilterCallback->onFilterStatus((int)status);
+ if (mTunerFilterCallback != NULL) {
+ mTunerFilterCallback->onFilterStatus((int)status);
+ }
return Void();
}
Return<void> TunerFilter::FilterCallback::onFilterEvent(const DemuxFilterEvent& filterEvent) {
- ALOGD("FilterCallback::onFilterEvent");
- std::vector<DemuxFilterEvent::Event> events = filterEvent.events;
- std::vector<TunerFilterEvent> tunerEvent;
+ vector<DemuxFilterEventExt::Event> emptyEventsExt;
+ DemuxFilterEventExt emptyFilterEventExt {
+ .events = emptyEventsExt,
+ };
+ onFilterEvent_1_1(filterEvent, emptyFilterEventExt);
+ return Void();
+}
- if (!events.empty()) {
- DemuxFilterEvent::Event event = events[0];
- switch (event.getDiscriminator()) {
- case DemuxFilterEvent::Event::hidl_discriminator::media: {
- getMediaEvent(events, tunerEvent);
+Return<void> TunerFilter::FilterCallback::onFilterEvent_1_1(const DemuxFilterEvent& filterEvent,
+ const DemuxFilterEventExt& filterEventExt) {
+ if (mTunerFilterCallback != NULL) {
+ vector<DemuxFilterEvent::Event> events = filterEvent.events;
+ vector<DemuxFilterEventExt::Event> eventsExt = filterEventExt.events;
+ vector<TunerFilterEvent> tunerEvent;
+
+ getAidlFilterEvent(events, eventsExt, tunerEvent);
+ mTunerFilterCallback->onFilterEvent(tunerEvent);
+ }
+ return Void();
+}
+
+/////////////// FilterCallback Helper Methods ///////////////////////
+
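+// The first event's discriminator selects the converter; each converter then handles the whole vector.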
+void TunerFilter::FilterCallback::getAidlFilterEvent(vector<DemuxFilterEvent::Event>& events,
+ vector<DemuxFilterEventExt::Event>& eventsExt,
+ vector<TunerFilterEvent>& tunerEvent) {
+ if (events.empty() && !eventsExt.empty()) {
+ auto eventExt = eventsExt[0];
+ switch (eventExt.getDiscriminator()) {
+ case DemuxFilterEventExt::Event::hidl_discriminator::monitorEvent: {
+ getMonitorEvent(eventsExt, tunerEvent);
+ break;
+ }
+ case DemuxFilterEventExt::Event::hidl_discriminator::startId: {
+ getRestartEvent(eventsExt, tunerEvent);
break;
}
default: {
@@ -203,8 +631,278 @@
}
}
}
- mTunerFilterCallback->onFilterEvent(&tunerEvent);
- return Void();
+
+ if (!events.empty()) {
+ auto event = events[0];
+ switch (event.getDiscriminator()) {
+ case DemuxFilterEvent::Event::hidl_discriminator::media: {
+ getMediaEvent(events, tunerEvent);
+ break;
+ }
+ case DemuxFilterEvent::Event::hidl_discriminator::section: {
+ getSectionEvent(events, tunerEvent);
+ break;
+ }
+ case DemuxFilterEvent::Event::hidl_discriminator::pes: {
+ getPesEvent(events, tunerEvent);
+ break;
+ }
+ case DemuxFilterEvent::Event::hidl_discriminator::tsRecord: {
+ getTsRecordEvent(events, eventsExt, tunerEvent);
+ break;
+ }
+ case DemuxFilterEvent::Event::hidl_discriminator::mmtpRecord: {
+ getMmtpRecordEvent(events, eventsExt, tunerEvent);
+ break;
+ }
+ case DemuxFilterEvent::Event::hidl_discriminator::download: {
+ getDownloadEvent(events, tunerEvent);
+ break;
+ }
+ case DemuxFilterEvent::Event::hidl_discriminator::ipPayload: {
+ getIpPayloadEvent(events, tunerEvent);
+ break;
+ }
+ case DemuxFilterEvent::Event::hidl_discriminator::temi: {
+ getTemiEvent(events, tunerEvent);
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
}
+void TunerFilter::FilterCallback::getMediaEvent(
+ vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
+ for (DemuxFilterEvent::Event e : events) {
+ DemuxFilterMediaEvent mediaEvent = e.media();
+ TunerFilterMediaEvent tunerMedia;
+
+ tunerMedia.streamId = static_cast<int>(mediaEvent.streamId);
+ tunerMedia.isPtsPresent = mediaEvent.isPtsPresent;
+ tunerMedia.pts = static_cast<long>(mediaEvent.pts);
+ tunerMedia.dataLength = static_cast<int>(mediaEvent.dataLength);
+ tunerMedia.offset = static_cast<int>(mediaEvent.offset);
+ tunerMedia.isSecureMemory = mediaEvent.isSecureMemory;
+ tunerMedia.avDataId = static_cast<long>(mediaEvent.avDataId);
+ tunerMedia.mpuSequenceNumber = static_cast<int>(mediaEvent.mpuSequenceNumber);
+ tunerMedia.isPesPrivateData = mediaEvent.isPesPrivateData;
+
+ if (mediaEvent.extraMetaData.getDiscriminator() ==
+ DemuxFilterMediaEvent::ExtraMetaData::hidl_discriminator::audio) {
+ tunerMedia.isAudioExtraMetaData = true;
+ tunerMedia.audio = {
+ .adFade = static_cast<int8_t>(
+ mediaEvent.extraMetaData.audio().adFade),
+ .adPan = static_cast<int8_t>(
+ mediaEvent.extraMetaData.audio().adPan),
+ .versionTextTag = static_cast<int8_t>(
+ mediaEvent.extraMetaData.audio().versionTextTag),
+ .adGainCenter = static_cast<int8_t>(
+ mediaEvent.extraMetaData.audio().adGainCenter),
+ .adGainFront = static_cast<int8_t>(
+ mediaEvent.extraMetaData.audio().adGainFront),
+ .adGainSurround = static_cast<int8_t>(
+ mediaEvent.extraMetaData.audio().adGainSurround),
+ };
+ } else {
+ tunerMedia.isAudioExtraMetaData = false;
+ }
+
+ if (mediaEvent.avMemory.getNativeHandle() != nullptr) {
+ tunerMedia.avMemory = dupToAidl(mediaEvent.avMemory.getNativeHandle());
+ }
+
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::media>(move(tunerMedia));
+ res.push_back(move(tunerEvent));
+ }
+}
+
+void TunerFilter::FilterCallback::getSectionEvent(
+ vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
+ for (DemuxFilterEvent::Event e : events) {
+ DemuxFilterSectionEvent sectionEvent = e.section();
+ TunerFilterSectionEvent tunerSection;
+
+ tunerSection.tableId = static_cast<char>(sectionEvent.tableId);
+ tunerSection.version = static_cast<char>(sectionEvent.version);
+ tunerSection.sectionNum = static_cast<char>(sectionEvent.sectionNum);
+ tunerSection.dataLength = static_cast<char>(sectionEvent.dataLength);
+
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::section>(move(tunerSection));
+ res.push_back(move(tunerEvent));
+ }
+}
+
+void TunerFilter::FilterCallback::getPesEvent(
+ vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
+ for (DemuxFilterEvent::Event e : events) {
+ DemuxFilterPesEvent pesEvent = e.pes();
+ TunerFilterPesEvent tunerPes;
+
+ tunerPes.streamId = static_cast<char>(pesEvent.streamId);
+ tunerPes.dataLength = static_cast<int>(pesEvent.dataLength);
+ tunerPes.mpuSequenceNumber = static_cast<int>(pesEvent.mpuSequenceNumber);
+
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::pes>(move(tunerPes));
+ res.push_back(move(tunerEvent));
+ }
+}
+
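+// TS record events may carry 1.1 extensions (pts, firstMbInSlice) at the same index in eventsExt.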
+void TunerFilter::FilterCallback::getTsRecordEvent(vector<DemuxFilterEvent::Event>& events,
+ vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
+ for (int i = 0; i < events.size(); i++) {
+ TunerFilterTsRecordEvent tunerTsRecord;
+ DemuxFilterTsRecordEvent tsRecordEvent = events[i].tsRecord();
+
+ TunerFilterScIndexMask scIndexMask;
+ if (tsRecordEvent.scIndexMask.getDiscriminator()
+ == DemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::sc) {
+ scIndexMask.set<TunerFilterScIndexMask::sc>(
+ static_cast<int>(tsRecordEvent.scIndexMask.sc()));
+ } else if (tsRecordEvent.scIndexMask.getDiscriminator()
+ == DemuxFilterTsRecordEvent::ScIndexMask::hidl_discriminator::scHevc) {
+ scIndexMask.set<TunerFilterScIndexMask::scHevc>(
+ static_cast<int>(tsRecordEvent.scIndexMask.scHevc()));
+ }
+
+ if (tsRecordEvent.pid.getDiscriminator() == DemuxPid::hidl_discriminator::tPid) {
+ tunerTsRecord.pid = static_cast<char>(tsRecordEvent.pid.tPid());
+ } else {
+ tunerTsRecord.pid = static_cast<char>(Constant::INVALID_TS_PID);
+ }
+
+ tunerTsRecord.scIndexMask = scIndexMask;
+ tunerTsRecord.tsIndexMask = static_cast<int>(tsRecordEvent.tsIndexMask);
+ tunerTsRecord.byteNumber = static_cast<long>(tsRecordEvent.byteNumber);
+
+ if (eventsExt.size() > i && eventsExt[i].getDiscriminator() ==
+ DemuxFilterEventExt::Event::hidl_discriminator::tsRecord) {
+ tunerTsRecord.isExtended = true;
+ tunerTsRecord.pts = static_cast<long>(eventsExt[i].tsRecord().pts);
+ tunerTsRecord.firstMbInSlice = static_cast<int>(eventsExt[i].tsRecord().firstMbInSlice);
+ } else {
+ tunerTsRecord.isExtended = false;
+ }
+
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::tsRecord>(move(tunerTsRecord));
+ res.push_back(move(tunerEvent));
+ }
+}
+
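+// MMTP record events pick up their 1.1 extension fields the same way when present.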
+void TunerFilter::FilterCallback::getMmtpRecordEvent(vector<DemuxFilterEvent::Event>& events,
+ vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
+ for (int i = 0; i < events.size(); i++) {
+ TunerFilterMmtpRecordEvent tunerMmtpRecord;
+ DemuxFilterMmtpRecordEvent mmtpRecordEvent = events[i].mmtpRecord();
+
+ tunerMmtpRecord.scHevcIndexMask = static_cast<int>(mmtpRecordEvent.scHevcIndexMask);
+ tunerMmtpRecord.byteNumber = static_cast<long>(mmtpRecordEvent.byteNumber);
+
+ if (eventsExt.size() > i && eventsExt[i].getDiscriminator() ==
+ DemuxFilterEventExt::Event::hidl_discriminator::mmtpRecord) {
+ tunerMmtpRecord.isExtended = true;
+ tunerMmtpRecord.pts = static_cast<long>(eventsExt[i].mmtpRecord().pts);
+ tunerMmtpRecord.mpuSequenceNumber =
+ static_cast<int>(eventsExt[i].mmtpRecord().mpuSequenceNumber);
+ tunerMmtpRecord.firstMbInSlice =
+ static_cast<int>(eventsExt[i].mmtpRecord().firstMbInSlice);
+ tunerMmtpRecord.tsIndexMask = static_cast<int>(eventsExt[i].mmtpRecord().tsIndexMask);
+ } else {
+ tunerMmtpRecord.isExtended = false;
+ }
+
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::mmtpRecord>(move(tunerMmtpRecord));
+ res.push_back(move(tunerEvent));
+ }
+}
+
+void TunerFilter::FilterCallback::getDownloadEvent(
+ vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
+ for (DemuxFilterEvent::Event e : events) {
+ DemuxFilterDownloadEvent downloadEvent = e.download();
+ TunerFilterDownloadEvent tunerDownload;
+
+ tunerDownload.itemId = static_cast<int>(downloadEvent.itemId);
+ tunerDownload.itemFragmentIndex = static_cast<int>(downloadEvent.itemFragmentIndex);
+ tunerDownload.mpuSequenceNumber = static_cast<int>(downloadEvent.mpuSequenceNumber);
+ tunerDownload.lastItemFragmentIndex = static_cast<int>(downloadEvent.lastItemFragmentIndex);
+ tunerDownload.dataLength = static_cast<char>(downloadEvent.dataLength);
+
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::download>(move(tunerDownload));
+ res.push_back(move(tunerEvent));
+ }
+}
+
+void TunerFilter::FilterCallback::getIpPayloadEvent(
+ vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
+ for (DemuxFilterEvent::Event e : events) {
+ DemuxFilterIpPayloadEvent ipPayloadEvent = e.ipPayload();
+ TunerFilterIpPayloadEvent tunerIpPayload;
+
+ tunerIpPayload.dataLength = static_cast<char>(ipPayloadEvent.dataLength);
+
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::ipPayload>(move(tunerIpPayload));
+ res.push_back(move(tunerEvent));
+ }
+}
+
+void TunerFilter::FilterCallback::getTemiEvent(
+ vector<DemuxFilterEvent::Event>& events, vector<TunerFilterEvent>& res) {
+ for (DemuxFilterEvent::Event e : events) {
+ DemuxFilterTemiEvent temiEvent = e.temi();
+ TunerFilterTemiEvent tunerTemi;
+
+ tunerTemi.pts = static_cast<long>(temiEvent.pts);
+ tunerTemi.descrTag = static_cast<int8_t>(temiEvent.descrTag);
+ vector<uint8_t> descrData = temiEvent.descrData;
+ tunerTemi.descrData.resize(descrData.size());
+ copy(descrData.begin(), descrData.end(), tunerTemi.descrData.begin());
+
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::temi>(move(tunerTemi));
+ res.push_back(move(tunerEvent));
+ }
+}
+
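+// A monitor event reports either the scrambling status or the IP filter context id (cid).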
+void TunerFilter::FilterCallback::getMonitorEvent(
+ vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
+ DemuxFilterMonitorEvent monitorEvent = eventsExt[0].monitorEvent();
+ TunerFilterMonitorEvent tunerMonitor;
+
+ switch (monitorEvent.getDiscriminator()) {
+ case DemuxFilterMonitorEvent::hidl_discriminator::scramblingStatus: {
+ tunerMonitor.set<TunerFilterMonitorEvent::scramblingStatus>(
+ static_cast<int>(monitorEvent.scramblingStatus()));
+ break;
+ }
+ case DemuxFilterMonitorEvent::hidl_discriminator::cid: {
+ tunerMonitor.set<TunerFilterMonitorEvent::cid>(static_cast<int>(monitorEvent.cid()));
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::monitor>(move(tunerMonitor));
+ res.push_back(move(tunerEvent));
+}
+
+void TunerFilter::FilterCallback::getRestartEvent(
+ vector<DemuxFilterEventExt::Event>& eventsExt, vector<TunerFilterEvent>& res) {
+ TunerFilterEvent tunerEvent;
+ tunerEvent.set<TunerFilterEvent::startId>(static_cast<int>(eventsExt[0].startId()));
+ res.push_back(move(tunerEvent));
+}
} // namespace android
diff --git a/services/tuner/TunerFilter.h b/services/tuner/TunerFilter.h
index 7f5838c..d12b7ac 100644
--- a/services/tuner/TunerFilter.h
+++ b/services/tuner/TunerFilter.h
@@ -19,40 +19,96 @@
#include <aidl/android/media/tv/tuner/BnTunerFilter.h>
#include <aidl/android/media/tv/tuner/ITunerFilterCallback.h>
-#include <android/hardware/tv/tuner/1.1/IFilter.h>
+#include <aidlcommonsupport/NativeHandle.h>
#include <android/hardware/tv/tuner/1.0/ITuner.h>
+#include <android/hardware/tv/tuner/1.1/IFilter.h>
+#include <android/hardware/tv/tuner/1.1/IFilterCallback.h>
+#include <android/hardware/tv/tuner/1.1/types.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <fmq/ConvertMQDescriptors.h>
+#include <fmq/MessageQueue.h>
using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::hardware::common::fmq::MQDescriptor;
+using ::aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using ::aidl::android::media::tv::tuner::BnTunerFilter;
using ::aidl::android::media::tv::tuner::ITunerFilterCallback;
+using ::aidl::android::media::tv::tuner::TunerDemuxIpAddress;
using ::aidl::android::media::tv::tuner::TunerFilterConfiguration;
+using ::aidl::android::media::tv::tuner::TunerFilterDownloadEvent;
+using ::aidl::android::media::tv::tuner::TunerFilterIpPayloadEvent;
using ::aidl::android::media::tv::tuner::TunerFilterEvent;
using ::aidl::android::media::tv::tuner::TunerFilterMediaEvent;
+using ::aidl::android::media::tv::tuner::TunerFilterMmtpRecordEvent;
+using ::aidl::android::media::tv::tuner::TunerFilterMonitorEvent;
+using ::aidl::android::media::tv::tuner::TunerFilterPesEvent;
+using ::aidl::android::media::tv::tuner::TunerFilterScIndexMask;
+using ::aidl::android::media::tv::tuner::TunerFilterSectionEvent;
+using ::aidl::android::media::tv::tuner::TunerFilterSharedHandleInfo;
using ::aidl::android::media::tv::tuner::TunerFilterSettings;
+using ::aidl::android::media::tv::tuner::TunerFilterTemiEvent;
+using ::aidl::android::media::tv::tuner::TunerFilterTsRecordEvent;
+using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
using ::android::hardware::Void;
+using ::android::hardware::hidl_array;
+using ::android::hardware::tv::tuner::V1_0::DemuxAlpFilterSettings;
using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadEvent;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterDownloadSettings;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterIpPayloadEvent;
using ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
using ::android::hardware::tv::tuner::V1_0::DemuxFilterMediaEvent;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterMmtpRecordEvent;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterPesDataSettings;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterPesEvent;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterRecordSettings;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionEvent;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterSectionSettings;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
using ::android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterTemiEvent;
+using ::android::hardware::tv::tuner::V1_0::DemuxFilterTsRecordEvent;
+using ::android::hardware::tv::tuner::V1_0::DemuxIpFilterSettings;
+using ::android::hardware::tv::tuner::V1_0::DemuxMmtpFilterSettings;
+using ::android::hardware::tv::tuner::V1_0::DemuxTlvFilterSettings;
+using ::android::hardware::tv::tuner::V1_0::DemuxTsFilterSettings;
+using ::android::hardware::tv::tuner::V1_0::DemuxPid;
using ::android::hardware::tv::tuner::V1_0::IFilter;
-using ::android::hardware::tv::tuner::V1_0::IFilterCallback;
-
+using ::android::hardware::tv::tuner::V1_1::AvStreamType;
+using ::android::hardware::tv::tuner::V1_1::DemuxFilterEventExt;
+using ::android::hardware::tv::tuner::V1_1::DemuxFilterMonitorEvent;
+using ::android::hardware::tv::tuner::V1_1::DemuxFilterTsRecordEventExt;
+using ::android::hardware::tv::tuner::V1_1::IFilterCallback;
namespace android {
+using MQDesc = MQDescriptorSync<uint8_t>;
+using AidlMQDesc = MQDescriptor<int8_t, SynchronizedReadWrite>;
+
+const static int IP_V4_LENGTH = 4;
+const static int IP_V6_LENGTH = 16;
+
class TunerFilter : public BnTunerFilter {
public:
- TunerFilter(sp<IFilter> filter, sp<IFilterCallback> callback);
+ TunerFilter(sp<IFilter> filter, sp<IFilterCallback> callback, int mainType, int subType);
virtual ~TunerFilter();
Status getId(int32_t* _aidl_return) override;
Status getId64Bit(int64_t* _aidl_return) override;
+ Status getQueueDesc(AidlMQDesc* _aidl_return) override;
Status configure(const TunerFilterConfiguration& config) override;
+ Status configureMonitorEvent(int monitorEventType) override;
+ Status configureIpFilterContextId(int cid) override;
+ Status configureAvStreamType(int avStreamType) override;
+ Status getAvSharedHandleInfo(TunerFilterSharedHandleInfo* _aidl_return) override;
+ Status releaseAvHandle(const ::aidl::android::hardware::common::NativeHandle& handle,
+ int64_t avDataId) override;
+ Status setDataSource(const std::shared_ptr<ITunerFilter>& filter) override;
Status start() override;
Status stop() override;
Status flush() override;
+ Status close() override;
sp<IFilter> getHalFilter();
struct FilterCallback : public IFilterCallback {
@@ -60,20 +116,76 @@
: mTunerFilterCallback(tunerFilterCallback) {};
virtual Return<void> onFilterEvent(const DemuxFilterEvent& filterEvent);
+ virtual Return<void> onFilterEvent_1_1(const DemuxFilterEvent& filterEvent,
+ const DemuxFilterEventExt& filterEventExt);
virtual Return<void> onFilterStatus(DemuxFilterStatus status);
+
+ void getAidlFilterEvent(std::vector<DemuxFilterEvent::Event>& events,
+ std::vector<DemuxFilterEventExt::Event>& eventsExt,
+ std::vector<TunerFilterEvent>& tunerEvent);
+
void getMediaEvent(
std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
+ void getSectionEvent(
+ std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
+ void getPesEvent(
+ std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
+ void getTsRecordEvent(
+ std::vector<DemuxFilterEvent::Event>& events,
+ std::vector<DemuxFilterEventExt::Event>& eventsExt,
+ std::vector<TunerFilterEvent>& res);
+ void getMmtpRecordEvent(
+ std::vector<DemuxFilterEvent::Event>& events,
+ std::vector<DemuxFilterEventExt::Event>& eventsExt,
+ std::vector<TunerFilterEvent>& res);
+ void getDownloadEvent(
+ std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
+ void getIpPayloadEvent(
+ std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
+ void getTemiEvent(
+ std::vector<DemuxFilterEvent::Event>& events, std::vector<TunerFilterEvent>& res);
+ void getMonitorEvent(
+ std::vector<DemuxFilterEventExt::Event>& eventsExt,
+ std::vector<TunerFilterEvent>& res);
+ void getRestartEvent(
+ std::vector<DemuxFilterEventExt::Event>& eventsExt,
+ std::vector<TunerFilterEvent>& res);
std::shared_ptr<ITunerFilterCallback> mTunerFilterCallback;
};
private:
DemuxFilterAvSettings getAvSettings(const TunerFilterSettings& settings);
+ DemuxFilterSectionSettings getSectionSettings(const TunerFilterSettings& settings);
+ DemuxFilterPesDataSettings getPesDataSettings(const TunerFilterSettings& settings);
+ DemuxFilterRecordSettings getRecordSettings(const TunerFilterSettings& settings);
+ DemuxFilterDownloadSettings getDownloadSettings(const TunerFilterSettings& settings);
+
+ bool isAudioFilter();
+ bool isVideoFilter();
+ bool getHidlAvStreamType(int avStreamType, AvStreamType& type);
+
+ void getHidlTsSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
+ void getHidlMmtpSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
+ void getHidlIpSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
+ void getHidlTlvSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
+ void getHidlAlpSettings(
+ const TunerFilterConfiguration& config, DemuxFilterSettings& settings);
+
+ hidl_array<uint8_t, IP_V4_LENGTH> getIpV4Address(TunerDemuxIpAddress addr);
+ hidl_array<uint8_t, IP_V6_LENGTH> getIpV6Address(TunerDemuxIpAddress addr);
+
sp<IFilter> mFilter;
sp<::android::hardware::tv::tuner::V1_1::IFilter> mFilter_1_1;
sp<IFilterCallback> mFilterCallback;
int32_t mId;
int64_t mId64Bit;
+ int mMainType;
+ int mSubType;
};
} // namespace android
diff --git a/services/tuner/TunerFrontend.cpp b/services/tuner/TunerFrontend.cpp
index e92489d..a919f98 100644
--- a/services/tuner/TunerFrontend.cpp
+++ b/services/tuner/TunerFrontend.cpp
@@ -20,6 +20,8 @@
using ::aidl::android::media::tv::tuner::TunerFrontendAtsc3PlpSettings;
using ::aidl::android::media::tv::tuner::TunerFrontendScanAtsc3PlpInfo;
+using ::aidl::android::media::tv::tuner::TunerFrontendStatusAtsc3PlpInfo;
+using ::aidl::android::media::tv::tuner::TunerFrontendUnionSettings;
using ::android::hardware::tv::tuner::V1_0::FrontendAnalogSifStandard;
using ::android::hardware::tv::tuner::V1_0::FrontendAnalogType;
using ::android::hardware::tv::tuner::V1_0::FrontendAtscModulation;
@@ -64,11 +66,29 @@
using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtMode;
using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtModulation;
using ::android::hardware::tv::tuner::V1_0::FrontendIsdbtSettings;
+using ::android::hardware::tv::tuner::V1_0::FrontendModulationStatus;
using ::android::hardware::tv::tuner::V1_0::FrontendScanAtsc3PlpInfo;
using ::android::hardware::tv::tuner::V1_0::FrontendScanType;
-using ::android::hardware::tv::tuner::V1_0::FrontendSettings;;
+using ::android::hardware::tv::tuner::V1_0::FrontendStatusType;
using ::android::hardware::tv::tuner::V1_0::Result;
+using ::android::hardware::tv::tuner::V1_1::FrontendAnalogAftFlag;
+using ::android::hardware::tv::tuner::V1_1::FrontendBandwidth;
+using ::android::hardware::tv::tuner::V1_1::FrontendCableTimeInterleaveMode;
+using ::android::hardware::tv::tuner::V1_1::FrontendDvbcBandwidth;
+using ::android::hardware::tv::tuner::V1_1::FrontendDtmbBandwidth;
+using ::android::hardware::tv::tuner::V1_1::FrontendDtmbCodeRate;
+using ::android::hardware::tv::tuner::V1_1::FrontendDtmbGuardInterval;
+using ::android::hardware::tv::tuner::V1_1::FrontendDtmbModulation;
+using ::android::hardware::tv::tuner::V1_1::FrontendDtmbTimeInterleaveMode;
+using ::android::hardware::tv::tuner::V1_1::FrontendDtmbTransmissionMode;
+using ::android::hardware::tv::tuner::V1_1::FrontendDvbsScanType;
+using ::android::hardware::tv::tuner::V1_1::FrontendGuardInterval;
+using ::android::hardware::tv::tuner::V1_1::FrontendInterleaveMode;
using ::android::hardware::tv::tuner::V1_1::FrontendModulation;
+using ::android::hardware::tv::tuner::V1_1::FrontendRollOff;
+using ::android::hardware::tv::tuner::V1_1::FrontendTransmissionMode;
+using ::android::hardware::tv::tuner::V1_1::FrontendSpectralInversion;
+using ::android::hardware::tv::tuner::V1_1::FrontendStatusTypeExt1_1;
namespace android {
@@ -80,6 +100,7 @@
TunerFrontend::~TunerFrontend() {
mFrontend = NULL;
+ mFrontend_1_1 = NULL;
mId = -1;
}
@@ -103,12 +124,44 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
-Status TunerFrontend::tune(const TunerFrontendSettings& /*settings*/) {
- return Status::ok();
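+// Tune through IFrontend 1.1 when the settings carry extended fields, otherwise through the 1.0 interface.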
+Status TunerFrontend::tune(const TunerFrontendSettings& settings) {
+ if (mFrontend == NULL) {
+ ALOGE("IFrontend is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result status;
+ FrontendSettings frontendSettings = getHidlFrontendSettings(settings);
+ if (settings.isExtended) {
+ if (mFrontend_1_1 == NULL) {
+ ALOGE("IFrontend_1_1 is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+ FrontendSettingsExt1_1 frontendSettingsExt = getHidlFrontendSettingsExt(settings);
+ status = mFrontend_1_1->tune_1_1(frontendSettings, frontendSettingsExt);
+ } else {
+ status = mFrontend->tune(frontendSettings);
+ }
+
+ if (status == Result::SUCCESS) {
+ return Status::ok();
+ }
+
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
Status TunerFrontend::stopTune() {
- return Status::ok();
+ if (mFrontend == NULL) {
+ ALOGD("IFrontend is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result status = mFrontend->stopTune();
+ if (status == Result::SUCCESS) {
+ return Status::ok();
+ }
+
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
Status TunerFrontend::scan(const TunerFrontendSettings& settings, int frontendScanType) {
@@ -117,167 +170,21 @@
return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
}
- // TODO: extend TunerFrontendSettings to use 1.1 types
- FrontendSettings frontendSettings;
- switch (settings.getTag()) {
- case TunerFrontendSettings::analog:
- frontendSettings.analog({
- .frequency = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::analog>().frequency),
- .type = static_cast<FrontendAnalogType>(
- settings.get<TunerFrontendSettings::analog>().signalType),
- .sifStandard = static_cast<FrontendAnalogSifStandard>(
- settings.get<TunerFrontendSettings::analog>().sifStandard),
- });
- break;
- case TunerFrontendSettings::atsc:
- frontendSettings.atsc({
- .frequency = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::atsc>().frequency),
- .modulation = static_cast<FrontendAtscModulation>(
- settings.get<TunerFrontendSettings::atsc>().modulation),
- });
- break;
- case TunerFrontendSettings::atsc3:
- frontendSettings.atsc3({
- .frequency = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::atsc3>().frequency),
- .bandwidth = static_cast<FrontendAtsc3Bandwidth>(
- settings.get<TunerFrontendSettings::atsc3>().bandwidth),
- .demodOutputFormat = static_cast<FrontendAtsc3DemodOutputFormat>(
- settings.get<TunerFrontendSettings::atsc3>().demodOutputFormat),
- .plpSettings = getAtsc3PlpSettings(settings.get<TunerFrontendSettings::atsc3>()),
- });
- break;
- case TunerFrontendSettings::cable:
- frontendSettings.dvbc({
- .frequency = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::cable>().frequency),
- .modulation = static_cast<FrontendDvbcModulation>(
- settings.get<TunerFrontendSettings::cable>().modulation),
- .fec = static_cast<FrontendInnerFec>(
- settings.get<TunerFrontendSettings::cable>().innerFec),
- .symbolRate = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::cable>().symbolRate),
- .outerFec = static_cast<FrontendDvbcOuterFec>(
- settings.get<TunerFrontendSettings::cable>().outerFec),
- .annex = static_cast<FrontendDvbcAnnex>(
- settings.get<TunerFrontendSettings::cable>().annex),
- .spectralInversion = static_cast<FrontendDvbcSpectralInversion>(
- settings.get<TunerFrontendSettings::cable>().spectralInversion),
- });
- break;
- case TunerFrontendSettings::dvbs:
- frontendSettings.dvbs({
- .frequency = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::dvbs>().frequency),
- .modulation = static_cast<FrontendDvbsModulation>(
- settings.get<TunerFrontendSettings::dvbs>().modulation),
- .coderate = getDvbsCodeRate(
- settings.get<TunerFrontendSettings::dvbs>().codeRate),
- .symbolRate = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::dvbs>().symbolRate),
- .rolloff = static_cast<FrontendDvbsRolloff>(
- settings.get<TunerFrontendSettings::dvbs>().rolloff),
- .pilot = static_cast<FrontendDvbsPilot>(
- settings.get<TunerFrontendSettings::dvbs>().pilot),
- .inputStreamId = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::dvbs>().inputStreamId),
- .standard = static_cast<FrontendDvbsStandard>(
- settings.get<TunerFrontendSettings::dvbs>().standard),
- .vcmMode = static_cast<FrontendDvbsVcmMode>(
- settings.get<TunerFrontendSettings::dvbs>().vcm),
- });
- break;
- case TunerFrontendSettings::dvbt:
- frontendSettings.dvbt({
- .frequency = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::dvbt>().frequency),
- .transmissionMode = static_cast<FrontendDvbtTransmissionMode>(
- settings.get<TunerFrontendSettings::dvbt>().transmissionMode),
- .bandwidth = static_cast<FrontendDvbtBandwidth>(
- settings.get<TunerFrontendSettings::dvbt>().bandwidth),
- .constellation = static_cast<FrontendDvbtConstellation>(
- settings.get<TunerFrontendSettings::dvbt>().constellation),
- .hierarchy = static_cast<FrontendDvbtHierarchy>(
- settings.get<TunerFrontendSettings::dvbt>().hierarchy),
- .hpCoderate = static_cast<FrontendDvbtCoderate>(
- settings.get<TunerFrontendSettings::dvbt>().hpCodeRate),
- .lpCoderate = static_cast<FrontendDvbtCoderate>(
- settings.get<TunerFrontendSettings::dvbt>().lpCodeRate),
- .guardInterval = static_cast<FrontendDvbtGuardInterval>(
- settings.get<TunerFrontendSettings::dvbt>().guardInterval),
- .isHighPriority = settings.get<TunerFrontendSettings::dvbt>().isHighPriority,
- .standard = static_cast<FrontendDvbtStandard>(
- settings.get<TunerFrontendSettings::dvbt>().standard),
- .isMiso = settings.get<TunerFrontendSettings::dvbt>().isMiso,
- .plpMode = static_cast<FrontendDvbtPlpMode>(
- settings.get<TunerFrontendSettings::dvbt>().plpMode),
- .plpId = static_cast<uint8_t>(
- settings.get<TunerFrontendSettings::dvbt>().plpId),
- .plpGroupId = static_cast<uint8_t>(
- settings.get<TunerFrontendSettings::dvbt>().plpGroupId),
- });
- break;
- case TunerFrontendSettings::isdbs:
- frontendSettings.isdbs({
- .frequency = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::isdbs>().frequency),
- .streamId = static_cast<uint16_t>(
- settings.get<TunerFrontendSettings::isdbs>().streamId),
- .streamIdType = static_cast<FrontendIsdbsStreamIdType>(
- settings.get<TunerFrontendSettings::isdbs>().streamIdType),
- .modulation = static_cast<FrontendIsdbsModulation>(
- settings.get<TunerFrontendSettings::isdbs>().modulation),
- .coderate = static_cast<FrontendIsdbsCoderate>(
- settings.get<TunerFrontendSettings::isdbs>().codeRate),
- .symbolRate = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::isdbs>().symbolRate),
- .rolloff = static_cast<FrontendIsdbsRolloff>(
- settings.get<TunerFrontendSettings::isdbs>().rolloff),
- });
- break;
- case TunerFrontendSettings::isdbs3:
- frontendSettings.isdbs3({
- .frequency = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::isdbs3>().frequency),
- .streamId = static_cast<uint16_t>(
- settings.get<TunerFrontendSettings::isdbs3>().streamId),
- .streamIdType = static_cast<FrontendIsdbsStreamIdType>(
- settings.get<TunerFrontendSettings::isdbs3>().streamIdType),
- .modulation = static_cast<FrontendIsdbs3Modulation>(
- settings.get<TunerFrontendSettings::isdbs3>().modulation),
- .coderate = static_cast<FrontendIsdbs3Coderate>(
- settings.get<TunerFrontendSettings::isdbs3>().codeRate),
- .symbolRate = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::isdbs3>().symbolRate),
- .rolloff = static_cast<FrontendIsdbs3Rolloff>(
- settings.get<TunerFrontendSettings::isdbs3>().rolloff),
- });
- break;
- case TunerFrontendSettings::isdbt:
- frontendSettings.isdbt({
- .frequency = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::isdbt>().frequency),
- .modulation = static_cast<FrontendIsdbtModulation>(
- settings.get<TunerFrontendSettings::isdbt>().modulation),
- .bandwidth = static_cast<FrontendIsdbtBandwidth>(
- settings.get<TunerFrontendSettings::isdbt>().bandwidth),
- .mode = static_cast<FrontendIsdbtMode>(
- settings.get<TunerFrontendSettings::isdbt>().mode),
- .coderate = static_cast<FrontendIsdbtCoderate>(
- settings.get<TunerFrontendSettings::isdbt>().codeRate),
- .guardInterval = static_cast<FrontendIsdbtGuardInterval>(
- settings.get<TunerFrontendSettings::isdbt>().guardInterval),
- .serviceAreaId = static_cast<uint32_t>(
- settings.get<TunerFrontendSettings::isdbt>().serviceAreaId),
- });
- break;
- default:
- break;
+ Result status;
+ FrontendSettings frontendSettings = getHidlFrontendSettings(settings);
+ if (settings.isExtended) {
+ if (mFrontend_1_1 == NULL) {
+ ALOGE("IFrontend_1_1 is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+ FrontendSettingsExt1_1 frontendSettingsExt = getHidlFrontendSettingsExt(settings);
+ status = mFrontend_1_1->scan_1_1(frontendSettings,
+ static_cast<FrontendScanType>(frontendScanType), frontendSettingsExt);
+ } else {
+ status = mFrontend->scan(
+ frontendSettings, static_cast<FrontendScanType>(frontendScanType));
}
- Result status = mFrontend->scan(
- frontendSettings, static_cast<FrontendScanType>(frontendScanType));
+
if (status == Result::SUCCESS) {
return Status::ok();
}
@@ -286,7 +193,17 @@
}
Status TunerFrontend::stopScan() {
- return Status::ok();
+ if (mFrontend == NULL) {
+ ALOGD("IFrontend is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result status = mFrontend->stopScan();
+ if (status == Result::SUCCESS) {
+ return Status::ok();
+ }
+
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
}
Status TunerFrontend::setLnb(int /*lnbHandle*/) {
@@ -298,11 +215,68 @@
}
Status TunerFrontend::close() {
+ if (mFrontend == NULL) {
+ ALOGD("IFrontend is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result status = mFrontend->close();
+ if (status == Result::SUCCESS) {
+ return Status::ok();
+ }
+
+ return Status::fromServiceSpecificError(static_cast<int32_t>(status));
+}
+
+Status TunerFrontend::getStatus(const vector<int32_t>& statusTypes,
+ vector<TunerFrontendStatus>* _aidl_return) {
+ if (mFrontend == NULL) {
+ ALOGD("IFrontend is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result res;
+ vector<FrontendStatus> status;
+ vector<FrontendStatusType> types;
+ for (auto s : statusTypes) {
+ types.push_back(static_cast<FrontendStatusType>(s));
+ }
+
+ mFrontend->getStatus(types, [&](Result r, const hidl_vec<FrontendStatus>& s) {
+ res = r;
+ status = s;
+ });
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ getAidlFrontendStatus(status, *_aidl_return);
return Status::ok();
}
-Status TunerFrontend::getStatus(const vector<int32_t>& /*statusTypes*/,
- vector<TunerFrontendStatus>* /*_aidl_return*/) {
+Status TunerFrontend::getStatusExtended_1_1(const vector<int32_t>& statusTypes,
+ vector<TunerFrontendStatus>* _aidl_return) {
+ if (mFrontend_1_1 == NULL) {
+ ALOGD("IFrontend_1_1 is not initialized");
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ Result res;
+ vector<FrontendStatusExt1_1> status;
+ vector<FrontendStatusTypeExt1_1> types;
+ for (auto s : statusTypes) {
+ types.push_back(static_cast<FrontendStatusTypeExt1_1>(s));
+ }
+
+ mFrontend_1_1->getStatusExt1_1(types, [&](Result r, const hidl_vec<FrontendStatusExt1_1>& s) {
+ res = r;
+ status = s;
+ });
+ if (res != Result::SUCCESS) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+ }
+
+ getAidlFrontendStatusExt(status, *_aidl_return);
return Status::ok();
}
@@ -394,7 +368,7 @@
vector<TunerFrontendScanAtsc3PlpInfo> tunerPlpInfos;
for (int i = 0; i < plpInfos.size(); i++) {
auto info = plpInfos[i];
- int plpId = (int) info.plpId;
+ int8_t plpId = (int8_t) info.plpId;
bool lls = (bool) info.bLlsFlag;
TunerFrontendScanAtsc3PlpInfo plpInfo{
.plpId = plpId,
@@ -420,24 +394,34 @@
case FrontendScanMessageTypeExt1_1::MODULATION: {
FrontendModulation m = message.modulation();
int modulation;
- if (m.getDiscriminator() == FrontendModulation::hidl_discriminator::dvbc) {
- modulation = (int) m.dvbc();
- } else if (m.getDiscriminator() == FrontendModulation::hidl_discriminator::dvbt) {
- modulation = (int) m.dvbt();
- } else if (m.getDiscriminator() == FrontendModulation::hidl_discriminator::dvbs) {
- modulation = (int) m.dvbs();
- } else if (m.getDiscriminator() == FrontendModulation::hidl_discriminator::isdbs) {
- modulation = (int) m.isdbs();
- } else if (m.getDiscriminator() == FrontendModulation::hidl_discriminator::isdbs3) {
- modulation = (int) m.isdbs3();
- } else if (m.getDiscriminator() == FrontendModulation::hidl_discriminator::isdbt) {
- modulation = (int) m.isdbt();
- } else if (m.getDiscriminator() == FrontendModulation::hidl_discriminator::atsc) {
- modulation = (int) m.atsc();
- } else if (m.getDiscriminator() == FrontendModulation::hidl_discriminator::atsc3) {
- modulation = (int) m.atsc3();
- } else if (m.getDiscriminator() == FrontendModulation::hidl_discriminator::dtmb) {
- modulation = (int) m.dtmb();
+ switch (m.getDiscriminator()) {
+ case FrontendModulation::hidl_discriminator::dvbc:
+ modulation = (int) m.dvbc();
+ break;
+ case FrontendModulation::hidl_discriminator::dvbt:
+ modulation = (int) m.dvbt();
+ break;
+ case FrontendModulation::hidl_discriminator::dvbs:
+ modulation = (int) m.dvbs();
+ break;
+ case FrontendModulation::hidl_discriminator::isdbs:
+ modulation = (int) m.isdbs();
+ break;
+ case FrontendModulation::hidl_discriminator::isdbs3:
+ modulation = (int) m.isdbs3();
+ break;
+ case FrontendModulation::hidl_discriminator::isdbt:
+ modulation = (int) m.isdbt();
+ break;
+ case FrontendModulation::hidl_discriminator::atsc:
+ modulation = (int) m.atsc();
+ break;
+ case FrontendModulation::hidl_discriminator::atsc3:
+ modulation = (int) m.atsc3();
+ break;
+ case FrontendModulation::hidl_discriminator::dtmb:
+ modulation = (int) m.dtmb();
+ break;
}
scanMessage.set<TunerFrontendScanMessage::modulation>(modulation);
break;
@@ -459,6 +443,341 @@
/////////////// TunerFrontend Helper Methods ///////////////////////
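+// Convert each 1.0 FrontendStatus union entry into the matching TunerFrontendStatus field.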
+void TunerFrontend::getAidlFrontendStatus(
+ vector<FrontendStatus>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus) {
+ for (FrontendStatus s : hidlStatus) {
+ TunerFrontendStatus status;
+ switch (s.getDiscriminator()) {
+ case FrontendStatus::hidl_discriminator::isDemodLocked: {
+ status.set<TunerFrontendStatus::isDemodLocked>(s.isDemodLocked());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::snr: {
+ status.set<TunerFrontendStatus::snr>((int)s.snr());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::ber: {
+ status.set<TunerFrontendStatus::ber>((int)s.ber());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::per: {
+ status.set<TunerFrontendStatus::per>((int)s.per());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::preBer: {
+ status.set<TunerFrontendStatus::preBer>((int)s.preBer());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::signalQuality: {
+ status.set<TunerFrontendStatus::signalQuality>((int)s.signalQuality());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::signalStrength: {
+ status.set<TunerFrontendStatus::signalStrength>((int)s.signalStrength());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::symbolRate: {
+ status.set<TunerFrontendStatus::symbolRate>((int)s.symbolRate());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::innerFec: {
+ status.set<TunerFrontendStatus::innerFec>((long)s.innerFec());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::modulation: {
+ switch (s.modulation().getDiscriminator()) {
+ case FrontendModulationStatus::hidl_discriminator::dvbc:
+ status.set<TunerFrontendStatus::modulation>((int)s.modulation().dvbc());
+ aidlStatus.push_back(status);
+ break;
+ case FrontendModulationStatus::hidl_discriminator::dvbs:
+ status.set<TunerFrontendStatus::modulation>((int)s.modulation().dvbs());
+ aidlStatus.push_back(status);
+ break;
+ case FrontendModulationStatus::hidl_discriminator::isdbs:
+ status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbs());
+ aidlStatus.push_back(status);
+ break;
+ case FrontendModulationStatus::hidl_discriminator::isdbs3:
+ status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbs3());
+ aidlStatus.push_back(status);
+ break;
+ case FrontendModulationStatus::hidl_discriminator::isdbt:
+ status.set<TunerFrontendStatus::modulation>((int)s.modulation().isdbt());
+ aidlStatus.push_back(status);
+ break;
+ }
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::inversion: {
+ status.set<TunerFrontendStatus::inversion>((int)s.inversion());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::lnbVoltage: {
+ status.set<TunerFrontendStatus::lnbVoltage>((int)s.lnbVoltage());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::plpId: {
+ status.set<TunerFrontendStatus::plpId>((int8_t)s.plpId());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::isEWBS: {
+ status.set<TunerFrontendStatus::isEWBS>(s.isEWBS());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::agc: {
+ status.set<TunerFrontendStatus::agc>((int8_t)s.agc());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::isLnaOn: {
+ status.set<TunerFrontendStatus::isLnaOn>(s.isLnaOn());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::isLayerError: {
+ vector<bool> e(s.isLayerError().begin(), s.isLayerError().end());
+ status.set<TunerFrontendStatus::isLayerError>(e);
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::mer: {
+ status.set<TunerFrontendStatus::mer>((int)s.mer());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::freqOffset: {
+ status.set<TunerFrontendStatus::freqOffset>((int)s.freqOffset());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::hierarchy: {
+ status.set<TunerFrontendStatus::hierarchy>((int)s.hierarchy());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::isRfLocked: {
+ status.set<TunerFrontendStatus::isRfLocked>(s.isRfLocked());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatus::hidl_discriminator::plpInfo: {
+ vector<TunerFrontendStatusAtsc3PlpInfo> info;
+ for (auto i : s.plpInfo()) {
+ info.push_back({
+ .plpId = (int8_t)i.plpId,
+ .isLocked = i.isLocked,
+ .uec = (int)i.uec,
+ });
+ }
+ status.set<TunerFrontendStatus::plpInfo>(info);
+ aidlStatus.push_back(status);
+ break;
+ }
+ }
+ }
+}
+
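+// Convert the 1.1 extended status entries (modulations, bandwidth, interleaving, ...) to AIDL.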
+void TunerFrontend::getAidlFrontendStatusExt(
+ vector<FrontendStatusExt1_1>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus) {
+ for (FrontendStatusExt1_1 s : hidlStatus) {
+ TunerFrontendStatus status;
+ switch (s.getDiscriminator()) {
+ case FrontendStatusExt1_1::hidl_discriminator::modulations: {
+ vector<int> aidlMod;
+ for (auto m : s.modulations()) {
+ switch (m.getDiscriminator()) {
+ case FrontendModulation::hidl_discriminator::dvbc:
+ aidlMod.push_back((int)m.dvbc());
+ break;
+ case FrontendModulation::hidl_discriminator::dvbs:
+ aidlMod.push_back((int)m.dvbs());
+ break;
+ case FrontendModulation::hidl_discriminator::dvbt:
+ aidlMod.push_back((int)m.dvbt());
+ break;
+ case FrontendModulation::hidl_discriminator::isdbs:
+ aidlMod.push_back((int)m.isdbs());
+ break;
+ case FrontendModulation::hidl_discriminator::isdbs3:
+ aidlMod.push_back((int)m.isdbs3());
+ break;
+ case FrontendModulation::hidl_discriminator::isdbt:
+ aidlMod.push_back((int)m.isdbt());
+ break;
+ case FrontendModulation::hidl_discriminator::atsc:
+ aidlMod.push_back((int)m.atsc());
+ break;
+ case FrontendModulation::hidl_discriminator::atsc3:
+ aidlMod.push_back((int)m.atsc3());
+ break;
+ case FrontendModulation::hidl_discriminator::dtmb:
+ aidlMod.push_back((int)m.dtmb());
+ break;
+ }
+ }
+ status.set<TunerFrontendStatus::modulations>(aidlMod);
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::bers: {
+ vector<int> b(s.bers().begin(), s.bers().end());
+ status.set<TunerFrontendStatus::bers>(b);
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::codeRates: {
+ vector<int64_t> codeRates;
+ for (auto c : s.codeRates()) {
+ codeRates.push_back((long)c);
+ }
+ status.set<TunerFrontendStatus::codeRates>(codeRates);
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::bandwidth: {
+ switch (s.bandwidth().getDiscriminator()) {
+ case FrontendBandwidth::hidl_discriminator::atsc3:
+ status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().atsc3());
+ break;
+ case FrontendBandwidth::hidl_discriminator::dvbc:
+ status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dvbc());
+ break;
+ case FrontendBandwidth::hidl_discriminator::dvbt:
+ status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dvbt());
+ break;
+ case FrontendBandwidth::hidl_discriminator::isdbt:
+ status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().isdbt());
+ break;
+ case FrontendBandwidth::hidl_discriminator::dtmb:
+ status.set<TunerFrontendStatus::bandwidth>((int)s.bandwidth().dtmb());
+ break;
+ }
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::interval: {
+ switch (s.interval().getDiscriminator()) {
+ case FrontendGuardInterval::hidl_discriminator::dvbt:
+ status.set<TunerFrontendStatus::interval>((int)s.interval().dvbt());
+ break;
+ case FrontendGuardInterval::hidl_discriminator::isdbt:
+ status.set<TunerFrontendStatus::interval>((int)s.interval().isdbt());
+ break;
+ case FrontendGuardInterval::hidl_discriminator::dtmb:
+ status.set<TunerFrontendStatus::interval>((int)s.interval().dtmb());
+ break;
+ }
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::transmissionMode: {
+ switch (s.transmissionMode().getDiscriminator()) {
+ case FrontendTransmissionMode::hidl_discriminator::dvbt:
+ status.set<TunerFrontendStatus::transmissionMode>(
+ (int)s.transmissionMode().dvbt());
+ break;
+ case FrontendTransmissionMode::hidl_discriminator::isdbt:
+ status.set<TunerFrontendStatus::transmissionMode>(
+ (int)s.transmissionMode().isdbt());
+ break;
+ case FrontendTransmissionMode::hidl_discriminator::dtmb:
+ status.set<TunerFrontendStatus::transmissionMode>(
+ (int)s.transmissionMode().dtmb());
+ break;
+ }
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::uec: {
+ status.set<TunerFrontendStatus::uec>((int)s.uec());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::systemId: {
+ status.set<TunerFrontendStatus::systemId>((char16_t)s.systemId());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::interleaving: {
+ vector<int> aidlInter;
+ for (auto i : s.interleaving()) {
+ switch (i.getDiscriminator()) {
+ case FrontendInterleaveMode::hidl_discriminator::atsc3:
+ aidlInter.push_back((int)i.atsc3());
+ break;
+ case FrontendInterleaveMode::hidl_discriminator::dvbc:
+ aidlInter.push_back((int)i.dvbc());
+ break;
+ case FrontendInterleaveMode::hidl_discriminator::dtmb:
+ aidlInter.push_back((int)i.dtmb());
+ break;
+ }
+ }
+ status.set<TunerFrontendStatus::interleaving>(aidlInter);
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::isdbtSegment: {
+ auto seg = s.isdbtSegment();
+ vector<uint8_t> i(seg.begin(), seg.end());
+ status.set<TunerFrontendStatus::isdbtSegment>(i);
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::tsDataRate: {
+ vector<int> ts(s.tsDataRate().begin(), s.tsDataRate().end());
+ status.set<TunerFrontendStatus::tsDataRate>(ts);
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::rollOff: {
+ switch (s.rollOff().getDiscriminator()) {
+ case FrontendRollOff::hidl_discriminator::dvbs:
+ status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().dvbs());
+ break;
+ case FrontendRollOff::hidl_discriminator::isdbs:
+ status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().isdbs());
+ break;
+ case FrontendRollOff::hidl_discriminator::isdbs3:
+ status.set<TunerFrontendStatus::rollOff>((int)s.rollOff().isdbs3());
+ break;
+ }
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::isMiso: {
+ status.set<TunerFrontendStatus::isMiso>(s.isMiso());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::isLinear: {
+ status.set<TunerFrontendStatus::isLinear>(s.isLinear());
+ aidlStatus.push_back(status);
+ break;
+ }
+ case FrontendStatusExt1_1::hidl_discriminator::isShortFrames: {
+ status.set<TunerFrontendStatus::isShortFrames>(s.isShortFrames());
+ aidlStatus.push_back(status);
+ break;
+ }
+ }
+ }
+}
+
hidl_vec<FrontendAtsc3PlpSettings> TunerFrontend::getAtsc3PlpSettings(
const TunerFrontendAtsc3Settings& settings) {
int len = settings.plpSettings.size();
@@ -500,4 +819,217 @@
};
return coderate;
}
+
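+// Build the 1.0 FrontendSettings union from the AIDL settings, one case per supported standard.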
+FrontendSettings TunerFrontend::getHidlFrontendSettings(const TunerFrontendSettings& aidlSettings) {
+ auto settings = aidlSettings.settings;
+ FrontendSettings frontendSettings;
+
+ switch (settings.getTag()) {
+ case TunerFrontendUnionSettings::analog: {
+ auto analog = settings.get<TunerFrontendUnionSettings::analog>();
+ frontendSettings.analog({
+ .frequency = static_cast<uint32_t>(analog.frequency),
+ .type = static_cast<FrontendAnalogType>(analog.signalType),
+ .sifStandard = static_cast<FrontendAnalogSifStandard>(analog.sifStandard),
+ });
+ break;
+ }
+ case TunerFrontendUnionSettings::atsc: {
+ auto atsc = settings.get<TunerFrontendUnionSettings::atsc>();
+ frontendSettings.atsc({
+ .frequency = static_cast<uint32_t>(atsc.frequency),
+ .modulation = static_cast<FrontendAtscModulation>(atsc.modulation),
+ });
+ break;
+ }
+ case TunerFrontendUnionSettings::atsc3: {
+ auto atsc3 = settings.get<TunerFrontendUnionSettings::atsc3>();
+ frontendSettings.atsc3({
+ .frequency = static_cast<uint32_t>(atsc3.frequency),
+ .bandwidth = static_cast<FrontendAtsc3Bandwidth>(atsc3.bandwidth),
+ .demodOutputFormat = static_cast<FrontendAtsc3DemodOutputFormat>(
+ atsc3.demodOutputFormat),
+ .plpSettings = getAtsc3PlpSettings(atsc3),
+ });
+ break;
+ }
+ case TunerFrontendUnionSettings::cable: {
+ auto dvbc = settings.get<TunerFrontendUnionSettings::cable>();
+ frontendSettings.dvbc({
+ .frequency = static_cast<uint32_t>(dvbc.frequency),
+ .modulation = static_cast<FrontendDvbcModulation>(dvbc.modulation),
+ .fec = static_cast<FrontendInnerFec>(dvbc.innerFec),
+ .symbolRate = static_cast<uint32_t>(dvbc.symbolRate),
+ .outerFec = static_cast<FrontendDvbcOuterFec>(dvbc.outerFec),
+ .annex = static_cast<FrontendDvbcAnnex>(dvbc.annex),
+ .spectralInversion = static_cast<FrontendDvbcSpectralInversion>(
+ dvbc.spectralInversion),
+ });
+ break;
+ }
+ case TunerFrontendUnionSettings::dvbs: {
+ auto dvbs = settings.get<TunerFrontendUnionSettings::dvbs>();
+ frontendSettings.dvbs({
+ .frequency = static_cast<uint32_t>(dvbs.frequency),
+ .modulation = static_cast<FrontendDvbsModulation>(dvbs.modulation),
+ .coderate = getDvbsCodeRate(dvbs.codeRate),
+ .symbolRate = static_cast<uint32_t>(dvbs.symbolRate),
+ .rolloff = static_cast<FrontendDvbsRolloff>(dvbs.rolloff),
+ .pilot = static_cast<FrontendDvbsPilot>(dvbs.pilot),
+ .inputStreamId = static_cast<uint32_t>(dvbs.inputStreamId),
+ .standard = static_cast<FrontendDvbsStandard>(dvbs.standard),
+ .vcmMode = static_cast<FrontendDvbsVcmMode>(dvbs.vcm),
+ });
+ break;
+ }
+ case TunerFrontendUnionSettings::dvbt: {
+ auto dvbt = settings.get<TunerFrontendUnionSettings::dvbt>();
+ frontendSettings.dvbt({
+ .frequency = static_cast<uint32_t>(dvbt.frequency),
+ .transmissionMode = static_cast<FrontendDvbtTransmissionMode>(
+ dvbt.transmissionMode),
+ .bandwidth = static_cast<FrontendDvbtBandwidth>(dvbt.bandwidth),
+ .constellation = static_cast<FrontendDvbtConstellation>(dvbt.constellation),
+ .hierarchy = static_cast<FrontendDvbtHierarchy>(dvbt.hierarchy),
+ .hpCoderate = static_cast<FrontendDvbtCoderate>(dvbt.hpCodeRate),
+ .lpCoderate = static_cast<FrontendDvbtCoderate>(dvbt.lpCodeRate),
+ .guardInterval = static_cast<FrontendDvbtGuardInterval>(dvbt.guardInterval),
+ .isHighPriority = dvbt.isHighPriority,
+ .standard = static_cast<FrontendDvbtStandard>(dvbt.standard),
+ .isMiso = dvbt.isMiso,
+ .plpMode = static_cast<FrontendDvbtPlpMode>(dvbt.plpMode),
+ .plpId = static_cast<uint8_t>(dvbt.plpId),
+ .plpGroupId = static_cast<uint8_t>(dvbt.plpGroupId),
+ });
+ break;
+ }
+ case TunerFrontendUnionSettings::isdbs: {
+ auto isdbs = settings.get<TunerFrontendUnionSettings::isdbs>();
+ frontendSettings.isdbs({
+ .frequency = static_cast<uint32_t>(isdbs.frequency),
+ .streamId = static_cast<uint16_t>(isdbs.streamId),
+ .streamIdType = static_cast<FrontendIsdbsStreamIdType>(isdbs.streamIdType),
+ .modulation = static_cast<FrontendIsdbsModulation>(isdbs.modulation),
+ .coderate = static_cast<FrontendIsdbsCoderate>(isdbs.codeRate),
+ .symbolRate = static_cast<uint32_t>(isdbs.symbolRate),
+ .rolloff = static_cast<FrontendIsdbsRolloff>(isdbs.rolloff),
+ });
+ break;
+ }
+ case TunerFrontendUnionSettings::isdbs3: {
+ auto isdbs3 = settings.get<TunerFrontendUnionSettings::isdbs3>();
+ frontendSettings.isdbs3({
+ .frequency = static_cast<uint32_t>(isdbs3.frequency),
+ .streamId = static_cast<uint16_t>(isdbs3.streamId),
+ .streamIdType = static_cast<FrontendIsdbsStreamIdType>(isdbs3.streamIdType),
+ .modulation = static_cast<FrontendIsdbs3Modulation>(isdbs3.modulation),
+ .coderate = static_cast<FrontendIsdbs3Coderate>(isdbs3.codeRate),
+ .symbolRate = static_cast<uint32_t>(isdbs3.symbolRate),
+ .rolloff = static_cast<FrontendIsdbs3Rolloff>(isdbs3.rolloff),
+ });
+ break;
+ }
+ case TunerFrontendUnionSettings::isdbt: {
+ auto isdbt = settings.get<TunerFrontendUnionSettings::isdbt>();
+ frontendSettings.isdbt({
+ .frequency = static_cast<uint32_t>(isdbt.frequency),
+ .modulation = static_cast<FrontendIsdbtModulation>(isdbt.modulation),
+ .bandwidth = static_cast<FrontendIsdbtBandwidth>(isdbt.bandwidth),
+ .mode = static_cast<FrontendIsdbtMode>(isdbt.mode),
+ .coderate = static_cast<FrontendIsdbtCoderate>(isdbt.codeRate),
+ .guardInterval = static_cast<FrontendIsdbtGuardInterval>(isdbt.guardInterval),
+ .serviceAreaId = static_cast<uint32_t>(isdbt.serviceAreaId),
+ });
+ break;
+ }
+ default:
+ break;
+ }
+
+ return frontendSettings;
+}
+
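+// Build the 1.1 settings extension; standards without extended fields fall back to noinit().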
+FrontendSettingsExt1_1 TunerFrontend::getHidlFrontendSettingsExt(
+ const TunerFrontendSettings& aidlSettings) {
+ FrontendSettingsExt1_1 frontendSettingsExt{
+ .endFrequency = static_cast<uint32_t>(aidlSettings.endFrequency),
+ .inversion = static_cast<FrontendSpectralInversion>(aidlSettings.inversion),
+ };
+
+ auto settings = aidlSettings.settings;
+ switch (settings.getTag()) {
+ case TunerFrontendUnionSettings::analog: {
+ auto analog = settings.get<TunerFrontendUnionSettings::analog>();
+ if (analog.isExtended) {
+ frontendSettingsExt.settingExt.analog({
+ .aftFlag = static_cast<FrontendAnalogAftFlag>(analog.aftFlag),
+ });
+ } else {
+ frontendSettingsExt.settingExt.noinit();
+ }
+ break;
+ }
+ case TunerFrontendUnionSettings::cable: {
+ auto dvbc = settings.get<TunerFrontendUnionSettings::cable>();
+ if (dvbc.isExtended) {
+ frontendSettingsExt.settingExt.dvbc({
+ .interleaveMode = static_cast<FrontendCableTimeInterleaveMode>(
+ dvbc.interleaveMode),
+ .bandwidth = static_cast<FrontendDvbcBandwidth>(
+ dvbc.bandwidth),
+ });
+ } else {
+ frontendSettingsExt.settingExt.noinit();
+ }
+ break;
+ }
+ case TunerFrontendUnionSettings::dvbs: {
+ auto dvbs = settings.get<TunerFrontendUnionSettings::dvbs>();
+ if (dvbs.isExtended) {
+ frontendSettingsExt.settingExt.dvbs({
+ .scanType = static_cast<FrontendDvbsScanType>(dvbs.scanType),
+ .isDiseqcRxMessage = dvbs.isDiseqcRxMessage,
+ });
+ } else {
+ frontendSettingsExt.settingExt.noinit();
+ }
+ break;
+ }
+ case TunerFrontendUnionSettings::dvbt: {
+ auto dvbt = settings.get<TunerFrontendUnionSettings::dvbt>();
+ if (dvbt.isExtended) {
+ frontendSettingsExt.settingExt.dvbt({
+ .constellation =
+ static_cast<hardware::tv::tuner::V1_1::FrontendDvbtConstellation>(
+ dvbt.constellation),
+ .transmissionMode =
+ static_cast<hardware::tv::tuner::V1_1::FrontendDvbtTransmissionMode>(
+ dvbt.transmissionMode),
+ });
+ } else {
+ frontendSettingsExt.settingExt.noinit();
+ }
+ break;
+ }
+ case TunerFrontendUnionSettings::dtmb: {
+ auto dtmb = settings.get<TunerFrontendUnionSettings::dtmb>();
+ frontendSettingsExt.settingExt.dtmb({
+ .frequency = static_cast<uint32_t>(dtmb.frequency),
+ .transmissionMode = static_cast<FrontendDtmbTransmissionMode>(
+ dtmb.transmissionMode),
+ .bandwidth = static_cast<FrontendDtmbBandwidth>(dtmb.bandwidth),
+ .modulation = static_cast<FrontendDtmbModulation>(dtmb.modulation),
+ .codeRate = static_cast<FrontendDtmbCodeRate>(dtmb.codeRate),
+ .guardInterval = static_cast<FrontendDtmbGuardInterval>(dtmb.guardInterval),
+ .interleaveMode = static_cast<FrontendDtmbTimeInterleaveMode>(dtmb.interleaveMode),
+ });
+ break;
+ }
+ default:
+ frontendSettingsExt.settingExt.noinit();
+ break;
+ }
+
+ return frontendSettingsExt;
+}
} // namespace android
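
For illustration, a minimal sketch of how the two conversion helpers above are typically combined on the service side before handing the settings to the HAL. The member name mFrontend_1_1 and the surrounding method are assumptions for this sketch, tune_1_1 is assumed to be the 1.1 IFrontend entry point, and only getHidlFrontendSettings/getHidlFrontendSettingsExt come from this change.

    // Illustrative sketch only -- not part of this change. Assumes TunerFrontend
    // also holds a 1.1 IFrontend proxy named mFrontend_1_1.
    Status TunerFrontend::tuneSketch(const TunerFrontendSettings& settings) {
        if (mFrontend_1_1 == NULL) {
            return Status::fromServiceSpecificError(
                    static_cast<int32_t>(Result::UNAVAILABLE));
        }
        // 1.0 payload: whichever member of TunerFrontendUnionSettings the client set.
        FrontendSettings halSettings = getHidlFrontendSettings(settings);
        // 1.1 payload: endFrequency/inversion plus the per-standard extension,
        // or settingExt.noinit() when the client did not request one.
        FrontendSettingsExt1_1 halSettingsExt = getHidlFrontendSettingsExt(settings);
        Result res = mFrontend_1_1->tune_1_1(halSettings, halSettingsExt);
        return res == Result::SUCCESS
                ? Status::ok()
                : Status::fromServiceSpecificError(static_cast<int32_t>(res));
    }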
diff --git a/services/tuner/TunerFrontend.h b/services/tuner/TunerFrontend.h
index 99cdcdf..729640c 100644
--- a/services/tuner/TunerFrontend.h
+++ b/services/tuner/TunerFrontend.h
@@ -41,10 +41,14 @@
using ::android::hardware::tv::tuner::V1_0::FrontendId;
using ::android::hardware::tv::tuner::V1_0::FrontendScanMessage;
using ::android::hardware::tv::tuner::V1_0::FrontendScanMessageType;
+using ::android::hardware::tv::tuner::V1_0::FrontendSettings;
+using ::android::hardware::tv::tuner::V1_0::FrontendStatus;
using ::android::hardware::tv::tuner::V1_0::IFrontend;
using ::android::hardware::tv::tuner::V1_1::IFrontendCallback;
using ::android::hardware::tv::tuner::V1_1::FrontendScanMessageExt1_1;
using ::android::hardware::tv::tuner::V1_1::FrontendScanMessageTypeExt1_1;
+using ::android::hardware::tv::tuner::V1_1::FrontendSettingsExt1_1;
+using ::android::hardware::tv::tuner::V1_1::FrontendStatusExt1_1;
using namespace std;
@@ -66,6 +70,8 @@
Status close() override;
Status getStatus(const vector<int32_t>& statusTypes,
vector<TunerFrontendStatus>* _aidl_return) override;
+ Status getStatusExtended_1_1(const vector<int32_t>& statusTypes,
+ vector<TunerFrontendStatus>* _aidl_return) override;
Status getFrontendId(int* _aidl_return) override;
struct FrontendCallback : public IFrontendCallback {
@@ -85,6 +91,12 @@
hidl_vec<FrontendAtsc3PlpSettings> getAtsc3PlpSettings(
const TunerFrontendAtsc3Settings& settings);
FrontendDvbsCodeRate getDvbsCodeRate(const TunerFrontendDvbsCodeRate& codeRate);
+ FrontendSettings getHidlFrontendSettings(const TunerFrontendSettings& aidlSettings);
+ FrontendSettingsExt1_1 getHidlFrontendSettingsExt(const TunerFrontendSettings& aidlSettings);
+ void getAidlFrontendStatus(
+ vector<FrontendStatus>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus);
+ void getAidlFrontendStatusExt(
+ vector<FrontendStatusExt1_1>& hidlStatus, vector<TunerFrontendStatus>& aidlStatus);
int mId;
sp<IFrontend> mFrontend;
diff --git a/services/tuner/TunerService.cpp b/services/tuner/TunerService.cpp
index da9f541..4376188 100644
--- a/services/tuner/TunerService.cpp
+++ b/services/tuner/TunerService.cpp
@@ -92,7 +92,27 @@
return Status::ok();
}
- ALOGD("open demux failed, res = %d", res);
+ ALOGW("open demux failed, res = %d", res);
+ return Status::fromServiceSpecificError(static_cast<int32_t>(res));
+}
+
+Status TunerService::getDemuxCaps(TunerDemuxCapabilities* _aidl_return) {
+ ALOGD("getDemuxCaps");
+ if (!getITuner()) {
+ return Status::fromServiceSpecificError(static_cast<int32_t>(Result::NOT_INITIALIZED));
+ }
+ Result res;
+ DemuxCapabilities caps;
+ mTuner->getDemuxCaps([&](Result r, const DemuxCapabilities& demuxCaps) {
+ caps = demuxCaps;
+ res = r;
+ });
+ if (res == Result::SUCCESS) {
+ *_aidl_return = getAidlDemuxCaps(caps);
+ return Status::ok();
+ }
+
+ ALOGW("Get demux caps failed, res = %d", res);
return Status::fromServiceSpecificError(static_cast<int32_t>(res));
}
@@ -343,6 +363,26 @@
return res;
}
+TunerDemuxCapabilities TunerService::getAidlDemuxCaps(DemuxCapabilities caps) {
+ TunerDemuxCapabilities aidlCaps{
+ .numDemux = (int)caps.numDemux,
+ .numRecord = (int)caps.numRecord,
+ .numPlayback = (int)caps.numPlayback,
+ .numTsFilter = (int)caps.numTsFilter,
+ .numSectionFilter = (int)caps.numSectionFilter,
+ .numAudioFilter = (int)caps.numAudioFilter,
+ .numVideoFilter = (int)caps.numVideoFilter,
+ .numPesFilter = (int)caps.numPesFilter,
+ .numPcrFilter = (int)caps.numPcrFilter,
+ .numBytesInSectionFilter = (int)caps.numBytesInSectionFilter,
+ .filterCaps = (int)caps.filterCaps,
+ .bTimeFilter = caps.bTimeFilter,
+ };
+ aidlCaps.linkCaps.resize(caps.linkCaps.size());
+ copy(caps.linkCaps.begin(), caps.linkCaps.end(), aidlCaps.linkCaps.begin());
+ return aidlCaps;
+}
+
TunerFrontendInfo TunerService::convertToAidlFrontendInfo(FrontendInfo halInfo) {
TunerFrontendInfo info{
.type = (int)halInfo.type,
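
For reference, a conceptual sketch of the field mapping performed by getAidlDemuxCaps above. The helper is private, so real coverage would exercise the public getDemuxCaps(); the sample values are invented.

    // Hypothetical HAL-reported capabilities, used only to illustrate the casts.
    DemuxCapabilities halCaps;
    halCaps.numDemux = 4;
    halCaps.numRecord = 2;
    halCaps.numPlayback = 2;
    halCaps.numTsFilter = 48;
    halCaps.bTimeFilter = true;
    halCaps.linkCaps = {0x1, 0x3};  // hidl_vec<uint32_t>

    TunerDemuxCapabilities aidlCaps = getAidlDemuxCaps(halCaps);
    // Each counter arrives as a plain int32 on the AIDL side, and linkCaps is
    // copied element by element: aidlCaps.numDemux == 4, aidlCaps.linkCaps == {1, 3}.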
diff --git a/services/tuner/TunerService.h b/services/tuner/TunerService.h
index 942d409..e18b793 100644
--- a/services/tuner/TunerService.h
+++ b/services/tuner/TunerService.h
@@ -31,6 +31,7 @@
using ::aidl::android::media::tv::tuner::ITunerDemux;
using ::aidl::android::media::tv::tuner::ITunerFrontend;
using ::aidl::android::media::tv::tuner::ITunerLnb;
+using ::aidl::android::media::tv::tuner::TunerDemuxCapabilities;
using ::aidl::android::media::tv::tuner::TunerFrontendInfo;
using ::aidl::android::media::tv::tunerresourcemanager::ITunerResourceManager;
@@ -42,6 +43,7 @@
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
using ::android::hardware::Void;
+using ::android::hardware::tv::tuner::V1_0::DemuxCapabilities;
using ::android::hardware::tv::tuner::V1_0::DemuxFilterAvSettings;
using ::android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
using ::android::hardware::tv::tuner::V1_0::DemuxFilterMainType;
@@ -113,6 +115,7 @@
Status openLnb(int lnbHandle, shared_ptr<ITunerLnb>* _aidl_return) override;
Status openLnbByName(const string& lnbName, shared_ptr<ITunerLnb>* _aidl_return) override;
Status openDemux(int32_t demuxHandle, std::shared_ptr<ITunerDemux>* _aidl_return) override;
+ Status getDemuxCaps(TunerDemuxCapabilities* _aidl_return) override;
Status updateTunerResources() override;
private:
@@ -125,6 +128,7 @@
Result getHidlFrontendInfo(int id, FrontendInfo& info);
vector<int> getLnbHandles();
+ TunerDemuxCapabilities getAidlDemuxCaps(DemuxCapabilities caps);
TunerFrontendInfo convertToAidlFrontendInfo(FrontendInfo halInfo);
sp<ITuner> mTuner;
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
index f6de618..73b00ae 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerDemux.aidl
@@ -21,6 +21,7 @@
import android.media.tv.tuner.ITunerFilter;
import android.media.tv.tuner.ITunerFilterCallback;
import android.media.tv.tuner.ITunerFrontend;
+import android.media.tv.tuner.ITunerTimeFilter;
/**
* Tuner Demux interface handles tuner related operations.
@@ -41,7 +42,37 @@
in int mainType, in int subtype, in int bufferSize, in ITunerFilterCallback cb);
/**
+ * Open a time filter of the demux.
+ */
+ ITunerTimeFilter openTimeFilter();
+
+ /**
+ * Get hardware sync ID for audio and video.
+ */
+ int getAvSyncHwId(ITunerFilter tunerFilter);
+
+ /**
+ * Get current time stamp to use for A/V sync.
+ */
+ long getAvSyncTime(in int avSyncHwId);
+
+ /**
* Open a DVR (Digital Video Record) instance in the demux.
*/
ITunerDvr openDvr(in int dvbType, in int bufferSize, in ITunerDvrCallback cb);
+
+ /**
+ * Connect Conditional Access Modules (CAM) through Common Interface (CI).
+ */
+ void connectCiCam(in int ciCamId);
+
+ /**
+ * Disconnect Conditional Access Modules (CAM).
+ */
+ void disconnectCiCam();
+
+ /**
+ * Releases the ITunerDemux instance.
+ */
+ void close();
}
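
To show how the new demux methods map onto the 1.0 HAL, a hedged sketch of a getAvSyncTime() implementation follows; the TunerDemux class layout (in particular the mDemux proxy) is an assumption, while the IDemux::getAvSyncTime callback shape follows the published 1.0 HIDL interface.

    // Illustrative sketch only; mDemux is assumed to be the sp<IDemux> held by
    // the service-side TunerDemux implementation.
    Status TunerDemux::getAvSyncTime(int32_t avSyncHwId, int64_t* _aidl_return) {
        if (mDemux == NULL) {
            return Status::fromServiceSpecificError(
                    static_cast<int32_t>(Result::NOT_INITIALIZED));
        }
        Result res = Result::UNKNOWN_ERROR;
        uint64_t time = 0;
        mDemux->getAvSyncTime(static_cast<uint32_t>(avSyncHwId),
                [&](Result r, uint64_t ts) {
                    res = r;
                    time = ts;
                });
        if (res != Result::SUCCESS) {
            return Status::fromServiceSpecificError(static_cast<int32_t>(res));
        }
        *_aidl_return = static_cast<int64_t>(time);
        return Status::ok();
    }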
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
index 37166aa..10d4c3b 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFilter.aidl
@@ -16,7 +16,11 @@
package android.media.tv.tuner;
+import android.hardware.common.fmq.MQDescriptor;
+import android.hardware.common.fmq.SynchronizedReadWrite;
+import android.hardware.common.NativeHandle;
import android.media.tv.tuner.TunerFilterConfiguration;
+import android.media.tv.tuner.TunerFilterSharedHandleInfo;
/**
* Tuner Filter interface handles tuner related operations.
@@ -35,11 +39,46 @@
long getId64Bit();
/**
+ * Get the descriptor of the Filter's FMQ.
+ */
+ MQDescriptor<byte, SynchronizedReadWrite> getQueueDesc();
+
+ /**
* Configure the filter.
*/
void configure(in TunerFilterConfiguration config);
/**
+ * Configure the monitor event of the Filter.
+ */
+ void configureMonitorEvent(in int monitorEventType);
+
+ /**
+ * Configure the context id of the IP Filter.
+ */
+ void configureIpFilterContextId(in int cid);
+
+ /**
+ * Configure the stream type of the media Filter.
+ */
+ void configureAvStreamType(in int avStreamType);
+
+ /**
+ * Get the A/V shared memory handle info.
+ */
+ TunerFilterSharedHandleInfo getAvSharedHandleInfo();
+
+ /**
+ * Release the handle reported by the HAL for AV memory.
+ */
+ void releaseAvHandle(in NativeHandle handle, in long avDataId);
+
+ /**
+ * Set the filter's data source.
+ */
+ void setDataSource(ITunerFilter filter);
+
+ /**
* Start the filter.
*/
void start();
@@ -53,4 +92,9 @@
* Flush the filter.
*/
void flush();
+
+ /**
+ * Close the filter.
+ */
+ void close();
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
index f9f86ac..e7a52a7 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFilterCallback.aidl
@@ -32,5 +32,5 @@
/**
* Notify the client that a new filter event happened.
*/
- void onFilterEvent(out TunerFilterEvent[] filterEvent);
+ void onFilterEvent(in TunerFilterEvent[] filterEvent);
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
index bfc3e30..254e16a 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerFrontend.aidl
@@ -83,6 +83,11 @@
TunerFrontendStatus[] getStatus(in int[] statusTypes);
/**
+ * Gets the 1.1 extended statuses of the frontend.
+ */
+ TunerFrontendStatus[] getStatusExtended_1_1(in int[] statusTypes);
+
+ /**
* Gets the id of the frontend.
*/
int getFrontendId();
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
index 2fabbe5..c6259ae 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
@@ -22,6 +22,7 @@
import android.media.tv.tuner.ITunerDemux;
import android.media.tv.tuner.ITunerFrontend;
import android.media.tv.tuner.ITunerLnb;
+import android.media.tv.tuner.TunerDemuxCapabilities;
import android.media.tv.tuner.TunerFrontendInfo;
/**
@@ -82,6 +83,13 @@
ITunerDemux openDemux(in int demuxHandle);
/**
+ * Retrieve the Tuner Demux capabilities.
+ *
+ * @return the demux's capabilities.
+ */
+ TunerDemuxCapabilities getDemuxCaps();
+
+ /**
* Update Tuner Resources in TunerResourceManager.
*/
// TODO: b/178124017 update TRM in TunerService independently.
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl
new file mode 100644
index 0000000..df3374a
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerAudioExtraMetaData.aidl
@@ -0,0 +1,36 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Extra Meta Data from AD (Audio Descriptor) according to ETSI TS 101 154 V2.1.1.
+ *
+ * {@hide}
+ */
+parcelable TunerAudioExtraMetaData {
+ byte adFade;
+
+ byte adPan;
+
+ byte versionTextTag;
+
+ byte adGainCenter;
+
+ byte adGainFront;
+
+ byte adGainSurround;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl
new file mode 100644
index 0000000..71ab151
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxCapabilities.aidl
@@ -0,0 +1,50 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Tuner Demux capabilities interface.
+ *
+ * {@hide}
+ */
+parcelable TunerDemuxCapabilities {
+ int numDemux;
+
+ int numRecord;
+
+ int numPlayback;
+
+ int numTsFilter;
+
+ int numSectionFilter;
+
+ int numAudioFilter;
+
+ int numVideoFilter;
+
+ int numPesFilter;
+
+ int numPcrFilter;
+
+ int numBytesInSectionFilter;
+
+ int filterCaps;
+
+ int[] linkCaps;
+
+ boolean bTimeFilter;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl
new file mode 100644
index 0000000..b65f404
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddress.aidl
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Demux IP address configuration.
+ *
+ * {@hide}
+ */
+parcelable TunerDemuxIpAddress {
+ boolean isIpV6;
+
+ byte[] addr;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl
new file mode 100644
index 0000000..b244388
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerDemuxIpAddressSettings.aidl
@@ -0,0 +1,34 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerDemuxIpAddress;
+
+/**
+ * Filter Settings for an IP filter.
+ *
+ * {@hide}
+ */
+parcelable TunerDemuxIpAddressSettings {
+ TunerDemuxIpAddress srcIpAddress;
+
+ TunerDemuxIpAddress dstIpAddress;
+
+ char srcPort;
+
+ char dstPort;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl
new file mode 100644
index 0000000..4c9e3af
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterAlpConfiguration.aidl
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerFilterSettings;
+
+/**
+ * Filter Settings for an ALP filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterAlpConfiguration {
+ byte packetType;
+
+ byte lengthType;
+
+ TunerFilterSettings filterSettings;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl
index c208dde..808cfd1 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterConfiguration.aidl
@@ -16,6 +16,10 @@
package android.media.tv.tuner;
+import android.media.tv.tuner.TunerFilterAlpConfiguration;
+import android.media.tv.tuner.TunerFilterIpConfiguration;
+import android.media.tv.tuner.TunerFilterMmtpConfiguration;
+import android.media.tv.tuner.TunerFilterTlvConfiguration;
import android.media.tv.tuner.TunerFilterTsConfiguration;
/**
@@ -25,4 +29,12 @@
*/
union TunerFilterConfiguration {
TunerFilterTsConfiguration ts;
+
+ TunerFilterMmtpConfiguration mmtp;
+
+ TunerFilterIpConfiguration ip;
+
+ TunerFilterTlvConfiguration tlv;
+
+ TunerFilterAlpConfiguration alp;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl
new file mode 100644
index 0000000..b971dd3
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadEvent.aidl
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter Event for Download data.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterDownloadEvent {
+ int itemId;
+
+ /**
+ * MPU sequence number of filtered data (only for MMTP)
+ */
+ int mpuSequenceNumber;
+
+ int itemFragmentIndex;
+
+ int lastItemFragmentIndex;
+
+ /**
+ * Data size in bytes of filtered data
+ */
+ char dataLength;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl
new file mode 100644
index 0000000..417a5fe
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterDownloadSettings.aidl
@@ -0,0 +1,26 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter Settings for downloading.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterDownloadSettings {
+ int downloadId;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl
index ad95112..1305510 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterEvent.aidl
@@ -16,7 +16,15 @@
package android.media.tv.tuner;
+import android.media.tv.tuner.TunerFilterDownloadEvent;
+import android.media.tv.tuner.TunerFilterIpPayloadEvent;
import android.media.tv.tuner.TunerFilterMediaEvent;
+import android.media.tv.tuner.TunerFilterMmtpRecordEvent;
+import android.media.tv.tuner.TunerFilterMonitorEvent;
+import android.media.tv.tuner.TunerFilterPesEvent;
+import android.media.tv.tuner.TunerFilterSectionEvent;
+import android.media.tv.tuner.TunerFilterTemiEvent;
+import android.media.tv.tuner.TunerFilterTsRecordEvent;
/**
* Filter events.
@@ -25,4 +33,22 @@
*/
union TunerFilterEvent {
TunerFilterMediaEvent media;
+
+ TunerFilterSectionEvent section;
+
+ TunerFilterPesEvent pes;
+
+ TunerFilterTsRecordEvent tsRecord;
+
+ TunerFilterMmtpRecordEvent mmtpRecord;
+
+ TunerFilterDownloadEvent download;
+
+ TunerFilterIpPayloadEvent ipPayload;
+
+ TunerFilterTemiEvent temi;
+
+ TunerFilterMonitorEvent monitor;
+
+ int startId;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl
new file mode 100644
index 0000000..8b4d889
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpConfiguration.aidl
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerDemuxIpAddressSettings;
+import android.media.tv.tuner.TunerFilterSettings;
+
+/**
+ * Filter Settings for an IP filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterIpConfiguration {
+ TunerDemuxIpAddressSettings ipAddr;
+
+ TunerFilterSettings filterSettings;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl
new file mode 100644
index 0000000..d5bda93
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterIpPayloadEvent.aidl
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter Event for IP payload data.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterIpPayloadEvent {
+ /**
+ * Data size in bytes of IP data
+ */
+ char dataLength;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
index 486a15c..5842c0d 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMediaEvent.aidl
@@ -17,6 +17,7 @@
package android.media.tv.tuner;
import android.hardware.common.NativeHandle;
+import android.media.tv.tuner.TunerAudioExtraMetaData;
/**
* Filter Event for Audio or Video Filter.
@@ -71,5 +72,13 @@
boolean isPesPrivateData;
- // TODO: add ExtraMetaData
+ /**
+ * If TunerAudioExtraMetaData field is valid or not
+ */
+ boolean isAudioExtraMetaData;
+
+ /**
+ * Only valid when isAudioExtraMetaData is true
+ */
+ TunerAudioExtraMetaData audio;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl
new file mode 100644
index 0000000..162ca8e
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpConfiguration.aidl
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerFilterSettings;
+
+/**
+ * Filter Settings for an MMTP filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterMmtpConfiguration {
+ char mmtpPid;
+
+ TunerFilterSettings filterSettings;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl
new file mode 100644
index 0000000..b8871cf
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMmtpRecordEvent.aidl
@@ -0,0 +1,57 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter Event for an MMTP Record Filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterMmtpRecordEvent {
+ int scHevcIndexMask;
+
+ /**
+ * Byte number from beginning of the filter's output
+ */
+ long byteNumber;
+
+ /**
+ * If the current event contains extended information or not
+ */
+ boolean isExtended;
+
+ /**
+ * The Presentation Time Stamp (PTS) for the audio or video frame. It is based on 90 kHz
+ * and has the same format as the PTS in ISO/IEC 13818-1.
+ */
+ long pts;
+
+ /**
+ * MPU sequence number of the filtered data. This is only used for MMTP.
+ */
+ int mpuSequenceNumber;
+
+ /**
+ * Specifies the address of the first macroblock in the slice defined in ITU-T Rec. H.264.
+ */
+ int firstMbInSlice;
+
+ /**
+ * TS index mask.
+ */
+ int tsIndexMask;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl
new file mode 100644
index 0000000..31ab5e6
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterMonitorEvent.aidl
@@ -0,0 +1,34 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter monitor events.
+ *
+ * {@hide}
+ */
+union TunerFilterMonitorEvent {
+ /**
+ * New scrambling status.
+ */
+ int scramblingStatus;
+
+ /**
+ * New cid for the IP filter.
+ */
+ int cid;
+}
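
Since monitor events are new to this interface, here is a hedged sketch of how a callback implementation might consume them; the class name and log strings are invented, while the union accessors follow the generated C++ for the AIDL types above.

    // Illustrative callback fragment; TunerFilterCallbackSketch is a hypothetical
    // ITunerFilterCallback implementation.
    Status TunerFilterCallbackSketch::onFilterEvent(
            const vector<TunerFilterEvent>& filterEvent) {
        for (const TunerFilterEvent& e : filterEvent) {
            if (e.getTag() != TunerFilterEvent::monitor) {
                continue;
            }
            const TunerFilterMonitorEvent& m = e.get<TunerFilterEvent::monitor>();
            if (m.getTag() == TunerFilterMonitorEvent::scramblingStatus) {
                ALOGD("Scrambling status: %d",
                        m.get<TunerFilterMonitorEvent::scramblingStatus>());
            } else {
                ALOGD("New IP filter cid: %d", m.get<TunerFilterMonitorEvent::cid>());
            }
        }
        return Status::ok();
    }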
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl
new file mode 100644
index 0000000..312f314
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesDataSettings.aidl
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter Settings for PES data.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterPesDataSettings {
+ char streamId;
+
+ boolean isRaw;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
new file mode 100644
index 0000000..f7ee286
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterPesEvent.aidl
@@ -0,0 +1,36 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter Event for PES Filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterPesEvent {
+ char streamId;
+
+ /**
+ * Data size in bytes of PES data
+ */
+ int dataLength;
+
+ /**
+ * MPU sequence number of filtered data
+ */
+ int mpuSequenceNumber;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl
new file mode 100644
index 0000000..29be624
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterRecordSettings.aidl
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerFilterScIndexMask;
+
+/**
+ * Filter Settings for recording.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterRecordSettings {
+ int tsIndexMask;
+
+ int scIndexType;
+
+ TunerFilterScIndexMask scIndexMask;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl
new file mode 100644
index 0000000..ed37fce
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterScIndexMask.aidl
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter SC Index Mask
+ *
+ * {@hide}
+ */
+union TunerFilterScIndexMask {
+ int sc;
+
+ int scHevc;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl
new file mode 100644
index 0000000..dd4f842
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionBits.aidl
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Bit settings (filter, mask, mode) of a section filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterSectionBits {
+ byte[] filter;
+
+ byte[] mask;
+
+ byte[] mode;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl
new file mode 100644
index 0000000..00aabe4
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionCondition.aidl
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerFilterSectionBits;
+import android.media.tv.tuner.TunerFilterSectionTableInfo;
+
+/**
+ * Section filter condition settings.
+ *
+ * {@hide}
+ */
+union TunerFilterSectionCondition {
+ TunerFilterSectionBits sectionBits;
+
+ TunerFilterSectionTableInfo tableInfo;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl
new file mode 100644
index 0000000..5f20926
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionEvent.aidl
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter Event for Section Filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterSectionEvent {
+ /**
+ * Table ID of filtered data
+ */
+ char tableId;
+
+ /**
+ * Version number of filtered data
+ */
+ char version;
+
+ /**
+ * Section number of filtered data
+ */
+ char sectionNum;
+
+ /**
+ * Data size in bytes of filtered data
+ */
+ char dataLength;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl
new file mode 100644
index 0000000..22129b6
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionSettings.aidl
@@ -0,0 +1,34 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerFilterSectionCondition;
+
+/**
+ * Filter Settings for a section filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterSectionSettings {
+ TunerFilterSectionCondition condition;
+
+ boolean isCheckCrc;
+
+ boolean isRepeat;
+
+ boolean isRaw;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl
new file mode 100644
index 0000000..cc78c9d
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSectionTableInfo.aidl
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Table info settings of a section filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterSectionTableInfo {
+ char tableId;
+
+ char version;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl
index 8b9e9c2..eb7eaa5 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSettings.aidl
@@ -17,6 +17,10 @@
package android.media.tv.tuner;
import android.media.tv.tuner.TunerFilterAvSettings;
+import android.media.tv.tuner.TunerFilterDownloadSettings;
+import android.media.tv.tuner.TunerFilterPesDataSettings;
+import android.media.tv.tuner.TunerFilterRecordSettings;
+import android.media.tv.tuner.TunerFilterSectionSettings;
/**
* Filter Settings.
@@ -24,5 +28,17 @@
* {@hide}
*/
union TunerFilterSettings {
+ boolean nothing;
+
TunerFilterAvSettings av;
+
+ TunerFilterSectionSettings section;
+
+ TunerFilterPesDataSettings pesData;
+
+ TunerFilterRecordSettings record;
+
+ TunerFilterDownloadSettings download;
+
+ boolean isPassthrough;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl
new file mode 100644
index 0000000..122dfc3
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterSharedHandleInfo.aidl
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.hardware.common.NativeHandle;
+
+/**
+ * Filter Shared Handle Information.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterSharedHandleInfo {
+ NativeHandle handle;
+ long size;
+}
\ No newline at end of file
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl
new file mode 100644
index 0000000..4c4e993
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTemiEvent.aidl
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Filter Event for Timed External Media Information (TEMI) data.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterTemiEvent {
+ /**
+ * Presentation Time Stamp (PTS) for the audio or video frame. It is based on 90 kHz and
+ * has the same format as the PTS in ISO/IEC 13818-1.
+ */
+ long pts;
+
+ /**
+ * TEMI Descriptor Tag
+ */
+ byte descrTag;
+
+ /**
+ * TEMI Descriptor
+ */
+ byte[] descrData;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl
new file mode 100644
index 0000000..0b237b4
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTlvConfiguration.aidl
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerFilterSettings;
+
+/**
+ * Filter Settings for a TLV filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterTlvConfiguration {
+ byte packetType;
+
+ boolean isCompressedIpPacket;
+
+ TunerFilterSettings filterSettings;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl
index 5b94988..2e386e6 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsConfiguration.aidl
@@ -24,6 +24,7 @@
* {@hide}
*/
parcelable TunerFilterTsConfiguration {
- int tpid;
+ char tpid;
+
TunerFilterSettings filterSettings;
}
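
Putting the filter parcelables together, a hedged sketch of assembling a TS section-filter configuration; the PID, table id, and the tunerFilter proxy are illustrative values, not defaults from the HAL.

    // Illustrative values only.
    TunerFilterSectionTableInfo tableInfo;
    tableInfo.tableId = 0x02;   // e.g. PMT
    tableInfo.version = 0;

    TunerFilterSectionSettings section;
    section.condition.set<TunerFilterSectionCondition::tableInfo>(tableInfo);
    section.isCheckCrc = true;
    section.isRepeat = false;
    section.isRaw = false;

    TunerFilterSettings filterSettings;
    filterSettings.set<TunerFilterSettings::section>(section);

    TunerFilterTsConfiguration ts;
    ts.tpid = 0x100;            // char carries the 16-bit TS PID
    ts.filterSettings = filterSettings;

    TunerFilterConfiguration config;
    config.set<TunerFilterConfiguration::ts>(ts);
    tunerFilter->configure(config);  // ITunerFilter proxy, assumed to exist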
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl
new file mode 100644
index 0000000..c52a749
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFilterTsRecordEvent.aidl
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerFilterScIndexMask;
+
+/**
+ * Filter Event for TS Record Filter.
+ *
+ * {@hide}
+ */
+parcelable TunerFilterTsRecordEvent {
+ char pid;
+
+ int tsIndexMask;
+
+ /**
+ * Indexes of record output
+ */
+ TunerFilterScIndexMask scIndexMask;
+
+ /**
+ * Byte number from beginning of the filter's output
+ */
+ long byteNumber;
+
+ /**
+ * If the current event contains extended information or not
+ */
+ boolean isExtended;
+
+ /**
+ * The Presentation Time Stamp (PTS) for the audio or video frame. It is based on 90 kHz
+ * and has the same format as the PTS in ISO/IEC 13818-1.
+ */
+ long pts;
+
+ /**
+ * Specifies the address of the first macroblock in the slice defined in ITU-T Rec. H.264.
+ */
+ int firstMbInSlice;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl
index b6d07c3..40cd8c9 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendAnalogSettings.aidl
@@ -33,4 +33,11 @@
* Standard Interchange Format (SIF) setting
*/
int sifStandard;
+
+ /**
+ * Fields after isExtended are only valid when isExtended is true
+ */
+ boolean isExtended;
+
+ int aftFlag;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl
index 3984f2c..b9bcf29 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendCableSettings.aidl
@@ -51,4 +51,13 @@
* Spectral Inversion Type.
*/
int spectralInversion;
+
+ /**
+ * Fields after isExtended are only valid when isExtended is true
+ */
+ boolean isExtended;
+
+ int interleaveMode;
+
+ int bandwidth;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl
new file mode 100644
index 0000000..45e7ff9
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDtmbSettings.aidl
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * DTMB Frontend Settings interface.
+ *
+ * {@hide}
+ */
+parcelable TunerFrontendDtmbSettings {
+ int frequency;
+
+ int transmissionMode;
+
+ int bandwidth;
+
+ int modulation;
+
+ int codeRate;
+
+ int guardInterval;
+
+ int interleaveMode;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl
index 554a502..ec3e4b9 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbsSettings.aidl
@@ -53,4 +53,13 @@
* Vcm mode.
*/
int vcm;
+
+ /**
+ * Fields after isExtended are only valid when isExtended is true
+ */
+ boolean isExtended;
+
+ int scanType;
+
+ boolean isDiseqcRxMessage;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl
index c72396b..14c942a 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendDvbtSettings.aidl
@@ -67,4 +67,9 @@
* Physical Layer Pipe (PLP) Group Id
*/
int plpGroupId;
+
+ /**
+ * Fields after isExtended are only valid when isExtended is true
+ */
+ boolean isExtended;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl
index ca4a9af..1b8fcbb 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendScanAtsc3PlpInfo.aidl
@@ -22,7 +22,7 @@
* {@hide}
*/
parcelable TunerFrontendScanAtsc3PlpInfo {
- int plpId;
+ byte plpId;
boolean llsFlag;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl
index a382941..70a5f3e 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendSettings.aidl
@@ -1,5 +1,5 @@
/**
- * Copyright 2020, The Android Open Source Project
+ * Copyright 2021, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,37 +16,19 @@
package android.media.tv.tuner;
-import android.media.tv.tuner.TunerFrontendAnalogSettings;
-import android.media.tv.tuner.TunerFrontendAtscSettings;
-import android.media.tv.tuner.TunerFrontendAtsc3Settings;
-import android.media.tv.tuner.TunerFrontendCableSettings;
-import android.media.tv.tuner.TunerFrontendDvbsSettings;
-import android.media.tv.tuner.TunerFrontendDvbtSettings;
-import android.media.tv.tuner.TunerFrontendIsdbsSettings;
-import android.media.tv.tuner.TunerFrontendIsdbs3Settings;
-import android.media.tv.tuner.TunerFrontendIsdbtSettings;
+import android.media.tv.tuner.TunerFrontendUnionSettings;
/**
- * Analog Frontend Settings interface.
+ * Frontend Settings interface.
*
* {@hide}
*/
-union TunerFrontendSettings {
- TunerFrontendAnalogSettings analog;
+parcelable TunerFrontendSettings {
+ TunerFrontendUnionSettings settings;
- TunerFrontendAtscSettings atsc;
+ boolean isExtended;
- TunerFrontendAtsc3Settings atsc3;
+ int endFrequency;
- TunerFrontendCableSettings cable;
-
- TunerFrontendDvbsSettings dvbs;
-
- TunerFrontendDvbtSettings dvbt;
-
- TunerFrontendIsdbsSettings isdbs;
-
- TunerFrontendIsdbs3Settings isdbs3;
-
- TunerFrontendIsdbtSettings isdbt;
+ int inversion;
}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl
index 41f9f0e..2b3c01b 100644
--- a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatus.aidl
@@ -16,9 +16,172 @@
package android.media.tv.tuner;
+import android.media.tv.tuner.TunerFrontendStatusAtsc3PlpInfo;
+
/**
* Tuner Frontend Status interface.
*
* {@hide}
*/
-parcelable TunerFrontendStatus {}
+union TunerFrontendStatus {
+ /**
+ * Lock status of the demod (true/false).
+ */
+ boolean isDemodLocked;
+
+ /**
+ * SNR value measured by 0.001 dB.
+ */
+ int snr;
+
+ /**
+ * The number of error bits per 1 billion bits.
+ */
+ int ber;
+
+ /**
+ * The number of error packets per 1 billion packets.
+ */
+ int per;
+
+ /**
+ * The number of error bits per 1 billion bits before FEC.
+ */
+ int preBer;
+
+ /**
+ * Signal Quality in percent.
+ */
+ int signalQuality;
+
+ /**
+ * Signal Strength measured by 0.001 dBm.
+ */
+ int signalStrength;
+
+ /**
+ * Symbols per second
+ */
+ int symbolRate;
+
+ long innerFec;
+
+ /**
+ * Check frontend type to decide the hidl type value
+ */
+ int modulation;
+
+ int inversion;
+
+ int lnbVoltage;
+
+ byte plpId;
+
+ boolean isEWBS;
+
+ /**
+ * AGC value is normalized from 0 to 255.
+ */
+ byte agc;
+
+ boolean isLnaOn;
+
+ boolean[] isLayerError;
+
+ /**
+ * MER value measured by 0.001 dB
+ */
+ int mer;
+
+ /**
+ * Frequency difference in Hertz.
+ */
+ int freqOffset;
+
+ int hierarchy;
+
+ boolean isRfLocked;
+
+ /**
+ * A list of PLP status for tuned PLPs for ATSC3 frontend.
+ */
+ TunerFrontendStatusAtsc3PlpInfo[] plpInfo;
+
+ // 1.1 Extension Starting
+
+ /**
+ * Extended modulation status. Check frontend type to decide the hidl type value.
+ */
+ int[] modulations;
+
+ /**
+ * Extended bit error ratio status.
+ */
+ int[] bers;
+
+ /**
+ * Extended code rate status.
+ */
+ long[] codeRates;
+
+ /**
+ * Extended bandwidth status. Check frontend type to decide the hidl type value.
+ */
+ int bandwidth;
+
+ /**
+ * Extended guard interval status. Check frontend type to decide the hidl type value.
+ */
+ int interval;
+
+ /**
+ * Extended transmission mode status. Check frontend type to decide the hidl type value.
+ */
+ int transmissionMode;
+
+ /**
+ * Uncorrectable Error Counts of the frontend's Physical Layer Pipe (PLP)
+ * since the last tune operation.
+ */
+ int uec;
+
+ /**
+ * The current DVB-T2 system id status.
+ */
+ char systemId;
+
+ /**
+ * Frontend Interleaving Modes. Check frontend type to decide the hidl type value.
+ */
+ int[] interleaving;
+
+ /**
+ * Segments in ISDB-T Specification of all the channels.
+ */
+ byte[] isdbtSegment;
+
+ /**
+ * Transport Stream Data Rate in BPS of the current channel.
+ */
+ int[] tsDataRate;
+
+ /**
+ * Roll Off Type status of the frontend. Check frontend type to decide the hidl type value.
+ */
+ int rollOff;
+
+ /**
+ * If the frontend currently supports MISO or not.
+ */
+ boolean isMiso;
+
+ /**
+ * If the frontend code rate is linear or not.
+ */
+ boolean isLinear;
+
+ /**
+ * If short frames are enabled or not.
+ */
+ boolean isShortFrames;
+}
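
The union above mirrors the HIDL FrontendStatus safe_union, so the getAidlFrontendStatus helper declared in TunerFrontend.h reduces to a per-discriminator copy. A hedged sketch of two arms follows; the loop body and variable names are invented, and the real helper must cover every discriminator plus the 1.1 list handled by getAidlFrontendStatusExt.

    // Illustrative fragment of the status conversion; only two arms shown.
    for (const FrontendStatus& s : hidlStatus) {
        TunerFrontendStatus status;
        switch (s.getDiscriminator()) {
            case FrontendStatus::hidl_discriminator::isDemodLocked:
                status.set<TunerFrontendStatus::isDemodLocked>(s.isDemodLocked());
                break;
            case FrontendStatus::hidl_discriminator::snr:
                status.set<TunerFrontendStatus::snr>(s.snr());
                break;
            default:
                continue;
        }
        aidlStatus.push_back(status);
    }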
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl
new file mode 100644
index 0000000..4116c34
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendStatusAtsc3PlpInfo.aidl
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+/**
+ * Atsc3 Frontend Physical Layer Pipe Info in Frontend status.
+ *
+ * {@hide}
+ */
+parcelable TunerFrontendStatusAtsc3PlpInfo {
+ /**
+ * PLP Id value.
+ */
+ byte plpId;
+
+ /**
+ * Demod Lock/Unlock status of this particular PLP.
+ */
+ boolean isLocked;
+
+ /**
+ * Uncorrectable Error Counts (UEC) of this particular PLP since last tune operation.
+ */
+ int uec;
+}
diff --git a/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl
new file mode 100644
index 0000000..c362c2a
--- /dev/null
+++ b/services/tuner/aidl/android/media/tv/tuner/TunerFrontendUnionSettings.aidl
@@ -0,0 +1,55 @@
+/**
+ * Copyright 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.tv.tuner;
+
+import android.media.tv.tuner.TunerFrontendAnalogSettings;
+import android.media.tv.tuner.TunerFrontendAtscSettings;
+import android.media.tv.tuner.TunerFrontendAtsc3Settings;
+import android.media.tv.tuner.TunerFrontendCableSettings;
+import android.media.tv.tuner.TunerFrontendDtmbSettings;
+import android.media.tv.tuner.TunerFrontendDvbsSettings;
+import android.media.tv.tuner.TunerFrontendDvbtSettings;
+import android.media.tv.tuner.TunerFrontendIsdbsSettings;
+import android.media.tv.tuner.TunerFrontendIsdbs3Settings;
+import android.media.tv.tuner.TunerFrontendIsdbtSettings;
+
+/**
+ * Frontend Settings Union interface.
+ *
+ * {@hide}
+ */
+union TunerFrontendUnionSettings {
+ TunerFrontendAnalogSettings analog;
+
+ TunerFrontendAtscSettings atsc;
+
+ TunerFrontendAtsc3Settings atsc3;
+
+ TunerFrontendCableSettings cable;
+
+ TunerFrontendDvbsSettings dvbs;
+
+ TunerFrontendDvbtSettings dvbt;
+
+ TunerFrontendIsdbsSettings isdbs;
+
+ TunerFrontendIsdbs3Settings isdbs3;
+
+ TunerFrontendIsdbtSettings isdbt;
+
+ TunerFrontendDtmbSettings dtmb;
+}
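
Finally, a hedged client-side sketch of the restructured settings: the union member carries the 1.0 payload while isExtended/endFrequency/inversion describe the 1.1 extension. The concrete frequencies and the tunerFrontend proxy are illustrative, and tune() is assumed to keep accepting a TunerFrontendSettings.

    // Illustrative client-side construction; values are examples only.
    TunerFrontendDvbtSettings dvbt;
    dvbt.frequency = 474000000;        // Hz (field assumed from the 1.0 settings)
    dvbt.isExtended = true;            // constellation/transmissionMode use 1.1 enums

    TunerFrontendSettings settings;
    settings.settings.set<TunerFrontendUnionSettings::dvbt>(dvbt);
    settings.isExtended = true;        // ask the service to build FrontendSettingsExt1_1
    settings.endFrequency = 698000000; // end frequency for blind scan
    settings.inversion = static_cast<int32_t>(FrontendSpectralInversion::NORMAL);
    tunerFrontend->tune(settings);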