Merge "cameraserver: Remove leftover error log message for openAidlSession." into tm-dev
diff --git a/METADATA b/METADATA
index aabda36..146bfcb 100644
--- a/METADATA
+++ b/METADATA
@@ -2,22 +2,22 @@
# CONSULT THE OWNERS AND opensource-licensing@google.com BEFORE
# DEPENDING ON IT IN YOUR PROJECT. ***
third_party {
- # would be NOTICE save for Widevine Master License Agreement in:
- # drm/mediadrm/plugins/clearkey/hidl/DeviceFiles.cpp
- # drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
- # drm/mediadrm/plugins/clearkey/hidl/include/DeviceFiles.h
- # drm/mediadrm/plugins/clearkey/hidl/protos/DeviceFiles.proto
- # drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
- # and patent disclaimers in:
- # media/codec2/components/aac/patent_disclaimer.txt
- # media/codec2/components/amr_nb_wb/patent_disclaimer.txt
- # media/codec2/components/mp3/patent_disclaimer.txt
- # media/codec2/components/mpeg4_h263/patent_disclaimer.txt
- # media/codecs/amrnb/patent_disclaimer.txt
- # media/codecs/amrwb/dec/patent_disclaimer.txt
- # media/codecs/amrwb/enc/patent_disclaimer.txt
- # media/codecs/m4v_h263/patent_disclaimer.txt
- # media/codecs/mp3dec/patent_disclaimer.txt
- # media/libstagefright/codecs/aacenc/patent_disclaimer.txt
+ license_note: "would be NOTICE save for Widevine Master License Agreement in:\n"
+ " drm/mediadrm/plugins/clearkey/hidl/DeviceFiles.cpp\n"
+ " drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp\n"
+ " drm/mediadrm/plugins/clearkey/hidl/include/DeviceFiles.h\n"
+ " drm/mediadrm/plugins/clearkey/hidl/protos/DeviceFiles.proto\n"
+ " drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h\n"
+ " and patent disclaimers in:\n"
+ " media/codec2/components/aac/patent_disclaimer.txt\n"
+ " media/codec2/components/amr_nb_wb/patent_disclaimer.txt\n"
+ " media/codec2/components/mp3/patent_disclaimer.txt\n"
+ " media/codec2/components/mpeg4_h263/patent_disclaimer.txt\n"
+ " media/codecs/amrnb/patent_disclaimer.txt\n"
+ " media/codecs/amrwb/dec/patent_disclaimer.txt\n"
+ " media/codecs/amrwb/enc/patent_disclaimer.txt\n"
+ " media/codecs/m4v_h263/patent_disclaimer.txt\n"
+ " media/codecs/mp3dec/patent_disclaimer.txt\n"
+ " media/libstagefright/codecs/aacenc/patent_disclaimer.txt"
license_type: BY_EXCEPTION_ONLY
}
diff --git a/apex/Android.bp b/apex/Android.bp
index b9abd12..b9b9bde 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -23,7 +23,6 @@
apex_defaults {
name: "com.android.media-defaults",
- updatable: true,
bootclasspath_fragments: ["com.android.media-bootclasspath-fragment"],
systemserverclasspath_fragments: ["com.android.media-systemserverclasspath-fragment"],
multilib: {
@@ -67,14 +66,13 @@
// Use a custom AndroidManifest.xml used for API targeting.
androidManifest: ":com.android.media-androidManifest",
- // IMPORTANT: For the APEX to be installed on Android 10 (API 29),
- // min_sdk_version should be 29. This enables the build system to make
+ // IMPORTANT: q-launched-apex-module enables the build system to make
// sure the package compatible to Android 10 in two ways:
// - build the APEX package compatible to Android 10
// so that the package can be installed.
// - build artifacts (lib/javalib/bin) against Android 10 SDK
// so that the artifacts can run.
- min_sdk_version: "29",
+ defaults: ["q-launched-apex-module"],
// Indicates that pre-installed version of this apex can be compressed.
// Whether it actually will be compressed is controlled on per-device basis.
compressible: true,
@@ -126,6 +124,26 @@
// modified by the Soong or platform compat team.
hidden_api: {
max_target_o_low_priority: ["hiddenapi/hiddenapi-max-target-o-low-priority.txt"],
+
+ // The following packages contain classes from other modules on the
+ // bootclasspath. That means that the hidden API flags for this module
+ // have to explicitly list every single class this module provides in
+ // that package to differentiate them from the classes provided by other
+ // modules. That can include private classes that are not part of the
+ // API.
+ split_packages: [
+ "android.media",
+ ],
+
+ // The following packages and all their subpackages currently only
+ // contain classes from this bootclasspath_fragment. Listing a package
+ // here won't prevent other bootclasspath modules from adding classes in
+ // any of those packages but it will prevent them from adding those
+ // classes into an API surface, e.g. public, system, etc. Doing so will
+ // result in a build failure due to inconsistent flags.
+ package_prefixes: [
+ "android.media.internal",
+ ],
},
}
@@ -148,7 +166,6 @@
apex_defaults {
name: "com.android.media.swcodec-defaults",
- updatable: true,
binaries: [
"mediaswcodec",
],
@@ -172,14 +189,13 @@
// Use a custom AndroidManifest.xml used for API targeting.
androidManifest: ":com.android.media.swcodec-androidManifest",
- // IMPORTANT: For the APEX to be installed on Android 10 (API 29),
- // min_sdk_version should be 29. This enables the build system to make
+ // IMPORTANT: q-launched-apex-module enables the build system to make
// sure the package compatible to Android 10 in two ways:
// - build the APEX package compatible to Android 10
// so that the package can be installed.
// - build artifacts (lib/javalib/bin) against Android 10 SDK
// so that the artifacts can run.
- min_sdk_version: "29",
+ defaults: ["q-launched-apex-module"],
// Indicates that pre-installed version of this apex can be compressed.
// Whether it actually will be compressed is controlled on per-device basis.
compressible: true,
diff --git a/camera/CameraSessionStats.cpp b/camera/CameraSessionStats.cpp
index 2a07ffc..05341bf 100644
--- a/camera/CameraSessionStats.cpp
+++ b/camera/CameraSessionStats.cpp
@@ -52,6 +52,12 @@
return err;
}
+ float maxPreviewFps = 0;
+ if ((err = parcel->readFloat(&maxPreviewFps)) != OK) {
+ ALOGE("%s: Failed to read maxPreviewFps from parcel", __FUNCTION__);
+ return err;
+ }
+
int dataSpace = 0;
if ((err = parcel->readInt32(&dataSpace)) != OK) {
ALOGE("%s: Failed to read dataSpace from parcel", __FUNCTION__);
@@ -112,14 +118,14 @@
return err;
}
- int dynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
- if ((err = parcel->readInt32(&dynamicRangeProfile)) != OK) {
+ int64_t dynamicRangeProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+ if ((err = parcel->readInt64(&dynamicRangeProfile)) != OK) {
ALOGE("%s: Failed to read dynamic range profile type from parcel", __FUNCTION__);
return err;
}
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
- if ((err = parcel->readInt32(&streamUseCase)) != OK) {
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+ if ((err = parcel->readInt64(&streamUseCase)) != OK) {
ALOGE("%s: Failed to read stream use case from parcel", __FUNCTION__);
return err;
}
@@ -127,6 +133,7 @@
mWidth = width;
mHeight = height;
mFormat = format;
+ mMaxPreviewFps = maxPreviewFps;
mDataSpace = dataSpace;
mUsage = usage;
mRequestCount = requestCount;
@@ -166,6 +173,11 @@
return err;
}
+ if ((err = parcel->writeFloat(mMaxPreviewFps)) != OK) {
+ ALOGE("%s: Failed to write stream maxPreviewFps!", __FUNCTION__);
+ return err;
+ }
+
if ((err = parcel->writeInt32(mDataSpace)) != OK) {
ALOGE("%s: Failed to write stream dataSpace!", __FUNCTION__);
return err;
@@ -216,12 +228,12 @@
return err;
}
- if ((err = parcel->writeInt32(mDynamicRangeProfile)) != OK) {
+ if ((err = parcel->writeInt64(mDynamicRangeProfile)) != OK) {
ALOGE("%s: Failed to write dynamic range profile type", __FUNCTION__);
return err;
}
- if ((err = parcel->writeInt32(mStreamUseCase)) != OK) {
+ if ((err = parcel->writeInt64(mStreamUseCase)) != OK) {
ALOGE("%s: Failed to write stream use case!", __FUNCTION__);
return err;
}
@@ -247,6 +259,7 @@
mApiLevel(0),
mIsNdk(false),
mLatencyMs(-1),
+ mMaxPreviewFps(0),
mSessionType(0),
mInternalReconfigure(0),
mRequestCount(0),
@@ -263,6 +276,7 @@
mApiLevel(apiLevel),
mIsNdk(isNdk),
mLatencyMs(latencyMs),
+ mMaxPreviewFps(0),
mSessionType(0),
mInternalReconfigure(0),
mRequestCount(0),
@@ -319,6 +333,12 @@
return err;
}
+ float maxPreviewFps;
+ if ((err = parcel->readFloat(&maxPreviewFps)) != OK) {
+ ALOGE("%s: Failed to read maxPreviewFps from parcel", __FUNCTION__);
+ return err;
+ }
+
int32_t sessionType;
if ((err = parcel->readInt32(&sessionType)) != OK) {
ALOGE("%s: Failed to read session type from parcel", __FUNCTION__);
@@ -362,6 +382,7 @@
mApiLevel = apiLevel;
mIsNdk = isNdk;
mLatencyMs = latencyMs;
+ mMaxPreviewFps = maxPreviewFps;
mSessionType = sessionType;
mInternalReconfigure = internalReconfigure;
mRequestCount = requestCount;
@@ -415,6 +436,11 @@
return err;
}
+ if ((err = parcel->writeFloat(mMaxPreviewFps)) != OK) {
+ ALOGE("%s: Failed to write maxPreviewFps!", __FUNCTION__);
+ return err;
+ }
+
if ((err = parcel->writeInt32(mSessionType)) != OK) {
ALOGE("%s: Failed to write session type!", __FUNCTION__);
return err;
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 5b8da34..11d4960 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -81,7 +81,7 @@
return mDynamicRangeProfile;
}
-int OutputConfiguration::getStreamUseCase() const {
+int64_t OutputConfiguration::getStreamUseCase() const {
return mStreamUseCase;
}
@@ -192,8 +192,8 @@
return err;
}
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
- if ((err = parcel->readInt32(&streamUseCase)) != OK) {
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+ if ((err = parcel->readInt64(&streamUseCase)) != OK) {
ALOGE("%s: Failed to read stream use case from parcel", __FUNCTION__);
return err;
}
@@ -232,8 +232,8 @@
mDynamicRangeProfile = dynamicProfile;
ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d,"
- " physicalCameraId = %s, isMultiResolution = %d, streamUseCase = %d, timestampBase = %d,"
- " mirrorMode = %d",
+ " physicalCameraId = %s, isMultiResolution = %d, streamUseCase = %" PRId64
+ ", timestampBase = %d, mirrorMode = %d",
__FUNCTION__, mRotation, mSurfaceSetID, mSurfaceType,
String8(mPhysicalCameraId).string(), mIsMultiResolution, mStreamUseCase, timestampBase,
mMirrorMode);
@@ -317,7 +317,7 @@
err = parcel->writeInt64(mDynamicRangeProfile);
if (err != OK) return err;
- err = parcel->writeInt32(mStreamUseCase);
+ err = parcel->writeInt64(mStreamUseCase);
if (err != OK) return err;
err = parcel->writeInt32(mTimestampBase);
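A side note on the logging change above: once mStreamUseCase is widened to int64_t, a plain %d specifier no longer matches the argument, which is why the format string switches to PRId64. A minimal standalone sketch of that <inttypes.h> idiom; the variable and printf output below are illustrative, not taken from the camera code:

    #include <cinttypes>
    #include <cstdio>

    int main() {
        int64_t streamUseCase = 4;  // hypothetical value
        // PRId64 expands to the right conversion specifier for int64_t,
        // so the same format string is correct on 32-bit and 64-bit builds.
        std::printf("streamUseCase = %" PRId64 "\n", streamUseCase);
        return 0;
    }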
diff --git a/camera/include/camera/CameraSessionStats.h b/camera/include/camera/CameraSessionStats.h
index 26dc70c..15f5622 100644
--- a/camera/include/camera/CameraSessionStats.h
+++ b/camera/include/camera/CameraSessionStats.h
@@ -37,6 +37,7 @@
int mWidth;
int mHeight;
int mFormat;
+ float mMaxPreviewFps;
int mDataSpace;
int64_t mUsage;
@@ -65,20 +66,20 @@
// Dynamic range profile
int64_t mDynamicRangeProfile;
// Stream use case
- int mStreamUseCase;
+ int64_t mStreamUseCase;
CameraStreamStats() :
- mWidth(0), mHeight(0), mFormat(0), mDataSpace(0), mUsage(0),
+ mWidth(0), mHeight(0), mFormat(0), mMaxPreviewFps(0), mDataSpace(0), mUsage(0),
mRequestCount(0), mErrorCount(0), mStartLatencyMs(0),
mMaxHalBuffers(0), mMaxAppBuffers(0), mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
mDynamicRangeProfile(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD),
mStreamUseCase(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT) {}
- CameraStreamStats(int width, int height, int format, int dataSpace, int64_t usage,
- int maxHalBuffers, int maxAppBuffers, int dynamicRangeProfile,
+ CameraStreamStats(int width, int height, int format, float maxPreviewFps, int dataSpace,
+ int64_t usage, int maxHalBuffers, int maxAppBuffers, int dynamicRangeProfile,
int streamUseCase)
- : mWidth(width), mHeight(height), mFormat(format), mDataSpace(dataSpace),
- mUsage(usage), mRequestCount(0), mErrorCount(0), mStartLatencyMs(0),
- mMaxHalBuffers(maxHalBuffers), mMaxAppBuffers(maxAppBuffers),
+ : mWidth(width), mHeight(height), mFormat(format), mMaxPreviewFps(maxPreviewFps),
+ mDataSpace(dataSpace), mUsage(usage), mRequestCount(0), mErrorCount(0),
+ mStartLatencyMs(0), mMaxHalBuffers(maxHalBuffers), mMaxAppBuffers(maxAppBuffers),
mHistogramType(HISTOGRAM_TYPE_UNKNOWN),
mDynamicRangeProfile(dynamicRangeProfile),
mStreamUseCase(streamUseCase) {}
@@ -123,6 +124,7 @@
bool mIsNdk;
// latency in ms for camera open, close, or session creation.
int mLatencyMs;
+ float mMaxPreviewFps;
// Session info and statistics
int mSessionType;
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index 6b0f333..b842885 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -63,7 +63,7 @@
bool isShared() const;
String16 getPhysicalCameraId() const;
bool isMultiResolution() const;
- int getStreamUseCase() const;
+ int64_t getStreamUseCase() const;
int getTimestampBase() const;
int getMirrorMode() const;
@@ -185,7 +185,7 @@
bool mIsMultiResolution;
std::vector<int32_t> mSensorPixelModesUsed;
int64_t mDynamicRangeProfile;
- int mStreamUseCase;
+ int64_t mStreamUseCase;
int mTimestampBase;
int mMirrorMode;
};
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 4c492f0..4891034 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -4216,7 +4216,7 @@
/**
* <p>The stream use cases supported by this camera device.</p>
*
- * <p>Type: int32[n] (acamera_metadata_enum_android_scaler_available_stream_use_cases_t)</p>
+ * <p>Type: int64[n] (acamera_metadata_enum_android_scaler_available_stream_use_cases_t)</p>
*
* <p>This tag may appear in:
* <ul>
@@ -4260,7 +4260,7 @@
* reprocessable session, constrained high speed session, or RAW stream combinations, the
* application should leave stream use cases within the session as DEFAULT.</p>
*/
- ACAMERA_SCALER_AVAILABLE_STREAM_USE_CASES = // int32[n] (acamera_metadata_enum_android_scaler_available_stream_use_cases_t)
+ ACAMERA_SCALER_AVAILABLE_STREAM_USE_CASES = // int64[n] (acamera_metadata_enum_android_scaler_available_stream_use_cases_t)
ACAMERA_SCALER_START + 25,
ACAMERA_SCALER_END,
diff --git a/drm/libmediadrm/CryptoHalAidl.cpp b/drm/libmediadrm/CryptoHalAidl.cpp
index 3dc62e9..bda664a 100644
--- a/drm/libmediadrm/CryptoHalAidl.cpp
+++ b/drm/libmediadrm/CryptoHalAidl.cpp
@@ -353,7 +353,9 @@
err = statusAidlToStatusT(statusAidl);
std::string msgStr(statusAidl.getMessage());
- *errorDetailMsg = toString8(msgStr);
+ if (errorDetailMsg != nullptr) {
+ *errorDetailMsg = toString8(msgStr);
+ }
if (err != OK) {
ALOGE("Failed on decrypt, error description:%s", statusAidl.getDescription().c_str());
return err;
@@ -415,4 +417,4 @@
return DrmUtils::GetLogMessagesAidl<ICryptoPluginAidl>(mPlugin, logs);
}
-} // namespace android
\ No newline at end of file
+} // namespace android
diff --git a/drm/libmediadrm/CryptoHalHidl.cpp b/drm/libmediadrm/CryptoHalHidl.cpp
index cbb6ddf..a290704 100644
--- a/drm/libmediadrm/CryptoHalHidl.cpp
+++ b/drm/libmediadrm/CryptoHalHidl.cpp
@@ -342,7 +342,9 @@
[&](Status_V1_2 status, uint32_t hBytesWritten, hidl_string hDetailedError) {
if (status == Status_V1_2::OK) {
bytesWritten = hBytesWritten;
- *errorDetailMsg = toString8(hDetailedError);
+ if (errorDetailMsg != nullptr) {
+ *errorDetailMsg = toString8(hDetailedError);
+ }
}
err = toStatusT(status);
});
@@ -353,7 +355,9 @@
[&](Status status, uint32_t hBytesWritten, hidl_string hDetailedError) {
if (status == Status::OK) {
bytesWritten = hBytesWritten;
- *errorDetailMsg = toString8(hDetailedError);
+ if (errorDetailMsg != nullptr) {
+ *errorDetailMsg = toString8(hDetailedError);
+ }
}
err = toStatusT(status);
});
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index aa40793..c394d5a 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -286,4 +286,11 @@
return mDrmHalHidl->getLogMessages(logs);
}
+status_t DrmHal::getSupportedSchemes(std::vector<uint8_t> &schemes) const {
+ status_t statusResult;
+ statusResult = mDrmHalAidl->getSupportedSchemes(schemes);
+ if (statusResult == OK) return statusResult;
+ return mDrmHalHidl->getSupportedSchemes(schemes);
+}
+
} // namespace android
diff --git a/drm/libmediadrm/DrmHalAidl.cpp b/drm/libmediadrm/DrmHalAidl.cpp
index 284abd5..bdd83e9 100644
--- a/drm/libmediadrm/DrmHalAidl.cpp
+++ b/drm/libmediadrm/DrmHalAidl.cpp
@@ -1189,6 +1189,25 @@
return serializedMetrics;
}
+status_t DrmHalAidl::getSupportedSchemes(std::vector<uint8_t> &schemes) const {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mFactories.empty()) return UNKNOWN_ERROR;
+ for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
+ CryptoSchemes curSchemes{};
+ auto err = mFactories[i]->getSupportedCryptoSchemes(&curSchemes);
+ if (!err.isOk()) {
+ continue;
+ }
+
+ for (auto uuidObj : curSchemes.uuids) {
+ schemes.insert(schemes.end(), uuidObj.uuid.begin(), uuidObj.uuid.end());
+ }
+ }
+
+ return OK;
+}
+
void DrmHalAidl::cleanup() {
closeOpenSessions();
diff --git a/drm/libmediadrm/DrmHalHidl.cpp b/drm/libmediadrm/DrmHalHidl.cpp
index c83b52b..c38dbef 100644
--- a/drm/libmediadrm/DrmHalHidl.cpp
+++ b/drm/libmediadrm/DrmHalHidl.cpp
@@ -20,6 +20,7 @@
#include <aidl/android/media/BnResourceManagerClient.h>
#include <android/binder_manager.h>
#include <android/hardware/drm/1.2/types.h>
+#include <android/hardware/drm/1.3/IDrmFactory.h>
#include <android/hidl/manager/1.2/IServiceManager.h>
#include <hidl/ServiceManagement.h>
#include <media/EventMetric.h>
@@ -1514,4 +1515,23 @@
return DrmUtils::GetLogMessages<drm::V1_4::IDrmPlugin>(mPlugin, logs);
}
+status_t DrmHalHidl::getSupportedSchemes(std::vector<uint8_t> &schemes) const {
+ Mutex::Autolock autoLock(mLock);
+ for (auto &factory : mFactories) {
+ sp<drm::V1_3::IDrmFactory> factoryV1_3 = drm::V1_3::IDrmFactory::castFrom(factory);
+ if (factoryV1_3 == nullptr) {
+ continue;
+ }
+
+ factoryV1_3->getSupportedCryptoSchemes(
+ [&](const hardware::hidl_vec<hardware::hidl_array<uint8_t, 16>>& schemes_hidl) {
+ for (const auto &scheme : schemes_hidl) {
+ schemes.insert(schemes.end(), scheme.data(), scheme.data() + scheme.size());
+ }
+ });
+ }
+
+ return OK;
+}
+
} // namespace android
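A note on the getSupportedSchemes() additions above (both the AIDL and HIDL paths): each supported crypto scheme is appended to the output vector as a raw 16-byte UUID, so the result is a flat byte buffer that callers walk in 16-byte strides. A sketch of that consumer side, assuming only the flat-vector layout shown above; the helper name is invented for the example:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical helper: split the flat byte vector returned by
    // getSupportedSchemes() into individual 16-byte UUIDs and print them.
    static void printSchemes(const std::vector<uint8_t>& schemes) {
        constexpr size_t kUuidSize = 16;
        for (size_t off = 0; off + kUuidSize <= schemes.size(); off += kUuidSize) {
            for (size_t i = 0; i < kUuidSize; ++i) {
                std::printf("%02x", schemes[off + i]);
            }
            std::printf("\n");
        }
    }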
diff --git a/drm/libmediadrm/DrmUtils.cpp b/drm/libmediadrm/DrmUtils.cpp
index 731755b..be0cd4b 100644
--- a/drm/libmediadrm/DrmUtils.cpp
+++ b/drm/libmediadrm/DrmUtils.cpp
@@ -177,7 +177,7 @@
[](const char* instance, void* context) {
auto fullName = std::string(IDrmFactoryAidl::descriptor) + "/" + std::string(instance);
auto factory = IDrmFactoryAidl::fromBinder(
- ::ndk::SpAIBinder(AServiceManager_getService(fullName.c_str())));
+ ::ndk::SpAIBinder(AServiceManager_waitForService(fullName.c_str())));
if (factory == nullptr) {
ALOGE("not found IDrmFactory. Instance name:[%s]", fullName.c_str());
return;
diff --git a/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp b/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp
index eabd41f..597b72d 100644
--- a/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp
+++ b/drm/libmediadrm/fuzzer/mediadrm_fuzzer.cpp
@@ -20,6 +20,7 @@
#include <binder/MemoryDealer.h>
#include <hidlmemory/FrameworkUtils.h>
+#include <media/stagefright/foundation/AString.h>
#include <mediadrm/CryptoHal.h>
#include <mediadrm/DrmHal.h>
#include <utils/String8.h>
@@ -401,7 +402,7 @@
.secureMemory = nullptr};
const uint64_t offset = 0;
- AString *errorDetailMsg = nullptr;
+ AString errorDetailMsg;
CryptoPlugin::Mode mode;
bool shouldPassRandomCryptoMode = mFuzzedDataProvider->ConsumeBool();
if (shouldPassRandomCryptoMode) {
@@ -411,7 +412,7 @@
kCryptoMode[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(0, kNumCryptoMode - 1)];
}
mCrypto->decrypt(keyId, iv, mode, pattern, sourceBuffer, offset, subSamples, numSubSamples,
- destBuffer, errorDetailMsg);
+ destBuffer, &errorDetailMsg);
if (heapSeqNum >= 0) {
mCrypto->unsetHeap(heapSeqNum);
diff --git a/drm/libmediadrm/include/mediadrm/DrmHal.h b/drm/libmediadrm/include/mediadrm/DrmHal.h
index f5e75ac..eab597b 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHal.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHal.h
@@ -117,6 +117,7 @@
Vector<uint8_t> const &sessionId,
const char *playbackId);
virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
+ virtual status_t getSupportedSchemes(std::vector<uint8_t> &schemes) const;
private:
sp<IDrm> mDrmHalHidl;
diff --git a/drm/libmediadrm/include/mediadrm/DrmHalAidl.h b/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
index e35140e..0f51ce9 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
@@ -105,6 +105,7 @@
bool* required) const;
virtual status_t setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId);
virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const;
+ virtual status_t getSupportedSchemes(std::vector<uint8_t> &schemes) const;
::ndk::ScopedAStatus onEvent(EventTypeAidl in_eventType,
const std::vector<uint8_t>& in_sessionId,
diff --git a/drm/libmediadrm/include/mediadrm/DrmHalHidl.h b/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
index 94ef285..11f0608 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
@@ -184,6 +184,7 @@
const char *playbackId);
virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
+ virtual status_t getSupportedSchemes(std::vector<uint8_t> &schemes) const;
// Methods of IDrmPluginListener
Return<void> sendEvent(EventType eventType,
diff --git a/drm/libmediadrm/include/mediadrm/IDrm.h b/drm/libmediadrm/include/mediadrm/IDrm.h
index a88784d..ee2be6a 100644
--- a/drm/libmediadrm/include/mediadrm/IDrm.h
+++ b/drm/libmediadrm/include/mediadrm/IDrm.h
@@ -165,6 +165,8 @@
virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const = 0;
+ virtual status_t getSupportedSchemes(std::vector<uint8_t> &schemes) const = 0;
+
protected:
IDrm() {}
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index 66fe488..980ce55 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -31,6 +31,7 @@
#include <chrono>
#include <cstddef>
#include <cstdint>
+#include <cstring>
#include <ctime>
#include <deque>
#include <endian.h>
@@ -100,7 +101,8 @@
template <typename... Args>
void LogToBuffer(android_LogPriority level, const uint8_t uuid[16], const char *fmt, Args... args) {
- const uint64_t* uuid2 = reinterpret_cast<const uint64_t*>(uuid);
+ uint64_t uuid2[2] = {};
+ std::memcpy(uuid2, uuid, sizeof(uuid2));
std::string uuidFmt("uuid=[%lx %lx] ");
uuidFmt += fmt;
LogToBuffer(level, uuidFmt.c_str(), htobe64(uuid2[0]), htobe64(uuid2[1]), args...);
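The LogToBuffer() change above replaces a reinterpret_cast of the caller's uint8_t[16] with a memcpy into local uint64_t storage, which avoids misaligned reads and strict-aliasing problems. A self-contained sketch of the same idiom, assuming a bionic/glibc <endian.h> for htobe64; the function name and printf output are illustrative only:

    #include <cinttypes>
    #include <cstdio>
    #include <cstring>
    #include <endian.h>

    static void logUuid(const uint8_t uuid[16]) {
        uint64_t halves[2] = {};
        std::memcpy(halves, uuid, sizeof(halves));  // well-defined for any alignment
        std::printf("uuid=[%" PRIx64 " %" PRIx64 "]\n",
                    htobe64(halves[0]), htobe64(halves[1]));
    }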
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
index fd4ef95..9919e90 100644
--- a/drm/mediadrm/plugins/TEST_MAPPING
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -1,19 +1,10 @@
{
"presubmit": [
{
- "name": "CtsMediaDrmTestCases",
+ "name": "CtsMediaDrmFrameworkTestCases",
"options" : [
{
"include-annotation": "android.platform.test.annotations.Presubmit"
- },
- {
- "include-filter": "android.mediadrm.cts.MediaDrmClearkeyTest"
- },
- {
- "include-filter": "android.mediadrm.cts.MediaDrmMetricsTest"
- },
- {
- "include-filter": "android.mediadrm.cts.NativeMediaDrmClearkeyTest"
}
]
}
diff --git a/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
index 7331ded..ea51e9d 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
@@ -28,6 +28,7 @@
#include "DrmPlugin.h"
#include "Session.h"
#include "Utils.h"
+#include "AidlClearKeryProperties.h"
namespace {
const std::string kKeySetIdPrefix("ckid");
@@ -81,12 +82,13 @@
void DrmPlugin::initProperties() {
mStringProperties.clear();
- mStringProperties[kVendorKey] = kVendorValue;
- mStringProperties[kVersionKey] = kVersionValue;
- mStringProperties[kPluginDescriptionKey] = kPluginDescriptionValue;
- mStringProperties[kAlgorithmsKey] = kAlgorithmsValue;
- mStringProperties[kListenerTestSupportKey] = kListenerTestSupportValue;
- mStringProperties[kDrmErrorTestKey] = kDrmErrorTestValue;
+ mStringProperties[kVendorKey] = kAidlVendorValue;
+ mStringProperties[kVersionKey] = kAidlVersionValue;
+ mStringProperties[kPluginDescriptionKey] = kAidlPluginDescriptionValue;
+ mStringProperties[kAlgorithmsKey] = kAidlAlgorithmsValue;
+ mStringProperties[kListenerTestSupportKey] = kAidlListenerTestSupportValue;
+ mStringProperties[kDrmErrorTestKey] = kAidlDrmErrorTestValue;
+ mStringProperties[kAidlVersionKey] = kAidlVersionValue;
std::vector<uint8_t> valueVector;
valueVector.clear();
@@ -377,6 +379,8 @@
value = mStringProperties[kListenerTestSupportKey];
} else if (name == kDrmErrorTestKey) {
value = mStringProperties[kDrmErrorTestKey];
+ } else if (name == kAidlVersionKey) {
+ value = mStringProperties[kAidlVersionKey];
} else {
ALOGE("App requested unknown string property %s", name.c_str());
status = Status::ERROR_DRM_CANNOT_HANDLE;
diff --git a/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc
index 019c726..c87aabc 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/aidl/android.hardware.drm-service-lazy.clearkey.rc
@@ -1,9 +1,9 @@
-service vendor.drm-clearkey-service /vendor/bin/hw/android.hardware.drm-service.clearkey
+service vendor.drm-clearkey-service /vendor/bin/hw/android.hardware.drm-service-lazy.clearkey
+ oneshot
disabled
class hal
user media
group mediadrm drmrpc
ioprio rt 4
task_profiles ProcessCapacityHigh
- interface aidl android.hardware.drm.IDrmFactory/clearkey
- interface aidl android.hardware.drm.ICryptoFactory/clearkey
+ interface aidl android.hardware.drm.IDrmFactory/clearkey
\ No newline at end of file
diff --git a/drm/mediadrm/plugins/clearkey/aidl/include/AidlClearKeryProperties.h b/drm/mediadrm/plugins/clearkey/aidl/include/AidlClearKeryProperties.h
new file mode 100644
index 0000000..fb2cceb
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/AidlClearKeryProperties.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AIDL_CLEARKEY_PROPERTIES_H
+#define AIDL_CLEARKEY_PROPERTIES_H
+#include <string>
+
+namespace clearkeydrm {
+static const std::string kAidlVendorValue("Google");
+static const std::string kAidlVersionValue("aidl-1");
+static const std::string kAidlPluginDescriptionValue("ClearKey CDM");
+static const std::string kAidlAlgorithmsValue("");
+static const std::string kAidlListenerTestSupportValue("true");
+
+static const std::string kAidlDrmErrorTestValue("");
+static const std::string kAidlResourceContentionValue("resourceContention");
+static const std::string kAidlLostStateValue("lostState");
+static const std::string kAidlFrameTooLargeValue("frameTooLarge");
+static const std::string kAidlInvalidStateValue("invalidState");
+} // namespace clearkeydrm
+
+#endif
\ No newline at end of file
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h
index 9a22633..bfda388 100644
--- a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h
@@ -34,6 +34,7 @@
static const std::string kLostStateValue("lostState");
static const std::string kFrameTooLargeValue("frameTooLarge");
static const std::string kInvalidStateValue("invalidState");
+static const std::string kAidlVersionKey("aidlVersion");
static const std::string kDeviceIdKey("deviceId");
static const uint8_t kTestDeviceIdData[] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index 16e794a..d4025e5 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -38,13 +38,13 @@
VideoFrame(uint32_t width, uint32_t height,
uint32_t displayWidth, uint32_t displayHeight,
uint32_t tileWidth, uint32_t tileHeight,
- uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
+ uint32_t angle, uint32_t bpp, uint32_t bitDepth, bool hasData, size_t iccSize):
mWidth(width), mHeight(height),
mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
mTileWidth(tileWidth), mTileHeight(tileHeight), mDurationUs(0),
mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
mSize(hasData ? (bpp * width * height) : 0),
- mIccSize(iccSize), mReserved(0) {
+ mIccSize(iccSize), mBitDepth(bitDepth) {
}
void init(const VideoFrame& copy, const void* iccData, size_t iccSize) {
@@ -84,7 +84,9 @@
uint32_t mRowBytes; // Number of bytes per row before rotation
uint32_t mSize; // Number of bytes of frame data
uint32_t mIccSize; // Number of bytes of ICC data
- uint32_t mReserved; // (padding to make mData 64-bit aligned)
+ uint32_t mBitDepth; // number of bits per R / G / B channel
+
+ // Any new items added here must keep mData 64-bit aligned.
};
}; // namespace android
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
index bb63e1f..7afea91 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
@@ -225,7 +225,7 @@
work->result = C2_CORRUPTED;
return;
}
- uint64_t outTimeStamp =
+ int64_t outTimeStamp =
mProcessedSamples * 1000000ll / mIntf->getSampleRate();
size_t inPos = 0;
size_t outPos = 0;
@@ -266,7 +266,7 @@
ALOGV("causal sample size %d", mFilledLen);
if (mIsFirst && outPos != 0) {
mIsFirst = false;
- mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
+ mAnchorTimeStamp = work->input.ordinal.timestamp.peekll();
}
fillEmptyWork(work);
if (outPos != 0) {
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.h b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.h
index 6ab14db..4920b23 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.h
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.h
@@ -54,7 +54,7 @@
bool mIsFirst;
bool mSignalledError;
bool mSignalledOutputEos;
- uint64_t mAnchorTimeStamp;
+ int64_t mAnchorTimeStamp;
uint64_t mProcessedSamples;
int32_t mFilledLen;
int16_t mInputFrame[kNumSamplesPerFrame];
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
index 84728ae..29b1040 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
@@ -307,7 +307,7 @@
work->result = wView.error();
return;
}
- uint64_t outTimeStamp =
+ int64_t outTimeStamp =
mProcessedSamples * 1000000ll / mIntf->getSampleRate();
size_t inPos = 0;
size_t outPos = 0;
@@ -341,7 +341,7 @@
ALOGV("causal sample size %d", mFilledLen);
if (mIsFirst && outPos != 0) {
mIsFirst = false;
- mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
+ mAnchorTimeStamp = work->input.ordinal.timestamp.peekll();
}
fillEmptyWork(work);
if (outPos != 0) {
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.h b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.h
index 0cc9e9f..72990c3 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.h
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.h
@@ -55,7 +55,7 @@
bool mIsFirst;
bool mSignalledError;
bool mSignalledOutputEos;
- uint64_t mAnchorTimeStamp;
+ int64_t mAnchorTimeStamp;
uint64_t mProcessedSamples;
int32_t mFilledLen;
int16_t mInputFrame[kNumSamplesPerFrame];
diff --git a/media/codec2/components/avc/Android.bp b/media/codec2/components/avc/Android.bp
index 7f82486..a7ae85b 100644
--- a/media/codec2/components/avc/Android.bp
+++ b/media/codec2/components/avc/Android.bp
@@ -18,6 +18,8 @@
static_libs: ["libavcdec"],
srcs: ["C2SoftAvcDec.cpp"],
+
+ export_include_dirs: ["."],
}
cc_library {
@@ -32,6 +34,8 @@
srcs: ["C2SoftAvcEnc.cpp"],
+ export_include_dirs: ["."],
+
cflags: [
"-Wno-unused-variable",
],
diff --git a/media/codec2/components/base/Android.bp b/media/codec2/components/base/Android.bp
index f1669fd..8c7f8db 100644
--- a/media/codec2/components/base/Android.bp
+++ b/media/codec2/components/base/Android.bp
@@ -9,6 +9,16 @@
default_applicable_licenses: ["frameworks_av_license"],
}
+cc_library_headers {
+ name: "libcodec2_soft_common_headers",
+ defaults: ["libcodec2-impl-defaults"],
+ vendor_available: true,
+
+ export_include_dirs: [
+ "include",
+ ],
+}
+
cc_library {
name: "libcodec2_soft_common",
defaults: ["libcodec2-impl-defaults"],
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 434246f..5295822 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -45,23 +45,23 @@
if (isMonochrome) {
// Fill with neutral U/V values.
- for (size_t i = 0; i < height / 2; ++i) {
- memset(dstV, kNeutralUVBitDepth8, width / 2);
- memset(dstU, kNeutralUVBitDepth8, width / 2);
+ for (size_t i = 0; i < (height + 1) / 2; ++i) {
+ memset(dstV, kNeutralUVBitDepth8, (width + 1) / 2);
+ memset(dstU, kNeutralUVBitDepth8, (width + 1) / 2);
dstV += dstUVStride;
dstU += dstUVStride;
}
return;
}
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstV, srcV, width / 2);
+ for (size_t i = 0; i < (height + 1) / 2; ++i) {
+ memcpy(dstV, srcV, (width + 1) / 2);
srcV += srcVStride;
dstV += dstUVStride;
}
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstU, srcU, width / 2);
+ for (size_t i = 0; i < (height + 1) / 2; ++i) {
+ memcpy(dstU, srcU, (width + 1) / 2);
srcU += srcUStride;
dstU += dstUVStride;
}
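The rounding change above only matters for odd dimensions: with width = 641, the old width / 2 copies 320 chroma samples per row and silently drops the last column, while (width + 1) / 2 copies the expected 321. A tiny sketch of the ceil-by-two idiom; the values are illustrative:

    #include <cstdio>

    int main() {
        const int width = 641;                  // odd luma width
        const int chromaOld = width / 2;        // 320 - truncates, loses a column
        const int chromaNew = (width + 1) / 2;  // 321 - rounds up
        std::printf("old=%d new=%d\n", chromaOld, chromaNew);
        return 0;
    }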
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index 2ed8541..e5fbe99 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -55,8 +55,8 @@
DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
.withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
.withFields({
- C2F(mSize, width).inRange(2, 4096, 2),
- C2F(mSize, height).inRange(2, 4096, 2),
+ C2F(mSize, width).inRange(2, 4096),
+ C2F(mSize, height).inRange(2, 4096),
})
.withSetter(SizeSetter)
.build());
@@ -650,8 +650,12 @@
}
C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
- c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16), mHeight, format,
- usage, &block);
+ // We always create a graphic block that is width aligned to 16 and height
+ // aligned to 2. We set the correct "crop" value of the image in the call to
+ // createGraphicBuffer() by setting the correct image dimensions.
+ c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16),
+ align(mHeight, 2), format, usage,
+ &block);
if (err != C2_OK) {
ALOGE("fetchGraphicBlock for Output failed with status %d", err);
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index b7a5686..4f5caec 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -123,7 +123,7 @@
// matches size limits in codec library
addParameter(
DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
- .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 64, 64))
.withFields({
C2F(mSize, width).inRange(2, 1920, 2),
C2F(mSize, height).inRange(2, 1088, 2),
@@ -133,7 +133,7 @@
addParameter(
DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
- .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
+ .withDefault(new C2StreamFrameRateInfo::output(0u, 1.))
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
.withSetter(
Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
diff --git a/media/codec2/hidl/1.2/utils/Component.cpp b/media/codec2/hidl/1.2/utils/Component.cpp
index 8924e6d..7994d32 100644
--- a/media/codec2/hidl/1.2/utils/Component.cpp
+++ b/media/codec2/hidl/1.2/utils/Component.cpp
@@ -520,6 +520,37 @@
if (res != C2_OK) {
mInit = res;
}
+
+ struct ListenerDeathRecipient : public HwDeathRecipient {
+ ListenerDeathRecipient(const wp<Component>& comp)
+ : component{comp} {
+ }
+
+ virtual void serviceDied(
+ uint64_t /* cookie */,
+ const wp<::android::hidl::base::V1_0::IBase>& /* who */
+ ) override {
+ auto strongComponent = component.promote();
+ if (strongComponent) {
+ LOG(INFO) << "Client died ! release the component !!";
+ strongComponent->release();
+ } else {
+ LOG(ERROR) << "Client died ! no component to release !!";
+ }
+ }
+
+ wp<Component> component;
+ };
+
+ mDeathRecipient = new ListenerDeathRecipient(self);
+ Return<bool> transStatus = mListener->linkToDeath(
+ mDeathRecipient, 0);
+ if (!transStatus.isOk()) {
+ LOG(ERROR) << "Listener linkToDeath() transaction failed.";
+ }
+ if (!static_cast<bool>(transStatus)) {
+ LOG(DEBUG) << "Listener linkToDeath() call failed.";
+ }
}
Component::~Component() {
diff --git a/media/codec2/hidl/1.2/utils/include/codec2/hidl/1.2/Component.h b/media/codec2/hidl/1.2/utils/include/codec2/hidl/1.2/Component.h
index 7937664..d0972ee 100644
--- a/media/codec2/hidl/1.2/utils/include/codec2/hidl/1.2/Component.h
+++ b/media/codec2/hidl/1.2/utils/include/codec2/hidl/1.2/Component.h
@@ -142,6 +142,10 @@
friend struct ComponentStore;
struct Listener;
+
+ using HwDeathRecipient = ::android::hardware::hidl_death_recipient;
+ sp<HwDeathRecipient> mDeathRecipient;
+
};
} // namespace utils
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index def8a18..2b9ec7d 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -212,9 +212,8 @@
(OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
&usage, sizeof(usage));
- mSource->configure(
- mOmxNode, static_cast<hardware::graphics::common::V1_0::Dataspace>(mDataSpace));
- return OK;
+ return GetStatus(mSource->configure(
+ mOmxNode, static_cast<hardware::graphics::common::V1_0::Dataspace>(mDataSpace)));
}
void disconnect() override {
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
index 674921e..fe63651 100644
--- a/media/codec2/sfplugin/utils/Android.bp
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -7,6 +7,17 @@
default_applicable_licenses: ["frameworks_av_license"],
}
+cc_library_headers {
+ name: "libsfplugin_ccodec_utils_headers",
+ vendor_available: true,
+ min_sdk_version: "29",
+ apex_available: [ "//apex_available:platform", "com.android.media.swcodec", ],
+
+ export_include_dirs: [
+ ".",
+ ],
+}
+
cc_library {
name: "libsfplugin_ccodec_utils",
vendor_available: true,
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index 598500d..4047173 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -100,7 +100,6 @@
"libdmabufheap",
"libfmq",
"libgralloctypes",
- "libhardware",
"libhidlbase",
"libion",
"liblog",
@@ -149,7 +148,6 @@
shared_libs: [
"libui",
"libdl",
- "libhardware",
"libvndksupport",
"libprocessgroup",
],
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index b5200a5..d8d6f06 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -265,6 +265,7 @@
for (const PlaneLayoutComponent &component : plane.components) {
if (!gralloc4::isStandardPlaneLayoutComponentType(component.type)) {
+ mapper.unlock(handle);
return C2_CANNOT_DO;
}
@@ -287,6 +288,7 @@
channel = C2PlaneInfo::CHANNEL_CR;
break;
default:
+ mapper.unlock(handle);
return C2_CORRUPTED;
}
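The two mapper.unlock(handle) calls added above close a lock leak on the early error returns. An alternative that covers every exit path automatically is a small RAII guard; this is not what the patch does, just a sketch assuming a mapper type that exposes an unlock(handle) method:

    // Hypothetical RAII guard: unlocks the buffer when the scope is left,
    // including on early returns. "Mapper" and "Handle" stand in for the
    // real gralloc mapper and handle types, which are not shown here.
    template <typename Mapper, typename Handle>
    class ScopedLock {
    public:
        ScopedLock(Mapper& mapper, Handle handle) : mMapper(mapper), mHandle(handle) {}
        ~ScopedLock() { mMapper.unlock(mHandle); }
        ScopedLock(const ScopedLock&) = delete;
        ScopedLock& operator=(const ScopedLock&) = delete;
    private:
        Mapper& mMapper;
        Handle mHandle;
    };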
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index fb935b6..eccbf46 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -1127,15 +1127,15 @@
void *data;
size_t size;
- if (AMediaFormat_getBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0,
+ if (AMediaFormat_getBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_2,
&data, &size)
- && size >= 24) {
- const uint8_t *ptr = (const uint8_t *)data + (size - 24);
+ && size >= 5) {
+ const uint8_t *ptr = (const uint8_t *)data;
const uint8_t profile = ptr[2] >> 1;
- const uint8_t bl_compatibility_id = (ptr[4]) >> 4;
+ const uint8_t blCompatibilityId = (ptr[4]) >> 4;
bool create_two_tracks = false;
- if (bl_compatibility_id && bl_compatibility_id != 15) {
+ if (blCompatibilityId && blCompatibilityId != 15) {
create_two_tracks = true;
}
@@ -1168,11 +1168,11 @@
mLastTrack->next = track_b;
track_b->next = NULL;
- // we want to remove the csd-0 key from the metadata, but
+ // we want to remove the csd-2 key from the metadata, but
// don't have an AMediaFormat_* function to do so. Settle
- // for replacing this csd-0 with an empty csd-0.
+ // for replacing this csd-2 with an empty csd-2.
uint8_t emptybuffer[8] = {};
- AMediaFormat_setBuffer(track_b->meta, AMEDIAFORMAT_KEY_CSD_0,
+ AMediaFormat_setBuffer(track_b->meta, AMEDIAFORMAT_KEY_CSD_2,
emptybuffer, 0);
if (4 == profile || 7 == profile || 8 == profile ) {
@@ -1184,8 +1184,6 @@
} else if (10 == profile) {
AMediaFormat_setString(track_b->meta,
AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_AV1);
- AMediaFormat_setBuffer(track_b->meta, AMEDIAFORMAT_KEY_CSD_0,
- data, size - 24);
} // Should never get to else part
mLastTrack = track_b;
@@ -2618,22 +2616,8 @@
if (mLastTrack == NULL)
return ERROR_MALFORMED;
- void *data = nullptr;
- size_t size = 0;
- if (AMediaFormat_getBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
- //if csd-0 is already present, then append dvcc
- auto csd0_dvcc = heapbuffer<uint8_t>(size + chunk_data_size);
-
- memcpy(csd0_dvcc.get(), data, size);
- memcpy(csd0_dvcc.get() + size, buffer.get(), chunk_data_size);
-
- AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0,
- csd0_dvcc.get(), size + chunk_data_size);
- } else {
- //if not set csd-0 directly
- AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0,
+ AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_2,
buffer.get(), chunk_data_size);
- }
AMediaFormat_setString(mLastTrack->meta, AMEDIAFORMAT_KEY_MIME,
MEDIA_MIMETYPE_VIDEO_DOLBY_VISION);
@@ -3501,7 +3485,7 @@
}
unsigned mask = br.getBits(8);
for (unsigned i = 0; i < 8; i++) {
- if (((0x1 << i) && mask) == 0)
+ if (((0x1 << i) & mask) == 0)
continue;
if (br.numBitsLeft() < 8) {
@@ -4511,12 +4495,12 @@
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
void *data;
size_t size;
- if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)
- || size < 24) {
+ if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_2, &data, &size)
+ || size != 24) {
return NULL;
}
- const uint8_t *ptr = (const uint8_t *)data + (size - 24);
+ const uint8_t *ptr = (const uint8_t *)data;
// dv_major.dv_minor Should be 1.0 or 2.1
if ((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1)) {
return NULL;
@@ -4596,7 +4580,7 @@
return ERROR_MALFORMED;
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
- if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
+ if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_2, &data, &size)) {
return ERROR_MALFORMED;
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
@@ -5172,11 +5156,11 @@
ALOGV("%s DolbyVision stream detected", __FUNCTION__);
void *data;
size_t size;
- CHECK(AMediaFormat_getBuffer(format, AMEDIAFORMAT_KEY_CSD_0, &data, &size));
+ CHECK(AMediaFormat_getBuffer(format, AMEDIAFORMAT_KEY_CSD_2, &data, &size));
- const uint8_t *ptr = (const uint8_t *)data + (size - 24);
+ const uint8_t *ptr = (const uint8_t *)data;
- CHECK(size >= 24);
+ CHECK(size == 24);
// dv_major.dv_minor Should be 1.0 or 2.1
CHECK(!((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1)));
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 8faecae..aa59a0c 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -65,6 +65,7 @@
"libhidlbase",
"libhidlmemory",
"libjsoncpp",
+ "libmedia_helper",
"libprocessgroup",
"libstagefright_esds",
"libstagefright_foundation_without_imemory",
diff --git a/media/janitors/media_leads_OWNERS b/media/janitors/media_leads_OWNERS
new file mode 100644
index 0000000..b7dbdee
--- /dev/null
+++ b/media/janitors/media_leads_OWNERS
@@ -0,0 +1,9 @@
+# Gerrit owners/approvers corresponding to the TLs within the media team
+# loosely (as of 2022/3) fgoldfain@ and direct reports
+arifdikici@google.com
+elaurent@google.com
+fgoldfain@google.com #{LAST_RESORT_SUGGESTION}
+lajos@google.com
+nchalko@google.com
+olly@google.com
+robertshih@google.com
diff --git a/media/libaaudio/TEST_MAPPING b/media/libaaudio/TEST_MAPPING
new file mode 100644
index 0000000..3de5a9f
--- /dev/null
+++ b/media/libaaudio/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsNativeMediaAAudioTestCases",
+ "options" : [
+ {
+ "include-filter": "android.nativemedia.aaudio.AAudioTests#AAudioBasic.*"
+ }
+ ]
+ }
+ ]
+}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index ed31ec9..1e39e0f 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -96,29 +96,8 @@
setFormat(AUDIO_FORMAT_PCM_FLOAT);
}
- // Maybe change device format to get a FAST path.
- // AudioRecord does not support FAST mode for FLOAT data.
- // TODO AudioRecord should allow FLOAT data paths for FAST tracks.
- // So IF the user asks for low latency FLOAT
- // AND the sampleRate is likely to be compatible with FAST
- // THEN request I16 and convert to FLOAT when passing to user.
- // Note that hard coding 48000 Hz is not ideal because the sampleRate
- // for a FAST path might not be 48000 Hz.
- // It normally is but there is a chance that it is not.
- // And there is no reliable way to know that in advance.
- // Luckily the consequences of a wrong guess are minor.
- // We just may not get a FAST track.
- // But we wouldn't have anyway without this hack.
- constexpr int32_t kMostLikelySampleRateForFast = 48000;
- if (getFormat() == AUDIO_FORMAT_PCM_FLOAT
- && perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
- && (audio_channel_count_from_in_mask(channelMask) <= 2) // FAST only for mono and stereo
- && (getSampleRate() == kMostLikelySampleRateForFast
- || getSampleRate() == AAUDIO_UNSPECIFIED)) {
- setDeviceFormat(AUDIO_FORMAT_PCM_16_BIT);
- } else {
- setDeviceFormat(getFormat());
- }
+
+ setDeviceFormat(getFormat());
// To avoid glitching, let AudioFlinger pick the optimal burst size.
uint32_t notificationFrames = 0;
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index a5fb394..0871365 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -307,6 +307,8 @@
int32_t maxSharedAudioHistoryMs)
{
status_t status = NO_ERROR;
+ LOG_ALWAYS_FATAL_IF(mInitialized, "%s: should not be called twice", __func__);
+ mInitialized = true;
// Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
ALOGV("%s(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"notificationFrames %u, sessionId %d, transferType %d, flags %#x, attributionSource %s"
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 4c2284b..a7b10b2 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -1253,24 +1253,9 @@
return result.value_or(PRODUCT_STRATEGY_NONE);
}
-DeviceTypeSet AudioSystem::getDevicesForStream(audio_stream_type_t stream) {
- const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
- if (aps == 0) return DeviceTypeSet{};
-
- auto result = [&]() -> ConversionResult<DeviceTypeSet> {
- AudioStreamType streamAidl = VALUE_OR_RETURN(
- legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
- std::vector<AudioDeviceDescription> resultAidl;
- RETURN_IF_ERROR(statusTFromBinderStatus(
- aps->getDevicesForStream(streamAidl, &resultAidl)));
- return convertContainer<DeviceTypeSet>(resultAidl,
- aidl2legacy_AudioDeviceDescription_audio_devices_t);
- }();
- return result.value_or(DeviceTypeSet{});
-}
-
status_t AudioSystem::getDevicesForAttributes(const AudioAttributes& aa,
- AudioDeviceTypeAddrVector* devices) {
+ AudioDeviceTypeAddrVector* devices,
+ bool forVolume) {
if (devices == nullptr) {
return BAD_VALUE;
}
@@ -1281,7 +1266,7 @@
legacy2aidl_AudioAttributes_AudioAttributesEx(aa));
std::vector<AudioDevice> retAidl;
RETURN_STATUS_IF_ERROR(
- statusTFromBinderStatus(aps->getDevicesForAttributes(aaAidl, &retAidl)));
+ statusTFromBinderStatus(aps->getDevicesForAttributes(aaAidl, forVolume, &retAidl)));
*devices = VALUE_OR_RETURN_STATUS(
convertContainer<AudioDeviceTypeAddrVector>(
retAidl,
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index bceca2d..bec6b10 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -277,10 +277,12 @@
{
mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
- (void)set(streamType, sampleRate, format, channelMask,
- frameCount, flags, callback, notificationFrames,
- 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
- attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+ // make_unique does not support aggregate initialization until C++20
+ mSetParams = std::unique_ptr<SetParams>{
+ new SetParams{streamType, sampleRate, format, channelMask, frameCount, flags, callback,
+ notificationFrames, 0 /*sharedBuffer*/, false /*threadCanCallJava*/,
+ sessionId, transferType, offloadInfo, attributionSource, pAttributes,
+ doNotReconnect, maxRequiredSpeed, selectedDeviceId}};
}
namespace {
@@ -355,10 +357,11 @@
} else if (user) {
LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
}
- (void)set(streamType, sampleRate, format, channelMask,
- frameCount, flags, mLegacyCallbackWrapper, notificationFrames,
- 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
- attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+ mSetParams = std::unique_ptr<SetParams>{new SetParams{
+ streamType, sampleRate, format, channelMask, frameCount, flags, mLegacyCallbackWrapper,
+ notificationFrames, 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId,
+ transferType, offloadInfo, attributionSource, pAttributes, doNotReconnect,
+ maxRequiredSpeed, selectedDeviceId}};
}
AudioTrack::AudioTrack(
@@ -387,10 +390,11 @@
{
mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
- (void)set(streamType, sampleRate, format, channelMask,
- 0 /*frameCount*/, flags, callback, notificationFrames,
- sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
- attributionSource, pAttributes, doNotReconnect, maxRequiredSpeed);
+ mSetParams = std::unique_ptr<SetParams>{
+ new SetParams{streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags,
+ callback, notificationFrames, sharedBuffer, false /*threadCanCallJava*/,
+ sessionId, transferType, offloadInfo, attributionSource, pAttributes,
+ doNotReconnect, maxRequiredSpeed, AUDIO_PORT_HANDLE_NONE}};
}
AudioTrack::AudioTrack(
@@ -424,11 +428,18 @@
} else if (user) {
LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
}
+ mSetParams = std::unique_ptr<SetParams>{new SetParams{
+ streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags,
+ mLegacyCallbackWrapper, notificationFrames, sharedBuffer, false /*threadCanCallJava*/,
+ sessionId, transferType, offloadInfo, attributionSource, pAttributes, doNotReconnect,
+ maxRequiredSpeed, AUDIO_PORT_HANDLE_NONE}};
+}
- (void)set(streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags,
- mLegacyCallbackWrapper, notificationFrames, sharedBuffer,
- false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, attributionSource,
- pAttributes, doNotReconnect, maxRequiredSpeed);
+void AudioTrack::onFirstRef() {
+ if (mSetParams) {
+ set(*mSetParams);
+ mSetParams.reset();
+ }
}
AudioTrack::~AudioTrack()
@@ -545,7 +556,6 @@
pid_t myPid;
uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
- sp<IAudioTrackCallback> _callback = callback.promote();
std::string errorMessage;
// Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
@@ -608,7 +618,7 @@
case TRANSFER_DEFAULT:
if (sharedBuffer != 0) {
transferType = TRANSFER_SHARED;
- } else if (_callback == nullptr|| threadCanCallJava) {
+ } else if (callback == nullptr|| threadCanCallJava) {
transferType = TRANSFER_SYNC;
} else {
transferType = TRANSFER_CALLBACK;
@@ -616,7 +626,7 @@
break;
case TRANSFER_CALLBACK:
case TRANSFER_SYNC_NOTIF_CALLBACK:
- if (_callback == nullptr || sharedBuffer != 0) {
+ if (callback == nullptr || sharedBuffer != 0) {
errorMessage = StringPrintf(
"%s: Transfer type %s but callback == nullptr || sharedBuffer != 0",
convertTransferToText(transferType), __func__);
@@ -771,7 +781,7 @@
mAuxEffectId = 0;
mCallback = callback;
- if (_callback != nullptr) {
+ if (callback != nullptr) {
mAudioTrackThread = sp<AudioTrackThread>::make(*this);
mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
// thread begins in paused state, and will not reference us until start()
diff --git a/media/libaudioclient/TEST_MAPPING b/media/libaudioclient/TEST_MAPPING
index d8c18c0..3751f80 100644
--- a/media/libaudioclient/TEST_MAPPING
+++ b/media/libaudioclient/TEST_MAPPING
@@ -1,7 +1,15 @@
{
"presubmit": [
{
- "name": "audio_aidl_conversion_tests"
+ "name": "audio_aidl_conversion_tests"
+ },
+ {
+ "name": "CtsNativeMediaAAudioTestCases",
+ "options" : [
+ {
+ "include-filter": "android.nativemedia.aaudio.AAudioTests#AAudioBasic.*"
+ }
+ ]
}
]
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index 6afe023..10da028 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -227,4 +227,9 @@
int getAAudioHardwareBurstMinUsec();
void setDeviceConnectedState(in AudioPort devicePort, boolean connected);
+
+ // When adding a new method, please review and update
+ // IAudioFlinger.h AudioFlingerServerAdapter::Delegate::TransactionCode
+ // AudioFlinger.cpp AudioFlinger::onTransactWrapper()
+ // AudioFlinger.cpp IAUDIOFLINGER_BINDER_METHOD_MACRO_LIST
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index f10c5d0..8ac89a8 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -137,9 +137,7 @@
int /* product_strategy_t */ getStrategyForStream(AudioStreamType stream);
- AudioDeviceDescription[] getDevicesForStream(AudioStreamType stream);
-
- AudioDevice[] getDevicesForAttributes(in AudioAttributesEx attr);
+ AudioDevice[] getDevicesForAttributes(in AudioAttributesEx attr, boolean forVolume);
int /* audio_io_handle_t */ getOutputForEffect(in EffectDescriptor desc);
@@ -392,4 +390,8 @@
* for the specified audio attributes.
*/
AudioProfile[] getDirectProfilesForAttributes(in AudioAttributesInternal attr);
+
+ // When adding a new method, please review and update
+ // AudioPolicyService.cpp AudioPolicyService::onTransact()
+ // AudioPolicyService.cpp IAUDIOPOLICYSERVICE_BINDER_METHOD_MACRO_LIST
}
diff --git a/media/libaudioclient/aidl/android/media/IEffect.aidl b/media/libaudioclient/aidl/android/media/IEffect.aidl
index 813cd5c..6ec0405 100644
--- a/media/libaudioclient/aidl/android/media/IEffect.aidl
+++ b/media/libaudioclient/aidl/android/media/IEffect.aidl
@@ -62,4 +62,8 @@
* TODO(ytai): Explain how this should be used exactly.
*/
SharedFileRegion getCblk();
+
+ // When adding a new method, please review and update
+ // Effects.cpp AudioFlinger::EffectHandle::onTransact()
+ // Effects.cpp IEFFECT_BINDER_METHOD_MACRO_LIST
}
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index faea716..cb05dd9 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -740,6 +740,7 @@
wp<IAudioRecordCallback> mCallback;
sp<IAudioRecordCallback> mLegacyCallbackWrapper;
+ bool mInitialized = false; // Protect against double set
// for notification APIs
uint32_t mNotificationFramesReq; // requested number of frames between each
// notification callback
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index a1fb125..e89ce15 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -328,9 +328,9 @@
static status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
static product_strategy_t getStrategyForStream(audio_stream_type_t stream);
- static DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
static status_t getDevicesForAttributes(const AudioAttributes &aa,
- AudioDeviceTypeAddrVector *devices);
+ AudioDeviceTypeAddrVector *devices,
+ bool forVolume);
static audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc);
static status_t registerEffect(const effect_descriptor_t *desc,
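Editor's note: with getDevicesForStream() removed, callers migrate to getDevicesForAttributes() and state whether the query is made for volume handling. A minimal sketch of the new call, assuming an AudioAttributes value aa is already in hand (illustrative only, not code from this change):

    AudioDeviceTypeAddrVector devices;
    const status_t status = AudioSystem::getDevicesForAttributes(
            aa, &devices, true /* forVolume */);
    if (status != OK) {
        ALOGW("getDevicesForAttributes failed: %d", status);
    }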
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 1708cc7..1cf6ef9 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -458,6 +458,38 @@
float maxRequiredSpeed = 1.0f,
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+ struct SetParams {
+ audio_stream_type_t streamType;
+ uint32_t sampleRate;
+ audio_format_t format;
+ audio_channel_mask_t channelMask;
+ size_t frameCount;
+ audio_output_flags_t flags;
+ wp<IAudioTrackCallback> callback;
+ int32_t notificationFrames;
+ sp<IMemory> sharedBuffer;
+ bool threadCanCallJava;
+ audio_session_t sessionId;
+ transfer_type transferType;
+ // TODO don't take pointers here
+ const audio_offload_info_t *offloadInfo;
+ AttributionSourceState attributionSource;
+ const audio_attributes_t* pAttributes;
+ bool doNotReconnect;
+ float maxRequiredSpeed;
+ audio_port_handle_t selectedDeviceId;
+ };
+ private:
+ // Note: Consumes parameters
+ void set(SetParams& s) {
+ (void)set(s.streamType, s.sampleRate, s.format, s.channelMask, s.frameCount,
+ s.flags, std::move(s.callback), s.notificationFrames,
+ std::move(s.sharedBuffer), s.threadCanCallJava, s.sessionId,
+ s.transferType, s.offloadInfo, std::move(s.attributionSource),
+ s.pAttributes, s.doNotReconnect, s.maxRequiredSpeed, s.selectedDeviceId);
+ }
+ void onFirstRef() override;
+ public:
status_t set(audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
@@ -1349,6 +1381,8 @@
wp<IAudioTrackCallback> mCallback; // callback handler for events, or NULL
sp<IAudioTrackCallback> mLegacyCallbackWrapper; // wrapper for legacy callback interface
// for notification APIs
+ std::unique_ptr<SetParams> mSetParams; // Temporary copy of ctor params to allow for
+ // deferred set after first reference.
bool mInitialized = false; // Set after track is initialized
// next 2 fields are const after constructor or set()
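Editor's note: the new SetParams struct, the private set(SetParams&) overload and onFirstRef() let the constructor defer initialization until the first strong reference exists. A hedged sketch of the intended flow; only the members come from the header above, the constructor body shown is an assumption:

    // Constructor: stash the arguments instead of calling set() immediately.
    mSetParams = std::make_unique<SetParams>(SetParams{/* constructor arguments */});

    void AudioTrack::onFirstRef() {
        if (mSetParams != nullptr) {
            set(*mSetParams);   // consumes the stored parameters
            mSetParams.reset();
        }
    }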
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index e047378..3c3715d 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -482,9 +482,9 @@
* Legacy server should implement this interface in order to be wrapped.
*/
class Delegate : public IAudioFlinger {
- protected:
friend class AudioFlingerServerAdapter;
-
+ public:
+ // expose the TransactionCode enum for TimeCheck purposes.
enum class TransactionCode {
CREATE_TRACK = media::BnAudioFlingerService::TRANSACTION_createTrack,
CREATE_RECORD = media::BnAudioFlingerService::TRANSACTION_createRecord,
@@ -553,6 +553,7 @@
SET_DEVICE_CONNECTED_STATE = media::BnAudioFlingerService::TRANSACTION_setDeviceConnectedState,
};
+ protected:
/**
             * An optional hook, called on every transaction, allowing additional operations to be
             * performed before/after the unparceling of the data and dispatching to the respective
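Editor's note: exposing TransactionCode publicly lets code outside the Delegate (such as the TimeCheck/metrics plumbing elsewhere in this change) refer to transactions by name. A rough sketch of the kind of lookup this enables; the statistics object is the MethodStatistics helper introduced later in this change, and the wiring shown here is an assumption:

    using Code = AudioFlingerServerAdapter::Delegate::TransactionCode;

    // Hypothetical: resolve a readable name for a transaction when reporting timing.
    const std::string name = methodStatistics.getMethodForCode(Code::CREATE_TRACK);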
diff --git a/media/libaudiofoundation/TEST_MAPPING b/media/libaudiofoundation/TEST_MAPPING
index f6d249a..efe8437 100644
--- a/media/libaudiofoundation/TEST_MAPPING
+++ b/media/libaudiofoundation/TEST_MAPPING
@@ -1,7 +1,15 @@
{
"presubmit": [
{
- "name": "audiofoundation_parcelable_test"
+ "name": "audiofoundation_parcelable_test"
+ },
+ {
+ "name": "CtsNativeMediaAAudioTestCases",
+ "options" : [
+ {
+ "include-filter": "android.nativemedia.aaudio.AAudioTests#AAudioBasic.*"
+ }
+ ]
}
]
}
diff --git a/media/libaudiohal/TEST_MAPPING b/media/libaudiohal/TEST_MAPPING
new file mode 100644
index 0000000..3de5a9f
--- /dev/null
+++ b/media/libaudiohal/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsNativeMediaAAudioTestCases",
+ "options" : [
+ {
+ "include-filter": "android.nativemedia.aaudio.AAudioTests#AAudioBasic.*"
+ }
+ ]
+ }
+ ]
+}
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index dd435fe..4002fbf 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -143,8 +143,9 @@
":audio_core_hal_client_sources",
":audio_effect_hal_client_sources",
],
- shared_libs: [
+ static_libs: [
"android.hardware.audio.common@7.0",
+ "android.hardware.audio.common@7.0-enums",
"android.hardware.audio.common@7.0-util",
"android.hardware.audio.effect@7.0",
"android.hardware.audio.effect@7.0-util",
@@ -164,8 +165,9 @@
srcs: [
":audio_core_hal_client_sources",
],
- shared_libs: [
+ static_libs: [
"android.hardware.audio.common@7.0",
+ "android.hardware.audio.common@7.1-enums",
"android.hardware.audio.common@7.1-util",
"android.hardware.audio@7.0",
"android.hardware.audio@7.1",
diff --git a/media/libaudioprocessing/TEST_MAPPING b/media/libaudioprocessing/TEST_MAPPING
new file mode 100644
index 0000000..3de5a9f
--- /dev/null
+++ b/media/libaudioprocessing/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsNativeMediaAAudioTestCases",
+ "options" : [
+ {
+ "include-filter": "android.nativemedia.aaudio.AAudioTests#AAudioBasic.*"
+ }
+ ]
+ }
+ ]
+}
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 041b427..1b8656d 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -47,6 +47,7 @@
info->mRotationAngle = videoFrame->mRotationAngle;
info->mBytesPerPixel = videoFrame->mBytesPerPixel;
info->mDurationUs = videoFrame->mDurationUs;
+ info->mBitDepth = videoFrame->mBitDepth;
if (videoFrame->mIccSize > 0) {
info->mIccData.assign(
videoFrame->getFlattenedIccData(),
@@ -377,13 +378,14 @@
// issue (e.g. by copying).
VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->unsecurePointer());
- ALOGV("Image dimension %dx%d, display %dx%d, angle %d, iccSize %d",
+ ALOGV("Image dimension %dx%d, display %dx%d, angle %d, iccSize %d, bitDepth %d",
videoFrame->mWidth,
videoFrame->mHeight,
videoFrame->mDisplayWidth,
videoFrame->mDisplayHeight,
videoFrame->mRotationAngle,
- videoFrame->mIccSize);
+ videoFrame->mIccSize,
+ videoFrame->mBitDepth);
initFrameInfo(&mImageInfo, videoFrame);
@@ -729,4 +731,13 @@
return (mCurScanline > oldScanline) ? (mCurScanline - oldScanline) : 0;
}
+uint32_t HeifDecoderImpl::getColorDepth() {
+ HeifFrameInfo* info = &mImageInfo;
+ if (info != nullptr) {
+ return mImageInfo.mBitDepth;
+ }
+
+ return 0;
+}
+
} // namespace android
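Editor's note: getColorDepth() simply surfaces the per-channel bit depth captured in mImageInfo. A minimal caller sketch; createHeifDecoder()/init() are existing HeifDecoderAPI entry points used here only for illustration:

    HeifDecoder* decoder = createHeifDecoder();
    if (decoder != nullptr && decoder->init(stream, nullptr /* frameInfo */)) {
        const uint32_t depth = decoder->getColorDepth();
        // 10-bit HEIF content can be routed to a 10-bit output; anything else stays 8-bit.
        const bool use10BitOutput = (depth == 10);
    }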
diff --git a/media/libheif/HeifDecoderImpl.h b/media/libheif/HeifDecoderImpl.h
index 2b9c710..86a8628 100644
--- a/media/libheif/HeifDecoderImpl.h
+++ b/media/libheif/HeifDecoderImpl.h
@@ -54,6 +54,8 @@
size_t skipScanlines(size_t count) override;
+ uint32_t getColorDepth() override;
+
private:
struct DecodeThread;
diff --git a/media/libheif/include/HeifDecoderAPI.h b/media/libheif/include/HeifDecoderAPI.h
index fa51aef..dc12486 100644
--- a/media/libheif/include/HeifDecoderAPI.h
+++ b/media/libheif/include/HeifDecoderAPI.h
@@ -46,7 +46,8 @@
uint32_t mHeight;
int32_t mRotationAngle; // Rotation angle, clockwise, should be multiple of 90
uint32_t mBytesPerPixel; // Number of bytes for one pixel
- int64_t mDurationUs; // Duration of the frame in us
+ int64_t mDurationUs; // Duration of the frame in us
+ uint32_t mBitDepth; // Number of bits for each of the R/G/B channels
std::vector<uint8_t> mIccData; // ICC data array
};
@@ -162,6 +163,11 @@
*/
virtual size_t skipScanlines(size_t count) = 0;
+ /*
+ * Returns color depth in bits for each of the R/G/B channels.
+ */
+ virtual uint32_t getColorDepth() = 0;
+
private:
HeifDecoder(const HeifFrameInfo&) = delete;
HeifDecoder& operator=(const HeifFrameInfo&) = delete;
diff --git a/media/libmediahelper/Android.bp b/media/libmediahelper/Android.bp
index a433fc6..b9d795d 100644
--- a/media/libmediahelper/Android.bp
+++ b/media/libmediahelper/Android.bp
@@ -29,6 +29,7 @@
cc_library {
name: "libmedia_helper",
vendor_available: true,
+ min_sdk_version: "29",
vndk: {
enabled: true,
},
@@ -58,4 +59,9 @@
enabled: false,
},
},
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.media",
+ "test_com.android.media",
+ ],
}
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index 4247375..90472eb 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -140,6 +140,8 @@
#define AMEDIAMETRICS_PROP_INTERVALCOUNT "intervalCount" // int32
#define AMEDIAMETRICS_PROP_LATENCYMS "latencyMs" // double value
#define AMEDIAMETRICS_PROP_LOGSESSIONID "logSessionId" // hex string, "" none
+#define AMEDIAMETRICS_PROP_METHODCODE "methodCode" // int64_t an int indicating method
+#define AMEDIAMETRICS_PROP_METHODNAME "methodName" // string method name
#define AMEDIAMETRICS_PROP_NAME "name" // string value
#define AMEDIAMETRICS_PROP_ORIGINALFLAGS "originalFlags" // int32
#define AMEDIAMETRICS_PROP_OUTPUTDEVICES "outputDevices" // string value
@@ -224,6 +226,7 @@
#define AMEDIAMETRICS_PROP_EVENT_VALUE_SETVOLUME "setVolume" // AudioTrack
#define AMEDIAMETRICS_PROP_EVENT_VALUE_START "start" // AudioTrack, AudioRecord
#define AMEDIAMETRICS_PROP_EVENT_VALUE_STOP "stop" // AudioTrack, AudioRecord
+#define AMEDIAMETRICS_PROP_EVENT_VALUE_TIMEOUT "timeout" // AudioFlinger, AudioPolicy
#define AMEDIAMETRICS_PROP_EVENT_VALUE_UNDERRUN "underrun" // from Thread
// Possible values for AMEDIAMETRICS_PROP_CALLERNAME
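Editor's note: the new methodCode/methodName keys and the "timeout" event value give AudioFlinger/AudioPolicy a way to report slow binder methods. A hedged sketch of how such an item might be emitted, assuming the framework's existing mediametrics::LogItem chained-set pattern (the key string and variables are illustrative):

    mediametrics::LogItem("audio.flinger.timeout")
            .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_TIMEOUT)
            .set(AMEDIAMETRICS_PROP_METHODCODE, (int64_t)methodCode)
            .set(AMEDIAMETRICS_PROP_METHODNAME, methodName.c_str())
            .record();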
diff --git a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
index 609298f..55b1ed7 100644
--- a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
+++ b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
@@ -28,6 +28,7 @@
#include <datasource/PlayerServiceDataSourceFactory.h>
#include <datasource/PlayerServiceFileSource.h>
#include <media/IMediaHTTPService.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaCodecList.h>
@@ -194,17 +195,6 @@
return NULL;
}
- if (metaOnly) {
- return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail);
- }
-
- sp<IMediaSource> source = mExtractor->getTrack(i);
-
- if (source.get() == NULL) {
- ALOGE("unable to instantiate image track.");
- return NULL;
- }
-
const char *mime;
bool isHeif = false;
if (!trackMeta->findCString(kKeyMIMEType, &mime)) {
@@ -223,16 +213,47 @@
trackMeta->setCString(kKeyMIMEType, mime);
}
- bool preferhw = property_get_bool(
- "media.stagefright.thumbnail.prefer_hw_codecs", false);
- uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
- Vector<AString> matchingCodecs;
sp<AMessage> format = new AMessage;
status_t err = convertMetaDataToMessage(trackMeta, &format);
if (err != OK) {
format = NULL;
}
+ uint32_t bitDepth = 8;
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ int32_t profile;
+ if (format->findInt32("profile", &profile)) {
+ if (HEVCProfileMain10 == profile || HEVCProfileMain10HDR10 == profile ||
+ HEVCProfileMain10HDR10Plus == profile) {
+ bitDepth = 10;
+ }
+ }
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
+ int32_t profile;
+ if (format->findInt32("profile", &profile)) {
+ if (AV1ProfileMain10 == profile || AV1ProfileMain10HDR10 == profile ||
+ AV1ProfileMain10HDR10Plus == profile) {
+ bitDepth = 10;
+ }
+ }
+ }
+
+ if (metaOnly) {
+ return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail, bitDepth);
+ }
+
+ sp<IMediaSource> source = mExtractor->getTrack(i);
+
+ if (source.get() == NULL) {
+ ALOGE("unable to instantiate image track.");
+ return NULL;
+ }
+
+ bool preferhw = property_get_bool(
+ "media.stagefright.thumbnail.prefer_hw_codecs", false);
+ uint32_t flags = preferhw ? 0 : MediaCodecList::kPreferSoftwareCodecs;
+ Vector<AString> matchingCodecs;
+
// If decoding thumbnail check decoder supports thumbnail dimensions instead
int32_t thumbHeight, thumbWidth;
if (thumbnail && format != NULL
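Editor's note: the block above derives bitDepth from the HEVC/AV1 profile before the metadata-only early return, so thumbnail metadata reports the correct depth. The same mapping restated as a standalone helper, purely as a reading aid (not code from the change):

    static uint32_t bitDepthFromProfile(const char *mime, int32_t profile) {
        if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
            if (profile == HEVCProfileMain10 || profile == HEVCProfileMain10HDR10 ||
                profile == HEVCProfileMain10HDR10Plus) return 10;
        } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
            if (profile == AV1ProfileMain10 || profile == AV1ProfileMain10HDR10 ||
                profile == AV1ProfileMain10HDR10Plus) return 10;
        }
        return 8;  // everything else is treated as 8-bit
    }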
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 5da32c9..3df8766 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -50,7 +50,7 @@
sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
- int32_t dstBpp, bool allocRotated, bool metaOnly) {
+ int32_t dstBpp, uint32_t bitDepth, bool allocRotated, bool metaOnly) {
int32_t rotationAngle;
if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
rotationAngle = 0; // By default, no rotation
@@ -105,7 +105,7 @@
}
VideoFrame frame(width, height, displayWidth, displayHeight,
- tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
+ tileWidth, tileHeight, rotationAngle, dstBpp, bitDepth, !metaOnly, iccSize);
size_t size = frame.getFlattenedSize();
sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
@@ -126,15 +126,15 @@
sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
- int32_t dstBpp, bool allocRotated = false) {
- return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+ int32_t dstBpp, uint8_t bitDepth, bool allocRotated = false) {
+ return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp, bitDepth,
allocRotated, false /*metaOnly*/);
}
sp<IMemory> allocMetaFrame(const sp<MetaData>& trackMeta,
int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
- int32_t dstBpp) {
- return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+ int32_t dstBpp, uint8_t bitDepth) {
+ return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp, bitDepth,
false /*allocRotated*/, true /*metaOnly*/);
}
@@ -211,7 +211,7 @@
//static
sp<IMemory> FrameDecoder::getMetadataOnly(
- const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
+ const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail, uint32_t bitDepth) {
OMX_COLOR_FORMATTYPE dstFormat;
ui::PixelFormat captureFormat;
int32_t dstBpp;
@@ -235,7 +235,8 @@
}
}
- sp<IMemory> metaMem = allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp);
+ sp<IMemory> metaMem =
+ allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp, bitDepth);
// try to fill sequence meta's duration based on average frame rate,
// default to 33ms if frame rate is unavailable.
@@ -534,7 +535,6 @@
if (dstFormat() == COLOR_Format32bitABGR2101010) {
videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
} else {
- // TODO: Use Flexible color instead
videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
}
@@ -649,6 +649,11 @@
height = slice_height;
}
+ uint32_t bitDepth = 8;
+ if (COLOR_FormatYUVP010 == srcFormat) {
+ bitDepth = 10;
+ }
+
if (mFrame == NULL) {
sp<IMemory> frameMem = allocVideoFrame(
trackMeta(),
@@ -657,6 +662,7 @@
0,
0,
dstBpp(),
+ bitDepth,
mCaptureLayer != nullptr /*allocRotated*/);
if (frameMem == nullptr) {
return NO_MEMORY;
@@ -851,7 +857,6 @@
if (dstFormat() == COLOR_Format32bitABGR2101010) {
videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
} else {
- // TODO: Use Flexible color instead
videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
}
@@ -908,7 +913,7 @@
return ERROR_MALFORMED;
}
- int32_t width, height, stride;
+ int32_t width, height, stride, srcFormat;
if (outputFormat->findInt32("width", &width) == false) {
ALOGE("MediaImageDecoder::onOutputReceived:width is missing in outputFormat");
return ERROR_MALFORMED;
@@ -921,10 +926,19 @@
ALOGE("MediaImageDecoder::onOutputReceived:stride is missing in outputFormat");
return ERROR_MALFORMED;
}
+ if (outputFormat->findInt32("color-format", &srcFormat) == false) {
+ ALOGE("MediaImageDecoder::onOutputReceived: color format is missing in outputFormat");
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t bitDepth = 8;
+ if (COLOR_FormatYUVP010 == srcFormat) {
+ bitDepth = 10;
+ }
if (mFrame == NULL) {
sp<IMemory> frameMem = allocVideoFrame(
- trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
+ trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp(), bitDepth);
if (frameMem == nullptr) {
return NO_MEMORY;
@@ -935,9 +949,6 @@
setFrame(frameMem);
}
- int32_t srcFormat;
- CHECK(outputFormat->findInt32("color-format", &srcFormat));
-
ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
uint32_t standard, range, transfer;
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index f81a5eb..63d3180 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -36,6 +36,7 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ALookup.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/foundation/ColorUtils.h>
@@ -44,6 +45,7 @@
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
#include <media/mediarecorder.h>
@@ -372,9 +374,7 @@
uint8_t mProfileCompatible;
uint8_t mLevelIdc;
- uint8_t mDoviProfile;
- void *mDoviConfigData;
- size_t mDoviConfigDataSize;
+ int32_t mDoviProfile;
void *mCodecSpecificData;
size_t mCodecSpecificDataSize;
@@ -428,7 +428,7 @@
status_t parseHEVCCodecSpecificData(
            const uint8_t *data, size_t size, HevcParameterSets &paramSets);
- status_t makeDoviCodecSpecificData();
+ status_t getDolbyVisionProfile();
// Track authoring progress status
void trackProgressStatus(int64_t timeUs, status_t err = OK);
@@ -628,14 +628,14 @@
}
const char *MPEG4Writer::Track::getDoviFourCC() const {
- if (mDoviProfile == 5) {
+ if (mDoviProfile == DolbyVisionProfileDvheStn) {
return "dvh1";
- } else if (mDoviProfile == 8) {
+ } else if (mDoviProfile == DolbyVisionProfileDvheSt) {
return "hvc1";
- } else if (mDoviProfile == 9 || mDoviProfile == 32) {
+ } else if (mDoviProfile == DolbyVisionProfileDvavSe) {
return "avc1";
}
- return (const char*)NULL;
+ return nullptr;
}
// static
@@ -693,6 +693,11 @@
}
if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
+ // For MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+ // getFourCCForMime() requires profile information
+ // to decide the final FourCC codes.
+ // So we allow the new track to be created now and
+ // assign the FourCC code later using getDoviFourCC().
ALOGV("Add source mime '%s'", mime);
} else if (Track::getFourCCForMime(mime) == NULL) {
ALOGE("Unsupported mime '%s'", mime);
@@ -2173,8 +2178,7 @@
mMinCttsOffsetTimeUs(0),
mMinCttsOffsetTicks(0),
mMaxCttsOffsetTicks(0),
- mDoviConfigData(NULL),
- mDoviConfigDataSize(0),
+ mDoviProfile(0),
mCodecSpecificData(NULL),
mCodecSpecificDataSize(0),
mGotAllCodecSpecificData(false),
@@ -2636,7 +2640,7 @@
!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
mMeta->findData(kKeyHVCC, &type, &data, &size);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
- makeDoviCodecSpecificData();
+ getDolbyVisionProfile();
if (!mMeta->findData(kKeyAVCC, &type, &data, &size) &&
!mMeta->findData(kKeyHVCC, &type, &data, &size)) {
ALOGE("Failed: No HVCC/AVCC for Dolby Vision ..\n");
@@ -2683,10 +2687,6 @@
mCodecSpecificData = NULL;
}
- if (mDoviConfigData != NULL) {
- free(mDoviConfigData);
- mDoviConfigData = NULL;
- }
}
void MPEG4Writer::Track::initTrackingProgressStatus(MetaData *params) {
@@ -3365,34 +3365,37 @@
return OK;
}
-status_t MPEG4Writer::Track::makeDoviCodecSpecificData() {
+status_t MPEG4Writer::Track::getDolbyVisionProfile() {
uint32_t type;
const void *data = NULL;
size_t size = 0;
- if (mDoviConfigData != NULL) {
- ALOGE("Already have Dolby Vision codec specific data");
- return OK;
+ if (!mMeta->findData(kKeyDVCC, &type, &data, &size) &&
+ !mMeta->findData(kKeyDVVC, &type, &data, &size) &&
+ !mMeta->findData(kKeyDVWC, &type, &data, &size)) {
+ ALOGE("Failed getting Dovi config for Dolby Vision %d", (int)size);
+ return ERROR_MALFORMED;
}
+ static const ALookup<uint8_t, int32_t> dolbyVisionProfileMap = {
+ {1, DolbyVisionProfileDvavPen},
+ {3, DolbyVisionProfileDvheDen},
+ {4, DolbyVisionProfileDvheDtr},
+ {5, DolbyVisionProfileDvheStn},
+ {6, DolbyVisionProfileDvheDth},
+ {7, DolbyVisionProfileDvheDtb},
+ {8, DolbyVisionProfileDvheSt},
+ {9, DolbyVisionProfileDvavSe},
+ {10, DolbyVisionProfileDvav110}
+ };
- if (!mMeta->findData(kKeyDVCC, &type, &data, &size)
- && !mMeta->findData(kKeyDVVC, &type, &data, &size)
- && !mMeta->findData(kKeyDVWC, &type, &data, &size)) {
- ALOGE("Failed getting Dovi config for Dolby Vision %d", (int)size);
- return ERROR_MALFORMED;
+ // Dolby Vision profile information is extracted as per
+ // https://dolby.my.salesforce.com/sfc/p/#700000009YuG/a/4u000000l6FB/076wHYEmyEfz09m0V1bo85_25hlUJjaiWTbzorNmYY4
+ uint8_t dv_profile = ((((uint8_t *)data)[2] >> 1) & 0x7f);
+
+ if (!dolbyVisionProfileMap.map(dv_profile, &mDoviProfile)) {
+ ALOGE("Failed to get Dolby Profile from DV Config data");
+ return ERROR_MALFORMED;
}
-
- mDoviConfigData = malloc(size);
- if (mDoviConfigData == NULL) {
- ALOGE("Failed allocating Dolby Vision config data");
- return ERROR_MALFORMED;
- }
-
- mDoviConfigDataSize = size;
- memcpy(mDoviConfigData, data, size);
-
- mDoviProfile = (((char *)data)[2] >> 1) & 0x7f; //getting profile info
-
return OK;
}
@@ -3542,24 +3545,26 @@
buffer->range_length());
}
if (mIsDovi) {
- err = makeDoviCodecSpecificData();
-
- const void *data = NULL;
- size_t size = 0;
-
- uint32_t type = 0;
- if (mDoviProfile == 9){
- mMeta->findData(kKeyAVCC, &type, &data, &size);
- } else if (mDoviProfile < 9) {
- mMeta->findData(kKeyHVCC, &type, &data, &size);
- }
-
- if (data != NULL && copyCodecSpecificData((uint8_t *)data, size) == OK) {
- mGotAllCodecSpecificData = true;
+ err = getDolbyVisionProfile();
+ if (err == OK) {
+ const void *data = NULL;
+ size_t size = 0;
+ uint32_t type = 0;
+ if (mDoviProfile == DolbyVisionProfileDvavSe) {
+ mMeta->findData(kKeyAVCC, &type, &data, &size);
+ } else if (mDoviProfile < DolbyVisionProfileDvavSe) {
+ mMeta->findData(kKeyHVCC, &type, &data, &size);
+ } else {
+ ALOGW("DV Profiles > DolbyVisionProfileDvavSe are not supported");
+ err = ERROR_MALFORMED;
+ }
+ if (err == OK && data != NULL &&
+ copyCodecSpecificData((uint8_t *)data, size) == OK) {
+ mGotAllCodecSpecificData = true;
+ }
}
}
}
-
buffer->release();
buffer = NULL;
if (OK != err) {
@@ -4429,10 +4434,12 @@
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
writeHvccBox();
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, mime)) {
- if (mDoviProfile <= 8) {
+ if (mDoviProfile <= DolbyVisionProfileDvheSt) {
writeHvccBox();
- } else if (mDoviProfile == 9 || mDoviProfile == 32) {
+ } else if (mDoviProfile == DolbyVisionProfileDvavSe) {
writeAvccBox();
+ } else {
+ TRESPASS("Unsupported Dolby Vision profile");
}
writeDoviConfigBox();
}
@@ -4482,45 +4489,48 @@
size_t size;
bool found =
meta->findData(kKeyHdrStaticInfo, &type, reinterpret_cast<const void**>(&data), &size);
- if (found && size == 25) {
- uint16_t displayPrimariesRX = U16LE_AT(&data[1]);
- uint16_t displayPrimariesRY = U16LE_AT(&data[3]);
-
- uint16_t displayPrimariesGX = U16LE_AT(&data[5]);
- uint16_t displayPrimariesGY = U16LE_AT(&data[7]);
-
- uint16_t displayPrimariesBX = U16LE_AT(&data[9]);
- uint16_t displayPrimariesBY = U16LE_AT(&data[11]);
-
- uint16_t whitePointX = U16LE_AT(&data[13]);
- uint16_t whitePointY = U16LE_AT(&data[15]);
-
- uint16_t maxDisplayMasteringLuminance = U16LE_AT(&data[17]);
- uint16_t minDisplayMasteringLuminance = U16LE_AT(&data[19]);
-
- uint16_t maxContentLightLevel = U16LE_AT(&data[21]);
- uint16_t maxPicAverageLightLevel = U16LE_AT(&data[23]);
-
- mOwner->beginBox("mdcv");
- mOwner->writeInt16(displayPrimariesGX);
- mOwner->writeInt16(displayPrimariesGY);
- mOwner->writeInt16(displayPrimariesBX);
- mOwner->writeInt16(displayPrimariesBY);
- mOwner->writeInt16(displayPrimariesRX);
- mOwner->writeInt16(displayPrimariesRY);
- mOwner->writeInt16(whitePointX);
- mOwner->writeInt16(whitePointY);
- mOwner->writeInt32(maxDisplayMasteringLuminance * 10000);
- mOwner->writeInt32(minDisplayMasteringLuminance * 10000);
- mOwner->endBox(); // mdcv.
-
- mOwner->beginBox("clli");
- mOwner->writeInt16(maxContentLightLevel);
- mOwner->writeInt16(maxPicAverageLightLevel);
- mOwner->endBox(); // clli.
- } else {
- ALOGW("Ignoring HDR static info with unexpected size %d", (int)size);
+ if (!found) {
+ return; // Nothing to encode.
}
+ if (size != 25) {
+ ALOGW("Ignoring HDR static info with unexpected size %d", (int)size);
+ return;
+ }
+ uint16_t displayPrimariesRX = U16LE_AT(&data[1]);
+ uint16_t displayPrimariesRY = U16LE_AT(&data[3]);
+
+ uint16_t displayPrimariesGX = U16LE_AT(&data[5]);
+ uint16_t displayPrimariesGY = U16LE_AT(&data[7]);
+
+ uint16_t displayPrimariesBX = U16LE_AT(&data[9]);
+ uint16_t displayPrimariesBY = U16LE_AT(&data[11]);
+
+ uint16_t whitePointX = U16LE_AT(&data[13]);
+ uint16_t whitePointY = U16LE_AT(&data[15]);
+
+ uint16_t maxDisplayMasteringLuminance = U16LE_AT(&data[17]);
+ uint16_t minDisplayMasteringLuminance = U16LE_AT(&data[19]);
+
+ uint16_t maxContentLightLevel = U16LE_AT(&data[21]);
+ uint16_t maxPicAverageLightLevel = U16LE_AT(&data[23]);
+
+ mOwner->beginBox("mdcv");
+ mOwner->writeInt16(displayPrimariesGX);
+ mOwner->writeInt16(displayPrimariesGY);
+ mOwner->writeInt16(displayPrimariesBX);
+ mOwner->writeInt16(displayPrimariesBY);
+ mOwner->writeInt16(displayPrimariesRX);
+ mOwner->writeInt16(displayPrimariesRY);
+ mOwner->writeInt16(whitePointX);
+ mOwner->writeInt16(whitePointY);
+ mOwner->writeInt32(maxDisplayMasteringLuminance * 10000);
+ mOwner->writeInt32(minDisplayMasteringLuminance * 10000);
+ mOwner->endBox(); // mdcv.
+
+ mOwner->beginBox("clli");
+ mOwner->writeInt16(maxContentLightLevel);
+ mOwner->writeInt16(maxPicAverageLightLevel);
+ mOwner->endBox(); // clli.
}
void MPEG4Writer::Track::writeAudioFourCCBox() {
@@ -4991,21 +5001,29 @@
}
void MPEG4Writer::Track::writeDoviConfigBox() {
- CHECK(mDoviConfigData);
- CHECK_EQ(mDoviConfigDataSize, 24u);
+ CHECK_NE(mDoviProfile, 0u);
- uint8_t *ptr = (uint8_t *)mDoviConfigData;
- uint8_t profile = (ptr[2] >> 1) & 0x7f;
+ uint32_t type = 0;
+ const void *data = nullptr;
+ size_t size = 0;
+ // check to see which key has the configuration box.
+ if (mMeta->findData(kKeyDVCC, &type, &data, &size) ||
+ mMeta->findData(kKeyDVVC, &type, &data, &size) ||
+ mMeta->findData(kKeyDVWC, &type, &data, &size)) {
- if (profile > 10) {
- mOwner->beginBox("dvwC");
- } else if (profile > 7) {
- mOwner->beginBox("dvvC");
- } else {
- mOwner->beginBox("dvcC");
+ // If this box is present we write it; otherwise this
+ // mp4 will be interpreted as a backward compatible
+ // stream.
+ if (mDoviProfile > DolbyVisionProfileDvav110) {
+ mOwner->beginBox("dvwC");
+ } else if (mDoviProfile > DolbyVisionProfileDvheDtb) {
+ mOwner->beginBox("dvvC");
+ } else {
+ mOwner->beginBox("dvcC");
+ }
+ mOwner->write(data, size);
+ mOwner->endBox(); // dvwC/dvvC/dvcC
}
- mOwner->write(mDoviConfigData, mDoviConfigDataSize);
- mOwner->endBox(); // dvwC/dvvC/dvcC
}
void MPEG4Writer::Track::writeD263Box() {
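Editor's note: getDolbyVisionProfile() now reads the profile straight out of the dvcC/dvvC/dvwC payload instead of keeping a private copy of the config data. The byte layout relevant to the extraction in the hunk above, shown as a sketch (only the fields actually read there are annotated):

    // dvcC/dvvC/dvwC payload (24 bytes), fields used here:
    //   data[0] : dv_version_major
    //   data[1] : dv_version_minor
    //   data[2] : bits 7..1 = dv_profile, bit 0 = high bit of dv_level
    uint8_t dv_profile = (((const uint8_t *)data)[2] >> 1) & 0x7f;
    int32_t mediaProfile;   // e.g. DolbyVisionProfileDvheSt
    bool known = dolbyVisionProfileMap.map(dv_profile, &mediaProfile);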
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 1ec09ea..50a3f0d 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -2151,7 +2151,7 @@
bool reverse) {
AString mediaType;
if (!format->findString("mime", &mediaType)) {
- ALOGW("mapFormat: no mediaType information");
+ ALOGV("mapFormat: no mediaType information");
return;
}
ALOGV("mapFormat: codec %s mediatype %s kind %s reverse %d", componentName.c_str(),
@@ -3105,10 +3105,8 @@
case STOPPING:
{
if (mFlags & kFlagSawMediaServerDie) {
- bool postPendingReplies = true;
if (mState == RELEASING && !mReplyID) {
ALOGD("Releasing asynchronously, so nothing to reply here.");
- postPendingReplies = false;
}
// MediaServer died, there definitely won't
// be a shutdown complete notification after
@@ -3121,8 +3119,11 @@
if (mState == RELEASING) {
mComponentName.clear();
}
- if (postPendingReplies) {
+ if (mReplyID) {
postPendingRepliesAndDeferredMessages(origin + ":dead");
+ } else {
+ ALOGD("no pending replies: %s:dead following %s",
+ origin.c_str(), mLastReplyOrigin.c_str());
}
sendErrorResponse = false;
} else if (!mReplyID) {
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 1854588..4b6470a 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -243,6 +243,39 @@
}
}
+static const ALookup<uint8_t, int32_t>& getDolbyVisionProfileTable() {
+ static const ALookup<uint8_t, int32_t> profileTable = {
+ {1, DolbyVisionProfileDvavPen},
+ {3, DolbyVisionProfileDvheDen},
+ {4, DolbyVisionProfileDvheDtr},
+ {5, DolbyVisionProfileDvheStn},
+ {6, DolbyVisionProfileDvheDth},
+ {7, DolbyVisionProfileDvheDtb},
+ {8, DolbyVisionProfileDvheSt},
+ {9, DolbyVisionProfileDvavSe},
+ {10, DolbyVisionProfileDvav110},
+ };
+ return profileTable;
+}
+
+static const ALookup<uint8_t, int32_t>& getDolbyVisionLevelsTable() {
+ static const ALookup<uint8_t, int32_t> levelsTable = {
+ {0, DolbyVisionLevelUnknown},
+ {1, DolbyVisionLevelHd24},
+ {2, DolbyVisionLevelHd30},
+ {3, DolbyVisionLevelFhd24},
+ {4, DolbyVisionLevelFhd30},
+ {5, DolbyVisionLevelFhd60},
+ {6, DolbyVisionLevelUhd24},
+ {7, DolbyVisionLevelUhd30},
+ {8, DolbyVisionLevelUhd48},
+ {9, DolbyVisionLevelUhd60},
+ {10, DolbyVisionLevelUhd120},
+ {11, DolbyVisionLevel8k30},
+ {12, DolbyVisionLevel8k60},
+ };
+ return levelsTable;
+}
static void parseDolbyVisionProfileLevelFromDvcc(const uint8_t *ptr, size_t size, sp<AMessage> &format) {
// dv_major.dv_minor Should be 1.0 or 2.1
if (size != 24 || ((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1))) {
@@ -262,33 +295,9 @@
// All Dolby Profiles will have profile and level info in MediaFormat
// Profile 8 and 9 will have bl_compatibility_id too.
- const static ALookup<uint8_t, int32_t> profiles{
- {1, DolbyVisionProfileDvavPen},
- {3, DolbyVisionProfileDvheDen},
- {4, DolbyVisionProfileDvheDtr},
- {5, DolbyVisionProfileDvheStn},
- {6, DolbyVisionProfileDvheDth},
- {7, DolbyVisionProfileDvheDtb},
- {8, DolbyVisionProfileDvheSt},
- {9, DolbyVisionProfileDvavSe},
- {10, DolbyVisionProfileDvav110},
- };
+ const ALookup<uint8_t, int32_t> &profiles = getDolbyVisionProfileTable();
+ const ALookup<uint8_t, int32_t> &levels = getDolbyVisionLevelsTable();
- const static ALookup<uint8_t, int32_t> levels{
- {0, DolbyVisionLevelUnknown},
- {1, DolbyVisionLevelHd24},
- {2, DolbyVisionLevelHd30},
- {3, DolbyVisionLevelFhd24},
- {4, DolbyVisionLevelFhd30},
- {5, DolbyVisionLevelFhd60},
- {6, DolbyVisionLevelUhd24},
- {7, DolbyVisionLevelUhd30},
- {8, DolbyVisionLevelUhd48},
- {9, DolbyVisionLevelUhd60},
- {10, DolbyVisionLevelUhd120},
- {11, DolbyVisionLevel8k30},
- {12, DolbyVisionLevel8k60},
- };
// set rpuAssoc
if (rpu_present_flag && el_present_flag && !bl_present_flag) {
format->setInt32("rpuAssoc", 1);
@@ -1516,30 +1525,18 @@
if (meta->findData(kKeyDVCC, &type, &data, &size)
|| meta->findData(kKeyDVVC, &type, &data, &size)
|| meta->findData(kKeyDVWC, &type, &data, &size)) {
- sp<ABuffer> buffer, csdOrg;
- if (msg->findBuffer("csd-0", &csdOrg)) {
- buffer = new (std::nothrow) ABuffer(size + csdOrg->size());
- if (buffer.get() == NULL || buffer->base() == NULL) {
- return NO_MEMORY;
- }
-
- memcpy(buffer->data(), csdOrg->data(), csdOrg->size());
- memcpy(buffer->data() + csdOrg->size(), data, size);
- } else {
- buffer = new (std::nothrow) ABuffer(size);
- if (buffer.get() == NULL || buffer->base() == NULL) {
- return NO_MEMORY;
- }
- memcpy(buffer->data(), data, size);
- }
-
- buffer->meta()->setInt32("csd", true);
- buffer->meta()->setInt64("timeUs", 0);
- msg->setBuffer("csd-0", buffer);
-
const uint8_t *ptr = (const uint8_t *)data;
ALOGV("DV: calling parseDolbyVisionProfileLevelFromDvcc with data size %zu", size);
parseDolbyVisionProfileLevelFromDvcc(ptr, size, msg);
+ sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+ if (buffer.get() == nullptr || buffer->base() == nullptr) {
+ return NO_MEMORY;
+ }
+ memcpy(buffer->data(), data, size);
+
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ msg->setBuffer("csd-2", buffer);
}
*format = msg;
@@ -2041,133 +2038,146 @@
mime == MEDIA_MIMETYPE_IMAGE_AVIF) {
meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
} else if (mime == MEDIA_MIMETYPE_VIDEO_DOLBY_VISION) {
- int32_t needCreateDoviCSD = 0;
- int32_t profile = 0;
- uint8_t bl_compatibility = 0;
- if (msg->findInt32("profile", &profile)) {
- if (profile == DolbyVisionProfileDvheSt) {
- profile = 8;
- bl_compatibility = 4;
- } else if (profile == DolbyVisionProfileDvavSe) {
- profile = 9;
- bl_compatibility = 2;
- }
- if (profile == 8 || profile == 9) {
- needCreateDoviCSD = 1;
- }
- } else {
- ALOGW("did not find dolby vision profile");
- }
- // No dovi csd data, need to create it
- if (needCreateDoviCSD) {
- uint8_t dvcc[24];
- int32_t level = 0;
- uint8_t level_val = 0;
+ int32_t profile = -1;
+ uint8_t blCompatibilityId = -1;
+ int32_t level = 0;
+ uint8_t profileVal = -1;
+ uint8_t profileVal1 = -1;
+ uint8_t profileVal2 = -1;
+ constexpr size_t dvccSize = 24;
- if (msg->findInt32("level", &level)) {
- const static ALookup<int32_t, uint8_t> levels {
- {DolbyVisionLevelUnknown, 0},
- {DolbyVisionLevelHd24, 1},
- {DolbyVisionLevelHd30, 2},
- {DolbyVisionLevelFhd24, 3},
- {DolbyVisionLevelFhd30, 4},
- {DolbyVisionLevelFhd60, 5},
- {DolbyVisionLevelUhd24, 6},
- {DolbyVisionLevelUhd30, 7},
- {DolbyVisionLevelUhd48, 8},
- {DolbyVisionLevelUhd60, 9},
- {DolbyVisionLevelUhd120, 10},
- {DolbyVisionLevel8k30, 11},
- {DolbyVisionLevel8k60, 12},
- };
- levels.map(level, &level_val);
- ALOGV("found dolby vision level: %d, value: %d", level, level_val);
+ const ALookup<uint8_t, int32_t> &profiles =
+ getDolbyVisionProfileTable();
+ const ALookup<uint8_t, int32_t> &levels =
+ getDolbyVisionLevelsTable();
+
+ if (!msg->findBuffer("csd-2", &csd2)) {
+ // MP4 extractors are expected to generate this csd buffer;
+ // some encoders might not generate it, in which
+ // case we populate the track metadata dv(cc|vc|wc)
+ // from the 'profile' and 'level' info.
+ // This is done according to the Dolby Vision ISOBMFF spec.
+
+ if (!msg->findInt32("profile", &profile)) {
+ ALOGE("Dolby Vision profile not found");
+ return BAD_VALUE;
}
+ msg->findInt32("level", &level);
+
+ if (profile == DolbyVisionProfileDvheSt) {
+ if (!profiles.rlookup(DolbyVisionProfileDvheSt, &profileVal)) { // dvhe.08
+ ALOGE("Dolby Vision profile lookup error");
+ return BAD_VALUE;
+ }
+ blCompatibilityId = 4;
+ } else if (profile == DolbyVisionProfileDvavSe) {
+ if (!profiles.rlookup(DolbyVisionProfileDvavSe, &profileVal)) { // dvav.09
+ ALOGE("Dolby Vision profile lookup error");
+ return BAD_VALUE;
+ }
+ blCompatibilityId = 2;
+ } else {
+ ALOGE("Dolby Vision profile look up error");
+ return BAD_VALUE;
+ }
+
+ profile = (int32_t) profileVal;
+
+ uint8_t level_val = 0;
+ if (!levels.map(level, &level_val)) {
+ ALOGE("Dolby Vision level lookup error");
+ return BAD_VALUE;
+ }
+
+ std::vector<uint8_t> dvcc(dvccSize);
dvcc[0] = 1; // major version
dvcc[1] = 0; // minor version
- dvcc[2] = (uint8_t)((profile & 0x7f) << 1);// dolby vision profile
+ dvcc[2] = (uint8_t)((profile & 0x7f) << 1); // dolby vision profile
dvcc[2] = (uint8_t)((dvcc[2] | (uint8_t)((level_val >> 5) & 0x1)) & 0xff);
dvcc[3] = (uint8_t)((level_val & 0x1f) << 3); // dolby vision level
dvcc[3] = (uint8_t)(dvcc[3] | (1 << 2)); // rpu_present_flag
dvcc[3] = (uint8_t)(dvcc[3] | (1)); // bl_present_flag
- dvcc[4] = (uint8_t)(bl_compatibility << 4);// bl_compatibility id
+ dvcc[4] = (uint8_t)(blCompatibilityId << 4); // bl_compatibility id
- std::vector<uint8_t> dvcc_data(24);
- memcpy(dvcc_data.data(), dvcc, 24);
- if (profile > 10) {
- meta->setData(kKeyDVWC, kTypeDVWC, dvcc_data.data(), 24);
- } else if (profile > 7) {
- meta->setData(kKeyDVVC, kTypeDVVC, dvcc_data.data(), 24);
+ profiles.rlookup(DolbyVisionProfileDvav110, &profileVal);
+ profiles.rlookup(DolbyVisionProfileDvheDtb, &profileVal1);
+ if (profile > (int32_t) profileVal) {
+ meta->setData(kKeyDVWC, kTypeDVWC, dvcc.data(), dvccSize);
+ } else if (profile > (int32_t) profileVal1) {
+ meta->setData(kKeyDVVC, kTypeDVVC, dvcc.data(), dvccSize);
} else {
- meta->setData(kKeyDVCC, kTypeDVCC, dvcc_data.data(), 24);
+ meta->setData(kKeyDVCC, kTypeDVCC, dvcc.data(), dvccSize);
}
- } else if (csd0size >= 24) { // have dovi csd, just send it out...
- uint8_t *dvconfig = csd0->data() + (csd0size -24);
- profile = dvconfig[2] >> 1;
- if (profile > 10) {
- meta->setData(kKeyDVWC, kTypeDVWC, dvconfig, 24);
- } else if (profile > 7) {
- meta->setData(kKeyDVVC, kTypeDVVC, dvconfig, 24);
- } else {
- meta->setData(kKeyDVCC, kTypeDVCC, dvconfig, 24);
- }
+
} else {
- return BAD_VALUE;
+ // we have csd-2, just use that to populate dvcc
+ if (csd2->size() == dvccSize) {
+ uint8_t *dvcc = csd2->data();
+ profile = dvcc[2] >> 1;
+
+ profiles.rlookup(DolbyVisionProfileDvav110, &profileVal);
+ profiles.rlookup(DolbyVisionProfileDvheDtb, &profileVal1);
+ if (profile > (int32_t) profileVal) {
+ meta->setData(kKeyDVWC, kTypeDVWC, csd2->data(), csd2->size());
+ } else if (profile > (int32_t) profileVal1) {
+ meta->setData(kKeyDVVC, kTypeDVVC, csd2->data(), csd2->size());
+ } else {
+ meta->setData(kKeyDVCC, kTypeDVCC, csd2->data(), csd2->size());
+ }
+
+ } else {
+ ALOGE("convertMessageToMetaData: csd-2 is present but not valid");
+ return BAD_VALUE;
+ }
}
-
- // Send the avc/hevc/av1 csd data...
- if (csd0size >= 24) {
- sp<ABuffer> csd;
- if ( profile > 1 && profile < 9) {
- if (msg->findBuffer("csd-hevc", &csd)) {
- meta->setData(kKeyHVCC, kTypeHVCC, csd->data(), csd->size());
- } else if (csd0size > 24) {
- std::vector<uint8_t> hvcc(csd0size + 1024);
- size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
- meta->setData(kKeyHVCC, kTypeHVCC, hvcc.data(), outsize);
- }
- } else if (profile == 9) {
- sp<ABuffer> csd1;
- if (msg->findBuffer("csd-avc", &csd)) {
- meta->setData(kKeyAVCC, kTypeAVCC, csd->data(), csd->size());
- } else if (msg->findBuffer("csd-1", &csd1)) {
- std::vector<char> avcc(csd0size + csd1->size() + 1024);
- size_t outsize = reassembleAVCC(csd0, csd1, avcc.data());
- meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
- } else { // for dolby vision avc, csd0 also holds csd1
- size_t i = 0;
- int csd0realsize = 0;
- do {
- i = findNextNalStartCode(csd0->data() + i,
- csd0->size() - i) - csd0->data();
- if (i > 0) {
- csd0realsize = i;
- break;
- }
- i += 4;
- } while(i < csd0->size());
- // buffer0 -> csd0
- sp<ABuffer> buffer0 = new (std::nothrow) ABuffer(csd0realsize);
- if (buffer0.get() == NULL || buffer0->base() == NULL) {
- return NO_MEMORY;
+ profiles.rlookup(DolbyVisionProfileDvavPen, &profileVal);
+ profiles.rlookup(DolbyVisionProfileDvavSe, &profileVal1);
+ profiles.rlookup(DolbyVisionProfileDvav110, &profileVal2);
+ if ((profile > (int32_t) profileVal) && (profile < (int32_t) profileVal1)) {
+ std::vector<uint8_t> hvcc(csd0size + 1024);
+ size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
+ meta->setData(kKeyHVCC, kTypeHVCC, hvcc.data(), outsize);
+ } else if (profile == (int32_t) profileVal2) {
+ meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
+ } else {
+ sp<ABuffer> csd1;
+ if (msg->findBuffer("csd-1", &csd1)) {
+ std::vector<char> avcc(csd0size + csd1->size() + 1024);
+ size_t outsize = reassembleAVCC(csd0, csd1, avcc.data());
+ meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
+ }
+ else {
+ // for dolby vision avc, csd0 also holds csd1
+ size_t i = 0;
+ int csd0realsize = 0;
+ do {
+ i = findNextNalStartCode(csd0->data() + i,
+ csd0->size() - i) - csd0->data();
+ if (i > 0) {
+ csd0realsize = i;
+ break;
}
- memcpy(buffer0->data(), csd0->data(), csd0realsize);
- // buffer1 -> csd1
- sp<ABuffer> buffer1 = new (std::nothrow)
- ABuffer(csd0->size() - csd0realsize);
- if (buffer1.get() == NULL || buffer1->base() == NULL) {
- return NO_MEMORY;
- }
- memcpy(buffer1->data(), csd0->data()+csd0realsize,
- csd0->size() - csd0realsize);
-
- std::vector<char> avcc(csd0->size() + 1024);
- size_t outsize = reassembleAVCC(buffer0, buffer1, avcc.data());
- meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
+ i += 4;
+ } while(i < csd0->size());
+ // buffer0 -> csd0
+ sp<ABuffer> buffer0 = new (std::nothrow) ABuffer(csd0realsize);
+ if (buffer0.get() == NULL || buffer0->base() == NULL) {
+ return NO_MEMORY;
}
- } else if (profile == 10) {
- meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size() - 24);
+ memcpy(buffer0->data(), csd0->data(), csd0realsize);
+ // buffer1 -> csd1
+ sp<ABuffer> buffer1 = new (std::nothrow)
+ ABuffer(csd0->size() - csd0realsize);
+ if (buffer1.get() == NULL || buffer1->base() == NULL) {
+ return NO_MEMORY;
+ }
+ memcpy(buffer1->data(), csd0->data()+csd0realsize,
+ csd0->size() - csd0realsize);
+
+ std::vector<char> avcc(csd0->size() + 1024);
+ size_t outsize = reassembleAVCC(buffer0, buffer1, avcc.data());
+ meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
}
}
} else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
@@ -2216,6 +2226,17 @@
meta->setData(kKeyStreamHeader, 'mdat', csd0->data(), csd0->size());
} else if (msg->findBuffer("d263", &csd0)) {
meta->setData(kKeyD263, kTypeD263, csd0->data(), csd0->size());
+ } else if (mime == MEDIA_MIMETYPE_VIDEO_DOLBY_VISION && msg->findBuffer("csd-2", &csd2)) {
+ meta->setData(kKeyDVCC, kTypeDVCC, csd2->data(), csd2->size());
+
+ // Remove CSD-2 from the data here to avoid duplicate data in meta
+ meta->remove(kKeyOpaqueCSD2);
+
+ if (msg->findBuffer("csd-avc", &csd0)) {
+ meta->setData(kKeyAVCC, kTypeAVCC, csd0->data(), csd0->size());
+ } else if (msg->findBuffer("csd-hevc", &csd0)) {
+ meta->setData(kKeyHVCC, kTypeHVCC, csd0->data(), csd0->size());
+ }
}
// XXX TODO add whatever other keys there are
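Editor's note: the profile/level tables are now shared through getDolbyVisionProfileTable()/getDolbyVisionLevelsTable(), and ALookup is used in both directions in the hunks above. A small sketch of those two directions (illustrative only):

    const ALookup<uint8_t, int32_t> &profiles = getDolbyVisionProfileTable();

    int32_t mediaProfile;
    profiles.map((uint8_t)8, &mediaProfile);                  // dvcC value -> DolbyVisionProfileDvheSt

    uint8_t dvccValue;
    profiles.rlookup(DolbyVisionProfileDvheSt, &dvccValue);   // MediaFormat constant -> dvcC value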
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index 04041eb..3509ef8 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -79,7 +79,7 @@
</MediaCodec>
<MediaCodec name="c2.android.av1.decoder" type="video/av01">
<Limit name="size" min="96x96" max="1920x1080" />
- <Limit name="alignment" value="2x2" />
+ <Limit name="alignment" value="1x1" />
<Limit name="block-size" value="16x16" />
<Limit name="blocks-per-second" min="24" max="2073600" />
<Limit name="bitrate" range="1-120000000" />
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index 53ca4e7..d7e2d18 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -184,7 +184,7 @@
</MediaCodec>
<MediaCodec name="c2.android.av1.decoder" type="video/av01" variant="!slow-cpu">
<Limit name="size" min="2x2" max="2048x2048" />
- <Limit name="alignment" value="2x2" />
+ <Limit name="alignment" value="1x1" />
<Limit name="block-size" value="16x16" />
<Limit name="block-count" range="1-16384" />
<Limit name="blocks-per-second" range="1-2073600" />
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 0a4e598..4334f1e 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -19,6 +19,8 @@
#include <utils/Log.h>
#include <utils/String8.h>
+#include <inttypes.h>
+
#include "ALooperRoster.h"
#include "ADebug.h"
@@ -142,7 +144,7 @@
sp<AHandler> handler = info.mHandler.promote();
if (handler != NULL) {
handler->mVerboseStats = verboseStats;
- s.appendFormat(": %u messages processed", handler->mMessageCounter);
+ s.appendFormat(": %" PRIu64 " messages processed", handler->mMessageCounter);
if (verboseStats) {
for (size_t j = 0; j < handler->mMessages.size(); j++) {
char fourcc[15];
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AHandler.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AHandler.h
index 53d8a9b..337460a 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AHandler.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AHandler.h
@@ -66,7 +66,7 @@
}
bool mVerboseStats;
- uint32_t mMessageCounter;
+ uint64_t mMessageCounter;
KeyedVector<uint32_t, uint32_t> mMessages;
void deliverMessage(const sp<AMessage> &msg);
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index d59e4f5..e417324 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -50,7 +50,8 @@
sp<IMemory> extractFrame(FrameRect *rect = NULL);
static sp<IMemory> getMetadataOnly(
- const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
+ const sp<MetaData> &trackMeta, int colorFormat,
+ bool thumbnail = false, uint32_t bitDepth = 0);
protected:
virtual ~FrameDecoder();
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 84653eb..78792c5 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -816,7 +816,7 @@
constexpr char KEY_OPERATING_RATE[] = "operating-rate";
constexpr char KEY_OUTPUT_REORDER_DEPTH[] = "output-reorder-depth";
constexpr char KEY_PCM_ENCODING[] = "pcm-encoding";
-constexpr char KEY_PICTURE_TYPE[] = "picture_type";
+constexpr char KEY_PICTURE_TYPE[] = "picture-type";
constexpr char KEY_PIXEL_ASPECT_RATIO_HEIGHT[] = "sar-height";
constexpr char KEY_PIXEL_ASPECT_RATIO_WIDTH[] = "sar-width";
constexpr char KEY_PREPEND_HEADER_TO_SYNC_FRAMES[] = "prepend-sps-pps-to-idr-frames";
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
index 6b1d2a1..1c8eef5 100644
--- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
@@ -204,7 +204,7 @@
};
bool AMPEG4ElementaryAssembler::initCheck() {
- if(mSizeLength == 0 || mIndexLength == 0 || mIndexDeltaLength == 0) {
+ if(mIsGeneric && (mSizeLength == 0 || mIndexLength == 0 || mIndexDeltaLength == 0)) {
android_errorWriteLog(0x534e4554, "124777537");
return false;
}
diff --git a/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp b/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp
index ac1e9b1..a8e64b6 100644
--- a/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp
+++ b/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp
@@ -393,3 +393,51 @@
std::this_thread::sleep_for(std::chrono::milliseconds(100));
looper->stop();
}
+
+TEST(MediaCodecTest, DeadWhileStoppingError) {
+ // Test scenario:
+ //
+ // 1) Client thread calls stop(); MediaCodec looper thread calls
+ // initiateShutdown(); shutdown is being handled at the component thread.
+ // 2) An error occurs while handling initiateShutdown().
+ // 3) MediaCodec looper thread handles the error.
+ // 4) Codec service dies after the error is handled
+ // 5) MediaCodec looper thread handles the death.
+
+ static const AString kCodecName{"test.codec"};
+ static const AString kCodecOwner{"nobody"};
+ static const AString kMediaType{"video/x-test"};
+
+ sp<MockCodec> mockCodec;
+ std::function<sp<CodecBase>(const AString &name, const char *owner)> getCodecBase =
+ [&mockCodec](const AString &, const char *) {
+ mockCodec = new MockCodec([](const std::shared_ptr<MockBufferChannel> &) {
+ // No mock setup, as we don't expect any buffer operations
+ // in this scenario.
+ });
+ ON_CALL(*mockCodec, initiateAllocateComponent(_))
+ .WillByDefault([mockCodec](const sp<AMessage> &) {
+ mockCodec->callback()->onComponentAllocated(kCodecName.c_str());
+ });
+ ON_CALL(*mockCodec, initiateShutdown(_))
+ .WillByDefault([mockCodec](bool) {
+ // 2)
+ mockCodec->callback()->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ // 4)
+ mockCodec->callback()->onError(DEAD_OBJECT, ACTION_CODE_FATAL);
+ // Codec service has died, no callback.
+ });
+ return mockCodec;
+ };
+
+ sp<ALooper> looper{new ALooper};
+ sp<MediaCodec> codec = SetupMediaCodec(
+ kCodecOwner, kCodecName, kMediaType, looper, getCodecBase);
+ ASSERT_NE(nullptr, codec) << "Codec must not be null";
+ ASSERT_NE(nullptr, mockCodec) << "MockCodec must not be null";
+
+ codec->stop();
+ // sleep here so that the looper thread can handle the error
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+ looper->stop();
+}
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
index 537df76..edddaa4 100644
--- a/media/mediaserver/Android.bp
+++ b/media/mediaserver/Android.bp
@@ -33,7 +33,7 @@
shared_libs: [
"android.hardware.media.omx@1.0",
- "libandroidicu",
+ "libicu",
"libfmq",
"libbinder",
"libhidlbase",
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 354971a..6f25cec 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -15,6 +15,8 @@
*/
#include <inttypes.h>
+#include <mutex>
+#include <set>
//#define LOG_NDEBUG 0
#define LOG_TAG "NdkMediaCodec"
@@ -42,6 +44,7 @@
static media_status_t translate_error(status_t err) {
+
if (err == OK) {
return AMEDIA_OK;
} else if (err == -EAGAIN) {
@@ -51,7 +54,18 @@
} else if (err == DEAD_OBJECT) {
return AMEDIACODEC_ERROR_RECLAIMED;
}
- ALOGE("sf error code: %d", err);
+
+ {
+ // minimize log flooding. Some CTS behavior made this noisy and apps could do the same.
+ static std::set<status_t> untranslated;
+ static std::mutex mutex;
+ std::lock_guard lg(mutex);
+
+ if (untranslated.find(err) == untranslated.end()) {
+ ALOGE("untranslated sf error code: %d", err);
+ untranslated.insert(err);
+ }
+ }
return AMEDIA_ERROR_UNKNOWN;
}
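Editor's note: translate_error() now logs each unrecognized status_t only once, guarded by a static set and mutex. The same guard pattern in isolation as a self-contained sketch (names are illustrative, not framework API):

    #include <cstdio>
    #include <mutex>
    #include <set>

    void logUnknownErrorOnce(int err) {
        static std::set<int> seen;
        static std::mutex mutex;
        std::lock_guard lg(mutex);
        if (seen.insert(err).second) {   // insert() reports whether err was new
            fprintf(stderr, "untranslated error code: %d\n", err);
        }
    }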
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index ba8f199..569ea2a 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -43,7 +43,7 @@
],
shared_libs: [
"libaudioclient_aidl_conversion",
- "libaudioutils", // for clock.h
+ "libaudioutils", // for clock.h, Statistics.h
"libbinder",
"libcutils",
"liblog",
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 2b765cc..5f269af 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -24,7 +24,7 @@
#include <utils/Log.h>
#include "debuggerd/handler.h"
-namespace android {
+namespace android::mediautils {
namespace {
@@ -48,7 +48,7 @@
void TimeCheck::accessAudioHalPids(std::vector<pid_t>* pids, bool update) {
static constexpr int kNumAudioHalPidsVectors = 3;
static std::vector<pid_t> audioHalPids[kNumAudioHalPidsVectors];
- static std::atomic<int> curAudioHalPids = 0;
+ static std::atomic<unsigned> curAudioHalPids = 0;
if (update) {
audioHalPids[(curAudioHalPids++ + 1) % kNumAudioHalPidsVectors] = *pids;
@@ -70,27 +70,54 @@
}
/* static */
-TimerThread* TimeCheck::getTimeCheckThread() {
- static TimerThread* sTimeCheckThread = new TimerThread();
+TimerThread& TimeCheck::getTimeCheckThread() {
+ static TimerThread sTimeCheckThread{};
return sTimeCheckThread;
}
-TimeCheck::TimeCheck(const char* tag, uint32_t timeoutMs)
- : mTimerHandle(getTimeCheckThread()->scheduleTask(
- [tag, startTime = std::chrono::system_clock::now()] { crash(tag, startTime); },
+TimeCheck::TimeCheck(std::string tag, OnTimerFunc&& onTimer, uint32_t timeoutMs,
+ bool crashOnTimeout)
+ : mTimeCheckHandler(new TimeCheckHandler{
+ std::move(tag), std::move(onTimer), crashOnTimeout,
+ std::chrono::system_clock::now(), gettid()})
+ , mTimerHandle(getTimeCheckThread().scheduleTask(
+ // Pass in all the arguments by value to this task for safety.
+ // The thread could call the callback before the constructor is finished.
+ // The destructor will be blocked on the callback, but that is implementation
+ // dependent.
+ [ timeCheckHandler = mTimeCheckHandler ] {
+ timeCheckHandler->onTimeout();
+ },
std::chrono::milliseconds(timeoutMs))) {}
TimeCheck::~TimeCheck() {
- getTimeCheckThread()->cancelTask(mTimerHandle);
+ mTimeCheckHandler->onCancel(mTimerHandle);
}
-/* static */
-void TimeCheck::crash(const char* tag, std::chrono::system_clock::time_point startTime) {
- std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
+void TimeCheck::TimeCheckHandler::onCancel(TimerThread::Handle timerHandle) const
+{
+ if (TimeCheck::getTimeCheckThread().cancelTask(timerHandle) && onTimer) {
+ const std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
+ onTimer(false /* timeout */,
+ std::chrono::duration_cast<std::chrono::duration<float, std::milli>>(
+ endTime - startTime).count());
+ }
+}
+
+void TimeCheck::TimeCheckHandler::onTimeout() const
+{
+ const std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
+ if (onTimer) {
+ onTimer(true /* timeout */,
+ std::chrono::duration_cast<std::chrono::duration<float, std::milli>>(
+ endTime - startTime).count());
+ }
+
+ if (!crashOnTimeout) return;
// Generate audio HAL processes tombstones and allow time to complete
// before forcing restart
- std::vector<pid_t> pids = getAudioHalPids();
+ std::vector<pid_t> pids = TimeCheck::getAudioHalPids();
if (pids.size() != 0) {
for (const auto& pid : pids) {
ALOGI("requesting tombstone for pid: %d", pid);
@@ -100,9 +127,9 @@
} else {
ALOGI("No HAL process pid available, skipping tombstones");
}
- LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
- LOG_ALWAYS_FATAL("TimeCheck timeout for %s (start=%s, end=%s)", tag,
- formatTime(startTime).c_str(), formatTime(endTime).c_str());
+ LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag.c_str());
+ LOG_ALWAYS_FATAL("TimeCheck timeout for %s on thread %d (start=%s, end=%s)",
+ tag.c_str(), tid, formatTime(startTime).c_str(), formatTime(endTime).c_str());
}
-}; // namespace android
+} // namespace android::mediautils
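Editor's note: the reworked TimeCheck takes a tag, an optional onTimer callback and a crashOnTimeout flag, and reports elapsed time both when the scope ends normally and when the timer fires. A hedged usage sketch based on the constructor above; the tag, timeout value and callback body are illustrative, and the callback signature (bool timeout, float elapsedMs) is inferred from how onTimer is invoked in this change:

    mediautils::TimeCheck check(
            "IAudioFlinger::createTrack" /* tag */,
            [](bool timeout, float elapsedMs) {
                // timeout == false: scope completed (cancel path); true: timer expired.
                ALOGD("%s after %.2f ms", timeout ? "timed out" : "completed", elapsedMs);
            },
            5000 /* timeoutMs */,
            true /* crashOnTimeout */);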
diff --git a/media/utils/TimerThread.cpp b/media/utils/TimerThread.cpp
index 3c95798..dfc84e1 100644
--- a/media/utils/TimerThread.cpp
+++ b/media/utils/TimerThread.cpp
@@ -50,9 +50,12 @@
return deadline;
}
-void TimerThread::cancelTask(Handle handle) {
+// Returns true if cancelled, false if handle doesn't exist.
+// Beware of lock inversion with cancelTask() as the callback
+// is called while holding mMutex.
+bool TimerThread::cancelTask(Handle handle) {
std::lock_guard _l(mMutex);
- mMonitorRequests.erase(handle);
+ return mMonitorRequests.erase(handle) != 0;
}
void TimerThread::threadFunc() {
diff --git a/media/utils/fuzzers/TimeCheckFuzz.cpp b/media/utils/fuzzers/TimeCheckFuzz.cpp
index eeb6ba6..7966469 100644
--- a/media/utils/fuzzers/TimeCheckFuzz.cpp
+++ b/media/utils/fuzzers/TimeCheckFuzz.cpp
@@ -44,11 +44,11 @@
// 2. We also have setAudioHalPids, which is populated with the pids set
// above.
- android::TimeCheck::setAudioHalPids(pids);
+ android::mediautils::TimeCheck::setAudioHalPids(pids);
std::string name = data_provider.ConsumeRandomLengthString(kMaxStringLen);
// 3. The constructor, which is fuzzed here:
- android::TimeCheck timeCheck(name.c_str(), timeoutMs);
+ android::mediautils::TimeCheck timeCheck(name.c_str(), {} /* onTimer */, timeoutMs);
// We will leave some buffer to avoid sleeping too long
uint8_t sleep_amount_ms = data_provider.ConsumeIntegralInRange<uint8_t>(0, timeoutMs / 2);
diff --git a/media/utils/include/mediautils/MethodStatistics.h b/media/utils/include/mediautils/MethodStatistics.h
new file mode 100644
index 0000000..7d8061d
--- /dev/null
+++ b/media/utils/include/mediautils/MethodStatistics.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <map>
+#include <mutex>
+#include <string>
+
+#include <android-base/thread_annotations.h>
+#include <audio_utils/Statistics.h>
+
+namespace android::mediautils {
+
+/**
+ * MethodStatistics is used to associate Binder codes
+ * with a method name and execution time statistics.
+ *
+ * This is used to track binder transaction times for
+ * AudioFlinger and AudioPolicy services.
+ *
+ * Here, Code is the enumeration type for the method
+ * lookup.
+ */
+template <typename Code>
+class MethodStatistics {
+public:
+ using FloatType = float;
+ using StatsType = audio_utils::Statistics<FloatType>;
+
+ /**
+ * Method statistics.
+ *
+     * Typically initialized with the Binder transaction code-to-name list used to
+     * track AudioFlinger and AudioPolicyManager execution statistics.
+ */
+ explicit MethodStatistics(
+ const std::initializer_list<std::pair<const Code, std::string>>& methodMap = {})
+ : mMethodMap{methodMap} {}
+
+ /**
+ * Adds a method event, typically execution time in ms.
+ */
+ void event(Code code, FloatType executeMs) {
+ std::lock_guard lg(mLock);
+ mStatisticsMap[code].add(executeMs);
+ }
+
+ /**
+ * Returns the name for the method code.
+ */
+ std::string getMethodForCode(Code code) const {
+ auto it = mMethodMap.find(code);
+ return it == mMethodMap.end() ? std::to_string((int)code) : it->second;
+ }
+
+ /**
+ * Returns the number of times the method was invoked by event().
+ */
+ size_t getMethodCount(Code code) const {
+ std::lock_guard lg(mLock);
+ auto it = mStatisticsMap.find(code);
+ return it == mStatisticsMap.end() ? 0 : it->second.getN();
+ }
+
+ /**
+ * Returns the statistics object for the method.
+ */
+ StatsType getStatistics(Code code) const {
+ std::lock_guard lg(mLock);
+ auto it = mStatisticsMap.find(code);
+ return it == mStatisticsMap.end() ? StatsType{} : it->second;
+ }
+
+ /**
+ * Dumps the current method statistics.
+ */
+ std::string dump() const {
+ std::stringstream ss;
+ std::lock_guard lg(mLock);
+ for (const auto &[code, stats] : mStatisticsMap) {
+ ss << int(code) << " " << getMethodForCode(code) <<
+ " n=" << stats.getN() << " " << stats.toString() << "\n";
+ }
+ return ss.str();
+ }
+
+private:
+ const std::map<Code, std::string> mMethodMap;
+ mutable std::mutex mLock;
+ std::map<Code, StatsType> mStatisticsMap GUARDED_BY(mLock);
+};
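+
+// A minimal usage sketch (illustrative only; the codes and names below are hypothetical):
+//
+//   MethodStatistics<int> stats{{1, "openOutput"}, {2, "closeOutput"}};
+//   stats.event(1, 3.2f);                 // record a 3.2 ms execution of method code 1
+//   stats.getMethodForCode(1);            // "openOutput"
+//   stats.getMethodForCode(42);           // "42" (unknown codes map to their numeric string)
+//   ALOGD("%s", stats.dump().c_str());    // per-method event counts and timing statistics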
+
+// If used, this macro requires IBinder.h to be included at the point of invocation.
+#define METHOD_STATISTICS_BINDER_CODE_NAMES(CODE_TYPE) \
+ {(CODE_TYPE)IBinder::PING_TRANSACTION , "ping"}, \
+ {(CODE_TYPE)IBinder::DUMP_TRANSACTION , "dump"}, \
+ {(CODE_TYPE)IBinder::SHELL_COMMAND_TRANSACTION , "shellCommand"}, \
+ {(CODE_TYPE)IBinder::INTERFACE_TRANSACTION , "getInterfaceDescriptor"}, \
+ {(CODE_TYPE)IBinder::SYSPROPS_TRANSACTION , "SYSPROPS_TRANSACTION"}, \
+ {(CODE_TYPE)IBinder::EXTENSION_TRANSACTION , "EXTENSION_TRANSACTION"}, \
+ {(CODE_TYPE)IBinder::DEBUG_PID_TRANSACTION , "DEBUG_PID_TRANSACTION"}, \
+
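+// For example (illustrative), a per-service statistics object can combine service-specific
+// entries with these generic IBinder transactions:
+//
+//   MethodStatistics<int> stats{
+//       {1, "someMethod"},                        // hypothetical service-specific entry
+//       METHOD_STATISTICS_BINDER_CODE_NAMES(int)
+//   };
+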
+} // namespace android::mediautils
diff --git a/media/utils/include/mediautils/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
index 0d6e80d..991a921 100644
--- a/media/utils/include/mediautils/TimeCheck.h
+++ b/media/utils/include/mediautils/TimeCheck.h
@@ -20,27 +20,76 @@
#include <mediautils/TimerThread.h>
-namespace android {
+namespace android::mediautils {
// A class monitoring execution time for a code block (scoped variable) and causing an assert
// if it exceeds a certain time
class TimeCheck {
public:
+ using OnTimerFunc = std::function<void(bool /* timeout */, float /* elapsedMs */ )>;
+
// The default timeout is chosen to be less than system server watchdog timeout
static constexpr uint32_t kDefaultTimeOutMs = 5000;
- TimeCheck(const char* tag, uint32_t timeoutMs = kDefaultTimeOutMs);
+ /**
+ * TimeCheck is a RAII object which will notify a callback
+ * on timer expiration or when the object is deallocated.
+ *
+ * TimeCheck is used as a watchdog and aborts by default on timer expiration.
+ * When it aborts, it will also send a debugger signal to pids passed in through
+ * setAudioHalPids().
+ *
+ * If the callback function is invoked for the timeout, it will not be invoked
+ * again on deallocation.
+ *
+ * \param tag string associated with the TimeCheck object.
+ * \param onTimer callback function with 2 parameters
+ * bool timeout (which is true when the TimeCheck object
+ * times out, false when the TimeCheck object is
+ * destroyed or leaves scope before the timer expires.)
+ * float elapsedMs (the elapsed time to this event).
+ *      When timeout is true, the callback is invoked on a different thread.
+ *      The destructor currently blocks until that callback completes
+ *      (beware of potential lock inversion here); nevertheless it is
+ *      safer not to depend on stack contents.
+ * \param timeoutMs timeout in milliseconds.
+ * \param crashOnTimeout true if the object issues an abort on timeout.
+ */
+ explicit TimeCheck(std::string tag, OnTimerFunc&& onTimer = {},
+ uint32_t timeoutMs = kDefaultTimeOutMs, bool crashOnTimeout = true);
+    // Delete the copy constructor and copy assignment as there should only be one call to
+    // the destructor. Move is left implicitly disabled, but enabling it would be logically
+    // consistent.
+ TimeCheck(const TimeCheck& other) = delete;
+ TimeCheck& operator=(const TimeCheck&) = delete;
+
~TimeCheck();
static void setAudioHalPids(const std::vector<pid_t>& pids);
static std::vector<pid_t> getAudioHalPids();
private:
- static TimerThread* getTimeCheckThread();
+ static TimerThread& getTimeCheckThread();
static void accessAudioHalPids(std::vector<pid_t>* pids, bool update);
- static void crash(const char* tag, std::chrono::system_clock::time_point startTime);
+ // Helper class for handling events.
+ // The usage here is const safe.
+ class TimeCheckHandler {
+ public:
+ const std::string tag;
+ const OnTimerFunc onTimer;
+ const bool crashOnTimeout;
+ const std::chrono::system_clock::time_point startTime;
+ const pid_t tid;
+
+ void onCancel(TimerThread::Handle handle) const;
+ void onTimeout() const;
+ };
+
+    // mTimeCheckHandler is immutable; keep it first so it is initialized first and
+    // destroyed last.
+    // Technically speaking, we do not need a shared_ptr here because TimerThread::cancelTask()
+    // is mutually exclusive with the callback, but the price paid for lifetime safety is minimal.
+ const std::shared_ptr<const TimeCheckHandler> mTimeCheckHandler;
const TimerThread::Handle mTimerHandle;
};
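+
+// A minimal usage sketch (illustrative only; the tag, timeout and callback are hypothetical):
+//
+//   {
+//       TimeCheck check("IAudioFlinger::setMode",
+//               [](bool timeout, float elapsedMs) {
+//                   // timeout == true: the timer expired (called from the timer thread).
+//                   // timeout == false: this scope was left before expiration.
+//               },
+//               TimeCheck::kDefaultTimeOutMs, false /* crashOnTimeout */);
+//       // ... monitored code ...
+//   }   // the destructor cancels the timer and reports the elapsed time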
-}; // namespace android
+} // namespace android::mediautils
diff --git a/media/utils/include/mediautils/TimerThread.h b/media/utils/include/mediautils/TimerThread.h
index cf457b8..acf0b16 100644
--- a/media/utils/include/mediautils/TimerThread.h
+++ b/media/utils/include/mediautils/TimerThread.h
@@ -48,9 +48,9 @@
/**
* Cancel a task, previously scheduled with scheduleTask().
- * If the task has already executed, this is a no-op.
+ * If the task has already executed, this is a no-op and returns false.
*/
- void cancelTask(Handle handle);
+ bool cancelTask(Handle handle);
private:
using TimePoint = std::chrono::steady_clock::time_point;
diff --git a/media/utils/tests/Android.bp b/media/utils/tests/Android.bp
index 6593d56..5498ac5 100644
--- a/media/utils/tests/Android.bp
+++ b/media/utils/tests/Android.bp
@@ -26,3 +26,51 @@
"media_synchronization_tests.cpp",
],
}
+
+cc_test {
+ name: "methodstatistics_tests",
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+
+ shared_libs: [
+ "libaudioutils",
+ "liblog",
+ "libmediautils",
+ "libutils",
+ ],
+
+ srcs: [
+ "methodstatistics_tests.cpp",
+ ],
+}
+
+cc_test {
+ name: "timecheck_tests",
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+
+    sanitize: {
+ address: true,
+ cfi: true,
+ integer_overflow: true,
+ memtag_heap: true,
+ },
+
+ shared_libs: [
+ "liblog",
+ "libmediautils",
+ "libutils",
+ ],
+
+ srcs: [
+ "timecheck_tests.cpp",
+ ],
+}
diff --git a/media/utils/tests/methodstatistics_tests.cpp b/media/utils/tests/methodstatistics_tests.cpp
new file mode 100644
index 0000000..85c4ad5
--- /dev/null
+++ b/media/utils/tests/methodstatistics_tests.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "methodstatistics_tests"
+
+#include <mediautils/MethodStatistics.h>
+
+#include <atomic>
+#include <gtest/gtest.h>
+#include <utils/Log.h>
+
+using namespace android::mediautils;
+using CodeType = size_t;
+
+constexpr CodeType HELLO_CODE = 10;
+constexpr const char * HELLO_NAME = "hello";
+constexpr float HELLO_EVENTS[] = { 1.f, 3.f }; // values chosen so the mean is exact in float
+
+constexpr CodeType WORLD_CODE = 21;
+constexpr const char * WORLD_NAME = "world";
+
+constexpr CodeType UNKNOWN_CODE = 12345;
+
+TEST(methodstatistics_tests, method_names) {
+ const MethodStatistics<CodeType> methodStatistics{
+ {HELLO_CODE, HELLO_NAME},
+ {WORLD_CODE, WORLD_NAME},
+ };
+
+ ASSERT_EQ(std::string(HELLO_NAME), methodStatistics.getMethodForCode(HELLO_CODE));
+ ASSERT_EQ(std::string(WORLD_NAME), methodStatistics.getMethodForCode(WORLD_CODE));
+ // an unknown code returns itself as a number.
+ ASSERT_EQ(std::to_string(UNKNOWN_CODE), methodStatistics.getMethodForCode(UNKNOWN_CODE));
+}
+
+TEST(methodstatistics_tests, events) {
+ MethodStatistics<CodeType> methodStatistics{
+ {HELLO_CODE, HELLO_NAME},
+ {WORLD_CODE, WORLD_NAME},
+ };
+
+ size_t n = 0;
+ float sum = 0.f;
+ for (const auto event : HELLO_EVENTS) {
+ methodStatistics.event(HELLO_CODE, event);
+ sum += event;
+ ++n;
+ }
+
+ const auto helloStats = methodStatistics.getStatistics(HELLO_CODE);
+ ASSERT_EQ((signed)n, helloStats.getN());
+ ASSERT_EQ(sum / n, helloStats.getMean());
+ ASSERT_EQ(n, methodStatistics.getMethodCount(HELLO_CODE));
+
+ const auto unsetStats = methodStatistics.getStatistics(UNKNOWN_CODE);
+ ASSERT_EQ(0, unsetStats.getN());
+ ASSERT_EQ(0.f, unsetStats.getMean());
+ ASSERT_EQ(0U, methodStatistics.getMethodCount(UNKNOWN_CODE));
+}
diff --git a/media/utils/tests/timecheck_tests.cpp b/media/utils/tests/timecheck_tests.cpp
new file mode 100644
index 0000000..9833dc9
--- /dev/null
+++ b/media/utils/tests/timecheck_tests.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "timecheck_tests"
+
+#include <mediautils/TimeCheck.h>
+
+#include <atomic>
+#include <gtest/gtest.h>
+#include <utils/Log.h>
+
+using namespace android::mediautils;
+
+TEST(timecheck_tests, success) {
+ bool timeoutRegistered = false;
+ float elapsedMsRegistered = 0.f;
+ bool event = false;
+
+ {
+ TimeCheck timeCheck("success",
+ [&event, &timeoutRegistered, &elapsedMsRegistered]
+ (bool timeout, float elapsedMs) {
+ timeoutRegistered = timeout;
+ elapsedMsRegistered = elapsedMs;
+ event = true;
+ }, 1000 /* msec */, false /* crash */);
+ }
+ ASSERT_TRUE(event);
+ ASSERT_FALSE(timeoutRegistered);
+ ASSERT_GT(elapsedMsRegistered, 0.f);
+}
+
+TEST(timecheck_tests, timeout) {
+ bool timeoutRegistered = false;
+ float elapsedMsRegistered = 0.f;
+ std::atomic_bool event = false; // seq-cst implies acquire-release
+
+ {
+ TimeCheck timeCheck("timeout",
+ [&event, &timeoutRegistered, &elapsedMsRegistered]
+ (bool timeout, float elapsedMs) {
+ timeoutRegistered = timeout;
+ elapsedMsRegistered = elapsedMs;
+ event = true; // store-release, must be last.
+ }, 1 /* msec */, false /* crash */);
+ usleep(100 * 1000 /* usec */); // extra time as callback called by different thread.
+ }
+ ASSERT_TRUE(event); // load-acquire, must be first.
+ ASSERT_TRUE(timeoutRegistered); // only called once on failure, not on dealloc.
+ ASSERT_GT(elapsedMsRegistered, 0.f);
+}
+
+// Note: We do not test TimeCheck crash because TimeCheck is multithreaded and the
+// EXPECT_EXIT() signal catching is imperfect due to the gtest fork.
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index eb3c164..8cafdfd 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -79,6 +79,7 @@
#include <media/nbaio/PipeReader.h>
#include <mediautils/BatteryNotifier.h>
#include <mediautils/MemoryLeakTrackUtil.h>
+#include <mediautils/MethodStatistics.h>
#include <mediautils/ServiceUtilities.h>
#include <mediautils/TimeCheck.h>
#include <private/android_filesystem_config.h>
@@ -158,6 +159,92 @@
return sExternalVibratorService;
}
+// Creates an association between Binder transaction codes and method names for IAudioFlinger.
+#define IAUDIOFLINGER_BINDER_METHOD_MACRO_LIST \
+BINDER_METHOD_ENTRY(createTrack) \
+BINDER_METHOD_ENTRY(createRecord) \
+BINDER_METHOD_ENTRY(sampleRate) \
+BINDER_METHOD_ENTRY(format) \
+BINDER_METHOD_ENTRY(frameCount) \
+BINDER_METHOD_ENTRY(latency) \
+BINDER_METHOD_ENTRY(setMasterVolume) \
+BINDER_METHOD_ENTRY(setMasterMute) \
+BINDER_METHOD_ENTRY(masterVolume) \
+BINDER_METHOD_ENTRY(masterMute) \
+BINDER_METHOD_ENTRY(setStreamVolume) \
+BINDER_METHOD_ENTRY(setStreamMute) \
+BINDER_METHOD_ENTRY(streamVolume) \
+BINDER_METHOD_ENTRY(streamMute) \
+BINDER_METHOD_ENTRY(setMode) \
+BINDER_METHOD_ENTRY(setMicMute) \
+BINDER_METHOD_ENTRY(getMicMute) \
+BINDER_METHOD_ENTRY(setRecordSilenced) \
+BINDER_METHOD_ENTRY(setParameters) \
+BINDER_METHOD_ENTRY(getParameters) \
+BINDER_METHOD_ENTRY(registerClient) \
+BINDER_METHOD_ENTRY(getInputBufferSize) \
+BINDER_METHOD_ENTRY(openOutput) \
+BINDER_METHOD_ENTRY(openDuplicateOutput) \
+BINDER_METHOD_ENTRY(closeOutput) \
+BINDER_METHOD_ENTRY(suspendOutput) \
+BINDER_METHOD_ENTRY(restoreOutput) \
+BINDER_METHOD_ENTRY(openInput) \
+BINDER_METHOD_ENTRY(closeInput) \
+BINDER_METHOD_ENTRY(invalidateStream) \
+BINDER_METHOD_ENTRY(setVoiceVolume) \
+BINDER_METHOD_ENTRY(getRenderPosition) \
+BINDER_METHOD_ENTRY(getInputFramesLost) \
+BINDER_METHOD_ENTRY(newAudioUniqueId) \
+BINDER_METHOD_ENTRY(acquireAudioSessionId) \
+BINDER_METHOD_ENTRY(releaseAudioSessionId) \
+BINDER_METHOD_ENTRY(queryNumberEffects) \
+BINDER_METHOD_ENTRY(queryEffect) \
+BINDER_METHOD_ENTRY(getEffectDescriptor) \
+BINDER_METHOD_ENTRY(createEffect) \
+BINDER_METHOD_ENTRY(moveEffects) \
+BINDER_METHOD_ENTRY(loadHwModule) \
+BINDER_METHOD_ENTRY(getPrimaryOutputSamplingRate) \
+BINDER_METHOD_ENTRY(getPrimaryOutputFrameCount) \
+BINDER_METHOD_ENTRY(setLowRamDevice) \
+BINDER_METHOD_ENTRY(getAudioPort) \
+BINDER_METHOD_ENTRY(createAudioPatch) \
+BINDER_METHOD_ENTRY(releaseAudioPatch) \
+BINDER_METHOD_ENTRY(listAudioPatches) \
+BINDER_METHOD_ENTRY(setAudioPortConfig) \
+BINDER_METHOD_ENTRY(getAudioHwSyncForSession) \
+BINDER_METHOD_ENTRY(systemReady) \
+BINDER_METHOD_ENTRY(audioPolicyReady) \
+BINDER_METHOD_ENTRY(frameCountHAL) \
+BINDER_METHOD_ENTRY(getMicrophones) \
+BINDER_METHOD_ENTRY(setMasterBalance) \
+BINDER_METHOD_ENTRY(getMasterBalance) \
+BINDER_METHOD_ENTRY(setEffectSuspended) \
+BINDER_METHOD_ENTRY(setAudioHalPids) \
+BINDER_METHOD_ENTRY(setVibratorInfos) \
+BINDER_METHOD_ENTRY(updateSecondaryOutputs) \
+BINDER_METHOD_ENTRY(getMmapPolicyInfos) \
+BINDER_METHOD_ENTRY(getAAudioMixerBurstCount) \
+BINDER_METHOD_ENTRY(getAAudioHardwareBurstMinUsec) \
+BINDER_METHOD_ENTRY(setDeviceConnectedState) \
+
+// singleton for Binder Method Statistics for IAudioFlinger
+static auto& getIAudioFlingerStatistics() {
+ using Code = android::AudioFlingerServerAdapter::Delegate::TransactionCode;
+
+#pragma push_macro("BINDER_METHOD_ENTRY")
+#undef BINDER_METHOD_ENTRY
+#define BINDER_METHOD_ENTRY(ENTRY) \
+ {(Code)media::BnAudioFlingerService::TRANSACTION_##ENTRY, #ENTRY},
+
+ static mediautils::MethodStatistics<Code> methodStatistics{
+ IAUDIOFLINGER_BINDER_METHOD_MACRO_LIST
+ METHOD_STATISTICS_BINDER_CODE_NAMES(Code)
+ };
+#pragma pop_macro("BINDER_METHOD_ENTRY")
+
+ return methodStatistics;
+}
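+
+// Usage note (descriptive): getIAudioFlingerStatistics().getMethodForCode(code) maps an
+// IAudioFlinger transaction code to its method name (unknown codes fall back to their numeric
+// string), and getIAudioFlingerStatistics().event(code, elapsedMs) accumulates per-method timing.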
+
class DevicesFactoryHalCallbackImpl : public DevicesFactoryHalCallback {
public:
void onNewDevicesAvailable() override {
@@ -276,7 +363,7 @@
mMediaLogNotifier->run("MediaLogNotifier");
std::vector<pid_t> halPids;
mDevicesFactoryHal->getHalPids(&halPids);
- TimeCheck::setAudioHalPids(halPids);
+ mediautils::TimeCheck::setAudioHalPids(halPids);
// Notify that we have started (also called when audioserver service restarts)
mediametrics::LogItem(mMetricsId)
@@ -316,7 +403,7 @@
}
status_t AudioFlinger::setAudioHalPids(const std::vector<pid_t>& pids) {
- TimeCheck::setAudioHalPids(pids);
+ mediautils::TimeCheck::setAudioHalPids(pids);
return NO_ERROR;
}
@@ -828,6 +915,16 @@
std::string s = GetUnreachableMemoryString(true /* contents */, 100 /* limit */);
write(fd, s.c_str(), s.size());
}
+ {
+ std::string timeCheckStats = getIAudioFlingerStatistics().dump();
+ dprintf(fd, "\nIAudioFlinger binder call profile\n");
+ write(fd, timeCheckStats.c_str(), timeCheckStats.size());
+
+ extern mediautils::MethodStatistics<int>& getIEffectStatistics();
+ timeCheckStats = getIEffectStatistics().dump();
+ dprintf(fd, "\nIEffect binder call profile\n");
+ write(fd, timeCheckStats.c_str(), timeCheckStats.size());
+ }
}
return NO_ERROR;
}
@@ -4417,9 +4514,20 @@
break;
}
- std::string tag("IAudioFlinger command " +
- std::to_string(static_cast<std::underlying_type_t<TransactionCode>>(code)));
- TimeCheck check(tag.c_str());
+ const std::string methodName = getIAudioFlingerStatistics().getMethodForCode(code);
+ mediautils::TimeCheck check(
+ std::string("IAudioFlinger::").append(methodName),
+ [code, methodName](bool timeout, float elapsedMs) { // don't move methodName.
+ if (timeout) {
+ mediametrics::LogItem(mMetricsId)
+ .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_TIMEOUT)
+ .set(AMEDIAMETRICS_PROP_METHODCODE, int64_t(code))
+ .set(AMEDIAMETRICS_PROP_METHODNAME, methodName.c_str())
+ .record();
+ } else {
+ getIAudioFlingerStatistics().event(code, elapsedMs);
+ }
+ });
// Make sure we connect to Audio Policy Service before calling into AudioFlinger:
// - AudioFlinger can call into Audio Policy Service with its global mutex held
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index b748f9d..efd2dbd 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -39,6 +39,7 @@
#include <media/ShmemCompat.h>
#include <media/audiohal/EffectHalInterface.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <mediautils/MethodStatistics.h>
#include <mediautils/ServiceUtilities.h>
#include "AudioFlinger.h"
@@ -1751,6 +1752,44 @@
disconnect(false);
}
+// Creates an association between Binder transaction codes and method names for IEffect.
+#define IEFFECT_BINDER_METHOD_MACRO_LIST \
+BINDER_METHOD_ENTRY(enable) \
+BINDER_METHOD_ENTRY(disable) \
+BINDER_METHOD_ENTRY(command) \
+BINDER_METHOD_ENTRY(disconnect) \
+BINDER_METHOD_ENTRY(getCblk) \
+
+// singleton for Binder Method Statistics for IEffect
+mediautils::MethodStatistics<int>& getIEffectStatistics() {
+ using Code = int;
+
+#pragma push_macro("BINDER_METHOD_ENTRY")
+#undef BINDER_METHOD_ENTRY
+#define BINDER_METHOD_ENTRY(ENTRY) \
+ {(Code)media::BnEffect::TRANSACTION_##ENTRY, #ENTRY},
+
+ static mediautils::MethodStatistics<Code> methodStatistics{
+ IEFFECT_BINDER_METHOD_MACRO_LIST
+ METHOD_STATISTICS_BINDER_CODE_NAMES(Code)
+ };
+#pragma pop_macro("BINDER_METHOD_ENTRY")
+
+ return methodStatistics;
+}
+
+status_t AudioFlinger::EffectHandle::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
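+    // Descriptive note: mediametrics::Defer runs the lambda below when onTransact returns,
+    // recording the elapsed transaction time (in ms) against this Binder code in the
+    // IEffect method statistics.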
+ std::chrono::system_clock::time_point startTime = std::chrono::system_clock::now();
+ mediametrics::Defer defer([startTime, code] {
+ std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
+ getIEffectStatistics().event(code,
+ std::chrono::duration_cast<std::chrono::duration<float, std::milli>>(
+ endTime - startTime).count());
+ });
+ return BnEffect::onTransact(code, data, reply, flags);
+}
+
status_t AudioFlinger::EffectHandle::initCheck()
{
return mClient == 0 || mCblkMemory != 0 ? OK : NO_MEMORY;
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index e2bea67..42614cc 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -356,6 +356,8 @@
const sp<media::IEffectClient>& effectClient,
int32_t priority, bool notifyFramesProcessed);
virtual ~EffectHandle();
+ status_t onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
virtual status_t initCheck();
// IEffect
diff --git a/services/audioflinger/TEST_MAPPING b/services/audioflinger/TEST_MAPPING
new file mode 100644
index 0000000..3de5a9f
--- /dev/null
+++ b/services/audioflinger/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsNativeMediaAAudioTestCases",
+ "options" : [
+ {
+ "include-filter": "android.nativemedia.aaudio.AAudioTests#AAudioBasic.*"
+ }
+ ]
+ }
+ ]
+}
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 5b2b87e..09f947c 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -210,12 +210,10 @@
// return the strategy corresponding to a given stream type
virtual product_strategy_t getStrategyForStream(audio_stream_type_t stream) = 0;
- // return the enabled output devices for the given stream type
- virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream) = 0;
-
// retrieves the list of enabled output devices for the given audio attributes
virtual status_t getDevicesForAttributes(const audio_attributes_t &attr,
- AudioDeviceTypeAddrVector *devices) = 0;
+ AudioDeviceTypeAddrVector *devices,
+ bool forVolume) = 0;
// Audio effect management
virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc) = 0;
diff --git a/services/audiopolicy/TEST_MAPPING b/services/audiopolicy/TEST_MAPPING
index 9b4cc8a..f130f7c 100644
--- a/services/audiopolicy/TEST_MAPPING
+++ b/services/audiopolicy/TEST_MAPPING
@@ -11,6 +11,14 @@
"include-filter": "com.google.android.gts.audio.AudioHostTest#testTwoChannelCapturing"
}
]
+ },
+ {
+ "name": "CtsNativeMediaAAudioTestCases",
+ "options" : [
+ {
+ "include-filter": "android.nativemedia.aaudio.AAudioTests#AAudioBasic.*"
+ }
+ ]
}
]
}
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 18cf0c1..64c7923 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -305,6 +305,7 @@
{
return !devices().isEmpty() ? devices().itemAt(0)->hasGainController() : false;
}
+ bool isRouted() const { return mPatchHandle != AUDIO_PATCH_HANDLE_NONE; }
DeviceVector mDevices; /**< current devices this output is routed to */
wp<AudioPolicyMix> mPolicyMix; // non NULL when used by a dynamic policy
@@ -437,6 +438,8 @@
uint32_t getRecommendedMuteDurationMs() const override;
+ void setTracksInvalidatedStatusByStrategy(product_strategy_t strategy);
+
const sp<IOProfile> mProfile; // I/O profile this output derives from
audio_io_handle_t mIoHandle; // output handle
uint32_t mLatency; //
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index dc2403c..0431619 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -56,7 +56,13 @@
virtual void dump(String8 *dst, int spaces) const;
virtual std::string toShortString() const;
-
+ /**
+ * @brief isInternal
+ * @return true if the client corresponds to an audio patch created from createAudioPatch API or
+     * @return true if the client corresponds to an audio patch created from the createAudioPatch
+     * API or for call audio routing, false if the client corresponds to an AudioTrack,
+     * AudioRecord or HW Audio Source.
+ virtual bool isInternal() const { return false; }
audio_port_handle_t portId() const { return mPortId; }
uid_t uid() const { return mUid; }
audio_session_t session() const { return mSessionId; };
@@ -69,8 +75,16 @@
bool isPreferredDeviceForExclusiveUse() const { return mPreferredDeviceForExclusiveUse; }
virtual void setActive(bool active) { mActive = active; }
bool active() const { return mActive; }
+ /**
+     * @brief hasPreferredDevice Note that internal clients use a preferred device only for
+     * convenience, so this behavior is hidden here to prevent regressions (such as invalidating
+     * tracks of clients following the same strategies...).
+     * @param activeOnly if true, also require the client to be active.
+     * @return true if a (non-internal) client has a preferred device set.
+ */
bool hasPreferredDevice(bool activeOnly = false) const {
- return mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE && (!activeOnly || mActive);
+ return !isInternal() &&
+ mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE && (!activeOnly || mActive);
}
private:
@@ -143,6 +157,14 @@
}
uint32_t getActivityCount() const { return mActivityCount; }
+ bool isInvalid() const {
+ return mIsInvalid;
+ }
+
+ void setIsInvalid() {
+ mIsInvalid = true;
+ }
+
private:
const audio_stream_type_t mStream;
const product_strategy_t mStrategy;
@@ -155,6 +177,7 @@
* involved in a duplication.
*/
uint32_t mActivityCount = 0;
+ bool mIsInvalid = false;
};
class RecordClientDescriptor: public ClientDescriptor
@@ -211,6 +234,11 @@
mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
mSinkDevice = nullptr;
}
+ bool belongsToOutput(const sp<SwAudioOutputDescriptor> &swOutput) const {
+ return swOutput != nullptr && mSwOutput.promote() == swOutput;
+ }
+ void setUseSwBridge() { mUseSwBridge = true; }
+ bool useSwBridge() const { return mUseSwBridge; }
bool isConnected() const { return mPatchHandle != AUDIO_PATCH_HANDLE_NONE; }
audio_patch_handle_t getPatchHandle() const { return mPatchHandle; }
sp<DeviceDescriptor> srcDevice() const { return mSrcDevice; }
@@ -229,6 +257,35 @@
sp<DeviceDescriptor> mSinkDevice;
wp<SwAudioOutputDescriptor> mSwOutput;
wp<HwAudioOutputDescriptor> mHwOutput;
+ bool mUseSwBridge = false;
+};
+
+/**
+ * @brief The InternalSourceClientDescriptor class
+ * Specialized client descriptor for either a raw patch created from the @see createAudioPatch API
+ * or for internal audio patches managed by APM (e.g. phone call patches).
+ * Whatever bridge is created (software or hardware), a client is needed to track the activity
+ * and manage volumes.
+ * The audio patch's requested sink is expressed as a preferred device, which allows the SwOutput
+ * to be routed. APM then performs checks on the requester's UID (against the UID of audioserver)
+ * to prevent rerouting a SwOutput involved in raw patches.
+ */
+class InternalSourceClientDescriptor: public SourceClientDescriptor
+{
+public:
+ InternalSourceClientDescriptor(
+ audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
+ const struct audio_port_config &config, const sp<DeviceDescriptor>& srcDevice,
+ const sp<DeviceDescriptor>& sinkDevice,
+ product_strategy_t strategy, VolumeSource volumeSource) :
+ SourceClientDescriptor(
+ portId, uid, attributes, config, srcDevice, AUDIO_STREAM_PATCH, strategy,
+ volumeSource)
+ {
+ setPreferredDeviceId(sinkDevice->getId());
+ }
+ bool isInternal() const override { return true; }
+ ~InternalSourceClientDescriptor() override = default;
};
class SourceClientCollection :
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 5c342a1..009fa82 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -726,6 +726,14 @@
return mProfile->recommendedMuteDurationMs;
}
+void SwAudioOutputDescriptor::setTracksInvalidatedStatusByStrategy(product_strategy_t strategy) {
+ for (const auto &client : getClientIterable()) {
+ if (strategy == client->strategy()) {
+ client->setIsInvalid();
+ }
+ }
+}
+
// HwAudioOutputDescriptor implementation
HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<SourceClientDescriptor>& source,
AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 1132a29..d1655ef 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -100,7 +100,8 @@
TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
{config.sample_rate, config.channel_mask, config.format}, AUDIO_PORT_HANDLE_NONE,
stream, strategy, volumeSource, AUDIO_OUTPUT_FLAG_NONE, false,
- {} /* Sources do not support secondary outputs*/, nullptr), mSrcDevice(srcDevice)
+ {} /* Sources do not support secondary outputs*/, nullptr),
+ mSrcDevice(srcDevice)
{
}
diff --git a/services/audiopolicy/fuzzer/Android.bp b/services/audiopolicy/fuzzer/Android.bp
index faf15d6..9f6b703 100644
--- a/services/audiopolicy/fuzzer/Android.bp
+++ b/services/audiopolicy/fuzzer/Android.bp
@@ -62,4 +62,7 @@
"libaudiopolicymanager_interface_headers",
],
data: [":audiopolicyfuzzer_configuration_files"],
+ fuzz_config: {
+ cc: ["mnaganov@google.com"],
+ },
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index fd42229..e45de32 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -664,12 +664,8 @@
ALOGV("%s device rxDevice %s txDevice %s", __func__,
rxDevices.itemAt(0)->toString().c_str(), txSourceDevice->toString().c_str());
- disconnectTelephonyRxAudioSource();
- // release TX patch if any
- if (mCallTxPatch != 0) {
- releaseAudioPatchInternal(mCallTxPatch->getHandle());
- mCallTxPatch.clear();
- }
+ disconnectTelephonyAudioSource(mCallRxSourceClient);
+ disconnectTelephonyAudioSource(mCallTxSourceClient);
auto telephonyRxModule =
mHwModules.getModuleForDeviceType(AUDIO_DEVICE_IN_TELEPHONY_RX, AUDIO_FORMAT_DEFAULT);
@@ -727,7 +723,7 @@
closeActiveClients(activeDesc);
}
}
- mCallTxPatch = createTelephonyPatch(false /*isRx*/, txSourceDevice, delayMs);
+ connectTelephonyTxAudioSource(txSourceDevice, txSinkDevice, delayMs);
}
if (waitMs != nullptr) {
*waitMs = muteWaitMs;
@@ -735,36 +731,6 @@
return NO_ERROR;
}
-sp<AudioPatch> AudioPolicyManager::createTelephonyPatch(
- bool isRx, const sp<DeviceDescriptor> &device, uint32_t delayMs) {
- PatchBuilder patchBuilder;
-
- if (device == nullptr) {
- return nullptr;
- }
-
- // @TODO: still ignoring the address, or not dealing platform with multiple telephony devices
- if (isRx) {
- patchBuilder.addSink(device).
- addSource(mAvailableInputDevices.getDevice(
- AUDIO_DEVICE_IN_TELEPHONY_RX, String8(), AUDIO_FORMAT_DEFAULT));
- } else {
- patchBuilder.addSource(device).
- addSink(mAvailableOutputDevices.getDevice(
- AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT));
- }
-
- audio_patch_handle_t patchHandle = AUDIO_PATCH_HANDLE_NONE;
- status_t status =
- createAudioPatchInternal(patchBuilder.patch(), &patchHandle, mUidCached, delayMs);
- ssize_t index = mAudioPatches.indexOfKey(patchHandle);
- if (status != NO_ERROR || index < 0) {
- ALOGW("%s() error %d creating %s audio patch", __func__, status, isRx ? "RX" : "TX");
- return nullptr;
- }
- return mAudioPatches.valueAt(index);
-}
-
bool AudioPolicyManager::isDeviceOfModule(
const sp<DeviceDescriptor>& devDesc, const char *moduleId) const {
sp<HwModule> module = mHwModules.getModuleFromName(moduleId);
@@ -779,20 +745,55 @@
void AudioPolicyManager::connectTelephonyRxAudioSource()
{
- disconnectTelephonyRxAudioSource();
+ disconnectTelephonyAudioSource(mCallRxSourceClient);
const struct audio_port_config source = {
.role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE,
.ext.device.type = AUDIO_DEVICE_IN_TELEPHONY_RX, .ext.device.address = ""
};
const auto aa = mEngine->getAttributesForStreamType(AUDIO_STREAM_VOICE_CALL);
- status_t status = startAudioSource(&source, &aa, &mCallRxSourceClientPort, 0/*uid*/);
- ALOGE_IF(status != NO_ERROR, "%s failed to start Telephony Rx AudioSource", __func__);
+ mCallRxSourceClient = startAudioSourceInternal(&source, &aa, 0/*uid*/);
+ ALOGE_IF(mCallRxSourceClient == nullptr,
+ "%s failed to start Telephony Rx AudioSource", __func__);
}
-void AudioPolicyManager::disconnectTelephonyRxAudioSource()
+void AudioPolicyManager::disconnectTelephonyAudioSource(sp<SourceClientDescriptor> &clientDesc)
{
- stopAudioSource(mCallRxSourceClientPort);
- mCallRxSourceClientPort = AUDIO_PORT_HANDLE_NONE;
+ if (clientDesc == nullptr) {
+ return;
+ }
+ ALOGW_IF(stopAudioSource(clientDesc->portId()) != NO_ERROR,
+ "%s error stopping audio source", __func__);
+ clientDesc.clear();
+}
+
+void AudioPolicyManager::connectTelephonyTxAudioSource(
+ const sp<DeviceDescriptor> &srcDevice, const sp<DeviceDescriptor> &sinkDevice,
+ uint32_t delayMs)
+{
+ disconnectTelephonyAudioSource(mCallTxSourceClient);
+ if (srcDevice == nullptr || sinkDevice == nullptr) {
+ ALOGW("%s could not create patch, invalid sink and/or source device(s)", __func__);
+ return;
+ }
+ PatchBuilder patchBuilder;
+ patchBuilder.addSource(srcDevice).addSink(sinkDevice);
+ ALOGV("%s between source %s and sink %s", __func__,
+ srcDevice->toString().c_str(), sinkDevice->toString().c_str());
+ auto callTxSourceClientPortId = PolicyAudioPort::getNextUniqueId();
+ const audio_attributes_t aa = { .source = AUDIO_SOURCE_VOICE_COMMUNICATION };
+ struct audio_port_config source = {};
+ srcDevice->toAudioPortConfig(&source);
+ mCallTxSourceClient = new InternalSourceClientDescriptor(
+ callTxSourceClientPortId, mUidCached, aa, source, srcDevice, sinkDevice,
+ mCommunnicationStrategy, toVolumeSource(aa));
+ audio_patch_handle_t patchHandle = AUDIO_PATCH_HANDLE_NONE;
+ status_t status = connectAudioSourceToSink(
+ mCallTxSourceClient, sinkDevice, patchBuilder.patch(), patchHandle, mUidCached,
+ delayMs);
+ ALOGE_IF(status != NO_ERROR, "%s() error %d creating TX audio patch", __func__, status);
+ if (status == NO_ERROR) {
+ mAudioSources.add(callTxSourceClientPortId, mCallTxSourceClient);
+ }
}
void AudioPolicyManager::setPhoneState(audio_mode_t state)
@@ -860,11 +861,8 @@
rxDevices = mPrimaryOutput->devices();
}
if (oldState == AUDIO_MODE_IN_CALL) {
- disconnectTelephonyRxAudioSource();
- if (mCallTxPatch != 0) {
- releaseAudioPatchInternal(mCallTxPatch->getHandle());
- mCallTxPatch.clear();
- }
+ disconnectTelephonyAudioSource(mCallRxSourceClient);
+ disconnectTelephonyAudioSource(mCallTxSourceClient);
}
setOutputDevices(mPrimaryOutput, rxDevices, force, 0);
}
@@ -874,8 +872,10 @@
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
DeviceVector newDevices = getNewOutputDevices(desc, true /*fromCache*/);
- if (state != AUDIO_MODE_IN_CALL || desc != mPrimaryOutput) {
- setOutputDevices(desc, newDevices, !newDevices.isEmpty(), 0 /*delayMs*/);
+ if (state != AUDIO_MODE_IN_CALL || (desc != mPrimaryOutput && !isTelephonyRxOrTx(desc))) {
+ bool forceRouting = !newDevices.isEmpty();
+ setOutputDevices(desc, newDevices, forceRouting, 0 /*delayMs*/, nullptr,
+ true /*requiresMuteCheck*/, !forceRouting /*requiresVolumeCheck*/);
}
}
@@ -935,6 +935,32 @@
ALOGV("setSystemProperty() property %s, value %s", property, value);
}
+// Find an MSD output profile compatible with the parameters passed.
+// When "directOnly" is set, restrict search to profiles for direct outputs.
+sp<IOProfile> AudioPolicyManager::getMsdProfileForOutput(
+ const DeviceVector& devices,
+ uint32_t samplingRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ audio_output_flags_t flags,
+ bool directOnly)
+{
+ flags = getRelevantFlags(flags, directOnly);
+
+ sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+ if (msdModule != nullptr) {
+ // for the msd module check if there are patches to the output devices
+ if (msdHasPatchesToAllDevices(devices.toTypeAddrVector())) {
+ HwModuleCollection modules;
+ modules.add(msdModule);
+ return searchCompatibleProfileHwModules(
+ modules, getMsdAudioOutDevices(), samplingRate, format, channelMask,
+ flags, directOnly);
+ }
+ }
+ return nullptr;
+}
+
// Find an output profile compatible with the parameters passed. When "directOnly" is set, restrict
// search to profiles for direct outputs.
sp<IOProfile> AudioPolicyManager::getProfileForOutput(
@@ -945,45 +971,65 @@
audio_output_flags_t flags,
bool directOnly)
{
+ flags = getRelevantFlags(flags, directOnly);
+
+ return searchCompatibleProfileHwModules(
+ mHwModules, devices, samplingRate, format, channelMask, flags, directOnly);
+}
+
+audio_output_flags_t AudioPolicyManager::getRelevantFlags(
+ audio_output_flags_t flags, bool directOnly) {
if (directOnly) {
- // only retain flags that will drive the direct output profile selection
- // if explicitly requested
- static const uint32_t kRelevantFlags =
+ // only retain flags that will drive the direct output profile selection
+ // if explicitly requested
+ static const uint32_t kRelevantFlags =
(AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
- AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ);
- flags =
- (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+ AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ);
+ flags = (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
}
+ return flags;
+}
+
+sp<IOProfile> AudioPolicyManager::searchCompatibleProfileHwModules(
+ const HwModuleCollection& hwModules,
+ const DeviceVector& devices,
+ uint32_t samplingRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ audio_output_flags_t flags,
+ bool directOnly) {
sp<IOProfile> profile;
-
- for (const auto& hwModule : mHwModules) {
+ for (const auto& hwModule : hwModules) {
for (const auto& curProfile : hwModule->getOutputProfiles()) {
- if (!curProfile->isCompatibleProfile(devices,
- samplingRate, NULL /*updatedSamplingRate*/,
- format, NULL /*updatedFormat*/,
- channelMask, NULL /*updatedChannelMask*/,
- flags)) {
+ if (!curProfile->isCompatibleProfile(devices,
+ samplingRate, NULL /*updatedSamplingRate*/,
+ format, NULL /*updatedFormat*/,
+ channelMask, NULL /*updatedChannelMask*/,
+ flags)) {
+ continue;
+ }
+ // reject profiles not corresponding to a device currently available
+ if (!mAvailableOutputDevices.containsAtLeastOne(curProfile->getSupportedDevices())) {
+ continue;
+ }
+ // reject profiles if connected device does not support codec
+ if (!curProfile->devicesSupportEncodedFormats(devices.types())) {
+ continue;
+ }
+ if (!directOnly) {
+ return curProfile;
+ }
+
+ // when searching for direct outputs, if several profiles are compatible, give priority
+ // to one with offload capability
+ if (profile != 0 &&
+ ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
continue;
- }
- // reject profiles not corresponding to a device currently available
- if (!mAvailableOutputDevices.containsAtLeastOne(curProfile->getSupportedDevices())) {
- continue;
- }
- // reject profiles if connected device does not support codec
- if (!curProfile->devicesSupportEncodedFormats(devices.types())) {
- continue;
- }
- if (!directOnly) return curProfile;
- // when searching for direct outputs, if several profiles are compatible, give priority
- // to one with offload capability
- if (profile != 0 && ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
- continue;
- }
- profile = curProfile;
- if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
- break;
- }
+ }
+ profile = curProfile;
+ if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ break;
+ }
}
}
return profile;
@@ -1510,6 +1556,27 @@
return msdPatches;
}
+bool AudioPolicyManager::isMsdPatch(const audio_patch_handle_t &handle) const {
+ ssize_t index = mAudioPatches.indexOfKey(handle);
+ if (index < 0) {
+ return false;
+ }
+ const sp<AudioPatch> patch = mAudioPatches.valueAt(index);
+ sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD);
+ if (msdModule == nullptr) {
+ return false;
+ }
+ const struct audio_port_config *sink = &patch->mPatch.sinks[0];
+ if (getMsdAudioOutDevices().contains(mAvailableOutputDevices.getDeviceFromId(sink->id))) {
+ return true;
+ }
+ index = getMsdOutputPatches().indexOfKey(handle);
+ if (index < 0) {
+ return false;
+ }
+ return true;
+}
+
status_t AudioPolicyManager::getMsdProfiles(bool hwAvSync,
const InputProfileCollection &inputProfiles,
const OutputProfileCollection &outputProfiles,
@@ -1939,8 +2006,7 @@
// force device change if the output is inactive and no audio patch is already present.
// check active before incrementing usage count
- bool force = !outputDesc->isActive() &&
- (outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE);
+ bool force = !outputDesc->isActive() && !outputDesc->isRouted();
DeviceVector devices;
sp<AudioPolicyMix> policyMix = outputDesc->mPolicyMix.promote();
@@ -3511,11 +3577,15 @@
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
DeviceVector newDevices = getNewOutputDevices(outputDesc, true /*fromCache*/);
- if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) {
+ if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) ||
+ (outputDesc != mPrimaryOutput && !isTelephonyRxOrTx(outputDesc))) {
// As done in setDeviceConnectionState, we could also fix default device issue by
// preventing the force re-routing in case of default dev that distinguishes on address.
// Let's give back to engine full device choice decision however.
- waitMs = setOutputDevices(outputDesc, newDevices, !newDevices.isEmpty(), delayMs);
+ bool forceRouting = !newDevices.isEmpty();
+ waitMs = setOutputDevices(outputDesc, newDevices, forceRouting, delayMs, nullptr,
+ true /*requiresMuteCheck*/,
+ !forceRouting /*requiresVolumeCheck*/);
// Only apply special touch sound delay once
delayMs = 0;
}
@@ -3796,7 +3866,22 @@
__FUNCTION__, profile != 0 ? "" : "NOT ",
(profile != 0 ? profile->getTagName().c_str() : "null"),
config.sample_rate, config.format, config.channel_mask, output_flags);
- return (profile != 0);
+
+ // also try the MSD module if compatible profile not found
+ if (profile == nullptr) {
+ profile = getMsdProfileForOutput(outputDevices,
+ config.sample_rate,
+ config.format,
+ config.channel_mask,
+ output_flags,
+ true /* directOnly */);
+ ALOGV("%s() MSD profile %sfound with name: %s, "
+ "sample rate: %u, format: 0x%x, channel_mask: 0x%x, output flags: 0x%x",
+ __FUNCTION__, profile != 0 ? "" : "NOT ",
+ (profile != 0 ? profile->getTagName().c_str() : "null"),
+ config.sample_rate, config.format, config.channel_mask, output_flags);
+ }
+ return (profile != nullptr);
}
bool AudioPolicyManager::isOffloadPossible(const audio_offload_info_t &offloadInfo,
@@ -3877,8 +3962,16 @@
}
flags = (audio_output_flags_t)((flags & relevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
- DeviceVector outputDevices = mEngine->getOutputDevicesForAttributes(*attr);
+ DeviceVector engineOutputDevices = mEngine->getOutputDevicesForAttributes(*attr);
for (const auto& hwModule : mHwModules) {
+ DeviceVector outputDevices = engineOutputDevices;
+ // the MSD module checks for different conditions and output devices
+ if (strcmp(hwModule->getName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0) {
+ if (!msdHasPatchesToAllDevices(engineOutputDevices.toTypeAddrVector())) {
+ continue;
+ }
+ outputDevices = getMsdAudioOutDevices();
+ }
for (const auto& curProfile : hwModule->getOutputProfiles()) {
if (!curProfile->isCompatibleProfile(outputDevices,
config->sample_rate, nullptr /*updatedSamplingRate*/,
@@ -3905,11 +3998,10 @@
~AUDIO_DIRECT_OFFLOAD_SUPPORTED) |
AUDIO_DIRECT_OFFLOAD_GAPLESS_SUPPORTED);
} else {
- directMode = (audio_direct_mode_t)(directMode |AUDIO_DIRECT_OFFLOAD_SUPPORTED);
+ directMode = (audio_direct_mode_t)(directMode | AUDIO_DIRECT_OFFLOAD_SUPPORTED);
}
} else {
- directMode = (audio_direct_mode_t) (directMode |
- AUDIO_DIRECT_BITSTREAM_SUPPORTED);
+ directMode = (audio_direct_mode_t) (directMode | AUDIO_DIRECT_BITSTREAM_SUPPORTED);
}
}
}
@@ -3919,7 +4011,7 @@
status_t AudioPolicyManager::getDirectProfilesForAttributes(const audio_attributes_t* attr,
AudioProfileVector& audioProfilesVector) {
AudioDeviceTypeAddrVector devices;
- status_t status = getDevicesForAttributes(*attr, &devices);
+ status_t status = getDevicesForAttributes(*attr, &devices, false /* forVolume */);
if (status != OK) {
return status;
}
@@ -4064,17 +4156,15 @@
return BAD_VALUE;
}
-status_t AudioPolicyManager::createAudioPatchInternal(const struct audio_patch *patch,
- audio_patch_handle_t *handle,
- uid_t uid, uint32_t delayMs,
- const sp<SourceClientDescriptor>& sourceDesc)
+status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
+ audio_patch_handle_t *handle,
+ uid_t uid)
{
ALOGV("%s", __func__);
if (handle == NULL || patch == NULL) {
return BAD_VALUE;
}
ALOGV("%s num sources %d num sinks %d", __func__, patch->num_sources, patch->num_sinks);
-
if (!audio_patch_is_valid(patch)) {
return BAD_VALUE;
}
@@ -4082,7 +4172,6 @@
if (patch->num_sources > 1) {
return INVALID_OPERATION;
}
-
if (patch->sources[0].role != AUDIO_PORT_ROLE_SOURCE) {
return INVALID_OPERATION;
}
@@ -4092,6 +4181,86 @@
}
}
+ sp<DeviceDescriptor> srcDevice = mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
+ sp<DeviceDescriptor> sinkDevice = mAvailableOutputDevices.getDeviceFromId(patch->sinks[0].id);
+ if (srcDevice == nullptr || sinkDevice == nullptr) {
+ ALOGW("%s could not create patch, invalid sink and/or source device(s)", __func__);
+ return BAD_VALUE;
+ }
+ ALOGV("%s between source %s and sink %s", __func__,
+ srcDevice->toString().c_str(), sinkDevice->toString().c_str());
+ audio_port_handle_t portId = PolicyAudioPort::getNextUniqueId();
+    // Default attributes, default volume priority, so as not to interfere with non-raw
+    // audio patches.
+ audio_attributes_t attributes = attributes_initializer(AUDIO_USAGE_MEDIA);
+ const struct audio_port_config *source = &patch->sources[0];
+ sp<SourceClientDescriptor> sourceDesc =
+ new InternalSourceClientDescriptor(
+ portId, uid, attributes, *source, srcDevice, sinkDevice,
+ mEngine->getProductStrategyForAttributes(attributes), toVolumeSource(attributes));
+
+ status_t status =
+ connectAudioSourceToSink(sourceDesc, sinkDevice, patch, *handle, uid, 0 /* delayMs */);
+
+ if (status != NO_ERROR) {
+ return INVALID_OPERATION;
+ }
+ mAudioSources.add(portId, sourceDesc);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::connectAudioSourceToSink(
+ const sp<SourceClientDescriptor>& sourceDesc, const sp<DeviceDescriptor> &sinkDevice,
+ const struct audio_patch *patch,
+ audio_patch_handle_t &handle,
+ uid_t uid, uint32_t delayMs)
+{
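+    // Setup proceeds as: create the audio patch, start the SwOutput, attach the source client,
+    // then start the source; the Failure* labels below unwind those steps in reverse order
+    // on error.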
+ status_t status = createAudioPatchInternal(patch, &handle, uid, delayMs, sourceDesc);
+ if (status != NO_ERROR || mAudioPatches.indexOfKey(handle) < 0) {
+ ALOGW("%s patch panel could not connect device patch, error %d", __func__, status);
+ return INVALID_OPERATION;
+ }
+ sourceDesc->connect(handle, sinkDevice);
+ if (isMsdPatch(handle)) {
+ return NO_ERROR;
+ }
+ // SW Bridge? (@todo: HW bridge, keep track of HwOutput for device selection "reconsideration")
+ sp<SwAudioOutputDescriptor> swOutput = sourceDesc->swOutput().promote();
+ ALOG_ASSERT(swOutput != nullptr, "%s: a swOutput shall always be associated", __func__);
+ if (swOutput->getClient(sourceDesc->portId()) != nullptr) {
+ ALOGW("%s source portId has already been attached to outputDesc", __func__);
+ goto FailurePatchAdded;
+ }
+ status = swOutput->start();
+ if (status != NO_ERROR) {
+ goto FailureSourceAdded;
+ }
+ swOutput->addClient(sourceDesc);
+ status = startSource(swOutput, sourceDesc, &delayMs);
+ if (status != NO_ERROR) {
+ ALOGW("%s failed to start source, error %d", __FUNCTION__, status);
+ goto FailureSourceActive;
+ }
+ if (delayMs != 0) {
+ usleep(delayMs * 1000);
+ }
+ return NO_ERROR;
+
+FailureSourceActive:
+ swOutput->stop();
+ releaseOutput(sourceDesc->portId());
+FailureSourceAdded:
+ sourceDesc->setSwOutput(nullptr);
+FailurePatchAdded:
+ releaseAudioPatchInternal(handle);
+ return INVALID_OPERATION;
+}
+
+status_t AudioPolicyManager::createAudioPatchInternal(const struct audio_patch *patch,
+ audio_patch_handle_t *handle,
+ uid_t uid, uint32_t delayMs,
+ const sp<SourceClientDescriptor>& sourceDesc)
+{
+ ALOGV("%s num sources %d num sinks %d", __func__, patch->num_sources, patch->num_sinks);
sp<AudioPatch> patchDesc;
ssize_t index = mAudioPatches.indexOfKey(*handle);
@@ -4280,7 +4449,7 @@
// in config XML to reach the sink so that is can be declared as available.
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
sp<SwAudioOutputDescriptor> outputDesc = nullptr;
- if (sourceDesc != nullptr) {
+ if (!sourceDesc->isInternal()) {
// take care of dynamic routing for SwOutput selection,
audio_attributes_t attributes = sourceDesc->attributes();
audio_stream_type_t stream = sourceDesc->stream();
@@ -4308,44 +4477,49 @@
return INVALID_OPERATION;
}
sourceDesc->setSwOutput(outputDesc);
+ } else {
+        // Same for "raw patches", i.e. those created from the createAudioPatch API
+ SortedVector<audio_io_handle_t> outputs =
+ getOutputsForDevices(DeviceVector(sinkDevice), mOutputs);
+ // if the sink device is reachable via an opened output stream, request to
+ // go via this output stream by adding a second source to the patch
+ // description
+ output = selectOutput(outputs);
+ if (output == AUDIO_IO_HANDLE_NONE) {
+ ALOGE("%s no output available for internal patch sink", __func__);
+ return INVALID_OPERATION;
+ }
+ outputDesc = mOutputs.valueFor(output);
+ if (outputDesc->isDuplicated()) {
+ ALOGV("%s output for device %s is duplicated",
+ __func__, sinkDevice->toString().c_str());
+ return INVALID_OPERATION;
+ }
+ sourceDesc->setSwOutput(outputDesc);
}
// create a software bridge in PatchPanel if:
// - source and sink devices are on different HW modules OR
// - audio HAL version is < 3.0
// - audio HAL version is >= 3.0 but no route has been declared between devices
- // - called from startAudioSource (aka sourceDesc != nullptr) and source device does
- // not have a gain controller
+ // - called from startAudioSource (aka sourceDesc is not internal) and source device
+ // does not have a gain controller
if (!srcDevice->hasSameHwModuleAs(sinkDevice) ||
(srcDevice->getModuleVersionMajor() < 3) ||
!srcDevice->getModule()->supportsPatch(srcDevice, sinkDevice) ||
- (sourceDesc != nullptr &&
+ (!sourceDesc->isInternal() &&
srcDevice->getAudioPort()->getGains().size() == 0)) {
// support only one sink device for now to simplify output selection logic
if (patch->num_sinks > 1) {
return INVALID_OPERATION;
}
- if (sourceDesc == nullptr) {
- SortedVector<audio_io_handle_t> outputs =
- getOutputsForDevices(DeviceVector(sinkDevice), mOutputs);
- // if the sink device is reachable via an opened output stream, request to
- // go via this output stream by adding a second source to the patch
- // description
- output = selectOutput(outputs);
- if (output != AUDIO_IO_HANDLE_NONE) {
- outputDesc = mOutputs.valueFor(output);
- if (outputDesc->isDuplicated()) {
- ALOGV("%s output for device %s is duplicated",
- __FUNCTION__, sinkDevice->toString().c_str());
- return INVALID_OPERATION;
- }
- }
- }
+ sourceDesc->setUseSwBridge();
if (outputDesc != nullptr) {
audio_port_config srcMixPortConfig = {};
outputDesc->toAudioPortConfig(&srcMixPortConfig, nullptr);
// for volume control, we may need a valid stream
- srcMixPortConfig.ext.mix.usecase.stream = sourceDesc != nullptr ?
- sourceDesc->stream() : AUDIO_STREAM_PATCH;
+ srcMixPortConfig.ext.mix.usecase.stream = !sourceDesc->isInternal() ?
+ mEngine->getStreamTypeForAttributes(sourceDesc->attributes()) :
+ AUDIO_STREAM_PATCH;
patchBuilder.addSource(srcMixPortConfig);
}
}
@@ -4368,11 +4542,9 @@
return NO_ERROR;
}
-status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle,
- uid_t uid)
+status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle, uid_t uid)
{
- ALOGV("releaseAudioPatch() patch %d", handle);
-
+ ALOGV("%s patch %d", __func__, handle);
ssize_t index = mAudioPatches.indexOfKey(handle);
if (index < 0) {
@@ -4384,11 +4556,21 @@
if (patchDesc->getUid() != mUidCached && uid != patchDesc->getUid()) {
return INVALID_OPERATION;
}
- return releaseAudioPatchInternal(handle);
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+ for (size_t i = 0; i < mAudioSources.size(); i++) {
+ sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
+ if (sourceDesc != nullptr && sourceDesc->getPatchHandle() == handle) {
+ portId = sourceDesc->portId();
+ break;
+ }
+ }
+ return portId != AUDIO_PORT_HANDLE_NONE ?
+ stopAudioSource(portId) : releaseAudioPatchInternal(handle);
}
status_t AudioPolicyManager::releaseAudioPatchInternal(audio_patch_handle_t handle,
- uint32_t delayMs)
+ uint32_t delayMs,
+ const sp<SourceClientDescriptor>& sourceDesc)
{
ALOGV("%s patch %d", __func__, handle);
if (mAudioPatches.indexOfKey(handle) < 0) {
@@ -4429,26 +4611,29 @@
removeAudioPatch(patchDesc->getHandle());
nextAudioPortGeneration();
mpClientInterface->onAudioPatchListUpdate();
- // SW Bridge
+ // SW or HW Bridge
+ sp<SwAudioOutputDescriptor> outputDesc = nullptr;
+ audio_patch_handle_t patchHandle = AUDIO_PATCH_HANDLE_NONE;
if (patch->num_sources > 1 && patch->sources[1].type == AUDIO_PORT_TYPE_MIX) {
- sp<SwAudioOutputDescriptor> outputDesc =
- mOutputs.getOutputFromId(patch->sources[1].id);
- if (outputDesc == NULL) {
- ALOGW("%s output not found for id %d", __func__, patch->sources[0].id);
- // releaseOutput has already called closeOuput in case of direct output
- return NO_ERROR;
- }
- if (patchDesc->getHandle() != outputDesc->getPatchHandle()) {
- // force SwOutput patch removal as AF counter part patch has already gone.
- ALOGV("%s reset patch handle on Output as different from SWBridge", __func__);
- removeAudioPatch(outputDesc->getPatchHandle());
- }
- outputDesc->setPatchHandle(AUDIO_PATCH_HANDLE_NONE);
+ outputDesc = mOutputs.getOutputFromId(patch->sources[1].id);
+ } else if (patch->num_sources == 1 && sourceDesc != nullptr) {
+ outputDesc = sourceDesc->swOutput().promote();
+ }
+ if (outputDesc == nullptr) {
+ ALOGW("%s no output for id %d", __func__, patch->sources[0].id);
+ // releaseOutput has already called closeOutput in case of direct output
+ return NO_ERROR;
+ }
+ if (!outputDesc->isActive() && !sourceDesc->useSwBridge()) {
+ resetOutputDevice(outputDesc);
+ } else {
+ // Reuse patch handle if still valid / do not force rerouting if still routed
+ patchHandle = outputDesc->getPatchHandle();
setOutputDevices(outputDesc,
getNewOutputDevices(outputDesc, true /*fromCache*/),
- true, /*force*/
+ patchHandle == AUDIO_PATCH_HANDLE_NONE, /*force*/
0,
- NULL);
+ patchHandle == AUDIO_PATCH_HANDLE_NONE ? nullptr : &patchHandle);
}
} else {
return BAD_VALUE;
@@ -4694,6 +4879,18 @@
return status;
}
+sp<SourceClientDescriptor> AudioPolicyManager::startAudioSourceInternal(
+ const struct audio_port_config *source, const audio_attributes_t *attributes, uid_t uid)
+{
+ ALOGV("%s", __FUNCTION__);
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+
+ status_t status = startAudioSource(source, attributes, &portId, uid);
+ ALOGE_IF(status != OK, "%s: failed to start audio source (%d)", __func__, status);
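+ // Note: valueFor() returns nullptr if startAudioSource() failed and no source was registered.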
+ return mAudioSources.valueFor(portId);
+}
+
status_t AudioPolicyManager::connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc)
{
ALOGV("%s handle %d", __FUNCTION__, sourceDesc->portId());
@@ -4718,52 +4915,9 @@
PatchBuilder patchBuilder;
patchBuilder.addSink(sinkDevice).addSource(srcDevice);
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
- status_t status =
- createAudioPatchInternal(patchBuilder.patch(), &handle, mUidCached, 0, sourceDesc);
- if (status != NO_ERROR || mAudioPatches.indexOfKey(handle) < 0) {
- ALOGW("%s patch panel could not connect device patch, error %d", __func__, status);
- return INVALID_OPERATION;
- }
- sourceDesc->connect(handle, sinkDevice);
- // SW Bridge? (@todo: HW bridge, keep track of HwOutput for device selection "reconsideration")
- sp<SwAudioOutputDescriptor> swOutput = sourceDesc->swOutput().promote();
- if (swOutput != 0) {
- status = swOutput->start();
- if (status != NO_ERROR) {
- goto FailureSourceAdded;
- }
- if (swOutput->getClient(sourceDesc->portId()) != nullptr) {
- ALOGW("%s source portId has already been attached to outputDesc", __func__);
- goto FailureReleasePatch;
- }
- swOutput->addClient(sourceDesc);
- uint32_t delayMs = 0;
- status = startSource(swOutput, sourceDesc, &delayMs);
- if (status != NO_ERROR) {
- ALOGW("%s failed to start source, error %d", __FUNCTION__, status);
- goto FailureSourceActive;
- }
- if (delayMs != 0) {
- usleep(delayMs * 1000);
- }
- } else {
- sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->hwOutput().promote();
- if (hwOutputDesc != 0) {
- // create Hwoutput and add to mHwOutputs
- } else {
- ALOGW("%s source has neither SW nor HW output", __FUNCTION__);
- }
- }
- return NO_ERROR;
-FailureSourceActive:
- swOutput->stop();
- releaseOutput(sourceDesc->portId());
-FailureSourceAdded:
- sourceDesc->setSwOutput(nullptr);
-FailureReleasePatch:
- releaseAudioPatchInternal(handle);
- return INVALID_OPERATION;
+ return connectAudioSourceToSink(
+ sourceDesc, sinkDevice, patchBuilder.patch(), handle, mUidCached, 0 /*delayMs*/);
}
status_t AudioPolicyManager::stopAudioSource(audio_port_handle_t portId)
@@ -5089,7 +5243,7 @@
ALOGW("%s source has neither SW nor HW output", __FUNCTION__);
}
}
- status_t status = releaseAudioPatchInternal(sourceDesc->getPatchHandle());
+ status_t status = releaseAudioPatchInternal(sourceDesc->getPatchHandle(), 0, sourceDesc);
sourceDesc->disconnect();
return status;
}
@@ -6061,7 +6215,7 @@
sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
if (sourceDesc != nullptr && followsSameRouting(attr, sourceDesc->attributes())
&& sourceDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE
- && !isCallRxAudioSource(sourceDesc)) {
+ && !isCallRxAudioSource(sourceDesc) && !sourceDesc->isInternal()) {
connectAudioSource(sourceDesc);
}
}
@@ -6174,7 +6328,7 @@
newDevices.types());
}
sp<SourceClientDescriptor> source = getSourceForAttributesOnOutput(srcOut, attr);
- if (source != nullptr && !isCallRxAudioSource(source)) {
+ if (source != nullptr && !isCallRxAudioSource(source) && !source->isInternal()) {
connectAudioSource(source);
}
}
@@ -6188,6 +6342,12 @@
for (auto stream : mEngine->getStreamTypesForProductStrategy(psId)) {
mpClientInterface->invalidateStream(stream);
}
+ for (audio_io_handle_t srcOut : srcOutputs) {
+ sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
+ if (desc == nullptr) continue;
+
+ desc->setTracksInvalidatedStatusByStrategy(psId);
+ }
}
}
}
@@ -6430,66 +6590,71 @@
return (stream1 == stream2);
}
-DeviceTypeSet AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
- // By checking the range of stream before calling getStrategy, we avoid
- // getOutputDevicesForStream's behavior for invalid streams.
- // engine's getOutputDevicesForStream would fallback on its default behavior (most probably
- // device for music stream), but we want to return the empty set.
- if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
- return DeviceTypeSet{};
- }
- DeviceVector activeDevices;
- DeviceVector devices;
- for (int i = AUDIO_STREAM_MIN; i < AUDIO_STREAM_PUBLIC_CNT; ++i) {
- const audio_stream_type_t curStream{static_cast<audio_stream_type_t>(i)};
- if (!streamsMatchForvolume(stream, curStream)) {
- continue;
- }
- DeviceVector curDevices = mEngine->getOutputDevicesForStream(curStream, false/*fromCache*/);
- devices.merge(curDevices);
- for (audio_io_handle_t output : getOutputsForDevices(curDevices, mOutputs)) {
- sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
- if (outputDesc->isActive(toVolumeSource(curStream, false))) {
- activeDevices.merge(outputDesc->devices());
- }
- }
- }
-
- // Favor devices selected on active streams if any to report correct device in case of
- // explicit device selection
- if (!activeDevices.isEmpty()) {
- devices = activeDevices;
- }
- /*Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
- and doesn't really need to.*/
- DeviceVector speakerSafeDevices = devices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER_SAFE);
- if (!speakerSafeDevices.isEmpty()) {
- devices.merge(mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER));
- devices.remove(speakerSafeDevices);
- }
- return devices.types();
-}
-
// TODO - consider MSD routes b/214971780
status_t AudioPolicyManager::getDevicesForAttributes(
- const audio_attributes_t &attr, AudioDeviceTypeAddrVector *devices) {
+ const audio_attributes_t &attr, AudioDeviceTypeAddrVector *devices, bool forVolume) {
if (devices == nullptr) {
return BAD_VALUE;
}
+
+ // Devices are determined in the following precedence:
+ //
+ // 1) Devices associated with a dynamic policy matching the attributes. This is often
+ // a remote submix from MIX_ROUTE_FLAG_LOOP_BACK.
+ //
+ // If no such dynamic policy then
+ // 2) Devices containing an active client using setPreferredDevice
+ // with same strategy as the attributes.
+ // (from the default Engine::getOutputDevicesForAttributes() implementation).
+ //
+ // If no corresponding active client with setPreferredDevice then
+ // 3) Devices associated with the strategy determined by the attributes
+ // (from the default Engine::getOutputDevicesForAttributes() implementation).
+ //
+ // See related getOutputForAttrInt().
+
// check dynamic policies but only for primary descriptors (secondary not used for audible
// audio routing, only used for duplication for playback capture)
sp<AudioPolicyMix> policyMix;
status_t status = mPolicyMixes.getOutputForAttr(attr, 0 /*uid unknown here*/,
- AUDIO_OUTPUT_FLAG_NONE, policyMix, nullptr);
+ AUDIO_OUTPUT_FLAG_NONE, policyMix, nullptr /* secondaryMixes */);
if (status != OK) {
return status;
}
- if (policyMix != nullptr && policyMix->getOutput() != nullptr) {
- AudioDeviceTypeAddr device(policyMix->mDeviceType, policyMix->mDeviceAddress.c_str());
- devices->push_back(device);
- return NO_ERROR;
+
+ DeviceVector curDevices;
+ if (policyMix != nullptr && policyMix->getOutput() != nullptr &&
+ // For volume control, skip LOOPBACK mixes which use AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+ // as they are unaffected by device/stream volume
+ // (per SwAudioOutputDescriptor::isFixedVolume()).
+ (!forVolume || policyMix->mDeviceType != AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
+ ) {
+ sp<DeviceDescriptor> deviceDesc = mAvailableOutputDevices.getDevice(
+ policyMix->mDeviceType, policyMix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
+ curDevices.add(deviceDesc);
+ } else {
+ // The default Engine::getOutputDevicesForAttributes() uses findPreferredDevice()
+ // which selects setPreferredDevice if active. This means forVolume call
+ // will take an active setPreferredDevice, if such exists.
+
+ curDevices = mEngine->getOutputDevicesForAttributes(
+ attr, nullptr /* preferredDevice */, false /* fromCache */);
}
- DeviceVector curDevices = mEngine->getOutputDevicesForAttributes(attr, nullptr, false);
+
+ if (forVolume) {
+ // We alias the device AUDIO_DEVICE_OUT_SPEAKER_SAFE to AUDIO_DEVICE_OUT_SPEAKER
+ // for single volume control in AudioService (such relationship should exist if
+ // SPEAKER_SAFE is present).
+ //
+ // (This is unrelated to a different device grouping as Volume::getDeviceCategory)
+ DeviceVector speakerSafeDevices =
+ curDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER_SAFE);
+ if (!speakerSafeDevices.isEmpty()) {
+ curDevices.merge(
+ mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER));
+ curDevices.remove(speakerSafeDevices);
+ }
+ }
for (const auto& device : curDevices) {
devices->push_back(device->getDeviceTypeAddr());
}
@@ -6703,6 +6868,8 @@
muteWaitMs = 0;
}
+ bool outputRouted = outputDesc->isRouted();
+
// no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
// output profile or if new device is not supported AND previous device(s) is(are) still
// available (otherwise reset device must be done on the output)
@@ -6719,8 +6886,7 @@
// AND force is not specified
// AND the output is connected by a valid audio patch.
// Doing this check here allows the caller to call setOutputDevices() without conditions
- if ((filteredDevices.isEmpty() || filteredDevices == prevDevices) &&
- !force && outputDesc->getPatchHandle() != AUDIO_PATCH_HANDLE_NONE) {
+ if ((filteredDevices.isEmpty() || filteredDevices == prevDevices) && !force && outputRouted) {
ALOGV("%s setting same device %s or null device, force=%d, patch handle=%d", __func__,
filteredDevices.toString().c_str(), force, outputDesc->getPatchHandle());
if (requiresVolumeCheck && !filteredDevices.isEmpty()) {
@@ -6760,6 +6926,9 @@
audio_patch_handle_t *patchHandle)
{
ssize_t index;
+ if (patchHandle == nullptr && !outputDesc->isRouted()) {
+ return INVALID_OPERATION;
+ }
if (patchHandle) {
index = mAudioPatches.indexOfKey(*patchHandle);
} else {
@@ -7504,7 +7673,10 @@
routedDevices.add(device);
}
for (const auto& client : activeClients) {
- // TODO: b/175343099 only travel the valid client
+ if (client->isInvalid()) {
+ // No need to take care about invalidated clients.
+ continue;
+ }
sp<DeviceDescriptor> preferredDevice =
mAvailableOutputDevices.getDeviceFromId(client->preferredDeviceId());
if (mEngine->getOutputDevicesForAttributes(
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index daa4faf..68ae8cb 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -190,12 +190,37 @@
return mEngine->getProductStrategyForAttributes(attributes);
}
- // return the enabled output devices for the given stream type
- virtual DeviceTypeSet getDevicesForStream(audio_stream_type_t stream);
-
+ /**
+ * Returns a vector of devices associated with attributes.
+ *
+ * An AudioTrack opened with specified attributes should play on the returned devices.
+ * If forVolume is set to true, the caller is AudioService, determining the proper
+ * device volume to adjust.
+ *
+ * Devices are determined in the following precedence:
+ * 1) Devices associated with a dynamic policy matching the attributes. This is often
+ * a remote submix from MIX_ROUTE_FLAG_LOOP_BACK. Secondary mixes from a
+ * dynamic policy are not included.
+ *
+ * If no such dynamic policy then
+ * 2) Devices containing an active client using setPreferredDevice
+ * with same strategy as the attributes.
+ * (from the default Engine::getOutputDevicesForAttributes() implementation).
+ *
+ * If no corresponding active client with setPreferredDevice then
+ * 3) Devices associated with the strategy determined by the attributes
+ * (from the default Engine::getOutputDevicesForAttributes() implementation).
+ *
+ * @param attributes to be considered
+ * @param devices an AudioDeviceTypeAddrVector container passed in that
+ * will be filled on success.
+ * @param forVolume true if the devices are to be associated with current device volume.
+ * @return NO_ERROR on success.
+ */
virtual status_t getDevicesForAttributes(
const audio_attributes_t &attributes,
- AudioDeviceTypeAddrVector *devices);
+ AudioDeviceTypeAddrVector *devices,
+ bool forVolume);
virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc = NULL);
virtual status_t registerEffect(const effect_descriptor_t *desc,
@@ -238,10 +263,7 @@
virtual status_t getAudioPort(struct audio_port_v7 *port);
virtual status_t createAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle,
- uid_t uid) {
- return createAudioPatchInternal(patch, handle, uid);
- }
-
+ uid_t uid);
virtual status_t releaseAudioPatch(audio_patch_handle_t handle,
uid_t uid);
virtual status_t listAudioPatches(unsigned int *num_patches,
@@ -613,13 +635,22 @@
void updateCallAndOutputRouting(bool forceVolumeReeval = true, uint32_t delayMs = 0);
bool isCallRxAudioSource(const sp<SourceClientDescriptor> &source) {
- return mCallRxSourceClientPort != AUDIO_PORT_HANDLE_NONE
- && source == mAudioSources.valueFor(mCallRxSourceClientPort);
+ return mCallRxSourceClient != nullptr && source == mCallRxSourceClient;
}
void connectTelephonyRxAudioSource();
- void disconnectTelephonyRxAudioSource();
+ void disconnectTelephonyAudioSource(sp<SourceClientDescriptor> &clientDesc);
+
+ void connectTelephonyTxAudioSource(const sp<DeviceDescriptor> &srcdevice,
+ const sp<DeviceDescriptor> &sinkDevice,
+ uint32_t delayMs);
+
+ bool isTelephonyRxOrTx(const sp<SwAudioOutputDescriptor>& desc) const {
+ return (mCallRxSourceClient != nullptr && mCallRxSourceClient->belongsToOutput(desc))
+ || (mCallTxSourceClient != nullptr
+ && mCallTxSourceClient->belongsToOutput(desc));
+ }
/**
* @brief updates routing for all inputs.
@@ -737,6 +768,15 @@
audio_channel_mask_t channelMask,
audio_output_flags_t flags,
bool directOnly);
+ /**
+ * Same as getProfileForOutput, but it looks for an MSD profile
+ */
+ sp<IOProfile> getMsdProfileForOutput(const DeviceVector &devices,
+ uint32_t samplingRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ audio_output_flags_t flags,
+ bool directOnly);
audio_io_handle_t selectOutputForMusicEffects();
@@ -826,6 +866,12 @@
status_t connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
status_t disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
+ status_t connectAudioSourceToSink(const sp<SourceClientDescriptor>& sourceDesc,
+ const sp<DeviceDescriptor> &sinkDevice,
+ const struct audio_patch *patch,
+ audio_patch_handle_t &handle,
+ uid_t uid, uint32_t delayMs);
+
sp<SourceClientDescriptor> getSourceForAttributesOnOutput(audio_io_handle_t output,
const audio_attributes_t &attr);
void clearAudioSourcesForOutput(audio_io_handle_t output);
@@ -876,8 +922,6 @@
SoundTriggerSessionCollection mSoundTriggerSessions;
- sp<AudioPatch> mCallTxPatch;
-
HwAudioOutputCollection mHwOutputs;
SourceClientCollection mAudioSources;
@@ -918,7 +962,8 @@
// The port handle of the hardware audio source created internally for the Call RX audio
// end point.
- audio_port_handle_t mCallRxSourceClientPort = AUDIO_PORT_HANDLE_NONE;
+ sp<SourceClientDescriptor> mCallRxSourceClient;
+ sp<SourceClientDescriptor> mCallTxSourceClient;
// Support for Multi-Stream Decoder (MSD) module
sp<DeviceDescriptor> getMsdAudioInDevice() const;
@@ -950,7 +995,13 @@
// Called by setDeviceConnectionState()
status_t deviceToAudioPort(audio_devices_t deviceType, const char* device_address,
const char* device_name, media::AudioPort* aidPort);
+ bool isMsdPatch(const audio_patch_handle_t &handle) const;
+
private:
+ sp<SourceClientDescriptor> startAudioSourceInternal(
+ const struct audio_port_config *source, const audio_attributes_t *attributes,
+ uid_t uid);
+
void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
// Add or remove AC3 DTS encodings based on user preferences.
@@ -1095,21 +1146,25 @@
* @param[out] handle patch handle to be provided if patch installed correctly
* @param[in] uid of the client
* @param[in] delayMs if required
- * @param[in] sourceDesc [optional] in case of external source, source client to be
- * configured by the patch, i.e. assigning an Output (HW or SW)
+ * @param[in] sourceDesc source client to be configured when creating the patch, i.e.
+ * assigning an Output (HW or SW) used for volume control.
* @return NO_ERROR if patch installed correctly, error code otherwise.
*/
status_t createAudioPatchInternal(const struct audio_patch *patch,
audio_patch_handle_t *handle,
- uid_t uid, uint32_t delayMs = 0,
- const sp<SourceClientDescriptor>& sourceDesc = nullptr);
+ uid_t uid, uint32_t delayMs,
+ const sp<SourceClientDescriptor>& sourceDesc);
/**
* @brief releaseAudioPatchInternal internal function to remove an audio patch
* @param[in] handle of the patch to be removed
* @param[in] delayMs if required
+ * @param[in] sourceDesc [optional] in case of external source, source client to be
+ * unrouted from the patch, i.e. assigning an Output (HW or SW)
* @return NO_ERROR if patch removed correctly, error code otherwise.
*/
- status_t releaseAudioPatchInternal(audio_patch_handle_t handle, uint32_t delayMs = 0);
+ status_t releaseAudioPatchInternal(audio_patch_handle_t handle,
+ uint32_t delayMs = 0,
+ const sp<SourceClientDescriptor>& sourceDesc = nullptr);
status_t installPatch(const char *caller,
audio_patch_handle_t *patchHandle,
@@ -1155,6 +1210,21 @@
// without duplicating them if already present
void addPortProfilesToVector(sp<IOProfile> outputProfile,
AudioProfileVector& audioProfilesVector);
+
+ // Searches for a compatible profile with the sample rate, audio format and channel mask
+ // in the list of passed HwModule(s).
+ // returns a compatible profile if found, nullptr otherwise
+ sp<IOProfile> searchCompatibleProfileHwModules(
+ const HwModuleCollection& hwModules,
+ const DeviceVector& devices,
+ uint32_t samplingRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ audio_output_flags_t flags,
+ bool directOnly);
+
+ // Filters only the relevant flags for getProfileForOutput
+ audio_output_flags_t getRelevantFlags(audio_output_flags_t flags, bool directOnly);
};
};
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 391d60a..ae4d174 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -485,6 +485,7 @@
status_t status = mAudioPolicyManager->startOutput(portId);
if (status == NO_ERROR) {
client->active = true;
+ onUpdateActiveSpatializerTracks_l();
}
return binderStatusFromStatusT(status);
}
@@ -522,6 +523,7 @@
status_t status = mAudioPolicyManager->stopOutput(portId);
if (status == NO_ERROR) {
client->active = false;
+ onUpdateActiveSpatializerTracks_l();
}
return status;
}
@@ -552,8 +554,10 @@
client->io, client->stream, client->session);
}
Mutex::Autolock _l(mLock);
+ if (client != nullptr && client->active) {
+ onUpdateActiveSpatializerTracks_l();
+ }
mAudioPlaybackClients.removeItem(portId);
-
// called from internal thread: no need to clear caller identity
mAudioPolicyManager->releaseOutput(portId);
}
@@ -1159,31 +1163,8 @@
return Status::ok();
}
-//audio policy: use audio_device_t appropriately
-
-Status AudioPolicyService::getDevicesForStream(
- AudioStreamType streamAidl,
- std::vector<AudioDeviceDescription>* _aidl_return) {
- audio_stream_type_t stream = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioStreamType_audio_stream_type_t(streamAidl));
-
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- *_aidl_return = std::vector<AudioDeviceDescription>{};
- return Status::ok();
- }
- if (mAudioPolicyManager == NULL) {
- return binderStatusFromStatusT(NO_INIT);
- }
- Mutex::Autolock _l(mLock);
- AutoCallerClear acc;
- *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
- convertContainer<std::vector<AudioDeviceDescription>>(
- mAudioPolicyManager->getDevicesForStream(stream),
- legacy2aidl_audio_devices_t_AudioDeviceDescription));
- return Status::ok();
-}
-
Status AudioPolicyService::getDevicesForAttributes(const media::AudioAttributesEx& attrAidl,
+ bool forVolume,
std::vector<AudioDevice>* _aidl_return)
{
AudioAttributes aa = VALUE_OR_RETURN_BINDER_STATUS(
@@ -1196,7 +1177,8 @@
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
- mAudioPolicyManager->getDevicesForAttributes(aa.getAttributes(), &devices)));
+ mAudioPolicyManager->getDevicesForAttributes(
+ aa.getAttributes(), &devices, forVolume)));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
convertContainer<std::vector<AudioDevice>>(devices,
legacy2aidl_AudioDeviceTypeAddress));
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 38b58d5..bdd86b1 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -39,6 +39,7 @@
#include <media/AidlConversion.h>
#include <media/AudioEffect.h>
#include <media/AudioParameter.h>
+#include <mediautils/MethodStatistics.h>
#include <mediautils/ServiceUtilities.h>
#include <mediautils/TimeCheck.h>
#include <sensorprivacy/SensorPrivacyManager.h>
@@ -60,6 +61,120 @@
static const String16 sManageAudioPolicyPermission("android.permission.MANAGE_AUDIO_POLICY");
+// Creates an association between Binder code to name for IAudioPolicyService.
+#define IAUDIOPOLICYSERVICE_BINDER_METHOD_MACRO_LIST \
+BINDER_METHOD_ENTRY(onNewAudioModulesAvailable) \
+BINDER_METHOD_ENTRY(setDeviceConnectionState) \
+BINDER_METHOD_ENTRY(getDeviceConnectionState) \
+BINDER_METHOD_ENTRY(handleDeviceConfigChange) \
+BINDER_METHOD_ENTRY(setPhoneState) \
+BINDER_METHOD_ENTRY(setForceUse) \
+BINDER_METHOD_ENTRY(getForceUse) \
+BINDER_METHOD_ENTRY(getOutput) \
+BINDER_METHOD_ENTRY(getOutputForAttr) \
+BINDER_METHOD_ENTRY(startOutput) \
+BINDER_METHOD_ENTRY(stopOutput) \
+BINDER_METHOD_ENTRY(releaseOutput) \
+BINDER_METHOD_ENTRY(getInputForAttr) \
+BINDER_METHOD_ENTRY(startInput) \
+BINDER_METHOD_ENTRY(stopInput) \
+BINDER_METHOD_ENTRY(releaseInput) \
+BINDER_METHOD_ENTRY(initStreamVolume) \
+BINDER_METHOD_ENTRY(setStreamVolumeIndex) \
+BINDER_METHOD_ENTRY(getStreamVolumeIndex) \
+BINDER_METHOD_ENTRY(setVolumeIndexForAttributes) \
+BINDER_METHOD_ENTRY(getVolumeIndexForAttributes) \
+BINDER_METHOD_ENTRY(getMaxVolumeIndexForAttributes) \
+BINDER_METHOD_ENTRY(getMinVolumeIndexForAttributes) \
+BINDER_METHOD_ENTRY(getStrategyForStream) \
+BINDER_METHOD_ENTRY(getDevicesForAttributes) \
+BINDER_METHOD_ENTRY(getOutputForEffect) \
+BINDER_METHOD_ENTRY(registerEffect) \
+BINDER_METHOD_ENTRY(unregisterEffect) \
+BINDER_METHOD_ENTRY(setEffectEnabled) \
+BINDER_METHOD_ENTRY(moveEffectsToIo) \
+BINDER_METHOD_ENTRY(isStreamActive) \
+BINDER_METHOD_ENTRY(isStreamActiveRemotely) \
+BINDER_METHOD_ENTRY(isSourceActive) \
+BINDER_METHOD_ENTRY(queryDefaultPreProcessing) \
+BINDER_METHOD_ENTRY(addSourceDefaultEffect) \
+BINDER_METHOD_ENTRY(addStreamDefaultEffect) \
+BINDER_METHOD_ENTRY(removeSourceDefaultEffect) \
+BINDER_METHOD_ENTRY(removeStreamDefaultEffect) \
+BINDER_METHOD_ENTRY(setSupportedSystemUsages) \
+BINDER_METHOD_ENTRY(setAllowedCapturePolicy) \
+BINDER_METHOD_ENTRY(getOffloadSupport) \
+BINDER_METHOD_ENTRY(isDirectOutputSupported) \
+BINDER_METHOD_ENTRY(listAudioPorts) \
+BINDER_METHOD_ENTRY(getAudioPort) \
+BINDER_METHOD_ENTRY(createAudioPatch) \
+BINDER_METHOD_ENTRY(releaseAudioPatch) \
+BINDER_METHOD_ENTRY(listAudioPatches) \
+BINDER_METHOD_ENTRY(setAudioPortConfig) \
+BINDER_METHOD_ENTRY(registerClient) \
+BINDER_METHOD_ENTRY(setAudioPortCallbacksEnabled) \
+BINDER_METHOD_ENTRY(setAudioVolumeGroupCallbacksEnabled) \
+BINDER_METHOD_ENTRY(acquireSoundTriggerSession) \
+BINDER_METHOD_ENTRY(releaseSoundTriggerSession) \
+BINDER_METHOD_ENTRY(getPhoneState) \
+BINDER_METHOD_ENTRY(registerPolicyMixes) \
+BINDER_METHOD_ENTRY(setUidDeviceAffinities) \
+BINDER_METHOD_ENTRY(removeUidDeviceAffinities) \
+BINDER_METHOD_ENTRY(setUserIdDeviceAffinities) \
+BINDER_METHOD_ENTRY(removeUserIdDeviceAffinities) \
+BINDER_METHOD_ENTRY(startAudioSource) \
+BINDER_METHOD_ENTRY(stopAudioSource) \
+BINDER_METHOD_ENTRY(setMasterMono) \
+BINDER_METHOD_ENTRY(getMasterMono) \
+BINDER_METHOD_ENTRY(getStreamVolumeDB) \
+BINDER_METHOD_ENTRY(getSurroundFormats) \
+BINDER_METHOD_ENTRY(getReportedSurroundFormats) \
+BINDER_METHOD_ENTRY(getHwOffloadFormatsSupportedForBluetoothMedia) \
+BINDER_METHOD_ENTRY(setSurroundFormatEnabled) \
+BINDER_METHOD_ENTRY(setAssistantServicesUids) \
+BINDER_METHOD_ENTRY(setActiveAssistantServicesUids) \
+BINDER_METHOD_ENTRY(setA11yServicesUids) \
+BINDER_METHOD_ENTRY(setCurrentImeUid) \
+BINDER_METHOD_ENTRY(isHapticPlaybackSupported) \
+BINDER_METHOD_ENTRY(isUltrasoundSupported) \
+BINDER_METHOD_ENTRY(listAudioProductStrategies) \
+BINDER_METHOD_ENTRY(getProductStrategyFromAudioAttributes) \
+BINDER_METHOD_ENTRY(listAudioVolumeGroups) \
+BINDER_METHOD_ENTRY(getVolumeGroupFromAudioAttributes) \
+BINDER_METHOD_ENTRY(setRttEnabled) \
+BINDER_METHOD_ENTRY(isCallScreenModeSupported) \
+BINDER_METHOD_ENTRY(setDevicesRoleForStrategy) \
+BINDER_METHOD_ENTRY(removeDevicesRoleForStrategy) \
+BINDER_METHOD_ENTRY(getDevicesForRoleAndStrategy) \
+BINDER_METHOD_ENTRY(setDevicesRoleForCapturePreset) \
+BINDER_METHOD_ENTRY(addDevicesRoleForCapturePreset) \
+BINDER_METHOD_ENTRY(removeDevicesRoleForCapturePreset) \
+BINDER_METHOD_ENTRY(clearDevicesRoleForCapturePreset) \
+BINDER_METHOD_ENTRY(getDevicesForRoleAndCapturePreset) \
+BINDER_METHOD_ENTRY(registerSoundTriggerCaptureStateListener) \
+BINDER_METHOD_ENTRY(getSpatializer) \
+BINDER_METHOD_ENTRY(canBeSpatialized) \
+BINDER_METHOD_ENTRY(getDirectPlaybackSupport) \
+BINDER_METHOD_ENTRY(getDirectProfilesForAttributes) \
+
+// singleton for Binder Method Statistics for IAudioPolicyService
+static auto& getIAudioPolicyServiceStatistics() {
+ using Code = int;
+
+#pragma push_macro("BINDER_METHOD_ENTRY")
+#undef BINDER_METHOD_ENTRY
+#define BINDER_METHOD_ENTRY(ENTRY) \
+ {(Code)media::BnAudioPolicyService::TRANSACTION_##ENTRY, #ENTRY},
+
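+ // Each BINDER_METHOD_ENTRY expands to a {transaction code, method name} pair seeding the
+ // statistics table below.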
+ static mediautils::MethodStatistics<Code> methodStatistics{
+ IAUDIOPOLICYSERVICE_BINDER_METHOD_MACRO_LIST
+ METHOD_STATISTICS_BINDER_CODE_NAMES(Code)
+ };
+#pragma pop_macro("BINDER_METHOD_ENTRY")
+
+ return methodStatistics;
+}
+
// ----------------------------------------------------------------------------
static AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface)
@@ -114,6 +229,13 @@
void AudioPolicyService::onFirstRef()
{
+ // Log an AudioPolicy "constructor" mediametrics event on first ref.
+ // This records the time it takes to load the audio modules and devices.
+ mediametrics::Defer defer([beginNs = systemTime()] {
+ mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_POLICY)
+ .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CTOR)
+ .set(AMEDIAMETRICS_PROP_EXECUTIONTIMENS, (int64_t)(systemTime() - beginNs))
+ .record(); });
{
Mutex::Autolock _l(mLock);
@@ -397,6 +519,7 @@
if (status == NO_ERROR && currentOutput == newOutput) {
return;
}
+ size_t numActiveTracks = countActiveClientsOnOutput_l(newOutput);
mLock.unlock();
// It is OK to call detachOutput() if none is already attached.
mSpatializer->detachOutput();
@@ -404,7 +527,7 @@
mLock.lock();
return;
}
- status = mSpatializer->attachOutput(newOutput);
+ status = mSpatializer->attachOutput(newOutput, numActiveTracks);
mLock.lock();
if (status != NO_ERROR) {
mAudioPolicyManager->releaseSpatializerOutput(newOutput);
@@ -421,6 +544,34 @@
}
}
+size_t AudioPolicyService::countActiveClientsOnOutput_l(audio_io_handle_t output) REQUIRES(mLock) {
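+ // Count the playback clients attached to this output that are currently started.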
+ size_t count = 0;
+ for (size_t i = 0; i < mAudioPlaybackClients.size(); i++) {
+ auto client = mAudioPlaybackClients.valueAt(i);
+ if (client->io == output && client->active) {
+ count++;
+ }
+ }
+ return count;
+}
+
+void AudioPolicyService::onUpdateActiveSpatializerTracks_l() {
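+ // Defer the update to the command thread; doOnUpdateActiveSpatializerTracks() re-acquires
+ // mLock there.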
+ if (mSpatializer == nullptr) {
+ return;
+ }
+ mOutputCommandThread->updateActiveSpatializerTracksCommand();
+}
+
+void AudioPolicyService::doOnUpdateActiveSpatializerTracks()
+{
+ Mutex::Autolock _l(mLock);
+ if (mSpatializer == nullptr) {
+ return;
+ }
+ mSpatializer->updateActiveTracks(countActiveClientsOnOutput_l(mSpatializer->getOutput()));
+}
+
status_t AudioPolicyService::clientCreateAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle,
int delayMs)
@@ -1032,6 +1183,12 @@
mPackageManager.dump(fd);
dumpReleaseLock(mLock, locked);
+
+ {
+ std::string timeCheckStats = getIAudioPolicyServiceStatistics().dump();
+ dprintf(fd, "\nIAudioPolicyService binder call profile\n");
+ write(fd, timeCheckStats.c_str(), timeCheckStats.size());
+ }
}
return NO_ERROR;
}
@@ -1092,7 +1249,6 @@
case TRANSACTION_isStreamActive:
case TRANSACTION_isStreamActiveRemotely:
case TRANSACTION_isSourceActive:
- case TRANSACTION_getDevicesForStream:
case TRANSACTION_registerPolicyMixes:
case TRANSACTION_setMasterMono:
case TRANSACTION_getSurroundFormats:
@@ -1138,8 +1294,20 @@
break;
}
- std::string tag("IAudioPolicyService command " + std::to_string(code));
- TimeCheck check(tag.c_str());
+ const std::string methodName = getIAudioPolicyServiceStatistics().getMethodForCode(code);
+ mediautils::TimeCheck check(
+ std::string("IAudioPolicyService::").append(methodName),
+ [code, methodName](bool timeout, float elapsedMs) { // don't move methodName.
+ if (timeout) {
+ mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_POLICY)
+ .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_TIMEOUT)
+ .set(AMEDIAMETRICS_PROP_METHODCODE, int64_t(code))
+ .set(AMEDIAMETRICS_PROP_METHODNAME, methodName.c_str())
+ .record();
+ } else {
+ getIAudioPolicyServiceStatistics().event(code, elapsedMs);
+ }
+ });
switch (code) {
case SHELL_COMMAND_TRANSACTION: {
@@ -1953,8 +2121,8 @@
mLock.lock();
} break;
- case CHECK_SPATIALIZER: {
- ALOGV("AudioCommandThread() processing updateUID states");
+ case CHECK_SPATIALIZER_OUTPUT: {
+ ALOGV("AudioCommandThread() processing check spatializer");
svc = mService.promote();
if (svc == 0) {
break;
@@ -1964,6 +2132,17 @@
mLock.lock();
} break;
+ case UPDATE_ACTIVE_SPATIALIZER_TRACKS: {
+ ALOGV("AudioCommandThread() processing update spatializer tracks");
+ svc = mService.promote();
+ if (svc == 0) {
+ break;
+ }
+ mLock.unlock();
+ svc->doOnUpdateActiveSpatializerTracks();
+ mLock.lock();
+ } break;
+
default:
ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
}
@@ -2274,11 +2453,19 @@
void AudioPolicyService::AudioCommandThread::checkSpatializerCommand()
{
sp<AudioCommand>command = new AudioCommand();
- command->mCommand = CHECK_SPATIALIZER;
+ command->mCommand = CHECK_SPATIALIZER_OUTPUT;
ALOGV("AudioCommandThread() adding check spatializer");
sendCommand(command);
}
+void AudioPolicyService::AudioCommandThread::updateActiveSpatializerTracksCommand()
+{
+ sp<AudioCommand>command = new AudioCommand();
+ command->mCommand = UPDATE_ACTIVE_SPATIALIZER_TRACKS;
+ ALOGV("AudioCommandThread() adding update active spatializer tracks");
+ sendCommand(command);
+}
+
status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
{
{
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 0a01b7b..43b579f 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -134,10 +134,8 @@
int32_t* _aidl_return) override;
binder::Status getStrategyForStream(AudioStreamType stream,
int32_t* _aidl_return) override;
- binder::Status getDevicesForStream(
- AudioStreamType stream,
- std::vector<AudioDeviceDescription>* _aidl_return) override;
binder::Status getDevicesForAttributes(const media::AudioAttributesEx& attr,
+ bool forVolume,
std::vector<AudioDevice>* _aidl_return) override;
binder::Status getOutputForEffect(const media::EffectDescriptor& desc,
int32_t* _aidl_return) override;
@@ -352,9 +350,13 @@
* by audio policy manager and attach/detach the spatializer effect accordingly.
*/
void onCheckSpatializer() override;
- void onCheckSpatializer_l();
+ void onCheckSpatializer_l() REQUIRES(mLock);
void doOnCheckSpatializer();
+ void onUpdateActiveSpatializerTracks_l() REQUIRES(mLock);
+ void doOnUpdateActiveSpatializerTracks();
+
void setEffectSuspended(int effectId,
audio_session_t sessionId,
bool suspended);
@@ -526,7 +528,8 @@
AUDIO_MODULES_UPDATE,
ROUTING_UPDATED,
UPDATE_UID_STATES,
- CHECK_SPATIALIZER
+ CHECK_SPATIALIZER_OUTPUT, // verify if spatializer effect should be created or moved
+ UPDATE_ACTIVE_SPATIALIZER_TRACKS // Update active track counts on spatializer output
};
AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
@@ -576,6 +579,8 @@
void routingChangedCommand();
void updateUidStatesCommand();
void checkSpatializerCommand();
+ void updateActiveSpatializerTracksCommand();
+
void insertCommand_l(AudioCommand *command, int delayMs = 0);
private:
class AudioCommandData;
@@ -1000,6 +1005,8 @@
void loadAudioPolicyManager();
void unloadAudioPolicyManager();
+ size_t countActiveClientsOnOutput_l(audio_io_handle_t output) REQUIRES(mLock);
+
mutable Mutex mLock; // prevents concurrent access to AudioPolicy manager functions changing
// device connection state or routing
// Note: lock acquisition order is always mLock > mEffectsLock:
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index 54d9094..579b852 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -300,6 +300,7 @@
if (levelChanged && mEngine != nullptr) {
setEffectParameter_l(SPATIALIZER_PARAM_LEVEL, std::vector<SpatializationLevel>{level});
}
+ checkSensorsState_l();
}
if (levelChanged) {
@@ -374,6 +375,7 @@
if (mPoseController != nullptr) {
mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
+ checkSensorsState_l();
}
return Status::ok();
@@ -447,9 +449,7 @@
}
std::lock_guard lock(mLock);
mHeadSensor = sensorHandle;
- if (mPoseController != nullptr) {
- mPoseController->setHeadSensor(mHeadSensor);
- }
+ checkSensorsState_l();
return Status::ok();
}
@@ -460,9 +460,7 @@
}
std::lock_guard lock(mLock);
mScreenSensor = sensorHandle;
- if (mPoseController != nullptr) {
- mPoseController->setScreenSensor(mScreenSensor);
- }
+ checkSensorsState_l();
return Status::ok();
}
@@ -557,7 +555,6 @@
auto vec = headToStage.toVector();
LOG_ALWAYS_FATAL_IF(vec.size() != sHeadPoseKeys.size(),
"%s invalid head to stage vector size %zu", __func__, vec.size());
-
sp<AMessage> msg =
new AMessage(EngineCallbackHandler::kWhatOnHeadToStagePose, mHandler);
for (size_t i = 0 ; i < sHeadPoseKeys.size(); i++) {
@@ -571,6 +568,9 @@
sp<media::ISpatializerHeadTrackingCallback> callback;
{
std::lock_guard lock(mLock);
+ if (mActualHeadTrackingMode == SpatializerHeadTrackingMode::DISABLED) {
+ return;
+ }
callback = mHeadTrackingCallback;
if (mEngine != nullptr) {
setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
@@ -621,7 +621,7 @@
}
}
-status_t Spatializer::attachOutput(audio_io_handle_t output) {
+status_t Spatializer::attachOutput(audio_io_handle_t output, size_t numActiveTracks) {
std::shared_ptr<SpatializerPoseController> poseController;
bool outputChanged = false;
sp<media::INativeSpatializerCallback> callback;
@@ -634,6 +634,7 @@
// remove FX instance
mEngine->setEnabled(false);
mEngine.clear();
+ mPoseController.reset();
}
// create FX instance on output
AttributionSourceState attributionSource = AttributionSourceState();
@@ -663,8 +664,8 @@
"%s could not allocate pose controller", __func__);
mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
- mPoseController->setHeadSensor(mHeadSensor);
- mPoseController->setScreenSensor(mScreenSensor);
+ mNumActiveTracks = numActiveTracks;
+ checkSensorsState_l();
mPoseController->setDisplayOrientation(mDisplayOrientation);
poseController = mPoseController;
}
@@ -697,7 +698,6 @@
output = mOutput;
mOutput = AUDIO_IO_HANDLE_NONE;
mPoseController.reset();
-
callback = mSpatializerCallback;
}
@@ -707,6 +707,26 @@
return output;
}
+void Spatializer::updateActiveTracks(size_t numActiveTracks) {
+ std::lock_guard lock(mLock);
+ mNumActiveTracks = numActiveTracks;
+ checkSensorsState_l();
+}
+
+void Spatializer::checkSensorsState_l() {
+ if (mSupportsHeadTracking && mPoseController != nullptr) {
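+ // Poll the head/screen sensors only while playback is active, spatialization is enabled,
+ // dynamic head tracking is requested and a head sensor is available; otherwise release them.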
+ if (mNumActiveTracks > 0 && mLevel != SpatializationLevel::NONE
+ && mDesiredHeadTrackingMode != HeadTrackingMode::STATIC
+ && mHeadSensor != SpatializerPoseController::INVALID_SENSOR) {
+ mPoseController->setHeadSensor(mHeadSensor);
+ mPoseController->setScreenSensor(mScreenSensor);
+ } else {
+ mPoseController->setHeadSensor(SpatializerPoseController::INVALID_SENSOR);
+ mPoseController->setScreenSensor(SpatializerPoseController::INVALID_SENSOR);
+ }
+ }
+}
+
void Spatializer::calculateHeadPose() {
ALOGV("%s", __func__);
std::lock_guard lock(mLock);
@@ -723,11 +743,11 @@
switch (event) {
case AudioEffect::EVENT_FRAMES_PROCESSED: {
int frames = info == nullptr ? 0 : *(int*)info;
- ALOGD("%s frames processed %d for me %p", __func__, frames, me);
+ ALOGV("%s frames processed %d for me %p", __func__, frames, me);
me->postFramesProcessedMsg(frames);
} break;
default:
- ALOGD("%s event %d", __func__, event);
+ ALOGV("%s event %d", __func__, event);
break;
}
}
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index 4d77b78..1382124 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -135,7 +135,7 @@
/** Called by audio policy service when the special output mixer dedicated to spatialization
* is opened and the spatializer engine must be created.
*/
- status_t attachOutput(audio_io_handle_t output);
+ status_t attachOutput(audio_io_handle_t output, size_t numActiveTracks);
/** Called by audio policy service when the special output mixer dedicated to spatialization
* is closed and the spatializer engine must be released.
*/
@@ -143,6 +143,8 @@
/** Returns the output stream the spatializer is attached to. */
audio_io_handle_t getOutput() const { std::lock_guard lock(mLock); return mOutput; }
+ void updateActiveTracks(size_t numActiveTracks);
+
/** Gets the channel mask, sampling rate and format set for the spatializer input. */
audio_config_base_t getAudioInConfig() const;
@@ -274,6 +276,13 @@
void postFramesProcessedMsg(int frames);
+ /**
+ * Checks if head and screen sensors must be actively monitored based on
+ * spatializer state and playback activity and configures the pose controller
+ * accordingly.
+ */
+ void checkSensorsState_l() REQUIRES(mLock);
+
/** Effect engine descriptor */
const effect_descriptor_t mEngineDescriptor;
/** Callback interface to parent audio policy service */
@@ -328,6 +337,8 @@
sp<ALooper> mLooper;
sp<EngineCallbackHandler> mHandler;
+ size_t mNumActiveTracks GUARDED_BY(mLock) = 0;
+
static const std::vector<const char *> sHeadPoseKeys;
};
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 551f5e9..da42ab4 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -699,6 +699,158 @@
ASSERT_EQ(countDirectProfilesPrimary, getDirectProfilesForAttributes(attr).size());
}
+TEST_P(AudioPolicyManagerTestMsd, IsDirectPlaybackSupportedWithMsd) {
+ const audio_attributes_t attr = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+
+ audio_config_base_t directConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ directConfig.format = AUDIO_FORMAT_DTS;
+ directConfig.sample_rate = 48000;
+ directConfig.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
+
+ audio_config_base_t nonDirectConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ nonDirectConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ nonDirectConfig.sample_rate = 48000;
+ nonDirectConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+ audio_config_base_t nonExistentConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ nonExistentConfig.format = AUDIO_FORMAT_E_AC3;
+ nonExistentConfig.sample_rate = 48000;
+ nonExistentConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+ audio_config_base_t msdDirectConfig1 = AUDIO_CONFIG_BASE_INITIALIZER;
+ msdDirectConfig1.format = AUDIO_FORMAT_AC3;
+ msdDirectConfig1.sample_rate = 48000;
+ msdDirectConfig1.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
+
+ audio_config_base_t msdDirectConfig2 = AUDIO_CONFIG_BASE_INITIALIZER;
+ msdDirectConfig2.format = AUDIO_FORMAT_IEC60958;
+ msdDirectConfig2.sample_rate = 48000;
+ msdDirectConfig2.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+ audio_config_base_t msdNonDirectConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ msdNonDirectConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ msdNonDirectConfig.sample_rate = 96000;
+ msdNonDirectConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+ ASSERT_TRUE(mManager->isDirectOutputSupported(directConfig, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(nonDirectConfig, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(nonExistentConfig, attr));
+ // before setting MSD patches the direct MSD configs return false
+ ASSERT_FALSE(mManager->isDirectOutputSupported(msdDirectConfig1, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(msdDirectConfig2, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(msdNonDirectConfig, attr));
+
+ DeviceVector outputDevices = mManager->getAvailableOutputDevices();
+ // Remove MSD output device to avoid patching to itself
+ outputDevices.remove(mMsdOutputDevice);
+ mManager->setMsdOutputPatches(&outputDevices);
+
+ ASSERT_TRUE(mManager->isDirectOutputSupported(directConfig, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(nonDirectConfig, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(nonExistentConfig, attr));
+ // after setting MSD patches the direct MSD configs return true
+ ASSERT_TRUE(mManager->isDirectOutputSupported(msdDirectConfig1, attr));
+ ASSERT_TRUE(mManager->isDirectOutputSupported(msdDirectConfig2, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(msdNonDirectConfig, attr));
+
+ mManager->releaseMsdOutputPatches(outputDevices);
+
+ ASSERT_TRUE(mManager->isDirectOutputSupported(directConfig, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(nonDirectConfig, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(nonExistentConfig, attr));
+ // after releasing MSD patches the direct MSD configs return false
+ ASSERT_FALSE(mManager->isDirectOutputSupported(msdDirectConfig1, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(msdDirectConfig2, attr));
+ ASSERT_FALSE(mManager->isDirectOutputSupported(msdNonDirectConfig, attr));
+}
+
+TEST_P(AudioPolicyManagerTestMsd, GetDirectPlaybackSupportWithMsd) {
+ const audio_attributes_t attr = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+
+ audio_config_t directConfig = AUDIO_CONFIG_INITIALIZER;
+ directConfig.format = AUDIO_FORMAT_DTS;
+ directConfig.sample_rate = 48000;
+ directConfig.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
+
+ audio_config_t nonDirectConfig = AUDIO_CONFIG_INITIALIZER;
+ nonDirectConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ nonDirectConfig.sample_rate = 48000;
+ nonDirectConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+ audio_config_t nonExistentConfig = AUDIO_CONFIG_INITIALIZER;
+ nonExistentConfig.format = AUDIO_FORMAT_E_AC3;
+ nonExistentConfig.sample_rate = 48000;
+ nonExistentConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+ audio_config_t msdDirectConfig1 = AUDIO_CONFIG_INITIALIZER;
+ msdDirectConfig1.format = AUDIO_FORMAT_AC3;
+ msdDirectConfig1.sample_rate = 48000;
+ msdDirectConfig1.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;
+
+ audio_config_t msdDirectConfig2 = AUDIO_CONFIG_INITIALIZER;
+ msdDirectConfig2.format = AUDIO_FORMAT_IEC60958;
+ msdDirectConfig2.sample_rate = 48000;
+ msdDirectConfig2.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+ audio_config_t msdNonDirectConfig = AUDIO_CONFIG_INITIALIZER;
+ msdNonDirectConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ msdNonDirectConfig.sample_rate = 96000;
+ msdNonDirectConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+ ASSERT_EQ(AUDIO_DIRECT_BITSTREAM_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &directConfig));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &nonDirectConfig));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &nonExistentConfig));
+ // before setting MSD patches the direct MSD configs return AUDIO_DIRECT_NOT_SUPPORTED
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &msdDirectConfig1));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &msdDirectConfig2));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &msdNonDirectConfig));
+
+ DeviceVector outputDevices = mManager->getAvailableOutputDevices();
+ // Remove MSD output device to avoid patching to itself
+ outputDevices.remove(mMsdOutputDevice);
+ mManager->setMsdOutputPatches(&outputDevices);
+
+ ASSERT_EQ(AUDIO_DIRECT_BITSTREAM_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &directConfig));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &nonDirectConfig));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &nonExistentConfig));
+ // after setting MSD patches the direct MSD configs return values according to their flags
+ ASSERT_EQ(AUDIO_DIRECT_OFFLOAD_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &msdDirectConfig1));
+ ASSERT_EQ(AUDIO_DIRECT_BITSTREAM_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &msdDirectConfig2));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &msdNonDirectConfig));
+
+ mManager->releaseMsdOutputPatches(outputDevices);
+
+ ASSERT_EQ(AUDIO_DIRECT_BITSTREAM_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &directConfig));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &nonDirectConfig));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &nonExistentConfig));
+ // after releasing MSD patches the direct MSD configs return AUDIO_DIRECT_NOT_SUPPORTED
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &msdDirectConfig1));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &msdDirectConfig2));
+ ASSERT_EQ(AUDIO_DIRECT_NOT_SUPPORTED,
+ mManager->getDirectPlaybackSupport(&attr, &msdNonDirectConfig));
+}
+
class AudioPolicyManagerTestWithConfigurationFile : public AudioPolicyManagerTest {
protected:
void SetUpManagerConfig() override;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 0ba1b28..c576162 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -2139,10 +2139,14 @@
id.string());
errorCode = ERROR_CAMERA_IN_USE;
break;
+ case -EINVAL:
+ msg = String8::format("Torch strength level %d is not within the "
+ "valid range.", torchStrength);
+ errorCode = ERROR_ILLEGAL_ARGUMENT;
+ break;
default:
msg = String8::format("Changing torch strength level failed.");
errorCode = ERROR_INVALID_OPERATION;
-
}
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(errorCode, msg.string());
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index bcba80e..701206a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -864,7 +864,7 @@
bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
bool isMultiResolution = outputConfiguration.isMultiResolution();
int64_t dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
- int streamUseCase = outputConfiguration.getStreamUseCase();
+ int64_t streamUseCase = outputConfiguration.getStreamUseCase();
int timestampBase = outputConfiguration.getTimestampBase();
int mirrorMode = outputConfiguration.getMirrorMode();
@@ -1260,7 +1260,7 @@
}
const std::vector<int32_t> &sensorPixelModesUsed =
outputConfiguration.getSensorPixelModesUsed();
- int streamUseCase = outputConfiguration.getStreamUseCase();
+ int64_t streamUseCase = outputConfiguration.getStreamUseCase();
int timestampBase = outputConfiguration.getTimestampBase();
int64_t dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
int mirrorMode = outputConfiguration.getMirrorMode();
@@ -1629,7 +1629,7 @@
const std::vector<int32_t> &sensorPixelModesUsed =
outputConfiguration.getSensorPixelModesUsed();
int64_t dynamicRangeProfile = outputConfiguration.getDynamicRangeProfile();
- int streamUseCase= outputConfiguration.getStreamUseCase();
+ int64_t streamUseCase= outputConfiguration.getStreamUseCase();
int timestampBase = outputConfiguration.getTimestampBase();
int mirrorMode = outputConfiguration.getMirrorMode();
for (auto& bufferProducer : bufferProducers) {
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index 10fa33f..9303fd2 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -287,7 +287,7 @@
}
}
-status_t CameraOfflineSessionClient::notifyActive() {
+status_t CameraOfflineSessionClient::notifyActive(float maxPreviewFps __unused) {
return startCameraStreamingOps();
}
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index ef1d2de..f2c42d8 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -94,7 +94,7 @@
// NotificationListener API
void notifyError(int32_t errorCode, const CaptureResultExtras& resultExtras) override;
void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp) override;
- status_t notifyActive() override;
+ status_t notifyActive(float maxPreviewFps) override;
void notifyIdle(int64_t requestCount, int64_t resultErrorCount, bool deviceError,
const std::vector<hardware::CameraStreamStats>& streamStats) override;
void notifyAutoFocus(uint8_t newState, int triggerId) override;
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
index 600bd28..d32b71c 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.h
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -117,6 +117,41 @@
// Composite streams should behave accordingly.
void enableErrorState();
+ // Utility class to lock and unlock a GraphicBuffer
+ class GraphicBufferLocker {
+ public:
+ GraphicBufferLocker(sp<GraphicBuffer> buffer) : _buffer(buffer) {}
+
+ status_t lockAsync(void** dstBuffer, int fenceFd) {
+ if (_buffer == nullptr) return BAD_VALUE;
+
+ status_t res = OK;
+ if (!_locked) {
+ res = _buffer->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN,
+ dstBuffer, fenceFd);
+ if (res == OK) {
+ _locked = true;
+ }
+ }
+ return res;
+ }
+
+ ~GraphicBufferLocker() {
+ if (_locked && _buffer != nullptr) {
+ auto res = _buffer->unlock();
+ if (res != OK) {
+ ALOGE("%s: Error trying to unlock buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ }
+ }
+ }
+
+ private:
+ sp<GraphicBuffer> _buffer;
+ bool _locked = false;
+ };
+
wp<CameraDeviceBase> mDevice;
wp<camera3::StatusTracker> mStatusTracker;
wp<hardware::camera2::ICameraDeviceCallbacks> mRemoteCallback;
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index a66a592..aa057c7 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -297,7 +297,8 @@
}
sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
- res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
+ GraphicBufferLocker gbLocker(gb);
+ res = gbLocker.lockAsync(&dstBuffer, fenceFd);
if (res != OK) {
ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
strerror(-res), res);
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index a73ffb9..6058429 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -1130,7 +1130,8 @@
// Copy the content of the file to memory.
sp<GraphicBuffer> gb = GraphicBuffer::from(inputFrame.anb);
void* dstBuffer;
- auto res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, inputFrame.fenceFd);
+ GraphicBufferLocker gbLocker(gb);
+ auto res = gbLocker.lockAsync(&dstBuffer, inputFrame.fenceFd);
if (res != OK) {
ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
strerror(-res), res);
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index a29f3a6..6ed3c02 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -315,7 +315,7 @@
}
template <typename TClientBase>
-status_t Camera2ClientBase<TClientBase>::notifyActive() {
+status_t Camera2ClientBase<TClientBase>::notifyActive(float maxPreviewFps) {
if (!mDeviceActive) {
status_t res = TClientBase::startCameraStreamingOps();
if (res != OK) {
@@ -323,7 +323,7 @@
TClientBase::mCameraIdStr.string(), res);
return res;
}
- CameraServiceProxyWrapper::logActive(TClientBase::mCameraIdStr);
+ CameraServiceProxyWrapper::logActive(TClientBase::mCameraIdStr, maxPreviewFps);
}
mDeviceActive = true;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 182e6ef..6b90f5e 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -73,7 +73,8 @@
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras);
- virtual status_t notifyActive(); // Returns errors on app ops permission failures
+ // Returns errors on app ops permission failures
+ virtual status_t notifyActive(float maxPreviewFps);
virtual void notifyIdle(int64_t requestCount, int64_t resultErrorCount,
bool deviceError,
const std::vector<hardware::CameraStreamStats>& streamStats);
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 5883988..05edd6a 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -184,7 +184,7 @@
bool isShared = false, bool isMultiResolution = false,
uint64_t consumerUsage = 0,
int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) = 0;
@@ -205,7 +205,7 @@
bool isShared = false, bool isMultiResolution = false,
uint64_t consumerUsage = 0,
int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) = 0;
diff --git a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
index 54e42a6..f39b92a 100644
--- a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
+++ b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
@@ -40,7 +40,8 @@
// Required for API 1 and 2
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras &resultExtras) = 0;
- virtual status_t notifyActive() = 0; // May return an error since it checks appops
+ // May return an error since it checks appops
+ virtual status_t notifyActive(float maxPreviewFps) = 0;
virtual void notifyIdle(int64_t requestCount, int64_t resultError, bool deviceError,
const std::vector<hardware::CameraStreamStats>& streamStats) = 0;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index c337eda..4cc03f0 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -642,7 +642,7 @@
removeRef(DeviceMode::CAMERA, id);
ALOGE("%s: Transaction error opening a session for camera device %s: %s",
__FUNCTION__, id.c_str(), ret.getMessage());
- return DEAD_OBJECT;
+ return AidlProviderInfo::mapToStatusT(ret);
}
return OK;
}
diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
index b2a7fee..6f35e56 100644
--- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2021 The Android Open Source Project
+ * Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -45,10 +45,32 @@
using ICameraProvider = aidl::android::hardware::camera::provider::ICameraProvider;
using StatusListener = CameraProviderManager::StatusListener;
+static status_t mapExceptionCodeToStatusT(binder_exception_t binderException) {
+ switch (binderException) {
+ case EX_NONE:
+ return OK;
+ case EX_ILLEGAL_ARGUMENT:
+ case EX_NULL_POINTER:
+ case EX_BAD_PARCELABLE:
+ case EX_ILLEGAL_STATE:
+ return BAD_VALUE;
+ case EX_UNSUPPORTED_OPERATION:
+ return INVALID_OPERATION;
+ case EX_TRANSACTION_FAILED:
+ return DEAD_OBJECT;
+ default:
+ return UNKNOWN_ERROR;
+ }
+}
+
status_t AidlProviderInfo::mapToStatusT(const ndk::ScopedAStatus& s) {
using Status = aidl::android::hardware::camera::common::Status;
+ auto exceptionCode = s.getExceptionCode();
+ if (exceptionCode != EX_SERVICE_SPECIFIC) {
+ return mapExceptionCodeToStatusT(exceptionCode);
+ }
Status st = static_cast<Status>(s.getServiceSpecificError());
- switch(st) {
+ switch (st) {
case Status::OK:
return OK;
case Status::ILLEGAL_ARGUMENT:
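The AidlProviderInfo change above matters because a ScopedAStatus can carry either a binder exception (for example a failed transaction) or a service-specific camera Status; reading getServiceSpecificError() without first checking getExceptionCode() loses that distinction. It also lets the openSession failure path in CameraProviderManager (see the hunk above) return something more precise than a blanket DEAD_OBJECT. A minimal standalone sketch of the two-level mapping, using hypothetical stand-in types and illustrative constants rather than the real NDK binder headers:

#include <cstdio>

// Stand-ins; names and values are illustrative, not the real libbinder_ndk constants.
enum BinderException { EX_NONE = 0, EX_ILLEGAL_ARGUMENT = -3,
                       EX_SERVICE_SPECIFIC = -8, EX_TRANSACTION_FAILED = -129 };
enum class Status { OK = 0, ILLEGAL_ARGUMENT, CAMERA_IN_USE };   // subset of the AIDL Status enum
constexpr int OK_T = 0, BAD_VALUE = -22, DEAD_OBJECT = -32, UNKNOWN_ERROR = -1000;

struct FakeAStatus {                 // stand-in for ndk::ScopedAStatus
    BinderException exceptionCode;
    Status serviceSpecificError;
};

int mapToStatusT(const FakeAStatus& s) {
    // Only EX_SERVICE_SPECIFIC carries a camera Status payload; anything else
    // (transport failures, bad arguments, ...) is mapped from the exception code itself.
    if (s.exceptionCode != EX_SERVICE_SPECIFIC) {
        switch (s.exceptionCode) {
            case EX_NONE:               return OK_T;
            case EX_ILLEGAL_ARGUMENT:   return BAD_VALUE;
            case EX_TRANSACTION_FAILED: return DEAD_OBJECT;
            default:                    return UNKNOWN_ERROR;
        }
    }
    switch (s.serviceSpecificError) {
        case Status::OK:               return OK_T;
        case Status::ILLEGAL_ARGUMENT: return BAD_VALUE;
        default:                       return UNKNOWN_ERROR;
    }
}

int main() {
    FakeAStatus deadBinder{EX_TRANSACTION_FAILED, Status::OK};
    printf("mapped: %d\n", mapToStatusT(deadBinder));   // DEAD_OBJECT instead of a generic error
    return 0;
}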
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 688b6df..aeffd24 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -978,7 +978,7 @@
const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
- uint64_t consumerUsage, int64_t dynamicRangeProfile, int streamUseCase,
+ uint64_t consumerUsage, int64_t dynamicRangeProfile, int64_t streamUseCase,
int timestampBase, int mirrorMode) {
ATRACE_CALL();
@@ -1013,8 +1013,8 @@
android_dataspace dataSpace, camera_stream_rotation_t rotation, int *id,
const String8& physicalCameraId, const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
- uint64_t consumerUsage, int64_t dynamicRangeProfile, int streamUseCase, int timestampBase,
- int mirrorMode) {
+ uint64_t consumerUsage, int64_t dynamicRangeProfile, int64_t streamUseCase,
+ int timestampBase, int mirrorMode) {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
@@ -1022,7 +1022,8 @@
Mutex::Autolock l(mLock);
ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
" consumer usage %" PRIu64 ", isShared %d, physicalCameraId %s, isMultiResolution %d"
- " dynamicRangeProfile %" PRIx64 ", streamUseCase %d, timestampBase %d, mirrorMode %d",
+ " dynamicRangeProfile 0x%" PRIx64 ", streamUseCase %" PRId64 ", timestampBase %d,"
+ " mirrorMode %d",
mId.string(), mNextStreamId, width, height, format, dataSpace, rotation,
consumerUsage, isShared, physicalCameraId.string(), isMultiResolution,
dynamicRangeProfile, streamUseCase, timestampBase, mirrorMode);
@@ -1787,6 +1788,20 @@
return OK;
}
+float Camera3Device::getMaxPreviewFps(sp<camera3::Camera3OutputStreamInterface> stream) {
+ camera_metadata_entry minDurations =
+ mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
+ for (size_t i = 0; i < minDurations.count; i += 4) {
+ if (minDurations.data.i64[i] == stream->getFormat()
+ && minDurations.data.i64[i+1] == stream->getWidth()
+ && minDurations.data.i64[i+2] == stream->getHeight()) {
+ int64_t minFrameDuration = minDurations.data.i64[i+3];
+ return 1e9f / minFrameDuration;
+ }
+ }
+ return 0.0f;
+}
+
/**
* Methods called by subclasses
*/
@@ -1795,6 +1810,7 @@
ATRACE_CALL();
std::vector<int> streamIds;
std::vector<hardware::CameraStreamStats> streamStats;
+ float sessionMaxPreviewFps = 0.0f;
{
// Need mLock to safely update state and synchronize to current
@@ -1814,21 +1830,25 @@
// state changes
if (mPauseStateNotify) return;
- // Populate stream statistics in case of Idle
- if (idle) {
- for (size_t i = 0; i < mOutputStreams.size(); i++) {
- auto stream = mOutputStreams[i];
- if (stream.get() == nullptr) continue;
+ for (size_t i = 0; i < mOutputStreams.size(); i++) {
+ auto stream = mOutputStreams[i];
+ if (stream.get() == nullptr) continue;
+
+ float streamMaxPreviewFps = getMaxPreviewFps(stream);
+ sessionMaxPreviewFps = std::max(sessionMaxPreviewFps, streamMaxPreviewFps);
+
+ // Populate stream statistics in case of Idle
+ if (idle) {
streamIds.push_back(stream->getId());
Camera3Stream* camera3Stream = Camera3Stream::cast(stream->asHalStream());
int64_t usage = 0LL;
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
if (camera3Stream != nullptr) {
usage = camera3Stream->getUsage();
streamUseCase = camera3Stream->getStreamUseCase();
}
streamStats.emplace_back(stream->getWidth(), stream->getHeight(),
- stream->getFormat(), stream->getDataSpace(), usage,
+ stream->getFormat(), streamMaxPreviewFps, stream->getDataSpace(), usage,
stream->getMaxHalBuffers(),
stream->getMaxTotalBuffers() - stream->getMaxHalBuffers(),
stream->getDynamicRangeProfile(), streamUseCase);
@@ -1869,7 +1889,7 @@
}
listener->notifyIdle(requestCount, resultErrorCount, deviceError, streamStats);
} else {
- res = listener->notifyActive();
+ res = listener->notifyActive(sessionMaxPreviewFps);
}
}
if (res != OK) {
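The Camera3Device changes above do two related things: the per-stream loop now runs for both active and idle transitions so a session-wide maximum preview fps can be computed, and that value is forwarded through notifyActive()/logActive() into the proxy session statistics. The fps itself comes from ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, which getMaxPreviewFps() reads as repeating (format, width, height, minFrameDuration) tuples. A standalone sketch of that lookup, with a plain vector standing in for camera_metadata_entry and illustrative values:

#include <cstdint>
#include <cstdio>
#include <vector>

// minDurations mirrors ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS: a flat int64 list of
// (format, width, height, minFrameDurationNs) tuples, as the new getMaxPreviewFps() reads it.
float maxPreviewFps(const std::vector<int64_t>& minDurations,
                    int64_t format, int64_t width, int64_t height) {
    for (size_t i = 0; i + 3 < minDurations.size(); i += 4) {
        if (minDurations[i] == format && minDurations[i + 1] == width &&
                minDurations[i + 2] == height) {
            int64_t minFrameDurationNs = minDurations[i + 3];
            if (minFrameDurationNs <= 0) break;          // defensive guard, sketch only
            return 1e9f / minFrameDurationNs;            // ns per frame -> frames per second
        }
    }
    return 0.0f;                                         // unknown stream configuration
}

int main() {
    // Hypothetical entry: format 0x22, 1920x1080, 33333333 ns per frame (~30 fps).
    std::vector<int64_t> durations = {0x22, 1920, 1080, 33333333};
    printf("session max preview fps: %.1f\n", maxPreviewFps(durations, 0x22, 1920, 1080));
    return 0;
}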
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 453ac3a..c1ba88a 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -141,7 +141,7 @@
uint64_t consumerUsage = 0,
int64_t dynamicRangeProfile =
ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) override;
@@ -156,7 +156,7 @@
uint64_t consumerUsage = 0,
int64_t dynamicRangeProfile =
ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO) override;
@@ -294,6 +294,8 @@
status_t disconnectImpl();
static status_t removeFwkOnlyRegionKeys(CameraMetadata *request);
+ float getMaxPreviewFps(sp<camera3::Camera3OutputStreamInterface> stream);
+
static const size_t kDumpLockAttempts = 10;
static const size_t kDumpSleepDuration = 100000; // 0.10 sec
static const nsecs_t kActiveTimeout = 500000000; // 500 ms
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 2497c22..b5d0746 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -34,7 +34,7 @@
android_dataspace dataSpace, camera_stream_rotation_t rotation,
const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
- int setId, bool isMultiResolution, int64_t dynamicRangeProfile, int streamUseCase,
+ int setId, bool isMultiResolution, int64_t dynamicRangeProfile, int64_t streamUseCase,
bool deviceTimeBaseIsRealtime, int timestampBase) :
Camera3Stream(id, type,
width, height, maxSize, format, dataSpace, rotation,
@@ -91,7 +91,7 @@
}
lines.appendFormat(" Dynamic Range Profile: 0x%" PRIx64,
camera_stream::dynamic_range_profile);
- lines.appendFormat(" Stream use case: %d\n", camera_stream::use_case);
+ lines.appendFormat(" Stream use case: %" PRId64 "\n", camera_stream::use_case);
lines.appendFormat(" Frames produced: %d, last timestamp: %" PRId64 " ns\n",
mFrameCount, mLastTimestamp);
lines.appendFormat(" Total buffers: %zu, currently dequeued: %zu\n",
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index e757ec6..f389d53 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -39,7 +39,7 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 8ae16e5..0f61065 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -47,7 +47,7 @@
nsecs_t timestampOffset, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
- int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+ int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
int mirrorMode) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
/*maxSize*/0, format, dataSpace, rotation,
@@ -80,7 +80,7 @@
nsecs_t timestampOffset, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
- int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+ int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
int mirrorMode) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
@@ -118,7 +118,7 @@
const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
- int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+ int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
int mirrorMode) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
/*maxSize*/0, format, dataSpace, rotation,
@@ -163,7 +163,7 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed,
uint64_t consumerUsage, nsecs_t timestampOffset,
int setId, bool isMultiResolution,
- int64_t dynamicRangeProfile, int streamUseCase,
+ int64_t dynamicRangeProfile, int64_t streamUseCase,
bool deviceTimeBaseIsRealtime, int timestampBase,
int mirrorMode) :
Camera3IOStreamBase(id, type, width, height,
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index e777e85..7b12efc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -91,7 +91,7 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
@@ -108,7 +108,7 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
@@ -124,7 +124,7 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
@@ -256,7 +256,7 @@
uint64_t consumerUsage = 0, nsecs_t timestampOffset = 0,
int setId = CAMERA3_STREAM_SET_ID_INVALID, bool isMultiResolution = false,
int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 198e32f..c09a0b2 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -34,7 +34,7 @@
nsecs_t timestampOffset, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId, bool useHalBufManager, int64_t dynamicProfile,
- int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
+ int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
int mirrorMode) :
Camera3OutputStream(id, CAMERA_STREAM_OUTPUT, width, height,
format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
index 9be0c86..8f7f00b 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -42,7 +42,7 @@
int setId = CAMERA3_STREAM_SET_ID_INVALID,
bool useHalBufManager = false,
int64_t dynamicProfile = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
- int streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 396b316..7ad6649 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -55,7 +55,7 @@
const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
- int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase) :
+ int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase) :
camera_stream(),
mId(id),
mSetId(setId),
@@ -179,7 +179,7 @@
return camera_stream::max_buffers;
}
-int Camera3Stream::getStreamUseCase() const {
+int64_t Camera3Stream::getStreamUseCase() const {
return camera_stream::use_case;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index d1545cc..d429e6c 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -178,7 +178,7 @@
android_dataspace getOriginalDataSpace() const;
int getMaxHalBuffers() const;
const String8& physicalCameraId() const;
- int getStreamUseCase() const;
+ int64_t getStreamUseCase() const;
int getTimestampBase() const;
bool isDeviceTimeBaseRealtime() const;
@@ -509,7 +509,7 @@
const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
- int streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase);
+ int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase);
wp<Camera3StreamBufferFreedListener> mBufferFreedListener;
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 77c6483..5c333a4 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -66,7 +66,7 @@
std::unordered_set<int32_t> sensor_pixel_modes_used;
int64_t dynamic_range_profile;
- int use_case;
+ int64_t use_case;
} camera_stream_t;
typedef struct camera_stream_buffer {
@@ -111,7 +111,7 @@
bool supportsOffline = false;
std::unordered_set<int32_t> sensorPixelModesUsed;
int64_t dynamicRangeProfile;
- int streamUseCase;
+ int64_t streamUseCase;
int timestampBase;
int mirrorMode;
OutputStreamInfo() :
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index 4894ba9..87d3ee8 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -972,7 +972,7 @@
}
if (src->use_case != ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT &&
mHidlSession_3_8 == nullptr) {
- ALOGE("%s: Camera device doesn't support non-default stream use case %d!",
+ ALOGE("%s: Camera device doesn't support non-default stream use case %" PRId64 "!",
__FUNCTION__, src->use_case);
return BAD_VALUE;
}
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index 8699543..82d58e0 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -66,10 +66,11 @@
}
}
-void CameraServiceProxyWrapper::CameraSessionStatsWrapper::onActive() {
+void CameraServiceProxyWrapper::CameraSessionStatsWrapper::onActive(float maxPreviewFps) {
Mutex::Autolock l(mLock);
mSessionStats.mNewCameraState = CameraSessionStats::CAMERA_STATE_ACTIVE;
+ mSessionStats.mMaxPreviewFps = maxPreviewFps;
updateProxyDeviceState(mSessionStats);
// Reset mCreationDuration to -1 to distinguish between 1st session
@@ -158,7 +159,7 @@
sessionStats->onStreamConfigured(operatingMode, internalConfig, latencyMs);
}
-void CameraServiceProxyWrapper::logActive(const String8& id) {
+void CameraServiceProxyWrapper::logActive(const String8& id, float maxPreviewFps) {
std::shared_ptr<CameraSessionStatsWrapper> sessionStats;
{
Mutex::Autolock l(mLock);
@@ -171,7 +172,7 @@
}
ALOGV("%s: id %s", __FUNCTION__, id.c_str());
- sessionStats->onActive();
+ sessionStats->onActive(maxPreviewFps);
}
void CameraServiceProxyWrapper::logIdle(const String8& id,
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index f701e94..037316d 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -48,7 +48,7 @@
void onOpen();
void onClose(int32_t latencyMs);
void onStreamConfigured(int operatingMode, bool internalReconfig, int32_t latencyMs);
- void onActive();
+ void onActive(float maxPreviewFps);
void onIdle(int64_t requestCount, int64_t resultErrorCount, bool deviceError,
const std::vector<hardware::CameraStreamStats>& streamStats);
};
@@ -81,7 +81,7 @@
int32_t latencyMs);
// Session state becomes active
- static void logActive(const String8& id);
+ static void logActive(const String8& id, float maxPreviewFps);
// Session state becomes idle
static void logIdle(const String8& id,
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index af00e81..4090dae 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -313,7 +313,7 @@
}
}
-bool isStreamUseCaseSupported(int streamUseCase,
+bool isStreamUseCaseSupported(int64_t streamUseCase,
const CameraMetadata &deviceInfo) {
camera_metadata_ro_entry_t availableStreamUseCases =
deviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES);
@@ -324,7 +324,7 @@
}
for (size_t i = 0; i < availableStreamUseCases.count; i++) {
- if (availableStreamUseCases.data.i32[i] == streamUseCase) {
+ if (availableStreamUseCases.data.i64[i] == streamUseCase) {
return true;
}
}
@@ -336,7 +336,7 @@
sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
const std::vector<int32_t> &sensorPixelModesUsed, int64_t dynamicRangeProfile,
- int streamUseCase, int timestampBase, int mirrorMode) {
+ int64_t streamUseCase, int timestampBase, int mirrorMode) {
// bufferProducer must be non-null
if (gbp == nullptr) {
String8 msg = String8::format("Camera %s: Surface is NULL", logicalCameraId.string());
@@ -452,7 +452,7 @@
}
if (!SessionConfigurationUtils::isStreamUseCaseSupported(streamUseCase,
physicalCameraMetadata)) {
- String8 msg = String8::format("Camera %s: stream use case %d not supported,"
+ String8 msg = String8::format("Camera %s: stream use case %" PRId64 " not supported,"
" failed to create output stream", logicalCameraId.string(), streamUseCase);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
@@ -718,7 +718,7 @@
return res;
}
- int streamUseCase = it.getStreamUseCase();
+ int64_t streamUseCase = it.getStreamUseCase();
int timestampBase = it.getTimestampBase();
int mirrorMode = it.getMirrorMode();
if (deferredConsumer) {
@@ -1040,7 +1040,7 @@
// image
return false;
}
- if (static_cast<int32_t>(streamConfigV38.streams[i].useCase) !=
+ if (static_cast<int64_t>(streamConfigV38.streams[i].useCase) !=
ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT) {
// ICameraDevice older than 3.8 doesn't support stream use case
return false;
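The data.i32 -> data.i64 read above is the root of the int64_t widening threaded through the rest of this patch: the available stream use cases are read from the metadata as 64-bit entries, so comparing them against a plain int use case risks truncation. A minimal sketch of the membership check with the wider type, again with a plain vector standing in for the metadata entry and illustrative values:

#include <cstdint>
#include <cstdio>
#include <vector>

bool isStreamUseCaseSupported(int64_t streamUseCase, const std::vector<int64_t>& available) {
    for (int64_t candidate : available) {
        if (candidate == streamUseCase) return true;   // exact 64-bit comparison, no truncation
    }
    return false;
}

int main() {
    std::vector<int64_t> available = {0 /* DEFAULT */, 1, 2};   // illustrative values only
    printf("supported: %d\n", isStreamUseCaseSupported(2, available) ? 1 : 0);
    return 0;
}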
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 406510f..8abcc95 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -99,7 +99,7 @@
sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
const String8 &logicalCameraId, const CameraMetadata &physicalCameraMetadata,
const std::vector<int32_t> &sensorPixelModesUsed, int64_t dynamicRangeProfile,
- int streamUseCase, int timestampBase, int mirrorMode);
+ int64_t streamUseCase, int timestampBase, int mirrorMode);
void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
camera3::camera_stream_rotation_t rotation, String8 physicalId, int32_t groupId,
@@ -114,7 +114,7 @@
// Check if the device supports a given dynamicRangeProfile
bool isDynamicRangeProfileSupported(int64_t dynamicRangeProfile, const CameraMetadata& staticMeta);
-bool isStreamUseCaseSupported(int streamUseCase, const CameraMetadata &deviceInfo);
+bool isStreamUseCaseSupported(int64_t streamUseCase, const CameraMetadata &deviceInfo);
void mapStreamInfo(const OutputStreamInfo &streamInfo,
camera3::camera_stream_rotation_t rotation, String8 physicalId,
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index ade54d0..aacc2be 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -1246,10 +1246,10 @@
if (channelMask != 0) {
switch (direction) {
case 1: // Output, keep sync with AudioTypes#getAAudioDirection()
- channelCount = audio_channel_count_from_out_mask(channelMask);
+ channelCount = (int32_t)audio_channel_count_from_out_mask(channelMask);
break;
case 2: // Input, keep sync with AudioTypes#getAAudioDirection()
- channelCount = audio_channel_count_from_in_mask(channelMask);
+ channelCount = (int32_t)audio_channel_count_from_in_mask(channelMask);
break;
default:
ALOGW("Invalid direction %d", direction);
diff --git a/services/mediametrics/statsd_drm.cpp b/services/mediametrics/statsd_drm.cpp
index 287fb8d..e06a605 100644
--- a/services/mediametrics/statsd_drm.cpp
+++ b/services/mediametrics/statsd_drm.cpp
@@ -171,7 +171,7 @@
std::vector<uint8_t> buf(str.length() / 4 * 3, 0);
size_t size = buf.size();
if (decodeBase64(buf.data(), &size, str.c_str()) && size <= buf.size()) {
- buf.erase(buf.begin() + size, buf.end());
+ buf.erase(buf.begin() + (ptrdiff_t)size, buf.end());
return buf;
}
return {};