Merge "Make flag foreground_audio_control read only" into main
diff --git a/camera/camera_platform.aconfig b/camera/camera_platform.aconfig
index 1f50570..1dc45c3 100644
--- a/camera/camera_platform.aconfig
+++ b/camera/camera_platform.aconfig
@@ -4,6 +4,7 @@
flag {
namespace: "camera_platform"
name: "camera_hsum_permission"
+ is_exported: true
description: "Camera access by headless system user"
bug: "273539631"
}
@@ -11,6 +12,7 @@
flag {
namespace: "camera_platform"
name: "concert_mode"
+ is_exported: true
description: "Introduces a new concert mode camera extension type"
bug: "297083874"
}
@@ -18,6 +20,7 @@
flag {
namespace: "camera_platform"
name: "feature_combination_query"
+ is_exported: true
description: "Query feature combination support and session specific characteristics"
bug: "309627704"
}
@@ -39,6 +42,7 @@
flag {
namespace: "camera_platform"
name: "camera_manual_flash_strength_control"
+ is_exported: true
description: "Flash brightness level control in manual flash mode"
bug: "238348881"
}
@@ -74,6 +78,7 @@
flag {
namespace: "camera_platform"
name: "camera_ae_mode_low_light_boost"
+ is_exported: true
description: "An AE mode that enables increased brightening in low light scenes"
bug: "312803148"
}
@@ -95,6 +100,7 @@
flag {
namespace: "camera_platform"
name: "camera_extensions_characteristics_get"
+ is_exported: true
description: "Enable get extension specific camera characteristics API"
bug: "280649914"
}
@@ -116,6 +122,7 @@
flag {
namespace: "camera_platform"
name: "camera_device_setup"
+ is_exported: true
description: "Create an intermediate Camera Device class for limited CameraDevice access."
bug: "320741775"
}
@@ -123,6 +130,7 @@
flag {
namespace: "camera_platform"
name: "camera_privacy_allowlist"
+ is_exported: true
description: "Allowlisting to exempt safety-relevant cameras from privacy control for automotive devices"
bug: "282814430"
}
@@ -130,6 +138,25 @@
flag {
namespace: "camera_platform"
name: "extension_10_bit"
+ is_exported: true
description: "Enables 10-bit support in the camera extensions."
bug: "316375635"
}
+
+flag {
+ namespace: "camera_platform"
+ name: "surface_leak_fix"
+ description: "Address Surface release leaks in CaptureRequest"
+ bug: "324071855"
+ metadata {
+ purpose: PURPOSE_BUGFIX
+ }
+}
+
+flag {
+ namespace: "camera_platform"
+ name: "concert_mode_api"
+ description: "Covers the eyes free videography public facing API"
+ bug: "297083874"
+}
+
diff --git a/camera/ndk/NdkCameraMetadata.cpp b/camera/ndk/NdkCameraMetadata.cpp
index 7d3a53e..a2dfaee 100644
--- a/camera/ndk/NdkCameraMetadata.cpp
+++ b/camera/ndk/NdkCameraMetadata.cpp
@@ -121,6 +121,18 @@
}
EXPORT
+camera_status_t ACameraMetadata_getTagFromName(
+ const ACameraMetadata* acm, const char* name, uint32_t* tag) {
+ ATRACE_CALL();
+ if (acm == nullptr || name == nullptr || tag == nullptr) {
+ ALOGE("%s: invalid argument! metadata %p, name %p, tag %p",
+ __FUNCTION__, acm, name, tag);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ return acm->getTagFromName(name, tag);
+}
+
+EXPORT
ACameraMetadata* ACameraMetadata_copy(const ACameraMetadata* src) {
ATRACE_CALL();
if (src == nullptr) {
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index b6b8012..69b30f7 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -18,6 +18,8 @@
#define LOG_TAG "ACameraMetadata"
#include "ACameraMetadata.h"
+
+#include <camera_metadata_hidden.h>
#include <utils/Vector.h>
#include <system/graphics.h>
#include <media/NdkImage.h>
@@ -85,6 +87,19 @@
filterDurations(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS);
}
// TODO: filter request/result keys
+ const CameraMetadata& metadata = *mData;
+ const camera_metadata_t *rawMetadata = metadata.getAndLock();
+ metadata_vendor_id_t vendorTagId = get_camera_metadata_vendor_id(rawMetadata);
+ metadata.unlock(rawMetadata);
+ sp<VendorTagDescriptorCache> vtCache = VendorTagDescriptorCache::getGlobalVendorTagCache();
+ if (vtCache == nullptr) {
+ ALOGE("%s: error vendor tag descriptor cache is not initialized", __FUNCTION__);
+ return;
+ }
+ vtCache->getVendorTagDescriptor(vendorTagId, &mVTags);
+ if (mVTags == nullptr) {
+ ALOGE("%s: error retrieving vendor tag descriptor", __FUNCTION__);
+ }
}
bool
@@ -473,6 +488,13 @@
return (*mData);
}
+camera_status_t
+ACameraMetadata::getTagFromName(const char *name, uint32_t *tag) const {
+ Mutex::Autolock _l(mLock);
+ status_t status = CameraMetadata::getTagFromName(name, mVTags.get(), tag);
+ return status == OK ? ACAMERA_OK : ACAMERA_ERROR_METADATA_NOT_FOUND;
+}
+
bool
ACameraMetadata::isLogicalMultiCamera(size_t* count, const char*const** physicalCameraIds) const {
if (mType != ACM_CHARACTERISTICS) {
diff --git a/camera/ndk/impl/ACameraMetadata.h b/camera/ndk/impl/ACameraMetadata.h
index 084a60b..e89e620 100644
--- a/camera/ndk/impl/ACameraMetadata.h
+++ b/camera/ndk/impl/ACameraMetadata.h
@@ -27,9 +27,17 @@
#ifdef __ANDROID_VNDK__
#include <CameraMetadata.h>
+#include <aidl/android/frameworks/cameraservice/common/VendorTag.h>
+#include <aidl/android/frameworks/cameraservice/common/VendorTagSection.h>
+#include <aidl/android/frameworks/cameraservice/common/ProviderIdAndVendorTagSections.h>
+#include <VendorTagDescriptor.h>
using CameraMetadata = android::hardware::camera::common::V1_0::helper::CameraMetadata;
+using ::aidl::android::frameworks::cameraservice::common::ProviderIdAndVendorTagSections;
+using ::android::hardware::camera::common::V1_0::helper::VendorTagDescriptor;
+using ::android::hardware::camera::common::V1_0::helper::VendorTagDescriptorCache;
#else
#include <camera/CameraMetadata.h>
+#include <camera/VendorTagDescriptor.h>
#endif
#include <camera/NdkCameraMetadata.h>
@@ -73,6 +81,8 @@
camera_status_t getTags(/*out*/int32_t* numTags,
/*out*/const uint32_t** tags) const;
+ camera_status_t getTagFromName(const char *name, uint32_t *tag) const;
const CameraMetadata& getInternalData() const;
bool isLogicalMultiCamera(size_t* count, const char* const** physicalCameraIds) const;
@@ -134,6 +144,7 @@
std::vector<const char*> mStaticPhysicalCameraIds;
std::vector<String8> mStaticPhysicalCameraIdValues;
+ sp<VendorTagDescriptor> mVTags = nullptr;
};
#endif // _ACAMERA_METADATA_H
diff --git a/camera/ndk/include/camera/NdkCameraMetadata.h b/camera/ndk/include/camera/NdkCameraMetadata.h
index cf29736..237d07b 100644
--- a/camera/ndk/include/camera/NdkCameraMetadata.h
+++ b/camera/ndk/include/camera/NdkCameraMetadata.h
@@ -221,6 +221,24 @@
/*out*/int32_t* numEntries, /*out*/const uint32_t** tags) __INTRODUCED_IN(24);
/**
+ * Look up the tag ID of a device-specific custom tag by name. The name and
+ * type of such a tag must be discovered from some other source, such as the
+ * device manufacturer. The returned ID is stable for the lifetime of the
+ * application, but should be queried again after a process restart. This
+ * method can also look up public tags that exist in the Java API by name,
+ * but for tags already defined in the NDK it is simpler and faster to use
+ * their enum values directly.
+ *
+ * @param metadata The {@link ACameraMetadata} to query the tag value from.
+ * @param name The name of the tag being queried.
+ * @param tag The output tag assigned by this method.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if metadata, name, or tag is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_METADATA_NOT_FOUND} if no tag matching the given name exists.</li></ul>
+ */
+camera_status_t ACameraMetadata_getTagFromName(
+ const ACameraMetadata* metadata, const char *name, uint32_t *tag) __INTRODUCED_IN(35);
+
+/**
* Create a copy of input {@link ACameraMetadata}.
*
* <p>The returned ACameraMetadata must be freed by the application by {@link ACameraMetadata_free}
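For reference, a minimal sketch of how the new entry point might be used from
application code; the vendor tag name below is hypothetical and would come from
the device manufacturer's documentation:

    #include <camera/NdkCameraManager.h>
    #include <camera/NdkCameraMetadata.h>

    // Sketch: resolve a vendor tag by name, then read its entry.
    // "com.vendor.example.mode" is a hypothetical tag name.
    static void readVendorTag(ACameraManager* manager, const char* cameraId) {
        ACameraMetadata* chars = nullptr;
        if (ACameraManager_getCameraCharacteristics(manager, cameraId, &chars) != ACAMERA_OK) {
            return;
        }
        uint32_t tag = 0;
        // Returns ACAMERA_ERROR_METADATA_NOT_FOUND if the name is unknown.
        if (ACameraMetadata_getTagFromName(chars, "com.vendor.example.mode", &tag) == ACAMERA_OK) {
            ACameraMetadata_const_entry entry;
            if (ACameraMetadata_getConstEntry(chars, tag, &entry) == ACAMERA_OK) {
                // entry.count / entry.data are interpreted per the vendor-documented type.
            }
        }
        ACameraMetadata_free(chars);
    }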
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 1ed17a3..94599d3 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -2149,7 +2149,7 @@
* </ul>
*/
ACAMERA_CONTROL_SETTINGS_OVERRIDE = // int32 (acamera_metadata_enum_android_control_settings_override_t)
- ACAMERA_CONTROL_START + 49,
+ ACAMERA_CONTROL_START + 52,
/**
* <p>List of available settings overrides supported by the camera device that can
* be used to speed up certain controls.</p>
@@ -2175,7 +2175,7 @@
* @see ACAMERA_CONTROL_SETTINGS_OVERRIDE
*/
ACAMERA_CONTROL_AVAILABLE_SETTINGS_OVERRIDES = // int32[n]
- ACAMERA_CONTROL_START + 50,
+ ACAMERA_CONTROL_START + 53,
/**
* <p>Automatic crop, pan and zoom to keep objects in the center of the frame.</p>
*
@@ -2202,7 +2202,7 @@
* @see ACAMERA_SCALER_CROP_REGION
*/
ACAMERA_CONTROL_AUTOFRAMING = // byte (acamera_metadata_enum_android_control_autoframing_t)
- ACAMERA_CONTROL_START + 52,
+ ACAMERA_CONTROL_START + 55,
/**
* <p>Whether the camera device supports ACAMERA_CONTROL_AUTOFRAMING.</p>
*
@@ -2218,7 +2218,7 @@
* <p>Will be <code>false</code> if auto-framing is not available.</p>
*/
ACAMERA_CONTROL_AUTOFRAMING_AVAILABLE = // byte (acamera_metadata_enum_android_control_autoframing_available_t)
- ACAMERA_CONTROL_START + 53,
+ ACAMERA_CONTROL_START + 56,
/**
* <p>Current state of auto-framing.</p>
*
@@ -2245,7 +2245,7 @@
* @see ACAMERA_CONTROL_AUTOFRAMING_AVAILABLE
*/
ACAMERA_CONTROL_AUTOFRAMING_STATE = // byte (acamera_metadata_enum_android_control_autoframing_state_t)
- ACAMERA_CONTROL_START + 54,
+ ACAMERA_CONTROL_START + 57,
/**
* <p>The operating luminance range of low light boost measured in lux (lx).</p>
*
@@ -2258,7 +2258,7 @@
*
*/
ACAMERA_CONTROL_LOW_LIGHT_BOOST_INFO_LUMINANCE_RANGE = // float[2]
- ACAMERA_CONTROL_START + 55,
+ ACAMERA_CONTROL_START + 58,
/**
* <p>Current state of the low light boost AE mode.</p>
*
@@ -2278,7 +2278,7 @@
* 'ON_LOW_LIGHT_BOOST_BRIGHTNESS_PRIORITY'.</p>
*/
ACAMERA_CONTROL_LOW_LIGHT_BOOST_STATE = // byte (acamera_metadata_enum_android_control_low_light_boost_state_t)
- ACAMERA_CONTROL_START + 56,
+ ACAMERA_CONTROL_START + 59,
ACAMERA_CONTROL_END,
/**
@@ -4671,7 +4671,7 @@
* application should leave stream use cases within the session as DEFAULT.</p>
*/
ACAMERA_SCALER_AVAILABLE_STREAM_USE_CASES = // int64[n] (acamera_metadata_enum_android_scaler_available_stream_use_cases_t)
- ACAMERA_SCALER_START + 25,
+ ACAMERA_SCALER_START + 26,
/**
* <p>The region of the sensor that corresponds to the RAW read out for this
* capture when the stream use case of a RAW stream is set to CROPPED_RAW.</p>
@@ -4727,7 +4727,7 @@
* @see ACAMERA_STATISTICS_HOT_PIXEL_MAP
*/
ACAMERA_SCALER_RAW_CROP_REGION = // int32[4]
- ACAMERA_SCALER_START + 26,
+ ACAMERA_SCALER_START + 27,
ACAMERA_SCALER_END,
/**
diff --git a/camera/ndk/libcamera2ndk.map.txt b/camera/ndk/libcamera2ndk.map.txt
index 4c54658..7d7868b 100644
--- a/camera/ndk/libcamera2ndk.map.txt
+++ b/camera/ndk/libcamera2ndk.map.txt
@@ -35,6 +35,7 @@
ACameraMetadata_copy;
ACameraMetadata_free;
ACameraMetadata_getAllTags;
+ ACameraMetadata_getTagFromName; # introduced=35
ACameraMetadata_getConstEntry;
ACameraMetadata_isLogicalMultiCamera; # introduced=29
ACameraMetadata_fromCameraMetadata; # introduced=30
diff --git a/media/Android.mk b/media/Android.mk
deleted file mode 100644
index 220a358..0000000
--- a/media/Android.mk
+++ /dev/null
@@ -1,5 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-$(eval $(call declare-1p-copy-files,frameworks/av/media/libeffects,audio_effects.conf))
-$(eval $(call declare-1p-copy-files,frameworks/av/media/libeffects,audio_effects.xml))
-$(eval $(call declare-1p-copy-files,frameworks/av/media/libstagefright,))
diff --git a/media/aconfig/Android.bp b/media/aconfig/Android.bp
index e0d1fa9..16beb28 100644
--- a/media/aconfig/Android.bp
+++ b/media/aconfig/Android.bp
@@ -43,6 +43,7 @@
name: "android.media.codec-aconfig-cc",
min_sdk_version: "30",
vendor_available: true,
+ double_loadable: true,
apex_available: [
"//apex_available:platform",
"com.android.media.swcodec",
diff --git a/media/aconfig/codec_fwk.aconfig b/media/aconfig/codec_fwk.aconfig
index 3092091..a2b6a82 100644
--- a/media/aconfig/codec_fwk.aconfig
+++ b/media/aconfig/codec_fwk.aconfig
@@ -14,6 +14,7 @@
flag {
name: "dynamic_color_aspects"
+ is_exported: true
namespace: "codec_fwk"
description: "Feature flag for dynamic color aspect support"
bug: "297914560"
@@ -21,6 +22,7 @@
flag {
name: "hlg_editing"
+ is_exported: true
namespace: "codec_fwk"
description: "Feature flag for HLG editing support"
bug: "316397061"
@@ -28,6 +30,7 @@
flag {
name: "in_process_sw_audio_codec"
+ is_exported: true
namespace: "codec_fwk"
description: "Feature flag for in-process software audio codec API"
bug: "297922713"
@@ -48,7 +51,15 @@
}
flag {
+ name: "native_capabilites"
+ namespace: "codec_fwk"
+ description: "Feature flag for native codec capabilities"
+ bug: "306023029"
+}
+
+flag {
name: "null_output_surface"
+ is_exported: true
namespace: "codec_fwk"
description: "Feature flag for null output Surface API"
bug: "297920102"
@@ -63,6 +74,7 @@
flag {
name: "region_of_interest"
+ is_exported: true
namespace: "codec_fwk"
description: "Feature flag for region of interest API"
bug: "299191092"
@@ -74,3 +86,10 @@
description: "Feature flag for region of interest support"
bug: "325549730"
}
+
+flag {
+ name: "teamfood"
+ namespace: "codec_fwk"
+ description: "Feature flag to track teamfood population"
+ bug: "328770262"
+}
diff --git a/media/aconfig/mediacodec_flags.aconfig b/media/aconfig/mediacodec_flags.aconfig
index 4d1e5ca..3cc9a1a 100644
--- a/media/aconfig/mediacodec_flags.aconfig
+++ b/media/aconfig/mediacodec_flags.aconfig
@@ -7,22 +7,23 @@
# ******************************************************************
flag {
- name: "large_audio_frame"
+ name: "aidl_hal"
namespace: "codec_fwk"
- description: "Feature flags for large audio frame support"
- bug: "297219557"
+ description: "Feature flags for enabling AIDL HAL handling"
+ bug: "251850069"
}
flag {
name: "codec_importance"
+ is_exported: true
namespace: "codec_fwk"
description: "Feature flags for media codec importance"
bug: "297929011"
}
flag {
- name: "aidl_hal"
+ name: "large_audio_frame"
namespace: "codec_fwk"
- description: "Feature flags for enabling AIDL HAL handling"
- bug: "251850069"
+ description: "Feature flags for large audio frame support"
+ bug: "297219557"
}
diff --git a/media/audio/aconfig/audio.aconfig b/media/audio/aconfig/audio.aconfig
index cdbadc2..1df5727 100644
--- a/media/audio/aconfig/audio.aconfig
+++ b/media/audio/aconfig/audio.aconfig
@@ -60,6 +60,13 @@
}
flag {
+ name: "spatializer_upmix"
+ namespace: "media_audio"
+ description: "Enable spatializer upmix"
+ bug: "323985367"
+}
+
+flag {
name: "stereo_spatialization"
namespace: "media_audio"
description: "Enable stereo channel mask for spatialization."
diff --git a/media/audio/aconfig/audio_framework.aconfig b/media/audio/aconfig/audio_framework.aconfig
index 4bda863..676733d 100644
--- a/media/audio/aconfig/audio_framework.aconfig
+++ b/media/audio/aconfig/audio_framework.aconfig
@@ -23,6 +23,7 @@
flag {
name: "feature_spatial_audio_headtracking_low_latency"
+ is_exported: true
namespace: "media_audio"
description: "Define feature for low latency headtracking for SA"
bug: "324291076"
@@ -30,6 +31,7 @@
flag {
name: "focus_exclusive_with_recording"
+ is_exported: true
namespace: "media_audio"
description:
"Audio focus GAIN_TRANSIENT_EXCLUSIVE only mutes"
@@ -39,6 +41,7 @@
flag {
name: "foreground_audio_control"
+ is_exported: true
namespace: "media_audio"
description:
"Audio focus gain requires FGS or delegation to "
@@ -49,6 +52,7 @@
# TODO remove
flag {
name: "focus_freeze_test_api"
+ is_exported: true
namespace: "media_audio"
description: "\
AudioManager audio focus test APIs:\
@@ -62,6 +66,7 @@
flag {
name: "loudness_configurator_api"
+ is_exported: true
namespace: "media_audio"
description: "\
Enable the API for providing loudness metadata and CTA-2075 \
@@ -71,7 +76,15 @@
}
flag {
+ name: "mute_background_audio"
+ namespace: "media_audio"
+ description: "mute audio playing in background"
+ bug: "296232417"
+}
+
+flag {
name: "sco_managed_by_audio"
+ is_exported: true
namespace: "media_audio"
description: "\
Enable new implementation of headset profile device connection and\
@@ -81,6 +94,7 @@
flag {
name: "supported_device_types_api"
+ is_exported: true
namespace: "media_audio"
description: "Surface new API method AudioManager.getSupportedDeviceTypes()"
bug: "307537538"
diff --git a/media/audio/aconfig/audiopolicy_framework.aconfig b/media/audio/aconfig/audiopolicy_framework.aconfig
index 72a1e6c..28b6c7f 100644
--- a/media/audio/aconfig/audiopolicy_framework.aconfig
+++ b/media/audio/aconfig/audiopolicy_framework.aconfig
@@ -24,6 +24,7 @@
flag {
name: "audio_mix_test_api"
+ is_exported: true
namespace: "media_audio"
description: "Enable new Test APIs that provide access to registered AudioMixes on system server and native side."
bug: "309080867"
@@ -32,6 +33,7 @@
flag {
name: "audio_policy_update_mixing_rules_api"
+ is_exported: true
namespace: "media_audio"
description: "Enable AudioPolicy.updateMixingRules API for hot-swapping audio mixing rules."
bug: "293874525"
@@ -39,6 +41,7 @@
flag {
name: "enable_fade_manager_configuration"
+ is_exported: true
namespace: "media_audio"
description: "Enable Fade Manager Configuration support to determine fade properties"
bug: "307354764"
diff --git a/media/audio/aconfig/midi_flags.aconfig b/media/audio/aconfig/midi_flags.aconfig
index efb643f..1620e1b 100644
--- a/media/audio/aconfig/midi_flags.aconfig
+++ b/media/audio/aconfig/midi_flags.aconfig
@@ -8,6 +8,7 @@
flag {
name: "virtual_ump"
+ is_exported: true
namespace: "media_audio"
description: "Enable virtual UMP MIDI."
bug: "291115176"
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index c7a1bfd..55847f4 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -169,6 +169,11 @@
"%s: AudioSystem already has an AudioFlinger instance!", __func__);
const auto aps = sp<AudioPolicyService>::make();
ALOGD("%s: AudioPolicy created", __func__);
+ ALOGW_IF(AudioSystem::setLocalAudioPolicyService(aps) != OK,
+ "%s: AudioSystem already has an AudioPolicyService instance!", __func__);
+
+ // Start initialization of internally managed audio objects such as Device Effects.
+ aps->onAudioSystemReady();
// Add AudioFlinger and AudioPolicy to ServiceManager.
sp<IServiceManager> sm = defaultServiceManager();
diff --git a/media/codec2/hal/aidl/Component.cpp b/media/codec2/hal/aidl/Component.cpp
index ba5f8d4..87c9d87 100644
--- a/media/codec2/hal/aidl/Component.cpp
+++ b/media/codec2/hal/aidl/Component.cpp
@@ -205,30 +205,7 @@
mDeathContext(nullptr) {
// Retrieve supported parameters from store
// TODO: We could cache this per component/interface type
- if (MultiAccessUnitHelper::isEnabledOnPlatform()) {
- c2_status_t err = C2_OK;
- C2ComponentDomainSetting domain;
- std::vector<std::unique_ptr<C2Param>> heapParams;
- err = component->intf()->query_vb({&domain}, {}, C2_MAY_BLOCK, &heapParams);
- if (err == C2_OK && (domain.value == C2Component::DOMAIN_AUDIO)) {
- std::vector<std::shared_ptr<C2ParamDescriptor>> params;
- bool isComponentSupportsLargeAudioFrame = false;
- component->intf()->querySupportedParams_nb(&params);
- for (const auto &paramDesc : params) {
- if (paramDesc->name().compare(C2_PARAMKEY_OUTPUT_LARGE_FRAME) == 0) {
- isComponentSupportsLargeAudioFrame = true;
- LOG(VERBOSE) << "Underlying component supports large frame audio";
- break;
- }
- }
- if (!isComponentSupportsLargeAudioFrame) {
- mMultiAccessUnitIntf = std::make_shared<MultiAccessUnitInterface>(
- component->intf(),
- std::static_pointer_cast<C2ReflectorHelper>(
- ::android::GetCodec2PlatformComponentStore()->getParamReflector()));
- }
- }
- }
+ mMultiAccessUnitIntf = store->tryCreateMultiAccessUnitInterface(component->intf());
mInterface = SharedRefBase::make<ComponentInterface>(
component->intf(), mMultiAccessUnitIntf, store->getParameterCache());
mInit = mInterface->status();
@@ -510,7 +487,19 @@
if (__builtin_available(android __ANDROID_API_T__, *)) {
std::shared_ptr<C2Component::Listener> c2listener;
if (mMultiAccessUnitIntf) {
- mMultiAccessUnitHelper = std::make_shared<MultiAccessUnitHelper>(mMultiAccessUnitIntf);
+ std::shared_ptr<C2Allocator> allocator;
+ std::shared_ptr<C2BlockPool> linearPool;
+ std::shared_ptr<C2AllocatorStore> store = ::android::GetCodec2PlatformAllocatorStore();
+ if (store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &allocator) == C2_OK) {
+ ::android::C2PlatformAllocatorDesc desc;
+ desc.allocatorId = allocator->getId();
+ if (C2_OK == CreateCodec2BlockPool(desc, mComponent, &linearPool)) {
+ if (linearPool) {
+ mMultiAccessUnitHelper = std::make_shared<MultiAccessUnitHelper>(
+ mMultiAccessUnitIntf, linearPool);
+ }
+ }
+ }
}
c2listener = mMultiAccessUnitHelper ?
std::make_shared<MultiAccessUnitListener>(self, mMultiAccessUnitHelper) :
diff --git a/media/codec2/hal/aidl/ComponentInterface.cpp b/media/codec2/hal/aidl/ComponentInterface.cpp
index 1f0534d..8ae9fa8 100644
--- a/media/codec2/hal/aidl/ComponentInterface.cpp
+++ b/media/codec2/hal/aidl/ComponentInterface.cpp
@@ -131,9 +131,35 @@
virtual c2_status_t querySupportedValues(
std::vector<C2FieldSupportedValuesQuery>& fields,
c2_blocking_t mayBlock) const override {
- c2_status_t err = mIntf->querySupportedValues_vb(fields, mayBlock);
- if (mMultiAccessUnitIntf != nullptr) {
- err = mMultiAccessUnitIntf->querySupportedValues(fields, mayBlock);
+ if (mMultiAccessUnitIntf == nullptr) {
+ return mIntf->querySupportedValues_vb(fields, mayBlock);
+ }
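+ // Partition the queries: fields owned by the large-audio-frame helper go
+ // into queryArray[0], everything else into queryArray[1]; queryMap records
+ // where each original field landed so results can be merged back in order.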
+ std::vector<C2FieldSupportedValuesQuery> dup = fields;
+ std::vector<C2FieldSupportedValuesQuery> queryArray[2];
+ std::map<C2ParamField, std::pair<uint32_t, size_t>> queryMap;
+ c2_status_t err = C2_OK;
+ for (size_t i = 0; i < fields.size(); i++) {
+ const C2ParamField &field = fields[i].field();
+ uint32_t queryArrayIdx = 1;
+ if (mMultiAccessUnitIntf->isValidField(fields[i].field())) {
+ queryArrayIdx = 0;
+ }
+ queryMap[field] = std::make_pair(
+ queryArrayIdx, queryArray[queryArrayIdx].size());
+ queryArray[queryArrayIdx].push_back(fields[i]);
+ }
+ if (queryArray[0].size() > 0) {
+ err = mMultiAccessUnitIntf->querySupportedValues(queryArray[0], mayBlock);
+ }
+ if (queryArray[1].size() > 0) {
+ err = mIntf->querySupportedValues_vb(queryArray[1], mayBlock);
+ }
+ for (size_t i = 0; i < dup.size(); i++) {
+ auto it = queryMap.find(dup[i].field());
+ if (it != queryMap.end()) {
+ std::pair<uint32_t, size_t> queryid = it->second;
+ fields[i] = queryArray[queryid.first][queryid.second];
+ }
}
return err;
}
diff --git a/media/codec2/hal/aidl/ComponentStore.cpp b/media/codec2/hal/aidl/ComponentStore.cpp
index ef49308..b95c09e 100644
--- a/media/codec2/hal/aidl/ComponentStore.cpp
+++ b/media/codec2/hal/aidl/ComponentStore.cpp
@@ -199,6 +199,36 @@
}
#endif
+std::shared_ptr<MultiAccessUnitInterface> ComponentStore::tryCreateMultiAccessUnitInterface(
+ const std::shared_ptr<C2ComponentInterface> &c2interface) {
+ std::shared_ptr<MultiAccessUnitInterface> multiAccessUnitIntf = nullptr;
+ if (c2interface == nullptr) {
+ return nullptr;
+ }
+ if (MultiAccessUnitHelper::isEnabledOnPlatform()) {
+ c2_status_t err = C2_OK;
+ C2ComponentDomainSetting domain;
+ std::vector<std::unique_ptr<C2Param>> heapParams;
+ err = c2interface->query_vb({&domain}, {}, C2_MAY_BLOCK, &heapParams);
+ if (err == C2_OK && (domain.value == C2Component::DOMAIN_AUDIO)) {
+ std::vector<std::shared_ptr<C2ParamDescriptor>> params;
+ bool isComponentSupportsLargeAudioFrame = false;
+ c2interface->querySupportedParams_nb(&params);
+ for (const auto &paramDesc : params) {
+ if (paramDesc->name().compare(C2_PARAMKEY_OUTPUT_LARGE_FRAME) == 0) {
+ isComponentSupportsLargeAudioFrame = true;
+ break;
+ }
+ }
+ if (!isComponentSupportsLargeAudioFrame) {
+ multiAccessUnitIntf = std::make_shared<MultiAccessUnitInterface>(
+ c2interface, std::static_pointer_cast<C2ReflectorHelper>(mParamReflector));
+ }
+ }
+ }
+ return multiAccessUnitIntf;
+}
+
// Methods from ::aidl::android::hardware::media::c2::IComponentStore
ScopedAStatus ComponentStore::createComponent(
const std::string& name,
@@ -258,7 +288,10 @@
c2interface = GetFilterWrapper()->maybeWrapInterface(c2interface);
#endif
onInterfaceLoaded(c2interface);
- *intf = SharedRefBase::make<ComponentInterface>(c2interface, mParameterCache);
+ std::shared_ptr<MultiAccessUnitInterface> multiAccessUnitIntf =
+ tryCreateMultiAccessUnitInterface(c2interface);
+ *intf = SharedRefBase::make<ComponentInterface>(
+ c2interface, multiAccessUnitIntf, mParameterCache);
return ScopedAStatus::ok();
}
return ScopedAStatus::fromServiceSpecificError(res);
diff --git a/media/codec2/hal/aidl/include/codec2/aidl/ComponentStore.h b/media/codec2/hal/aidl/include/codec2/aidl/ComponentStore.h
index 0698b0f..746e1bf 100644
--- a/media/codec2/hal/aidl/include/codec2/aidl/ComponentStore.h
+++ b/media/codec2/hal/aidl/include/codec2/aidl/ComponentStore.h
@@ -75,6 +75,9 @@
static std::shared_ptr<::android::FilterWrapper> GetFilterWrapper();
+ std::shared_ptr<MultiAccessUnitInterface> tryCreateMultiAccessUnitInterface(
+ const std::shared_ptr<C2ComponentInterface> &c2interface);
+
// Methods from ::aidl::android::hardware::media::c2::IComponentStore.
virtual ::ndk::ScopedAStatus createComponent(
const std::string& name,
diff --git a/media/codec2/hal/common/MultiAccessUnitHelper.cpp b/media/codec2/hal/common/MultiAccessUnitHelper.cpp
index 9221a24..8086ef2 100644
--- a/media/codec2/hal/common/MultiAccessUnitHelper.cpp
+++ b/media/codec2/hal/common/MultiAccessUnitHelper.cpp
@@ -73,12 +73,17 @@
for (std::shared_ptr<C2ParamDescriptor> &desc : supportedParams) {
mSupportedParamIndexSet.insert(desc->index());
}
+ mParamFields.emplace_back(mLargeFrameParams.get(), &(mLargeFrameParams.get()->maxSize));
+ mParamFields.emplace_back(mLargeFrameParams.get(), &(mLargeFrameParams.get()->thresholdSize));
if (mC2ComponentIntf) {
c2_status_t err = mC2ComponentIntf->query_vb({&mKind}, {}, C2_MAY_BLOCK, nullptr);
}
}
+bool MultiAccessUnitInterface::isValidField(const C2ParamField &field) const {
+ return (std::find(mParamFields.begin(), mParamFields.end(), field) != mParamFields.end());
+}
bool MultiAccessUnitInterface::isParamSupported(C2Param::Index index) {
return (mSupportedParamIndexSet.count(index) != 0);
}
@@ -91,18 +96,23 @@
return (C2Component::kind_t)(mKind.value);
}
-void MultiAccessUnitInterface::getDecoderSampleRateAndChannelCount(
- uint32_t &sampleRate_, uint32_t &channelCount_) const {
+bool MultiAccessUnitInterface::getDecoderSampleRateAndChannelCount(
+ uint32_t * const sampleRate_, uint32_t * const channelCount_) const {
+ if (sampleRate_ == nullptr || channelCount_ == nullptr) {
+ return false;
+ }
if (mC2ComponentIntf) {
C2StreamSampleRateInfo::output sampleRate;
C2StreamChannelCountInfo::output channelCount;
c2_status_t res = mC2ComponentIntf->query_vb(
{&sampleRate, &channelCount}, {}, C2_MAY_BLOCK, nullptr);
- if (res == C2_OK) {
- sampleRate_ = sampleRate.value;
- channelCount_ = channelCount.value;
+ if (res == C2_OK && sampleRate.value > 0 && channelCount.value > 0) {
+ *sampleRate_ = sampleRate.value;
+ *channelCount_ = channelCount.value;
+ return true;
}
}
+ return false;
}
//C2MultiAccessUnitBuffer
@@ -116,12 +126,12 @@
//MultiAccessUnitHelper
MultiAccessUnitHelper::MultiAccessUnitHelper(
- const std::shared_ptr<MultiAccessUnitInterface>& intf):
+ const std::shared_ptr<MultiAccessUnitInterface>& intf,
+ std::shared_ptr<C2BlockPool>& linearPool):
mInit(false),
- mInterface(intf) {
- std::shared_ptr<C2AllocatorStore> store = GetCodec2PlatformAllocatorStore();
- if(store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &mLinearAllocator) == C2_OK) {
- mLinearPool = std::make_shared<C2PooledBlockPool>(mLinearAllocator, ++mBlockPoolId);
+ mInterface(intf),
+ mLinearPool(linearPool) {
+ if (mLinearPool) {
mInit = true;
}
}
@@ -159,6 +169,7 @@
std::list<std::unique_ptr<C2Work>> * const worklist) {
if (worklist == nullptr) {
LOG(ERROR) << "Provided null worklist for error()";
+ mFrameHolder.clear();
return C2_OK;
}
std::unique_lock<std::mutex> l(mLock);
@@ -267,6 +278,7 @@
LOG(ERROR) << "ERROR: Work has Large frame info but has no linear blocks.";
return C2_CORRUPTED;
}
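+ // Hold a reference to the incoming buffers so they remain valid until the
+ // corresponding output work is delivered.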
+ frameInfo.mInputC2Ref = inBuffers;
const std::vector<C2ConstLinearBlock>& multiAU =
inBuffers.front()->data().linearBlocks();
std::shared_ptr<const C2AccessUnitInfos::input> auInfo =
@@ -315,26 +327,10 @@
}
}
if (!processedWork->empty()) {
- {
- C2LargeFrame::output multiAccessParams = mInterface->getLargeFrameParam();
- if (mInterface->kind() == C2Component::KIND_DECODER) {
- uint32_t sampleRate = 0;
- uint32_t channelCount = 0;
- uint32_t frameSize = 0;
- mInterface->getDecoderSampleRateAndChannelCount(
- sampleRate, channelCount);
- if (sampleRate > 0 && channelCount > 0) {
- frameSize = channelCount * 2;
- multiAccessParams.maxSize =
- (multiAccessParams.maxSize / frameSize) * frameSize;
- multiAccessParams.thresholdSize =
- (multiAccessParams.thresholdSize / frameSize) * frameSize;
- }
- }
- frameInfo.mLargeFrameTuning = multiAccessParams;
- std::lock_guard<std::mutex> l(mLock);
- mFrameHolder.push_back(std::move(frameInfo));
- }
+ C2LargeFrame::output multiAccessParams = mInterface->getLargeFrameParam();
+ frameInfo.mLargeFrameTuning = multiAccessParams;
+ std::lock_guard<std::mutex> l(mLock);
+ mFrameHolder.push_back(std::move(frameInfo));
}
}
return C2_OK;
@@ -501,6 +497,20 @@
frame.reset();
return C2_OK;
}
+ int64_t sampleTimeUs = 0;
+ uint32_t frameSize = 0;
+ uint32_t sampleRate = 0;
+ uint32_t channelCount = 0;
+ if (mInterface->getDecoderSampleRateAndChannelCount(&sampleRate, &channelCount)) {
+ sampleTimeUs = (1000000u) / (sampleRate * channelCount * 2);
+ frameSize = channelCount * 2;
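+ // Round maxSize and thresholdSize down to a whole number of PCM frames
+ // (16-bit samples, so frameSize = channelCount * 2 bytes).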
+ if (mInterface->kind() == C2Component::KIND_DECODER) {
+ frame.mLargeFrameTuning.maxSize =
+ (frame.mLargeFrameTuning.maxSize / frameSize) * frameSize;
+ frame.mLargeFrameTuning.thresholdSize =
+ (frame.mLargeFrameTuning.thresholdSize / frameSize) * frameSize;
+ }
+ }
c2_status_t c2ret = allocateWork(frame, true);
if (c2ret != C2_OK) {
return c2ret;
@@ -515,15 +525,7 @@
outputFramedata.infoBuffers.insert(outputFramedata.infoBuffers.begin(),
(*worklet)->output.infoBuffers.begin(),
(*worklet)->output.infoBuffers.end());
- int64_t sampleTimeUs = 0;
- uint32_t frameSize = 0;
- uint32_t sampleRate = 0;
- uint32_t channelCount = 0;
- mInterface->getDecoderSampleRateAndChannelCount(sampleRate, channelCount);
- if (sampleRate > 0 && channelCount > 0) {
- sampleTimeUs = (1000000u) / (sampleRate * channelCount * 2);
- frameSize = channelCount * 2;
- }
+
LOG(DEBUG) << "maxOutSize " << frame.mLargeFrameTuning.maxSize
<< " threshold " << frame.mLargeFrameTuning.thresholdSize;
if ((*worklet)->output.buffers.size() > 0) {
diff --git a/media/codec2/hal/common/include/codec2/common/MultiAccessUnitHelper.h b/media/codec2/hal/common/include/codec2/common/MultiAccessUnitHelper.h
index ef5cff9..bb4464c 100644
--- a/media/codec2/hal/common/include/codec2/common/MultiAccessUnitHelper.h
+++ b/media/codec2/hal/common/include/codec2/common/MultiAccessUnitHelper.h
@@ -41,14 +41,16 @@
bool isParamSupported(C2Param::Index index);
C2LargeFrame::output getLargeFrameParam() const;
C2Component::kind_t kind() const;
+ bool isValidField(const C2ParamField &field) const;
protected:
- void getDecoderSampleRateAndChannelCount(
- uint32_t &sampleRate_, uint32_t &channelCount_) const;
+ bool getDecoderSampleRateAndChannelCount(
+ uint32_t * const sampleRate_, uint32_t * const channelCount_) const;
const std::shared_ptr<C2ComponentInterface> mC2ComponentIntf;
std::shared_ptr<C2LargeFrame::output> mLargeFrameParams;
C2ComponentKindSetting mKind;
std::set<C2Param::Index> mSupportedParamIndexSet;
+ std::vector<C2ParamField> mParamFields;
friend struct MultiAccessUnitHelper;
};
@@ -56,7 +58,8 @@
struct MultiAccessUnitHelper {
public:
MultiAccessUnitHelper(
- const std::shared_ptr<MultiAccessUnitInterface>& intf);
+ const std::shared_ptr<MultiAccessUnitInterface>& intf,
+ std::shared_ptr<C2BlockPool> &linearPool);
virtual ~MultiAccessUnitHelper();
@@ -151,6 +154,11 @@
*/
std::unique_ptr<C2Work> mLargeWork;
+ /*
+ * For holding a reference to the incoming buffer
+ */
+ std::vector<std::shared_ptr<C2Buffer>> mInputC2Ref;
+
MultiAccessUnitInfo(C2WorkOrdinalStruct ordinal):inOrdinal(ordinal) {
}
@@ -195,8 +203,6 @@
C2BlockPool::local_id_t mBlockPoolId;
// C2Blockpool for output buffer allocation
std::shared_ptr<C2BlockPool> mLinearPool;
- // Allocator for output buffer allocation
- std::shared_ptr<C2Allocator> mLinearAllocator;
// FrameIndex for the current outgoing work
std::atomic_uint64_t mFrameIndex;
// Mutex to protect mFrameHolder
diff --git a/media/codec2/hal/hidl/1.0/utils/Component.cpp b/media/codec2/hal/hidl/1.0/utils/Component.cpp
index ebbaafc..0259d90 100644
--- a/media/codec2/hal/hidl/1.0/utils/Component.cpp
+++ b/media/codec2/hal/hidl/1.0/utils/Component.cpp
@@ -259,30 +259,7 @@
mBufferPoolSender{clientPoolManager} {
// Retrieve supported parameters from store
// TODO: We could cache this per component/interface type
- if (MultiAccessUnitHelper::isEnabledOnPlatform()) {
- c2_status_t err = C2_OK;
- C2ComponentDomainSetting domain;
- std::vector<std::unique_ptr<C2Param>> heapParams;
- err = component->intf()->query_vb({&domain}, {}, C2_MAY_BLOCK, &heapParams);
- if (err == C2_OK && (domain.value == C2Component::DOMAIN_AUDIO)) {
- std::vector<std::shared_ptr<C2ParamDescriptor>> params;
- bool isComponentSupportsLargeAudioFrame = false;
- component->intf()->querySupportedParams_nb(&params);
- for (const auto &paramDesc : params) {
- if (paramDesc->name().compare(C2_PARAMKEY_OUTPUT_LARGE_FRAME) == 0) {
- isComponentSupportsLargeAudioFrame = true;
- LOG(VERBOSE) << "Underlying component supports large frame audio";
- break;
- }
- }
- if (!isComponentSupportsLargeAudioFrame) {
- mMultiAccessUnitIntf = std::make_shared<MultiAccessUnitInterface>(
- component->intf(),
- std::static_pointer_cast<C2ReflectorHelper>(
- GetCodec2PlatformComponentStore()->getParamReflector()));
- }
- }
- }
+ mMultiAccessUnitIntf = store->tryCreateMultiAccessUnitInterface(component->intf());
mInterface = new ComponentInterface(
component->intf(), mMultiAccessUnitIntf, store->getParameterCache());
mInit = mInterface->status();
@@ -593,7 +570,19 @@
void Component::initListener(const sp<Component>& self) {
std::shared_ptr<C2Component::Listener> c2listener;
if (mMultiAccessUnitIntf) {
- mMultiAccessUnitHelper = std::make_shared<MultiAccessUnitHelper>(mMultiAccessUnitIntf);
+ std::shared_ptr<C2Allocator> allocator;
+ std::shared_ptr<C2BlockPool> linearPool;
+ std::shared_ptr<C2AllocatorStore> store = ::android::GetCodec2PlatformAllocatorStore();
+ if (store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &allocator) == C2_OK) {
+ ::android::C2PlatformAllocatorDesc desc;
+ desc.allocatorId = allocator->getId();
+ if (C2_OK == CreateCodec2BlockPool(desc, mComponent, &linearPool)) {
+ if (linearPool) {
+ mMultiAccessUnitHelper = std::make_shared<MultiAccessUnitHelper>(
+ mMultiAccessUnitIntf, linearPool);
+ }
+ }
+ }
}
c2listener = mMultiAccessUnitHelper ?
std::make_shared<MultiAccessUnitListener>(self, mMultiAccessUnitHelper) :
diff --git a/media/codec2/hal/hidl/1.0/utils/ComponentInterface.cpp b/media/codec2/hal/hidl/1.0/utils/ComponentInterface.cpp
index 5a5e780..41a8904 100644
--- a/media/codec2/hal/hidl/1.0/utils/ComponentInterface.cpp
+++ b/media/codec2/hal/hidl/1.0/utils/ComponentInterface.cpp
@@ -130,9 +130,35 @@
virtual c2_status_t querySupportedValues(
std::vector<C2FieldSupportedValuesQuery>& fields,
c2_blocking_t mayBlock) const override {
- c2_status_t err = mIntf->querySupportedValues_vb(fields, mayBlock);
- if (mMultiAccessUnitIntf != nullptr) {
- err = mMultiAccessUnitIntf->querySupportedValues(fields, mayBlock);
+ if (mMultiAccessUnitIntf == nullptr) {
+ return mIntf->querySupportedValues_vb(fields, mayBlock);
+ }
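+ // Partition the queries: fields owned by the large-audio-frame helper go
+ // into queryArray[0], everything else into queryArray[1]; queryMap records
+ // where each original field landed so results can be merged back in order.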
+ std::vector<C2FieldSupportedValuesQuery> dup = fields;
+ std::vector<C2FieldSupportedValuesQuery> queryArray[2];
+ std::map<C2ParamField, std::pair<uint32_t, size_t>> queryMap;
+ c2_status_t err = C2_OK;
+ for (size_t i = 0; i < fields.size(); i++) {
+ const C2ParamField &field = fields[i].field();
+ uint32_t queryArrayIdx = 1;
+ if (mMultiAccessUnitIntf->isValidField(field)) {
+ queryArrayIdx = 0;
+ }
+ queryMap[field] = std::make_pair(
+ queryArrayIdx, queryArray[queryArrayIdx].size());
+ queryArray[queryArrayIdx].push_back(fields[i]);
+ }
+ if (queryArray[0].size() > 0) {
+ err = mMultiAccessUnitIntf->querySupportedValues(queryArray[0], mayBlock);
+ }
+ if (queryArray[1].size() > 0) {
+ err = mIntf->querySupportedValues_vb(queryArray[1], mayBlock);
+ }
+ for (size_t i = 0; i < dup.size(); i++) {
+ auto it = queryMap.find(dup[i].field());
+ if (it != queryMap.end()) {
+ std::pair<uint32_t, size_t> queryid = it->second;
+ fields[i] = queryArray[queryid.first][queryid.second];
+ }
}
return err;
}
diff --git a/media/codec2/hal/hidl/1.0/utils/ComponentStore.cpp b/media/codec2/hal/hidl/1.0/utils/ComponentStore.cpp
index 1c0d5b0..988ab6f 100644
--- a/media/codec2/hal/hidl/1.0/utils/ComponentStore.cpp
+++ b/media/codec2/hal/hidl/1.0/utils/ComponentStore.cpp
@@ -194,6 +194,36 @@
}
#endif
+std::shared_ptr<MultiAccessUnitInterface> ComponentStore::tryCreateMultiAccessUnitInterface(
+ const std::shared_ptr<C2ComponentInterface> &c2interface) {
+ std::shared_ptr<MultiAccessUnitInterface> multiAccessUnitIntf = nullptr;
+ if (c2interface == nullptr) {
+ return nullptr;
+ }
+ if (MultiAccessUnitHelper::isEnabledOnPlatform()) {
+ c2_status_t err = C2_OK;
+ C2ComponentDomainSetting domain;
+ std::vector<std::unique_ptr<C2Param>> heapParams;
+ err = c2interface->query_vb({&domain}, {}, C2_MAY_BLOCK, &heapParams);
+ if (err == C2_OK && (domain.value == C2Component::DOMAIN_AUDIO)) {
+ std::vector<std::shared_ptr<C2ParamDescriptor>> params;
+ bool isComponentSupportsLargeAudioFrame = false;
+ c2interface->querySupportedParams_nb(&params);
+ for (const auto &paramDesc : params) {
+ if (paramDesc->name().compare(C2_PARAMKEY_OUTPUT_LARGE_FRAME) == 0) {
+ isComponentSupportsLargeAudioFrame = true;
+ break;
+ }
+ }
+ if (!isComponentSupportsLargeAudioFrame) {
+ multiAccessUnitIntf = std::make_shared<MultiAccessUnitInterface>(
+ c2interface, std::static_pointer_cast<C2ReflectorHelper>(mParamReflector));
+ }
+ }
+ }
+ return multiAccessUnitIntf;
+}
+
// Methods from ::android::hardware::media::c2::V1_0::IComponentStore
Return<void> ComponentStore::createComponent(
const hidl_string& name,
@@ -242,7 +272,9 @@
c2interface = GetFilterWrapper()->maybeWrapInterface(c2interface);
#endif
onInterfaceLoaded(c2interface);
- interface = new ComponentInterface(c2interface, mParameterCache);
+ std::shared_ptr<MultiAccessUnitInterface> multiAccessUnitIntf =
+ tryCreateMultiAccessUnitInterface(c2interface);
+ interface = new ComponentInterface(c2interface, multiAccessUnitIntf, mParameterCache);
}
_hidl_cb(static_cast<Status>(res), interface);
return Void();
diff --git a/media/codec2/hal/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h b/media/codec2/hal/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h
index 27e2a05..b5d85da 100644
--- a/media/codec2/hal/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h
+++ b/media/codec2/hal/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h
@@ -78,6 +78,9 @@
static std::shared_ptr<FilterWrapper> GetFilterWrapper();
+ std::shared_ptr<MultiAccessUnitInterface> tryCreateMultiAccessUnitInterface(
+ const std::shared_ptr<C2ComponentInterface> &c2interface);
+
// Methods from ::android::hardware::media::c2::V1_0::IComponentStore.
virtual Return<void> createComponent(
const hidl_string& name,
diff --git a/media/codec2/hal/hidl/1.0/vts/functional/master/VtsHalMediaC2V1_0TargetMasterTest.cpp b/media/codec2/hal/hidl/1.0/vts/functional/master/VtsHalMediaC2V1_0TargetMasterTest.cpp
index a34cef1..d1f0fb5 100644
--- a/media/codec2/hal/hidl/1.0/vts/functional/master/VtsHalMediaC2V1_0TargetMasterTest.cpp
+++ b/media/codec2/hal/hidl/1.0/vts/functional/master/VtsHalMediaC2V1_0TargetMasterTest.cpp
@@ -84,14 +84,11 @@
}
TEST_P(Codec2MasterHalTest, MustUseAidlBeyond202404) {
- static int sBoardFirstApiLevel = android::base::GetIntProperty("ro.board.first_api_level", 0);
- static int sBoardApiLevel = android::base::GetIntProperty("ro.board.api_level", 0);
- if (sBoardFirstApiLevel < 202404 && sBoardApiLevel < 202404) {
- GTEST_SKIP() << "board first level less than 202404:"
- << " ro.board.first_api_level = " << sBoardFirstApiLevel
- << " ro.board.api_level = " << sBoardApiLevel;
+ static int sVendorApiLevel = android::base::GetIntProperty("ro.vendor.api_level", 0);
+ if (sVendorApiLevel < 202404) {
+ GTEST_SKIP() << "vendor api level less than 202404: " << sVendorApiLevel;
}
- ALOGV("HidlCodecAllowed Test");
+ ALOGV("MustUseAidlBeyond202404 Test");
EXPECT_NE(mClient->getAidlBase(), nullptr) << "android.hardware.media.c2 MUST use AIDL "
<< "for chipsets launching at 202404 or above";
diff --git a/media/codec2/hal/hidl/1.1/utils/Component.cpp b/media/codec2/hal/hidl/1.1/utils/Component.cpp
index 5073983..d34d84e 100644
--- a/media/codec2/hal/hidl/1.1/utils/Component.cpp
+++ b/media/codec2/hal/hidl/1.1/utils/Component.cpp
@@ -263,30 +263,7 @@
mBufferPoolSender{clientPoolManager} {
// Retrieve supported parameters from store
// TODO: We could cache this per component/interface type
- if (MultiAccessUnitHelper::isEnabledOnPlatform()) {
- c2_status_t err = C2_OK;
- C2ComponentDomainSetting domain;
- std::vector<std::unique_ptr<C2Param>> heapParams;
- err = component->intf()->query_vb({&domain}, {}, C2_MAY_BLOCK, &heapParams);
- if (err == C2_OK && (domain.value == C2Component::DOMAIN_AUDIO)) {
- std::vector<std::shared_ptr<C2ParamDescriptor>> params;
- bool isComponentSupportsLargeAudioFrame = false;
- component->intf()->querySupportedParams_nb(&params);
- for (const auto &paramDesc : params) {
- if (paramDesc->name().compare(C2_PARAMKEY_OUTPUT_LARGE_FRAME) == 0) {
- isComponentSupportsLargeAudioFrame = true;
- LOG(VERBOSE) << "Underlying component supports large frame audio";
- break;
- }
- }
- if (!isComponentSupportsLargeAudioFrame) {
- mMultiAccessUnitIntf = std::make_shared<MultiAccessUnitInterface>(
- component->intf(),
- std::static_pointer_cast<C2ReflectorHelper>(
- GetCodec2PlatformComponentStore()->getParamReflector()));
- }
- }
- }
+ mMultiAccessUnitIntf = store->tryCreateMultiAccessUnitInterface(component->intf());
mInterface = new ComponentInterface(
component->intf(), mMultiAccessUnitIntf, store->getParameterCache());
mInit = mInterface->status();
@@ -606,7 +583,19 @@
void Component::initListener(const sp<Component>& self) {
std::shared_ptr<C2Component::Listener> c2listener;
if (mMultiAccessUnitIntf) {
- mMultiAccessUnitHelper = std::make_shared<MultiAccessUnitHelper>(mMultiAccessUnitIntf);
+ std::shared_ptr<C2Allocator> allocator;
+ std::shared_ptr<C2BlockPool> linearPool;
+ std::shared_ptr<C2AllocatorStore> store = ::android::GetCodec2PlatformAllocatorStore();
+ if (store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &allocator) == C2_OK) {
+ ::android::C2PlatformAllocatorDesc desc;
+ desc.allocatorId = allocator->getId();
+ if (C2_OK == CreateCodec2BlockPool(desc, mComponent, &linearPool)) {
+ if (linearPool) {
+ mMultiAccessUnitHelper = std::make_shared<MultiAccessUnitHelper>(
+ mMultiAccessUnitIntf, linearPool);
+ }
+ }
+ }
}
c2listener = mMultiAccessUnitHelper ?
std::make_shared<MultiAccessUnitListener>(self, mMultiAccessUnitHelper) :
diff --git a/media/codec2/hal/hidl/1.1/utils/ComponentStore.cpp b/media/codec2/hal/hidl/1.1/utils/ComponentStore.cpp
index d47abdd..46af809 100644
--- a/media/codec2/hal/hidl/1.1/utils/ComponentStore.cpp
+++ b/media/codec2/hal/hidl/1.1/utils/ComponentStore.cpp
@@ -194,6 +194,37 @@
}
#endif
+std::shared_ptr<MultiAccessUnitInterface> ComponentStore::tryCreateMultiAccessUnitInterface(
+ const std::shared_ptr<C2ComponentInterface> &c2interface) {
+ std::shared_ptr<MultiAccessUnitInterface> multiAccessUnitIntf = nullptr;
+ if (c2interface == nullptr) {
+ return nullptr;
+ }
+ if (MultiAccessUnitHelper::isEnabledOnPlatform()) {
+ c2_status_t err = C2_OK;
+ C2ComponentDomainSetting domain;
+ std::vector<std::unique_ptr<C2Param>> heapParams;
+ err = c2interface->query_vb({&domain}, {}, C2_MAY_BLOCK, &heapParams);
+ if (err == C2_OK && (domain.value == C2Component::DOMAIN_AUDIO)) {
+ std::vector<std::shared_ptr<C2ParamDescriptor>> params;
+ bool isComponentSupportsLargeAudioFrame = false;
+ c2interface->querySupportedParams_nb(&params);
+ for (const auto &paramDesc : params) {
+ if (paramDesc->name().compare(C2_PARAMKEY_OUTPUT_LARGE_FRAME) == 0) {
+ isComponentSupportsLargeAudioFrame = true;
+ break;
+ }
+ }
+
+ if (!isComponentSupportsLargeAudioFrame) {
+ multiAccessUnitIntf = std::make_shared<MultiAccessUnitInterface>(
+ c2interface, std::static_pointer_cast<C2ReflectorHelper>(mParamReflector));
+ }
+ }
+ }
+ return multiAccessUnitIntf;
+}
+
// Methods from ::android::hardware::media::c2::V1_0::IComponentStore
Return<void> ComponentStore::createComponent(
const hidl_string& name,
@@ -241,7 +272,10 @@
c2interface = GetFilterWrapper()->maybeWrapInterface(c2interface);
#endif
onInterfaceLoaded(c2interface);
- interface = new ComponentInterface(c2interface, mParameterCache);
+ std::shared_ptr<MultiAccessUnitInterface> multiAccessUnitIntf =
+ tryCreateMultiAccessUnitInterface(c2interface);
+ interface = new ComponentInterface(
+ c2interface, multiAccessUnitIntf, mParameterCache);
}
_hidl_cb(static_cast<Status>(res), interface);
return Void();
diff --git a/media/codec2/hal/hidl/1.1/utils/include/codec2/hidl/1.1/ComponentStore.h b/media/codec2/hal/hidl/1.1/utils/include/codec2/hidl/1.1/ComponentStore.h
index f6daee7..85862a9 100644
--- a/media/codec2/hal/hidl/1.1/utils/include/codec2/hidl/1.1/ComponentStore.h
+++ b/media/codec2/hal/hidl/1.1/utils/include/codec2/hidl/1.1/ComponentStore.h
@@ -79,6 +79,9 @@
static std::shared_ptr<FilterWrapper> GetFilterWrapper();
+ std::shared_ptr<MultiAccessUnitInterface> tryCreateMultiAccessUnitInterface(
+ const std::shared_ptr<C2ComponentInterface> &c2interface);
+
// Methods from ::android::hardware::media::c2::V1_0::IComponentStore.
virtual Return<void> createComponent(
const hidl_string& name,
diff --git a/media/codec2/hal/hidl/1.2/utils/Component.cpp b/media/codec2/hal/hidl/1.2/utils/Component.cpp
index bbdbef5..f78e827 100644
--- a/media/codec2/hal/hidl/1.2/utils/Component.cpp
+++ b/media/codec2/hal/hidl/1.2/utils/Component.cpp
@@ -261,30 +261,7 @@
mBufferPoolSender{clientPoolManager} {
// Retrieve supported parameters from store
// TODO: We could cache this per component/interface type
- if (MultiAccessUnitHelper::isEnabledOnPlatform()) {
- c2_status_t err = C2_OK;
- C2ComponentDomainSetting domain;
- std::vector<std::unique_ptr<C2Param>> heapParams;
- err = component->intf()->query_vb({&domain}, {}, C2_MAY_BLOCK, &heapParams);
- if (err == C2_OK && (domain.value == C2Component::DOMAIN_AUDIO)) {
- std::vector<std::shared_ptr<C2ParamDescriptor>> params;
- bool isComponentSupportsLargeAudioFrame = false;
- component->intf()->querySupportedParams_nb(&params);
- for (const auto &paramDesc : params) {
- if (paramDesc->name().compare(C2_PARAMKEY_OUTPUT_LARGE_FRAME) == 0) {
- isComponentSupportsLargeAudioFrame = true;
- LOG(VERBOSE) << "Underlying component supports large frame audio";
- break;
- }
- }
- if (!isComponentSupportsLargeAudioFrame) {
- mMultiAccessUnitIntf = std::make_shared<MultiAccessUnitInterface>(
- component->intf(),
- std::static_pointer_cast<C2ReflectorHelper>(
- GetCodec2PlatformComponentStore()->getParamReflector()));
- }
- }
- }
+ mMultiAccessUnitIntf = store->tryCreateMultiAccessUnitInterface(component->intf());
mInterface = new ComponentInterface(
component->intf(), mMultiAccessUnitIntf, store->getParameterCache());
mInit = mInterface->status();
@@ -633,7 +610,19 @@
void Component::initListener(const sp<Component>& self) {
std::shared_ptr<C2Component::Listener> c2listener;
if (mMultiAccessUnitIntf) {
- mMultiAccessUnitHelper = std::make_shared<MultiAccessUnitHelper>(mMultiAccessUnitIntf);
+ std::shared_ptr<C2Allocator> allocator;
+ std::shared_ptr<C2BlockPool> linearPool;
+ std::shared_ptr<C2AllocatorStore> store = ::android::GetCodec2PlatformAllocatorStore();
+ if (store->fetchAllocator(C2AllocatorStore::DEFAULT_LINEAR, &allocator) == C2_OK) {
+ ::android::C2PlatformAllocatorDesc desc;
+ desc.allocatorId = allocator->getId();
+ if (C2_OK == CreateCodec2BlockPool(desc, mComponent, &linearPool)) {
+ if (linearPool) {
+ mMultiAccessUnitHelper = std::make_shared<MultiAccessUnitHelper>(
+ mMultiAccessUnitIntf, linearPool);
+ }
+ }
+ }
}
c2listener = mMultiAccessUnitHelper ?
std::make_shared<MultiAccessUnitListener>(self, mMultiAccessUnitHelper) :
diff --git a/media/codec2/hal/hidl/1.2/utils/ComponentStore.cpp b/media/codec2/hal/hidl/1.2/utils/ComponentStore.cpp
index 9fac5d5..f89c835 100644
--- a/media/codec2/hal/hidl/1.2/utils/ComponentStore.cpp
+++ b/media/codec2/hal/hidl/1.2/utils/ComponentStore.cpp
@@ -194,6 +194,36 @@
}
#endif
+std::shared_ptr<MultiAccessUnitInterface> ComponentStore::tryCreateMultiAccessUnitInterface(
+ const std::shared_ptr<C2ComponentInterface> &c2interface) {
+ std::shared_ptr<MultiAccessUnitInterface> multiAccessUnitIntf = nullptr;
+ if (c2interface == nullptr) {
+ return nullptr;
+ }
+ if (MultiAccessUnitHelper::isEnabledOnPlatform()) {
+ c2_status_t err = C2_OK;
+ C2ComponentDomainSetting domain;
+ std::vector<std::unique_ptr<C2Param>> heapParams;
+ err = c2interface->query_vb({&domain}, {}, C2_MAY_BLOCK, &heapParams);
+ if (err == C2_OK && (domain.value == C2Component::DOMAIN_AUDIO)) {
+ std::vector<std::shared_ptr<C2ParamDescriptor>> params;
+ bool isComponentSupportsLargeAudioFrame = false;
+ c2interface->querySupportedParams_nb(&params);
+ for (const auto &paramDesc : params) {
+ if (paramDesc->name().compare(C2_PARAMKEY_OUTPUT_LARGE_FRAME) == 0) {
+ isComponentSupportsLargeAudioFrame = true;
+ break;
+ }
+ }
+ if (!isComponentSupportsLargeAudioFrame) {
+ multiAccessUnitIntf = std::make_shared<MultiAccessUnitInterface>(
+ c2interface, std::static_pointer_cast<C2ReflectorHelper>(mParamReflector));
+ }
+ }
+ }
+ return multiAccessUnitIntf;
+}
+
// Methods from ::android::hardware::media::c2::V1_0::IComponentStore
Return<void> ComponentStore::createComponent(
const hidl_string& name,
@@ -241,7 +271,9 @@
c2interface = GetFilterWrapper()->maybeWrapInterface(c2interface);
#endif
onInterfaceLoaded(c2interface);
- interface = new ComponentInterface(c2interface, mParameterCache);
+ std::shared_ptr<MultiAccessUnitInterface> multiAccessUnitIntf =
+ tryCreateMultiAccessUnitInterface(c2interface);
+ interface = new ComponentInterface(c2interface, multiAccessUnitIntf, mParameterCache);
}
_hidl_cb(static_cast<Status>(res), interface);
return Void();
diff --git a/media/codec2/hal/hidl/1.2/utils/include/codec2/hidl/1.2/ComponentStore.h b/media/codec2/hal/hidl/1.2/utils/include/codec2/hidl/1.2/ComponentStore.h
index e95a651..c08fce4 100644
--- a/media/codec2/hal/hidl/1.2/utils/include/codec2/hidl/1.2/ComponentStore.h
+++ b/media/codec2/hal/hidl/1.2/utils/include/codec2/hidl/1.2/ComponentStore.h
@@ -79,6 +79,9 @@
static std::shared_ptr<FilterWrapper> GetFilterWrapper();
+ std::shared_ptr<MultiAccessUnitInterface> tryCreateMultiAccessUnitInterface(
+ const std::shared_ptr<C2ComponentInterface> &c2interface);
+
// Methods from ::android::hardware::media::c2::V1_0::IComponentStore.
virtual Return<void> createComponent(
const hidl_string& name,
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index d867eb1..4de2347 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -45,6 +45,8 @@
static_libs: [
"libSurfaceFlingerProperties",
+ "aconfig_mediacodec_flags_c_lib",
+ "android.media.codec-aconfig-cc",
],
shared_libs: [
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 40656ff..f58dc65 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -552,8 +552,7 @@
}
ssize_t result = -1;
- ssize_t codecDataOffset = 0;
- size_t inBufferOffset = 0;
+ size_t srcOffset = offset;
size_t outBufferSize = 0;
uint32_t cryptoInfoIdx = 0;
int32_t heapSeqNum = getHeapSeqNum(memory);
@@ -565,18 +564,20 @@
for (int i = 0; i < bufferInfos->value.size(); i++) {
if (bufferInfos->value[i].mSize > 0) {
std::unique_ptr<CodecCryptoInfo> info = std::move(cryptoInfos->value[cryptoInfoIdx++]);
+ src.offset = srcOffset;
+ src.size = bufferInfos->value[i].mSize;
result = mCrypto->decrypt(
(uint8_t*)info->mKey,
(uint8_t*)info->mIv,
info->mMode,
info->mPattern,
src,
- inBufferOffset,
+ 0,
info->mSubSamples,
info->mNumSubSamples,
dst,
errorDetailMsg);
- inBufferOffset += bufferInfos->value[i].mSize;
+ srcOffset += bufferInfos->value[i].mSize;
if (result < 0) {
ALOGI("[%s] attachEncryptedBuffers: decrypt failed: result = %zd",
mName, result);
@@ -599,7 +600,7 @@
wView.setOffset(0);
}
std::shared_ptr<C2Buffer> c2Buffer{C2Buffer::CreateLinearBuffer(
- block->share(codecDataOffset, outBufferSize - codecDataOffset, C2Fence{}))};
+ block->share(0, outBufferSize, C2Fence{}))};
if (!buffer->copy(c2Buffer)) {
ALOGI("[%s] attachEncryptedBuffers: buffer copy failed", mName);
return -ENOSYS;
@@ -980,8 +981,7 @@
}
// size of cryptoInfo and accessUnitInfo should be the same?
ssize_t result = -1;
- ssize_t codecDataOffset = 0;
- size_t inBufferOffset = 0;
+ size_t srcOffset = 0;
size_t outBufferSize = 0;
uint32_t cryptoInfoIdx = 0;
{
@@ -994,6 +994,7 @@
encryptedBuffer->getMappedBlock(&mappedBlock);
hardware::drm::V1_0::SharedBuffer source;
encryptedBuffer->fillSourceBuffer(&source);
+ srcOffset = source.offset;
for (int i = 0 ; i < bufferInfos->value.size(); i++) {
if (bufferInfos->value[i].mSize > 0) {
std::unique_ptr<CodecCryptoInfo> info =
@@ -1004,18 +1005,20 @@
// no data so we only populate the bufferInfo
result = 0;
} else {
+ source.offset = srcOffset;
+ source.size = bufferInfos->value[i].mSize;
result = mCrypto->decrypt(
(uint8_t*)info->mKey,
(uint8_t*)info->mIv,
info->mMode,
info->mPattern,
source,
- inBufferOffset,
+ buffer->offset(),
info->mSubSamples,
info->mNumSubSamples,
destination,
errorDetailMsg);
- inBufferOffset += bufferInfos->value[i].mSize;
+ srcOffset += bufferInfos->value[i].mSize;
if (result < 0) {
ALOGI("[%s] decrypt failed: result=%zd", mName, result);
return result;
@@ -1028,7 +1031,7 @@
}
}
}
- buffer->setRange(codecDataOffset, outBufferSize - codecDataOffset);
+ buffer->setRange(0, outBufferSize);
}
return queueInputBufferInternal(buffer, block, bufferSize);
}
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 453a0d2..37a7a4f 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -20,6 +20,9 @@
#include <strings.h>
+#include <com_android_media_codec_flags.h>
+#include <android_media_codec.h>
+
#include <C2Component.h>
#include <C2Config.h>
#include <C2Debug.h>
@@ -752,6 +755,25 @@
}
addSupportedColorFormats(
intf, caps.get(), trait, mediaType, it->second);
+
+ if (com::android::media::codec::flags::provider_->large_audio_frame()
+ && android::media::codec::provider_->large_audio_frame_finish()) {
+ // Adding feature-multiple-frames when C2LargeFrame param is present
+ if (trait.domain == C2Component::DOMAIN_AUDIO) {
+ std::vector<std::shared_ptr<C2ParamDescriptor>> params;
+ c2_status_t err = intf->querySupportedParams(&params);
+ if (err == C2_OK) {
+ for (const auto &paramDesc : params) {
+ if (C2LargeFrame::output::PARAM_TYPE == paramDesc->index()) {
+ std::string featureMultipleFrames =
+ std::string(KEY_FEATURE_) + FEATURE_MultipleFrames;
+ caps->addDetail(featureMultipleFrames.c_str(), 0);
+ break;
+ }
+ }
+ }
+ }
+ }
}
}
}
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index e780f4f..cd7679c 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -278,3 +278,9 @@
mDataQueue->eraseMemory();
}
}
+
+void AudioEndpoint::eraseEmptyDataMemory(int32_t numFrames) {
+ if (mDataQueue != nullptr) {
+ mDataQueue->eraseEmptyMemory(numFrames);
+ }
+}
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index b117572..7e97c6a 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -107,6 +107,8 @@
*/
void eraseDataMemory();
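+ /**
+ * Zero out the unwritten region of the data queue, up to numFrames,
+ * so a free-running DSP does not play stale data after stop().
+ */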
+ void eraseEmptyDataMemory(int32_t numFrames);
+
void freeDataQueue() { mDataQueue.reset(); }
void dump() const;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 7648e25..b2e93f0 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -575,10 +575,20 @@
return AAUDIO_ERROR_INVALID_STATE;
}
+ // For playback, sleep until all the audio data has played.
+ // Then clear the buffer to prevent noise.
+ prepareBuffersForStop();
+
mClockModel.stop(AudioClock::getNanoseconds());
setState(AAUDIO_STREAM_STATE_STOPPING);
mAtomicInternalTimestamp.clear();
+#if 0
+ // Simulate very slow CPU, force race condition where the
+ // DSP keeps playing after we stop writing.
+ AudioClock::sleepForNanos(800 * AAUDIO_NANOS_PER_MILLISECOND);
+#endif
+
result = mServiceInterface.stopStream(mServiceStreamHandleInfo);
if (result == AAUDIO_ERROR_INVALID_HANDLE) {
ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index a5981b1..20d55f9 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -123,6 +123,8 @@
virtual void prepareBuffersForStart() {}
+ virtual void prepareBuffersForStop() {}
+
virtual void advanceClientToMatchServerPosition(int32_t serverMargin) = 0;
virtual void onFlushFromServer() {}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 5d4c3d4..0427777 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -19,6 +19,8 @@
#define ATRACE_TAG ATRACE_TAG_AUDIO
+#include <algorithm>
+
#include <media/MediaMetricsItem.h>
#include <utils/Trace.h>
@@ -108,6 +110,61 @@
mAudioEndpoint->eraseDataMemory();
}
+void AudioStreamInternalPlay::prepareBuffersForStop() {
+ // If this is a shared stream and the FIFO is being read by the mixer then
+ // we don't have to worry about the DSP reading past the valid data. We can skip all this.
+ if (!mAudioEndpoint->isFreeRunning()) {
+ return;
+ }
+ // Sleep until the DSP has read all of the data written.
+ int64_t validFramesInBuffer = getFramesWritten() - getFramesRead();
+ if (validFramesInBuffer >= 0) {
+ int64_t emptyFramesInBuffer = ((int64_t) getBufferCapacity()) - validFramesInBuffer;
+
+ // Prevent stale data from being played if the DSP is still running.
+ // Erase some of the FIFO memory in front of the DSP read cursor.
+ // Subtract one burst so we do not accidentally erase data that the DSP might be using.
+ int64_t framesToErase = std::max((int64_t) 0,
+ emptyFramesInBuffer - getFramesPerBurst());
+ mAudioEndpoint->eraseEmptyDataMemory(framesToErase);
+
+ // Sleep until we are confident the DSP has consumed all of the valid data.
+ // Sleep for one extra burst as a safety margin because the IsochronousClockModel
+ // is not perfectly accurate.
+ int64_t positionInEmptyMemory = getFramesWritten() + getFramesPerBurst();
+ int64_t timeAllConsumed = mClockModel.convertPositionToTime(positionInEmptyMemory);
+ int64_t durationAllConsumed = timeAllConsumed - AudioClock::getNanoseconds();
+ // Prevent sleeping for too long.
+ durationAllConsumed = std::min(200 * AAUDIO_NANOS_PER_MILLISECOND, durationAllConsumed);
+ AudioClock::sleepForNanos(durationAllConsumed);
+ }
+
+ // Erase all of the memory in case the DSP keeps going and wraps around.
+ mAudioEndpoint->eraseDataMemory();
+
+ // Wait for the last buffer to reach the DAC.
+ // This is because the expected behavior of stop() is that all data written to the stream
+ // should be played before the hardware actually shuts down.
+ // This is different from pause(), which ends as soon as possible.
+ // This can be important when, for example, playing a car navigation prompt,
+ // where the user should hear the complete instruction.
+ if (mAtomicInternalTimestamp.isValid()) {
+ // Use timestamps to calculate the latency between the DSP reading
+ // a frame and when it reaches the DAC.
+ // This code assumes that timestamps are accurate.
+ Timestamp timestamp = mAtomicInternalTimestamp.read();
+ int64_t dacPosition = timestamp.getPosition();
+ int64_t hardwareReadTime = mClockModel.convertPositionToTime(dacPosition);
+ int64_t hardwareLatencyNanos = timestamp.getNanoseconds() - hardwareReadTime;
+ ALOGD("%s() hardwareLatencyNanos = %lld", __func__,
+ (long long) hardwareLatencyNanos);
+ // Prevent sleeping for too long.
+ hardwareLatencyNanos = std::min(30 * AAUDIO_NANOS_PER_MILLISECOND,
+ hardwareLatencyNanos);
+ AudioClock::sleepForNanos(hardwareLatencyNanos);
+ }
+}
+
void AudioStreamInternalPlay::advanceClientToMatchServerPosition(int32_t serverMargin) {
int64_t readCounter = mAudioEndpoint->getDataReadCounter() + serverMargin;
int64_t writeCounter = mAudioEndpoint->getDataWriteCounter();
@@ -353,20 +410,26 @@
// Call application using the AAudio callback interface.
callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);
- if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
- // Write audio data to stream. This is a BLOCKING WRITE!
- result = write(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
- if ((result != mCallbackFrames)) {
- if (result >= 0) {
- // Only wrote some of the frames requested. The stream can be disconnected
- // or timed out.
- processCommands();
- result = isDisconnected() ? AAUDIO_ERROR_DISCONNECTED : AAUDIO_ERROR_TIMEOUT;
- }
- maybeCallErrorCallback(result);
- break;
+ // Write audio data to stream. This is a BLOCKING WRITE!
+ // Write data regardless of the callbackResult because we assume the data
+ // is valid even when the callback returns AAUDIO_CALLBACK_RESULT_STOP.
+ // Imagine a callback that is playing a large sound in memory.
+ // When it gets to the end of the sound it can partially fill
+ // the last buffer with the end of the sound, then zero pad the buffer, then return STOP.
+ // If the callback has no valid data then it should zero-fill the entire buffer.
+ result = write(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
+ if ((result != mCallbackFrames)) {
+ if (result >= 0) {
+ // Only wrote some of the frames requested. The stream can be disconnected
+ // or timed out.
+ processCommands();
+ result = isDisconnected() ? AAUDIO_ERROR_DISCONNECTED : AAUDIO_ERROR_TIMEOUT;
}
- } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+ maybeCallErrorCallback(result);
+ break;
+ }
+
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
result = systemStopInternal();
break;
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index b51b5d0..4e14f18 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -66,6 +66,8 @@
void prepareBuffersForStart() override;
+ void prepareBuffersForStop() override;
+
void advanceClientToMatchServerPosition(int32_t serverMargin) override;
void onFlushFromServer() override;
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index 5c11882..f3e3bbd 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -150,7 +150,7 @@
getEmptyRoomAvailable(&wrappingBuffer);
- // Read data in one or two parts.
+ // Write data in one or two parts.
int partIndex = 0;
while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
fifo_frames_t framesToWrite = framesLeft;
@@ -192,3 +192,29 @@
memset(getStorage(), 0, (size_t) numBytes);
}
}
+
+fifo_frames_t FifoBuffer::eraseEmptyMemory(fifo_frames_t numFrames) {
+ WrappingBuffer wrappingBuffer;
+ fifo_frames_t framesLeft = numFrames;
+
+ getEmptyRoomAvailable(&wrappingBuffer);
+
+ // Erase data in one or two parts.
+ int partIndex = 0;
+ while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+ fifo_frames_t framesToWrite = framesLeft;
+ fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailable > 0) {
+ if (framesToWrite > framesAvailable) {
+ framesToWrite = framesAvailable;
+ }
+ int32_t numBytes = convertFramesToBytes(framesToWrite);
+ memset(wrappingBuffer.data[partIndex], 0, numBytes);
+ framesLeft -= framesToWrite;
+ } else {
+ break;
+ }
+ partIndex++;
+ }
+ return numFrames - framesLeft; // number erased
+}
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index 7b0aca1..860ccad 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -115,6 +115,13 @@
*/
void eraseMemory();
+ /**
+ * Clear some memory after the write pointer.
+ * This can be used to prevent the reader from accidentally reading stale data
+ * in case it is reading asynchronously.
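+ * @param numFrames maximum number of frames to erase
+ * @return the number of frames actually erased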
+ */
+ fifo_frames_t eraseEmptyMemory(fifo_frames_t numFrames);
+
protected:
virtual uint8_t *getStorage() const = 0;
diff --git a/media/libaudioclient/include/media/EffectClientAsyncProxy.h b/media/libaudioclient/include/media/EffectClientAsyncProxy.h
new file mode 100644
index 0000000..e7d6d80
--- /dev/null
+++ b/media/libaudioclient/include/media/EffectClientAsyncProxy.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/media/BnEffectClient.h>
+#include <audio_utils/CommandThread.h>
+
+namespace android::media {
+
+class EffectClientAsyncProxy : public IEffectClient {
+public:
+
+ /**
+ * Call this factory method to interpose a worker thread when a binder
+ * callback interface is invoked in-proc.
+ */
+ static sp<IEffectClient> makeIfNeeded(const sp<IEffectClient>& effectClient) {
+ if (isLocalBinder(effectClient)) {
+ return sp<EffectClientAsyncProxy>::make(effectClient);
+ }
+ return effectClient;
+ }
+
+ explicit EffectClientAsyncProxy(const sp<IEffectClient>& effectClient)
+ : mEffectClient(effectClient) {}
+
+ ::android::IBinder* onAsBinder() override {
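+ // This proxy is used strictly in-process and is never sent across binder.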
+ return nullptr;
+ }
+
+ ::android::binder::Status controlStatusChanged(bool controlGranted) override {
+ getThread().add(__func__, [=, effectClient = mEffectClient]() {
+ effectClient->controlStatusChanged(controlGranted);
+ });
+ return ::android::binder::Status::fromStatusT(::android::NO_ERROR);
+ }
+
+ ::android::binder::Status enableStatusChanged(bool enabled) override {
+ getThread().add(__func__, [=, effectClient = mEffectClient]() {
+ effectClient->enableStatusChanged(enabled);
+ });
+ return ::android::binder::Status::fromStatusT(::android::NO_ERROR);
+ }
+
+ ::android::binder::Status commandExecuted(
+ int32_t cmdCode, const ::std::vector<uint8_t>& cmdData,
+ const ::std::vector<uint8_t>& replyData) override {
+ getThread().add(__func__, [=, effectClient = mEffectClient]() {
+ effectClient->commandExecuted(cmdCode, cmdData, replyData);
+ });
+ return ::android::binder::Status::fromStatusT(::android::NO_ERROR);
+ }
+
+ ::android::binder::Status framesProcessed(int32_t frames) override {
+ getThread().add(__func__, [=, effectClient = mEffectClient]() {
+ effectClient->framesProcessed(frames);
+ });
+ return ::android::binder::Status::fromStatusT(::android::NO_ERROR);
+ }
+
+ /**
+ * Returns true if the binder interface is local (in-proc).
+ *
+ * TODO: consider moving this to a binder helper class.
+ */
+ static bool isLocalBinder(const sp<IInterface>& interface) {
+ const auto b = IInterface::asBinder(interface);
+ return b && b->localBinder();
+ }
+
+private:
+ const sp<IEffectClient> mEffectClient;
+
+ /**
+ * Returns the per-interface-descriptor CommandThread for in-proc binder transactions.
+ *
+ * Note: Remote RPC transactions to a given binder (kernel) node enter that node's
+ * async_todo list, which serializes all async operations to that binder node.
+ * Each transaction on the async_todo list must complete before the next one
+ * starts, even though there may be available threads in the process threadpool.
+ *
+ * For local transactions, we order all async requests entering
+ * the CommandThread. We do not maintain a threadpool, though a future implementation
+ * could use a shared ThreadPool.
+ *
+ * By using a static here, all in-proc binder interfaces made async with
+ * EffectClientAsyncProxy will get the same CommandThread.
+ *
+ * @return CommandThread to use.
+ */
+ static audio_utils::CommandThread& getThread() {
+ [[clang::no_destroy]] static audio_utils::CommandThread commandThread;
+ return commandThread;
+ }
+}; // class EffectClientAsyncProxy
+
+} // namespace android::media
diff --git a/media/libaudiohal/impl/EffectBufferHalAidl.cpp b/media/libaudiohal/impl/EffectBufferHalAidl.cpp
index a701852..33fe3ed 100644
--- a/media/libaudiohal/impl/EffectBufferHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectBufferHalAidl.cpp
@@ -58,25 +58,14 @@
}
EffectBufferHalAidl::~EffectBufferHalAidl() {
+ if (mAudioBuffer.raw) free(mAudioBuffer.raw);
}
status_t EffectBufferHalAidl::init() {
- int fd = ashmem_create_region("audioEffectAidl", mBufferSize);
- if (fd < 0) {
- ALOGE("%s create ashmem failed %d", __func__, fd);
- return fd;
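+ // A 32-byte aligned heap buffer is sufficient here: with the AIDL effect HAL,
+ // audio data is exchanged over the FMQ data path rather than shared memory.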
+ if (0 != posix_memalign(&mAudioBuffer.raw, 32, mBufferSize)) {
+ return NO_MEMORY;
}
- ScopedFileDescriptor tempFd(fd);
- mAudioBuffer.raw = mmap(nullptr /* address */, mBufferSize /* length */, PROT_READ | PROT_WRITE,
- MAP_SHARED, fd, 0 /* offset */);
- if (mAudioBuffer.raw == MAP_FAILED) {
- ALOGE("mmap failed for fd %d", fd);
- mAudioBuffer.raw = nullptr;
- return INVALID_OPERATION;
- }
-
- mMemory = {std::move(tempFd), static_cast<int64_t>(mBufferSize)};
return OK;
}
diff --git a/media/libaudiohal/impl/EffectBufferHalAidl.h b/media/libaudiohal/impl/EffectBufferHalAidl.h
index 035314b..cf6031f 100644
--- a/media/libaudiohal/impl/EffectBufferHalAidl.h
+++ b/media/libaudiohal/impl/EffectBufferHalAidl.h
@@ -50,7 +50,6 @@
const size_t mBufferSize;
bool mFrameCountChanged;
void* mExternalData;
- aidl::android::hardware::common::Ashmem mMemory;
audio_buffer_t mAudioBuffer;
// Can not be constructed directly by clients.
diff --git a/media/libaudiohal/impl/EffectConversionHelperAidl.cpp b/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
index e1a82f8..882c550 100644
--- a/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
+++ b/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
@@ -193,6 +193,7 @@
Parameter aidlParam = UNION_MAKE(Parameter, common, common);
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->setParameter(aidlParam)));
}
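+ // Cache the output access mode (WRITE vs. ACCUMULATE) so EffectHalAidl::process()
+ // can accumulate into the output buffer instead of overwriting it.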
+ mOutputAccessMode = config->outputCfg.accessMode;
mCommon = common;
return *static_cast<int32_t*>(pReplyData) = OK;
diff --git a/media/libaudiohal/impl/EffectConversionHelperAidl.h b/media/libaudiohal/impl/EffectConversionHelperAidl.h
index 0c0184e..29c5a83 100644
--- a/media/libaudiohal/impl/EffectConversionHelperAidl.h
+++ b/media/libaudiohal/impl/EffectConversionHelperAidl.h
@@ -49,6 +49,8 @@
::aidl::android::hardware::audio::effect::Descriptor getDescriptor() const;
status_t reopen();
+ uint8_t mOutputAccessMode = EFFECT_BUFFER_ACCESS_WRITE;
+
protected:
const int32_t mSessionId;
const int32_t mIoId;
diff --git a/media/libaudiohal/impl/EffectHalAidl.cpp b/media/libaudiohal/impl/EffectHalAidl.cpp
index b1b1dfe..7262148 100644
--- a/media/libaudiohal/impl/EffectHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectHalAidl.cpp
@@ -20,6 +20,7 @@
#include <memory>
+#include <audio_utils/primitives.h>
#include <error/expected_utils.h>
#include <media/AidlConversionCppNdk.h>
#include <media/AidlConversionEffect.h>
@@ -240,13 +241,22 @@
mOutBuffer->getSize() / sizeof(float), available);
return INVALID_OPERATION;
}
+
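+ // In ACCUMULATE mode, read into a temporary buffer first so the effect output
+ // can be added to the existing output samples.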
+ float *outputRawBuffer = mOutBuffer->audioBuffer()->f32;
+ std::vector<float> tempBuffer;
+ if (mConversion->mOutputAccessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ tempBuffer.resize(floatsToRead);
+ outputRawBuffer = tempBuffer.data();
+ }
// always read floating point data for AIDL
- if (!mOutBuffer->audioBuffer() ||
- !outputQ->read(mOutBuffer->audioBuffer()->f32, floatsToRead)) {
+ if (!outputQ->read(outputRawBuffer, floatsToRead)) {
ALOGE("%s failed to read %zu from outputQ to audioBuffer %p", __func__, floatsToRead,
mOutBuffer->audioBuffer());
return INVALID_OPERATION;
}
+ if (mConversion->mOutputAccessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ accumulate_float(mOutBuffer->audioBuffer()->f32, outputRawBuffer, floatsToRead);
+ }
ALOGD("%s %s consumed %zu produced %zu", __func__, effectName.c_str(), floatsToWrite,
floatsToRead);
diff --git a/media/libaudiohal/impl/EffectProxy.cpp b/media/libaudiohal/impl/EffectProxy.cpp
index d440ef8..9a8d96b 100644
--- a/media/libaudiohal/impl/EffectProxy.cpp
+++ b/media/libaudiohal/impl/EffectProxy.cpp
@@ -208,6 +208,10 @@
if (desc.common.flags.volume == Flags::Volume::NONE) {
common.flags.volume = Flags::Volume::NONE;
}
+ // set to AUXILIARY if any sub-effect is of AUXILIARY type
+ if (desc.common.flags.type == Flags::Type::AUXILIARY) {
+ common.flags.type = Flags::Type::AUXILIARY;
+ }
}
// copy type UUID from any of sub-effects, all sub-effects should have same type
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
index 042b063..311d60a 100644
--- a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
@@ -485,7 +485,7 @@
if (!stageInUse) {
LOG(WARNING) << __func__ << " not in use " << ::android::internal::ToString(channels);
- return RetCode::SUCCESS;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
}
RETURN_VALUE_IF(!stageInUse, RetCode::ERROR_ILLEGAL_PARAMETER, "stageNotInUse");
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index 5d9886c..f60d616 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -114,10 +114,11 @@
std::stringstream ss;
ss << "\t\tHaptic setting:\n";
ss << "\t\t- tracks intensity map:\n";
- for (const auto&[id, intensity] : param.id2Intensity) {
- ss << "\t\t\t- id=" << id << ", intensity=" << (int) intensity;
+ for (const auto&[id, hapticScale] : param.id2HapticScale) {
+ ss << "\t\t\t- id=" << id << ", hapticLevel=" << (int) hapticScale.getLevel()
+ << ", adaptiveScaleFactor=" << hapticScale.getAdaptiveScaleFactor();
}
- ss << "\t\t- max intensity: " << (int) param.maxHapticIntensity << '\n';
+ ss << "\t\t- max scale level: " << (int) param.maxHapticScale.getLevel() << '\n';
ss << "\t\t- max haptic amplitude: " << param.maxHapticAmplitude << '\n';
return ss.str();
}
@@ -145,7 +146,7 @@
memset(context->param.hapticChannelSource, 0, sizeof(context->param.hapticChannelSource));
context->param.hapticChannelCount = 0;
context->param.audioChannelCount = 0;
- context->param.maxHapticIntensity = os::HapticLevel::MUTE;
+ context->param.maxHapticScale = os::HapticScale::mute();
context->param.resonantFrequency = DEFAULT_RESONANT_FREQUENCY;
context->param.bpfQ = 1.0f;
@@ -312,22 +313,25 @@
void *value) {
switch (param) {
case HG_PARAM_HAPTIC_INTENSITY: {
- if (value == nullptr || size != (uint32_t) (2 * sizeof(int))) {
+ if (value == nullptr || size != (uint32_t) (2 * sizeof(int) + sizeof(float))) {
return -EINVAL;
}
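+ // Value layout: int32 track id, int32 HapticLevel, float adaptiveScaleFactor.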
- int id = *(int *) value;
- os::HapticLevel hapticIntensity =
- static_cast<os::HapticLevel>(*((int *) value + 1));
- ALOGD("Setting haptic intensity as %d", static_cast<int>(hapticIntensity));
- if (hapticIntensity == os::HapticLevel::MUTE) {
- context->param.id2Intensity.erase(id);
+ const int id = *(int *) value;
+ const os::HapticLevel hapticLevel = static_cast<os::HapticLevel>(*((int *) value + 1));
+ const float adaptiveScaleFactor = (*((float *) value + 2));
+ const os::HapticScale hapticScale = {hapticLevel, adaptiveScaleFactor};
+ ALOGD("Updating haptic scale, hapticLevel=%d, adaptiveScaleFactor=%f",
+ static_cast<int>(hapticLevel), adaptiveScaleFactor);
+ if (hapticScale.isScaleMute()) {
+ context->param.id2HapticScale.erase(id);
} else {
- context->param.id2Intensity.emplace(id, hapticIntensity);
+ context->param.id2HapticScale.emplace(id, hapticScale);
}
- context->param.maxHapticIntensity = hapticIntensity;
- for (const auto&[id, intensity] : context->param.id2Intensity) {
- context->param.maxHapticIntensity = std::max(
- context->param.maxHapticIntensity, intensity);
+ context->param.maxHapticScale = hapticScale;
+ for (const auto&[id, scale] : context->param.id2HapticScale) {
+ if (scale.getLevel() > context->param.maxHapticScale.getLevel()) {
+ context->param.maxHapticScale = scale;
+ }
}
break;
}
@@ -479,7 +483,7 @@
return -ENODATA;
}
- if (context->param.maxHapticIntensity == os::HapticLevel::MUTE) {
+ if (context->param.maxHapticScale.isScaleMute()) {
// Haptic channels are muted, no need to generate haptic data.
return 0;
}
@@ -506,7 +510,7 @@
context->processingChain, context->inputBuffer.data(),
context->outputBuffer.data(), inBuffer->frameCount);
os::scaleHapticData(hapticOutBuffer, hapticSampleCount,
- { /*level=*/context->param.maxHapticIntensity},
+ context->param.maxHapticScale,
context->param.maxHapticAmplitude);
// For haptic data, the haptic playback thread will copy the data from effect input buffer,
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
index f122c0a..dbfc5ea 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.h
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -48,9 +48,9 @@
uint32_t audioChannelCount;
uint32_t hapticChannelCount;
- // A map from track id to haptic intensity.
- std::map<int, os::HapticLevel> id2Intensity;
- os::HapticLevel maxHapticIntensity; // max intensity will be used to scale haptic data.
+ // A map from track id to haptic scale.
+ std::map<int, os::HapticScale> id2HapticScale;
+ os::HapticScale maxHapticScale; // max haptic scale will be used to scale haptic data.
float maxHapticAmplitude; // max amplitude will be used to limit haptic data absolute values.
float resonantFrequency;
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp
index c714bc9..5fe2f44 100644
--- a/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp
@@ -82,7 +82,6 @@
namespace aidl::android::hardware::audio::effect {
EffectReverb::EffectReverb(const AudioUuid& uuid) {
- LOG(DEBUG) << __func__ << uuid.toString();
if (uuid == getEffectImplUuidAuxEnvReverb()) {
mType = lvm::ReverbEffectType::AUX_ENV;
mDescriptor = &lvm::kAuxEnvReverbDesc;
@@ -106,18 +105,16 @@
EffectReverb::~EffectReverb() {
cleanUp();
- LOG(DEBUG) << __func__;
}
ndk::ScopedAStatus EffectReverb::getDescriptor(Descriptor* _aidl_return) {
RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
- LOG(DEBUG) << _aidl_return->toString();
*_aidl_return = *mDescriptor;
return ndk::ScopedAStatus::ok();
}
ndk::ScopedAStatus EffectReverb::setParameterSpecific(const Parameter::Specific& specific) {
- LOG(DEBUG) << __func__ << " specific " << specific.toString();
+ LOG(VERBOSE) << __func__ << " specific " << specific.toString();
RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
auto tag = specific.getTag();
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
index 1c66c78..67518af 100644
--- a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
@@ -68,24 +68,21 @@
// allocate lvm reverb instance
LVREV_ReturnStatus_en status = LVREV_SUCCESS;
- {
- std::lock_guard lg(mMutex);
- LVREV_InstanceParams_st params = {
- .MaxBlockSize = lvm::kMaxCallSize,
- // Max format, could be mono during process
- .SourceFormat = LVM_STEREO,
- .NumDelays = LVREV_DELAYLINES_4,
- };
- /* Init sets the instance handle */
- status = LVREV_GetInstanceHandle(&mInstance, &params);
- GOTO_IF_LVREV_ERROR(status, deinit, "LVREV_GetInstanceHandleFailed");
+ LVREV_InstanceParams_st params = {
+ .MaxBlockSize = lvm::kMaxCallSize,
+ // Max format, could be mono during process
+ .SourceFormat = LVM_STEREO,
+ .NumDelays = LVREV_DELAYLINES_4,
+ };
+ /* Init sets the instance handle */
+ status = LVREV_GetInstanceHandle(&mInstance, &params);
+ GOTO_IF_LVREV_ERROR(status, deinit, "LVREV_GetInstanceHandleFailed");
- // set control
- LVREV_ControlParams_st controlParams;
- initControlParameter(controlParams);
- status = LVREV_SetControlParameters(mInstance, &controlParams);
- GOTO_IF_LVREV_ERROR(status, deinit, "LVREV_SetControlParametersFailed");
- }
+ // set control
+ LVREV_ControlParams_st controlParams;
+ initControlParameter(controlParams);
+ status = LVREV_SetControlParameters(mInstance, &controlParams);
+ GOTO_IF_LVREV_ERROR(status, deinit, "LVREV_SetControlParametersFailed");
return RetCode::SUCCESS;
@@ -95,7 +92,6 @@
}
void ReverbContext::deInit() {
- std::lock_guard lg(mMutex);
if (mInstance) {
LVREV_FreeInstance(mInstance);
mInstance = nullptr;
@@ -143,19 +139,16 @@
RetCode ReverbContext::setEnvironmentalReverbRoomLevel(int roomLevel) {
// Update Control Parameter
LVREV_ControlParams_st params;
- {
- std::lock_guard lg(mMutex);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
- // Sum of room and reverb level controls
- // needs to subtract max levels for both room level and reverb level
- int combinedLevel = (roomLevel + mLevel) - lvm::kMaxReverbLevel;
- params.Level = convertLevel(combinedLevel);
+ // Sum of room and reverb level controls
+ // needs to subtract max levels for both room level and reverb level
+ int combinedLevel = (roomLevel + mLevel) - lvm::kMaxReverbLevel;
+ params.Level = convertLevel(combinedLevel);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
- }
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
mRoomLevel = roomLevel;
return RetCode::SUCCESS;
}
@@ -163,16 +156,13 @@
RetCode ReverbContext::setEnvironmentalReverbRoomHfLevel(int roomHfLevel) {
// Update Control Parameter
LVREV_ControlParams_st params;
- {
- std::lock_guard lg(mMutex);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
- params.LPF = convertHfLevel(roomHfLevel);
+ params.LPF = convertHfLevel(roomHfLevel);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
- }
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
mRoomHfLevel = roomHfLevel;
return RetCode::SUCCESS;
}
@@ -185,17 +175,15 @@
// Update Control Parameter
LVREV_ControlParams_st params;
- {
- std::lock_guard lg(mMutex);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
- params.T60 = (LVM_UINT16)time;
- mSamplesToExitCount = (params.T60 * mCommon.input.base.sampleRate) / 1000;
+ params.T60 = (LVM_UINT16)time;
+ mSamplesToExitCount = (params.T60 * mCommon.input.base.sampleRate) / 1000;
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
- }
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+
mDecayTime = time;
return RetCode::SUCCESS;
}
@@ -203,16 +191,13 @@
RetCode ReverbContext::setEnvironmentalReverbDecayHfRatio(int decayHfRatio) {
// Update Control Parameter
LVREV_ControlParams_st params;
- {
- std::lock_guard lg(mMutex);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
- params.Damping = (LVM_INT16)(decayHfRatio / 20);
+ params.Damping = (LVM_INT16)(decayHfRatio / 20);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
- }
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
mDecayHfRatio = decayHfRatio;
return RetCode::SUCCESS;
}
@@ -220,19 +205,17 @@
RetCode ReverbContext::setEnvironmentalReverbLevel(int level) {
// Update Control Parameter
LVREV_ControlParams_st params;
- {
- std::lock_guard lg(mMutex);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
- // Sum of room and reverb level controls
- // needs to subtract max levels for both room level and level
- int combinedLevel = (level + mRoomLevel) - lvm::kMaxReverbLevel;
- params.Level = convertLevel(combinedLevel);
+ // Sum of room and reverb level controls
+ // needs to subtract max levels for both room level and level
+ int combinedLevel = (level + mRoomLevel) - lvm::kMaxReverbLevel;
+ params.Level = convertLevel(combinedLevel);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
- }
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+
mLevel = level;
return RetCode::SUCCESS;
}
@@ -245,16 +228,14 @@
RetCode ReverbContext::setEnvironmentalReverbDiffusion(int diffusion) {
// Update Control Parameter
LVREV_ControlParams_st params;
- {
- std::lock_guard lg(mMutex);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
- params.Density = (LVM_INT16)(diffusion / 10);
+ params.Density = (LVM_INT16)(diffusion / 10);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
- }
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+
mDiffusion = diffusion;
return RetCode::SUCCESS;
}
@@ -262,16 +243,14 @@
RetCode ReverbContext::setEnvironmentalReverbDensity(int density) {
// Update Control Parameter
LVREV_ControlParams_st params;
- {
- std::lock_guard lg(mMutex);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
- params.RoomSize = (LVM_INT16)(((density * 99) / 1000) + 1);
+ params.RoomSize = (LVM_INT16)(((density * 99) / 1000) + 1);
- RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
- }
+ RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+
mDensity = density;
return RetCode::SUCCESS;
}
@@ -362,9 +341,6 @@
RETURN_VALUE_IF(inputFrameCount != outputFrameCount, status, "FrameCountMismatch");
RETURN_VALUE_IF(0 == getInputFrameSize(), status, "zeroFrameSize");
- LOG(DEBUG) << __func__ << " start processing";
- std::lock_guard lg(mMutex);
-
int channels = ::aidl::android::hardware::audio::common::getChannelCount(
mCommon.input.base.channelMask);
int outChannels = ::aidl::android::hardware::audio::common::getChannelCount(
@@ -405,7 +381,6 @@
} else {
if (!mEnabled && mSamplesToExitCount > 0) {
std::fill(outFrames.begin(), outFrames.end(), 0);
- LOG(VERBOSE) << "Zeroing " << channels << " samples per frame at the end of call ";
}
/* Process the samples, producing a stereo output */
@@ -415,7 +390,7 @@
outFrames.data(), /* Output buffer */
frameCount); /* Number of samples to read */
if (lvrevStatus != LVREV_SUCCESS) {
- LOG(ERROR) << __func__ << lvrevStatus;
+ LOG(ERROR) << __func__ << " LVREV_Process error: " << lvrevStatus;
return {EX_UNSUPPORTED_OPERATION, 0, 0};
}
}
@@ -464,19 +439,10 @@
}
}
- bool accumulate = false;
if (outChannels > 2) {
- // Accumulate if required
- if (accumulate) {
- for (int i = 0; i < frameCount; i++) {
- out[outChannels * i] += outFrames[FCC_2 * i];
- out[outChannels * i + 1] += outFrames[FCC_2 * i + 1];
- }
- } else {
- for (int i = 0; i < frameCount; i++) {
- out[outChannels * i] = outFrames[FCC_2 * i];
- out[outChannels * i + 1] = outFrames[FCC_2 * i + 1];
- }
+ for (int i = 0; i < frameCount; i++) {
+ out[outChannels * i] = outFrames[FCC_2 * i];
+ out[outChannels * i + 1] = outFrames[FCC_2 * i + 1];
}
if (!isAuxiliary()) {
for (int i = 0; i < frameCount; i++) {
@@ -487,29 +453,15 @@
}
}
} else {
- if (accumulate) {
- if (outChannels == FCC_1) {
- for (int i = 0; i < frameCount; i++) {
- out[i] += ((outFrames[i * FCC_2] + outFrames[i * FCC_2 + 1]) * 0.5f);
- }
- } else {
- for (int i = 0; i < frameCount * FCC_2; i++) {
- out[i] += outFrames[i];
- }
- }
+ if (outChannels == FCC_1) {
+ From2iToMono_Float(outFrames.data(), out, frameCount);
} else {
- if (outChannels == FCC_1) {
- From2iToMono_Float(outFrames.data(), out, frameCount);
- } else {
- for (int i = 0; i < frameCount * FCC_2; i++) {
- out[i] = outFrames[i];
- }
+ for (int i = 0; i < frameCount * FCC_2; i++) {
+ out[i] = outFrames[i];
}
}
}
- LOG(DEBUG) << __func__ << " done processing";
-
if (!mEnabled && mSamplesToExitCount > 0) {
// signed - unsigned will trigger integer overflow if result becomes negative.
mSamplesToExitCount -= samples;
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.h b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.h
index 7d0ccff..8068f33 100644
--- a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.h
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.h
@@ -158,10 +158,9 @@
{-400, -600, 1800, 700, -2000, 30, -1400, 60, 1000, 1000}},
{PresetReverb::Presets::PLATE, {-400, -200, 1300, 900, 0, 2, 0, 10, 1000, 750}}};
- std::mutex mMutex;
const lvm::ReverbEffectType mType;
bool mEnabled = false;
- LVREV_Handle_t mInstance GUARDED_BY(mMutex) = LVM_NULL;
+ LVREV_Handle_t mInstance = LVM_NULL;
int mRoomLevel = 0;
int mRoomHfLevel = 0;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index e79e55b..e4f3b83 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -3395,9 +3395,6 @@
if (bufferInfos == nullptr || bufferInfos->value.empty()) {
return BAD_VALUE;
}
- if (cryptoInfos == nullptr || cryptoInfos->value.empty()) {
- return BAD_VALUE;
- }
status_t err = OK;
sp<AMessage> msg = new AMessage(kWhatQueueInputBuffer, this);
msg->setSize("index", index);
@@ -3405,8 +3402,12 @@
new WrapperObject<sp<hardware::HidlMemory>>{buffer}};
msg->setObject("memory", memory);
msg->setSize("offset", offset);
- msg->setSize("ssize", size);
- msg->setObject("cryptoInfos", cryptoInfos);
+ if (cryptoInfos != nullptr) {
+ msg->setSize("ssize", size);
+ msg->setObject("cryptoInfos", cryptoInfos);
+ } else {
+ msg->setSize("size", size);
+ }
msg->setObject("accessUnitInfo", bufferInfos);
if (OK != (err = generateFlagsFromAccessUnitInfo(msg, bufferInfos))) {
return err;
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 5dd8423..dd6da15 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -1,13 +1,4 @@
{
- "postsubmit": [
- // writerTest fails about 5 out of 66
- // { "name": "writerTest" },
-
- { "name": "HEVCUtilsUnitTest" },
- { "name": "ExtractorFactoryTest" }
-
- ],
-
"presubmit-large": [
{
"name": "CtsMediaMiscTestCases",
@@ -92,8 +83,16 @@
}
],
"postsubmit": [
+ // writerTest fails about 5 out of 66
+ // { "name": "writerTest" },
{
"name": "BatteryChecker_test"
+ },
+ {
+ "name": "ExtractorFactoryTest"
+ },
+ {
+ "name": "HEVCUtilsUnitTest"
}
]
}
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index f4c40e1..24ac2e8 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -697,6 +697,7 @@
inline constexpr char FEATURE_AdaptivePlayback[] = "adaptive-playback";
inline constexpr char FEATURE_EncodingStatistics[] = "encoding-statistics";
inline constexpr char FEATURE_IntraRefresh[] = "intra-refresh";
+inline constexpr char FEATURE_MultipleFrames[] = "multiple-frames";
inline constexpr char FEATURE_PartialFrame[] = "partial-frame";
inline constexpr char FEATURE_QpBounds[] = "qp-bounds";
inline constexpr char FEATURE_SecurePlayback[] = "secure-playback";
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 959f43e..458ac9c 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -626,6 +626,10 @@
// ACodec is waiting for all buffers to be returned, do NOT
// submit any more buffers to the codec.
bufferSource->onOmxIdle();
+ } else if (param == OMX_StateExecuting) {
+ // Initiating transition from Idle -> Executing
+ // Start submitting buffers to codec.
+ bufferSource->onOmxExecuting();
} else if (param == OMX_StateLoaded) {
// Initiating transition from Idle/Executing -> Loaded
// Buffers are about to be freed.
@@ -2404,13 +2408,6 @@
asString(event), event, arg1String, arg1, arg2String, arg2);
const sp<IOMXBufferSource> bufferSource(getBufferSource());
- if (bufferSource != NULL
- && event == OMX_EventCmdComplete
- && arg1 == OMX_CommandStateSet
- && arg2 == OMX_StateExecuting) {
- bufferSource->onOmxExecuting();
- }
-
// allow configuration if we return to the loaded state
if (event == OMX_EventCmdComplete
&& arg1 == OMX_CommandStateSet
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
index 19f9549..2341af1 100644
--- a/media/mediaserver/Android.bp
+++ b/media/mediaserver/Android.bp
@@ -1,4 +1,3 @@
-
package {
default_applicable_licenses: ["frameworks_av_media_mediaserver_license"],
}
@@ -86,7 +85,16 @@
"-Wall",
],
- vintf_fragments: ["manifest_media_c2_software.xml"],
+ // AIDL is only used when release_aidl_use_unfrozen is true;
+ // otherwise the swcodec mainline module is a prebuilt from an
+ // Android U branch, so the HIDL manifest fragment is used.
+ // TODO(b/327508501)
+ vintf_fragments: ["manifest_media_c2_software_hidl.xml"],
+ product_variables: {
+ release_aidl_use_unfrozen: {
+ vintf_fragments: ["manifest_media_c2_software_aidl.xml"],
+ },
+ },
soong_config_variables: {
TARGET_DYNAMIC_64_32_MEDIASERVER: {
diff --git a/media/mediaserver/manifest_media_c2_software_aidl.xml b/media/mediaserver/manifest_media_c2_software_aidl.xml
new file mode 100644
index 0000000..e6bcafa
--- /dev/null
+++ b/media/mediaserver/manifest_media_c2_software_aidl.xml
@@ -0,0 +1,7 @@
+<manifest version="1.0" type="framework">
+ <hal format="aidl">
+ <name>android.hardware.media.c2</name>
+ <version>1</version>
+ <fqname>IComponentStore/software</fqname>
+ </hal>
+</manifest>
diff --git a/media/mediaserver/manifest_media_c2_software.xml b/media/mediaserver/manifest_media_c2_software_hidl.xml
similarity index 68%
rename from media/mediaserver/manifest_media_c2_software.xml
rename to media/mediaserver/manifest_media_c2_software_hidl.xml
index 31dfafb..69a27be 100644
--- a/media/mediaserver/manifest_media_c2_software.xml
+++ b/media/mediaserver/manifest_media_c2_software_hidl.xml
@@ -8,9 +8,4 @@
<instance>software</instance>
</interface>
</hal>
- <hal format="aidl">
- <name>android.hardware.media.c2</name>
- <version>1</version>
- <fqname>IComponentStore/software</fqname>
- </hal>
</manifest>
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index c4f2808..deef31d 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
#define LOG_TAG "ServiceUtilities"
#include <audio_utils/clock.h>
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index b270813..819f2d6 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -31,6 +31,7 @@
#include <media/AudioContainers.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioEffect.h>
+#include <media/EffectClientAsyncProxy.h>
#include <media/ShmemCompat.h>
#include <media/TypeConverter.h>
#include <media/audiohal/EffectHalInterface.h>
@@ -1545,13 +1546,16 @@
return INVALID_OPERATION;
}
- std::vector<uint8_t> request(sizeof(effect_param_t) + 3 * sizeof(uint32_t));
+ std::vector<uint8_t> request(sizeof(effect_param_t) + 3 * sizeof(uint32_t) + sizeof(float));
effect_param_t *param = (effect_param_t*) request.data();
param->psize = sizeof(int32_t);
- param->vsize = sizeof(int32_t) * 2;
+ param->vsize = sizeof(int32_t) * 2 + sizeof(float);
*(int32_t*)param->data = HG_PARAM_HAPTIC_INTENSITY;
- *((int32_t*)param->data + 1) = id;
- *((int32_t*)param->data + 2) = static_cast<int32_t>(hapticScale.getLevel());
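+ // Layout must match the HG_PARAM_HAPTIC_INTENSITY handler in the
+ // HapticGenerator effect: int32 id, int32 level, float adaptiveScaleFactor.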
+ int32_t* hapticScalePtr = reinterpret_cast<int32_t*>(param->data + sizeof(int32_t));
+ hapticScalePtr[0] = id;
+ hapticScalePtr[1] = static_cast<int32_t>(hapticScale.getLevel());
+ float* adaptiveScaleFactorPtr = reinterpret_cast<float*>(param->data + 3 * sizeof(int32_t));
+ *adaptiveScaleFactorPtr = hapticScale.getAdaptiveScaleFactor();
std::vector<uint8_t> response;
status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
if (status == NO_ERROR) {
@@ -1726,7 +1730,8 @@
const sp<media::IEffectClient>& effectClient,
int32_t priority, bool notifyFramesProcessed)
: BnEffect(),
- mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL),
+ mEffect(effect), mEffectClient(media::EffectClientAsyncProxy::makeIfNeeded(effectClient)),
+ mClient(client), mCblk(nullptr),
mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false),
mNotifyFramesProcessed(notifyFramesProcessed)
{
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index d1a09a4..f1be3976 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2927,7 +2927,6 @@
// Set haptic intensity for effect
if (chain != nullptr) {
- // TODO(b/324559333): Add adaptive haptics scaling support for the HapticGenerator.
chain->setHapticScale_l(track->id(), hapticScale);
}
}
@@ -3509,7 +3508,7 @@
char *endptr;
unsigned long ul = strtoul(value, &endptr, 0);
if (*endptr == '\0' && ul != 0) {
- ALOGD("Silence is golden");
+ ALOGW("%s: mute from ro.audio.silent. Silence is golden", __func__);
// The setprop command will not allow a property to be changed after
// the first time it is set, so we don't have to worry about un-muting.
setMasterMute_l(true);
@@ -7895,6 +7894,11 @@
if (mSupportedLatencyModes.empty()) {
return;
}
+ // Do not update the HAL latency mode if no track is active
+ if (mActiveTracks.isEmpty()) {
+ return;
+ }
+
audio_latency_mode_t latencyMode = AUDIO_LATENCY_MODE_FREE;
if (mSupportedLatencyModes.size() == 1) {
// If the HAL only support one latency mode currently, confirm the choice
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 77abaf6..4d07821 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1233,7 +1233,7 @@
&& (state == IDLE || state == STOPPED || state == FLUSHED)) {
mFrameMap.reset();
- if (!isFastTrack() && (isDirect() || isOffloaded())) {
+ if (!isFastTrack()) {
// Start point of track -> sink frame map. If the HAL returns a
// frame position smaller than the first written frame in
// updateTrackFrameInfo, the timestamp can be interpolated
diff --git a/services/audioflinger/afutils/NBAIO_Tee.cpp b/services/audioflinger/afutils/NBAIO_Tee.cpp
index 86fb128..cdc8e95 100644
--- a/services/audioflinger/afutils/NBAIO_Tee.cpp
+++ b/services/audioflinger/afutils/NBAIO_Tee.cpp
@@ -514,6 +514,12 @@
return NO_ERROR; // return full path
}
+/* static */
+NBAIO_Tee::RunningTees& NBAIO_Tee::getRunningTees() {
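+ // no_destroy: never run the destructor at process exit, avoiding static
+ // destruction-order races with a concurrent RunningTees::dump().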
+ [[clang::no_destroy]] static RunningTees runningTees;
+ return runningTees;
+}
+
} // namespace android
#endif // TEE_SINK
diff --git a/services/audioflinger/afutils/NBAIO_Tee.h b/services/audioflinger/afutils/NBAIO_Tee.h
index a5c544e..5ab1949 100644
--- a/services/audioflinger/afutils/NBAIO_Tee.h
+++ b/services/audioflinger/afutils/NBAIO_Tee.h
@@ -310,10 +310,7 @@
};
// singleton
- static RunningTees &getRunningTees() {
- static RunningTees runningTees;
- return runningTees;
- }
+ static RunningTees& getRunningTees();
// The NBAIO TeeImpl may have lifetime longer than NBAIO_Tee if
// RunningTees::dump() is being called simultaneous to ~NBAIO_Tee().
diff --git a/services/audiopolicy/TEST_MAPPING b/services/audiopolicy/TEST_MAPPING
index a2ebb8d..cf1a771 100644
--- a/services/audiopolicy/TEST_MAPPING
+++ b/services/audiopolicy/TEST_MAPPING
@@ -46,6 +46,9 @@
"include-filter": "com.google.android.gts.audio.AudioPolicyHostTest"
}
]
+ },
+ {
+ "name": "spatializer_tests"
}
]
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 6f71ac5..44f84b9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -235,7 +235,8 @@
&deviceType,
String8(mDevice->address().c_str()),
source,
- flags);
+ static_cast<audio_input_flags_t>(
+ flags & mProfile->getFlags()));
LOG_ALWAYS_FATAL_IF(mDevice->type() != deviceType,
"%s openInput returned device %08x when given device %08x",
__FUNCTION__, mDevice->type(), deviceType);
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index d1819fd..3430f4b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -30,7 +30,7 @@
#include <AudioOutputDescriptor.h>
#include <android_media_audiopolicy.h>
-namespace audio_flags = android::media::audiopolicy;
+namespace audiopolicy_flags = android::media::audiopolicy;
namespace android {
namespace {
@@ -193,7 +193,7 @@
mix.mDeviceType, mix.mDeviceAddress.c_str());
return BAD_VALUE;
}
- if (audio_flags::audio_mix_ownership()) {
+ if (audiopolicy_flags::audio_mix_ownership()) {
if (mix.mToken == registeredMix->mToken) {
ALOGE("registerMix(): same mix already registered - skipping");
return BAD_VALUE;
@@ -221,7 +221,7 @@
{
for (size_t i = 0; i < size(); i++) {
const sp<AudioPolicyMix>& registeredMix = itemAt(i);
- if (audio_flags::audio_mix_ownership()) {
+ if (audiopolicy_flags::audio_mix_ownership()) {
if (mix.mToken == registeredMix->mToken) {
ALOGD("unregisterMix(): removing mix for dev=0x%x addr=%s",
mix.mDeviceType, mix.mDeviceAddress.c_str());
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 3bebb11..154172a 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -3825,7 +3825,6 @@
status_t AudioPolicyManager::unregisterPolicyMixes(Vector<AudioMix> mixes)
{
ALOGV("unregisterPolicyMixes() num mixes %zu", mixes.size());
- status_t endResult = NO_ERROR;
status_t res = NO_ERROR;
bool checkOutputs = false;
sp<HwModule> rSubmixModule;
@@ -3838,7 +3837,6 @@
AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
if (rSubmixModule == 0) {
res = INVALID_OPERATION;
- endResult = INVALID_OPERATION;
continue;
}
}
@@ -3847,20 +3845,25 @@
if (mPolicyMixes.unregisterMix(mix) != NO_ERROR) {
res = INVALID_OPERATION;
- endResult = INVALID_OPERATION;
continue;
}
- for (auto device : {AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX}) {
+ for (auto device: {AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX}) {
if (getDeviceConnectionState(device, address.c_str()) ==
- AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
- res = setDeviceConnectionStateInt(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- address.c_str(), "remote-submix",
- AUDIO_FORMAT_DEFAULT);
- if (res != OK) {
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
+ status_t currentRes =
+ setDeviceConnectionStateInt(device,
+ AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address.c_str(),
+ "remote-submix",
+ AUDIO_FORMAT_DEFAULT);
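+ // With audio_mix_ownership disabled, keep legacy behavior: the aggregate
+ // result tracks the status of each disconnection attempt directly.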
+ if (!audio_flags::audio_mix_ownership()) {
+ res = currentRes;
+ }
+ if (currentRes != OK) {
ALOGE("Error making RemoteSubmix device unavailable for mix "
"with type %d, address %s", device, address.c_str());
- endResult = INVALID_OPERATION;
+ res = INVALID_OPERATION;
}
}
}
@@ -3870,24 +3873,16 @@
} else if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
if (mPolicyMixes.unregisterMix(mix) != NO_ERROR) {
res = INVALID_OPERATION;
- endResult = INVALID_OPERATION;
continue;
} else {
checkOutputs = true;
}
}
}
- if (audio_flags::audio_mix_ownership()) {
- res = endResult;
- if (res == NO_ERROR && checkOutputs) {
- checkForDeviceAndOutputChanges();
- updateCallAndOutputRouting();
- }
- } else {
- if (res == NO_ERROR && checkOutputs) {
- checkForDeviceAndOutputChanges();
- updateCallAndOutputRouting();
- }
+
+ if (res == NO_ERROR && checkOutputs) {
+ checkForDeviceAndOutputChanges();
+ updateCallAndOutputRouting();
}
return res;
}
@@ -3907,7 +3902,7 @@
_aidl_return.back().mToken = policyMix->mToken;
}
- ALOGVV("%s() returning %zu registered mixes", __func__, _aidl_return->size());
+ ALOGVV("%s() returning %zu registered mixes", __func__, _aidl_return.size());
return OK;
}
@@ -7280,7 +7275,6 @@
DeviceVector devices;
for (const auto &productStrategy : mEngine->getOrderedProductStrategies()) {
StreamTypeVector streams = mEngine->getStreamTypesForProductStrategy(productStrategy);
- auto attr = mEngine->getAllAttributesForProductStrategy(productStrategy).front();
auto hasStreamActive = [&](auto stream) {
return hasStream(streams, stream) && isStreamActive(stream, 0);
};
@@ -7305,6 +7299,7 @@
mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc))) {
// Retrieval of devices for voice DL is done on primary output profile, cannot
// check the route (would force modifying configuration file for this profile)
+ auto attr = mEngine->getAllAttributesForProductStrategy(productStrategy).front();
devices = mEngine->getOutputDevicesForAttributes(attr, nullptr, fromCache);
break;
}
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index cddbf39..05b0ddb 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -107,3 +107,9 @@
"framework-permission-aidl-cpp",
],
}
+
+cc_library_headers {
+ name: "libaudiopolicyservice_headers",
+ host_supported: true,
+ export_include_dirs: ["."],
+}
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 71edd57..d67ddb6 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -61,11 +61,6 @@
}
}
-void AudioPolicyEffects::setDefaultDeviceEffects() {
- mDefaultDeviceEffectFuture = std::async(
- std::launch::async, &AudioPolicyEffects::initDefaultDeviceEffects, this);
-}
-
status_t AudioPolicyEffects::addInputEffects(audio_io_handle_t input,
audio_source_t inputSource,
audio_session_t audioSession)
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index a9628c2..259b84a 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -116,10 +116,8 @@
// Remove the default stream effect from wherever it's attached.
status_t removeStreamDefaultEffect(audio_unique_id_t id) EXCLUDES_AudioPolicyEffects_Mutex;
- // Called by AudioPolicyService::onFirstRef() to load device effects
- // on a separate worker thread.
- // TODO(b/319515492) move this initialization after AudioPolicyService::onFirstRef().
- void setDefaultDeviceEffects();
+ // Initializes the default device effects (AudioSystem must be ready, as this creates audio client objects).
+ void initDefaultDeviceEffects() EXCLUDES(mDeviceEffectsMutex) EXCLUDES_EffectHandle_Mutex;
private:
@@ -201,11 +199,6 @@
};
- // Called on an async thread because it creates AudioEffects
- // which register with AudioFlinger and AudioPolicy.
- // We must therefore exclude the EffectHandle_Mutex.
- void initDefaultDeviceEffects() EXCLUDES(mDeviceEffectsMutex) EXCLUDES_EffectHandle_Mutex;
-
status_t loadAudioEffectConfig_ll(const sp<EffectsFactoryHalInterface>& effectsFactoryHal)
REQUIRES(mMutex, mDeviceEffectsMutex);
@@ -281,18 +274,6 @@
std::mutex mDeviceEffectsMutex;
std::map<std::string, std::unique_ptr<DeviceEffects>> mDeviceEffects
GUARDED_BY(mDeviceEffectsMutex);
-
- /**
- * Device Effect initialization must be asynchronous: the audio_policy service parses and init
- * effect on first reference. AudioFlinger will handle effect creation and register these
- * effect on audio_policy service.
- *
- * The future is associated with the std::async launched thread - no need to lock as
- * it is only set once on init. Due to the async nature, it is conceivable that
- * some device effects are not available immediately after AudioPolicyService::onFirstRef()
- * while the effects are being created.
- */
- std::future<void> mDefaultDeviceEffectFuture;
};
} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index bc6498a..e95147e 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -313,9 +313,16 @@
}
}
AudioSystem::audioPolicyReady();
- // AudioFlinger will handle effect creation and register these effects on audio_policy
- // service. Hence, audio_policy service must be ready.
- audioPolicyEffects->setDefaultDeviceEffects();
+}
+
+void AudioPolicyService::onAudioSystemReady() {
+ sp<AudioPolicyEffects> audioPolicyEffects;
+ {
+ audio_utils::lock_guard _l(mMutex);
+
+ audioPolicyEffects = mAudioPolicyEffects;
+ }
+ audioPolicyEffects->initDefaultDeviceEffects();
}
void AudioPolicyService::unloadAudioPolicyManager()
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index bd56366..7aa80cf 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -322,6 +322,9 @@
// RefBase
virtual void onFirstRef();
+ // Called when AudioSystem is ready, to complete initialization (e.g. creating default device effects).
+ void onAudioSystemReady();
+
//
// Helpers for the struct audio_policy_service_ops implementation.
// This is used by the audio policy manager for certain operations that
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index 16f3a9a..233a484 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -1096,7 +1096,7 @@
}
void Spatializer::checkSensorsState_l() {
- audio_latency_mode_t requestedLatencyMode = AUDIO_LATENCY_MODE_FREE;
+ mRequestedLatencyMode = AUDIO_LATENCY_MODE_FREE;
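+ // Stored in a member rather than a local so tests can query the last requested latency mode.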
const bool supportsSetLatencyMode = !mSupportedLatencyModes.empty();
bool supportsLowLatencyMode;
if (com::android::media::audio::dsa_over_bt_le_audio()) {
@@ -1117,7 +1117,7 @@
&& mDesiredHeadTrackingMode != HeadTrackingMode::STATIC
&& mHeadSensor != SpatializerPoseController::INVALID_SENSOR) {
if (supportsLowLatencyMode) {
- requestedLatencyMode = selectHeadtrackingConnectionMode_l();
+ mRequestedLatencyMode = selectHeadtrackingConnectionMode_l();
}
if (mEngine != nullptr) {
setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
@@ -1139,9 +1139,9 @@
}
if (mOutput != AUDIO_IO_HANDLE_NONE && supportsSetLatencyMode) {
const status_t status =
- AudioSystem::setRequestedLatencyMode(mOutput, requestedLatencyMode);
+ AudioSystem::setRequestedLatencyMode(mOutput, mRequestedLatencyMode);
ALOGD("%s: setRequestedLatencyMode for output thread(%d) to %s returned %d", __func__,
- mOutput, toString(requestedLatencyMode).c_str(), status);
+ mOutput, toString(mRequestedLatencyMode).c_str(), status);
}
}
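
Promoting the local requestedLatencyMode to the mMutex-guarded member mRequestedLatencyMode is what allows the new spatializer tests to observe the value the state machine last computed. A standalone sketch of the pattern, with std::mutex standing in for the audio_utils types:

#include <mutex>

class LatencyTracker {
  public:
    // All writes stay inside the existing critical section.
    void recompute(bool anyActiveTracks) {
        std::lock_guard<std::mutex> l(mMutex);
        mRequested = anyActiveTracks ? 1 /* LOW */ : 0 /* FREE */;
    }
    // Test-only observer: copy the last computed value out under the lock.
    int requested() const {
        std::lock_guard<std::mutex> l(mMutex);
        return mRequested;
    }
  private:
    mutable std::mutex mMutex;
    int mRequested = 0;  // mirrors mRequestedLatencyMode GUARDED_BY(mMutex)
};

int main() {
    LatencyTracker t;
    t.recompute(/*anyActiveTracks=*/true);
    return t.requested() == 1 ? 0 : 1;
}
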
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index 355df18..c5f159c 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -27,6 +27,7 @@
#include <audio_utils/SimpleLog.h>
#include <math.h>
#include <media/AudioEffect.h>
+#include <media/MediaMetricsItem.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <media/VectorRecorder.h>
#include <media/audiohal/EffectHalInterface.h>
@@ -153,6 +154,34 @@
return mLevel;
}
+ /** For test only */
+ std::unordered_set<media::audio::common::HeadTracking::ConnectionMode>
+ getSupportedHeadtrackingConnectionModes() const {
+ return mSupportedHeadtrackingConnectionModes;
+ }
+
+ /** For test only */
+ media::audio::common::HeadTracking::ConnectionMode getHeadtrackingConnectionMode() const {
+ return mHeadtrackingConnectionMode;
+ }
+
+ /** For test only */
+ std::vector<audio_latency_mode_t> getSupportedLatencyModes() const {
+ audio_utils::lock_guard lock(mMutex);
+ return mSupportedLatencyModes;
+ }
+
+ /** For test only */
+ std::vector<audio_latency_mode_t> getOrderedLowLatencyModes() const {
+ return mOrderedLowLatencyModes;
+ }
+
+ /** For test only */
+ audio_latency_mode_t getRequestedLatencyMode() const {
+ audio_utils::lock_guard lock(mMutex);
+ return mRequestedLatencyMode;
+ }
+
/** Called by audio policy service when the special output mixer dedicated to spatialization
* is opened and the spatializer engine must be created.
*/
@@ -164,6 +193,12 @@
/** Returns the output stream the spatializer is attached to. */
audio_io_handle_t getOutput() const { audio_utils::lock_guard lock(mMutex); return mOutput; }
+ /** For test only */
+ void setOutput(audio_io_handle_t output) {
+ audio_utils::lock_guard lock(mMutex);
+ mOutput = output;
+ }
+
void updateActiveTracks(size_t numActiveTracks);
/** Gets the channel mask, sampling rate and format set for the spatializer input. */
@@ -188,6 +223,10 @@
// NO_INIT: Spatializer creation failed.
static void sendEmptyCreateSpatializerMetricWithStatus(status_t status);
+ /** Made public for test only */
+ void onSupportedLatencyModesChangedMsg(
+ audio_io_handle_t output, std::vector<audio_latency_mode_t>&& modes);
+
private:
Spatializer(effect_descriptor_t engineDescriptor,
SpatializerPolicyCallback *callback);
@@ -200,8 +239,6 @@
void onHeadToStagePoseMsg(const std::vector<float>& headToStage);
void onActualModeChangeMsg(media::HeadTrackingMode mode);
- void onSupportedLatencyModesChangedMsg(
- audio_io_handle_t output, std::vector<audio_latency_mode_t>&& modes);
static constexpr int kMaxEffectParamValues = 10;
/**
@@ -484,9 +521,11 @@
std::vector<media::audio::common::Spatialization::Mode> mSpatializationModes;
std::vector<audio_channel_mask_t> mChannelMasks;
bool mSupportsHeadTracking;
- /** List of supported headtracking connection modes reported by the spatializer.
+
+ /** List of supported head tracking connection modes reported by the spatializer.
* If the list is empty, the spatializer does not support any optional connection
* mode and mode HeadTracking::ConnectionMode::FRAMEWORK_PROCESSED is assumed.
+ * This is set in the factory constructor and can be accessed without mutex.
*/
std::unordered_set<media::audio::common::HeadTracking::ConnectionMode>
mSupportedHeadtrackingConnectionModes;
@@ -504,6 +543,9 @@
std::vector<audio_latency_mode_t> mSupportedLatencyModes GUARDED_BY(mMutex);
/** preference order for low latency modes according to persist.bluetooth.hid.transport */
std::vector<audio_latency_mode_t> mOrderedLowLatencyModes;
+
+ audio_latency_mode_t mRequestedLatencyMode GUARDED_BY(mMutex) = AUDIO_LATENCY_MODE_FREE;
+
/** string to latency mode map used to parse bluetooth.core.le.dsa_transport_preference */
static const std::map<std::string, audio_latency_mode_t> sStringToLatencyModeMap;
static const std::vector<const char*> sHeadPoseKeys;
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index 34bd3b4..21ded60 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -108,3 +108,40 @@
test_suites: ["device-tests"],
}
+
+cc_test {
+ name: "spatializer_tests",
+
+ defaults: [
+ "latest_android_media_audio_common_types_cpp_shared",
+ "libaudiopolicyservice_dependencies",
+ ],
+
+ require_root: true,
+
+ shared_libs: [
+ "libaudioclient",
+ "libaudiofoundation",
+ "libcutils",
+ "liblog",
+ ],
+
+ static_libs: [
+ "libaudiopolicyservice",
+ ],
+
+ header_libs: [
+ "libaudiohal_headers",
+ "libaudiopolicyservice_headers",
+ "libmediametrics_headers",
+ ],
+
+ srcs: ["spatializer_tests.cpp"],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
+ test_suites: ["device-tests"],
+}
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index e02c93a..afd8765 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -1369,6 +1369,7 @@
AudioMix myAudioMix(matchCriteria, mixType, audioConfig, mixFlag,
String8(mixAddress.c_str()), 0);
myAudioMix.mDeviceType = deviceType;
+ myAudioMix.mToken = sp<BBinder>::make();
// Clear mAudioMix before add new one to make sure we don't add already exist mixes.
mAudioMixes.clear();
return addPolicyMix(myAudioMix);
@@ -1569,8 +1570,7 @@
validAudioMix.mDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
mAudioMixes.clear();
- mAudioMixes.add(validAudioMix);
- status_t ret = mManager->registerPolicyMixes(mAudioMixes);
+ status_t ret = addPolicyMix(validAudioMix);
ASSERT_EQ(NO_ERROR, ret);
@@ -1586,8 +1586,7 @@
MIX_ROUTE_FLAG_LOOP_BACK, String8(mMixAddress.c_str()), 0);
validAudioMix.mDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
- mAudioMixes.add(invalidAudioMix);
- ret = mManager->registerPolicyMixes(mAudioMixes);
+ ret = addPolicyMix(invalidAudioMix);
ASSERT_EQ(INVALID_OPERATION, ret);
@@ -1614,8 +1613,7 @@
validAudioMix.mDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
mAudioMixes.clear();
- mAudioMixes.add(validAudioMix);
- status_t ret = mManager->registerPolicyMixes(mAudioMixes);
+ status_t ret = addPolicyMix(validAudioMix);
ASSERT_EQ(NO_ERROR, ret);
@@ -1629,7 +1627,7 @@
AudioMix invalidAudioMix(invalidMixMatchCriteria, MIX_TYPE_PLAYERS, audioConfig,
MIX_ROUTE_FLAG_LOOP_BACK, String8(mMixAddress.c_str()), 0);
- validAudioMix.mDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+ invalidAudioMix.mDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
Vector<AudioMix> mixes;
mixes.add(invalidAudioMix);
diff --git a/services/audiopolicy/tests/spatializer_tests.cpp b/services/audiopolicy/tests/spatializer_tests.cpp
new file mode 100644
index 0000000..73bef43
--- /dev/null
+++ b/services/audiopolicy/tests/spatializer_tests.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Spatializer_Test"
+
+#include "Spatializer.h"
+
+#include <string>
+#include <unordered_set>
+
+#include <gtest/gtest.h>
+
+#include <android/media/audio/common/AudioLatencyMode.h>
+#include <android/media/audio/common/HeadTracking.h>
+#include <android/media/audio/common/Spatialization.h>
+#include <com_android_media_audio.h>
+#include <utils/Log.h>
+
+using namespace android;
+using media::audio::common::HeadTracking;
+using media::audio::common::Spatialization;
+
+class TestSpatializerPolicyCallback :
+ public SpatializerPolicyCallback {
+public:
+ void onCheckSpatializer() override {};
+};
+
+class SpatializerTest : public ::testing::Test {
+protected:
+ void SetUp() override {
+ const sp<EffectsFactoryHalInterface> effectsFactoryHal
+ = EffectsFactoryHalInterface::create();
+ mSpatializer = Spatializer::create(&mTestCallback, effectsFactoryHal);
+ if (mSpatializer == nullptr) {
+ GTEST_SKIP() << "Skipping Spatializer tests: no spatializer";
+ }
+ std::vector<Spatialization::Level> levels;
+ binder::Status status = mSpatializer->getSupportedLevels(&levels);
+ ASSERT_TRUE(status.isOk());
+ for (auto level : levels) {
+ if (level != Spatialization::Level::NONE) {
+ mSpatializer->setLevel(level);
+ break;
+ }
+ }
+ mSpatializer->setOutput(sTestOutput);
+ }
+
+ void TearDown() override {
+ if (mSpatializer == nullptr) {
+ return;
+ }
+ mSpatializer->setLevel(Spatialization::Level::NONE);
+ mSpatializer->setOutput(AUDIO_IO_HANDLE_NONE);
+ mSpatializer->setDesiredHeadTrackingMode(HeadTracking::Mode::DISABLED);
+ mSpatializer->setHeadSensor(SpatializerPoseController::INVALID_SENSOR);
+ mSpatializer->updateActiveTracks(0);
+ }
+
+ static constexpr audio_io_handle_t sTestOutput = 1977;
+ static constexpr int sTestSensorHandle = 1980;
+
+ const static inline std::vector<audio_latency_mode_t> sA2DPLatencyModes = {
+ AUDIO_LATENCY_MODE_LOW,
+ AUDIO_LATENCY_MODE_FREE
+ };
+ const static inline std::vector<audio_latency_mode_t> sBLELatencyModes = {
+ AUDIO_LATENCY_MODE_LOW,
+ AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_SOFTWARE,
+ AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_HARDWARE,
+ AUDIO_LATENCY_MODE_FREE
+ };
+
+ bool setUpForHeadtracking() {
+ bool htSupported;
+ mSpatializer->isHeadTrackingSupported(&htSupported);
+ if (!htSupported) {
+ return false;
+ }
+
+ std::vector<HeadTracking::Mode> htModes;
+ mSpatializer->getSupportedHeadTrackingModes(&htModes);
+ for (auto htMode : htModes) {
+ if (htMode != HeadTracking::Mode::DISABLED) {
+ mSpatializer->setDesiredHeadTrackingMode(htMode);
+ break;
+ }
+ }
+
+ mSpatializer->setHeadSensor(sTestSensorHandle);
+ return true;
+ }
+
+ TestSpatializerPolicyCallback mTestCallback;
+ sp<Spatializer> mSpatializer;
+};
+
+TEST_F(SpatializerTest, SupportedA2dpLatencyTest) {
+ if (!setUpForHeadtracking()) {
+ GTEST_SKIP() << "Skipping SupportedA2dpLatencyTest: head tracking not supported";
+ }
+ std::vector<audio_latency_mode_t> latencies = sA2DPLatencyModes;
+ mSpatializer->onSupportedLatencyModesChangedMsg(sTestOutput, std::move(latencies));
+
+ std::vector<audio_latency_mode_t> supportedLatencies =
+ mSpatializer->getSupportedLatencyModes();
+
+ ASSERT_TRUE(supportedLatencies == sA2DPLatencyModes);
+ // Free mode must always be the last of the ordered list
+ ASSERT_TRUE(supportedLatencies.back() == AUDIO_LATENCY_MODE_FREE);
+}
+
+TEST_F(SpatializerTest, SupportedBleLatencyTest) {
+ if (!setUpForHeadtracking()) {
+ GTEST_SKIP() << "Skipping SupportedBleLatencyTest: head tracking not supported";
+ }
+ if (!com::android::media::audio::dsa_over_bt_le_audio()) {
+ GTEST_SKIP() << "Skipping SupportedBleLatencyTest: DSA over LE not enabled";
+ }
+ std::vector<audio_latency_mode_t> latencies = sBLELatencyModes;
+ mSpatializer->onSupportedLatencyModesChangedMsg(sTestOutput, std::move(latencies));
+
+ std::vector<audio_latency_mode_t> supportedLatencies =
+ mSpatializer->getSupportedLatencyModes();
+
+ ASSERT_TRUE(supportedLatencies.back() == AUDIO_LATENCY_MODE_FREE);
+ ASSERT_TRUE(std::find(supportedLatencies.begin(), supportedLatencies.end(),
+ AUDIO_LATENCY_MODE_LOW) != supportedLatencies.end());
+
+ std::vector<audio_latency_mode_t> orderedLowLatencyModes =
+ mSpatializer->getOrderedLowLatencyModes();
+
+ std::vector<audio_latency_mode_t> supportedLowLatencyModes;
+ // Remove the free mode at the end of the supported list to retain only low latency modes.
+ std::copy(supportedLatencies.begin(),
+ supportedLatencies.begin() + supportedLatencies.size() - 1,
+ std::back_inserter(supportedLowLatencyModes));
+
+ // Verify that the supported low latency modes always appear in the ordered
+ // latency modes list, and in the same order
+ std::vector<audio_latency_mode_t>::iterator lastIt = orderedLowLatencyModes.begin();
+ for (auto latency : supportedLowLatencyModes) {
+ auto it = std::find(orderedLowLatencyModes.begin(), orderedLowLatencyModes.end(), latency);
+ ASSERT_NE(it, orderedLowLatencyModes.end());
+ ASSERT_LE(lastIt, it);
+ lastIt = it;
+ }
+}
+
+TEST_F(SpatializerTest, RequestedA2dpLatencyTest) {
+ if (!setUpForHeadtracking()) {
+ GTEST_SKIP() << "Skipping RequestedA2dpLatencyTest: head tracking not supported";
+ }
+
+ std::vector<audio_latency_mode_t> latencies = sA2DPLatencyModes;
+ mSpatializer->onSupportedLatencyModesChangedMsg(sTestOutput, std::move(latencies));
+
+ // requested latency mode must be free if no spatialized tracks are active
+ audio_latency_mode_t requestedLatencyMode = mSpatializer->getRequestedLatencyMode();
+ ASSERT_EQ(requestedLatencyMode, AUDIO_LATENCY_MODE_FREE);
+
+ // requested latency mode must be low if at least one spatialized track is active
+ mSpatializer->updateActiveTracks(1);
+ requestedLatencyMode = mSpatializer->getRequestedLatencyMode();
+ ASSERT_EQ(requestedLatencyMode, AUDIO_LATENCY_MODE_LOW);
+
+ // requested latency mode must be free after stopping the last spatialized track
+ mSpatializer->updateActiveTracks(0);
+ requestedLatencyMode = mSpatializer->getRequestedLatencyMode();
+ ASSERT_EQ(requestedLatencyMode, AUDIO_LATENCY_MODE_FREE);
+}
+
+TEST_F(SpatializerTest, RequestedBleLatencyTest) {
+ if (!setUpForHeadtracking()) {
+ GTEST_SKIP() << "Skipping RequestedBleLatencyTest: head tracking not supported";
+ }
+ if (!com::android::media::audio::dsa_over_bt_le_audio()) {
+ GTEST_SKIP() << "Skipping RequestedBleLatencyTest: DSA over LE not enabled";
+ }
+
+ mSpatializer->onSupportedLatencyModesChangedMsg(sTestOutput,
+ { AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_SOFTWARE,
+ AUDIO_LATENCY_MODE_FREE });
+
+ // requested latency mode must be free if no spatialized tracks are active
+ audio_latency_mode_t requestedLatencyMode = mSpatializer->getRequestedLatencyMode();
+ ASSERT_EQ(requestedLatencyMode, AUDIO_LATENCY_MODE_FREE);
+
+ // requested latency mode must be low software if at least one spatialized track is active
+ // and the only supported low latency mode is low software
+ mSpatializer->updateActiveTracks(1);
+ requestedLatencyMode = mSpatializer->getRequestedLatencyMode();
+ ASSERT_EQ(requestedLatencyMode, AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_SOFTWARE);
+
+ mSpatializer->onSupportedLatencyModesChangedMsg(sTestOutput,
+ { AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_SOFTWARE,
+ AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_HARDWARE,
+ AUDIO_LATENCY_MODE_FREE });
+
+ requestedLatencyMode = mSpatializer->getRequestedLatencyMode();
+ HeadTracking::ConnectionMode connectionMode = mSpatializer->getHeadtrackingConnectionMode();
+
+ // If low hardware mode is used, the spatializer must use one of the tunneled
+ // sensor connection modes.
+ // Otherwise, low software mode must be used.
+ if (requestedLatencyMode == AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_HARDWARE) {
+ ASSERT_TRUE(connectionMode == HeadTracking::ConnectionMode::DIRECT_TO_SENSOR_TUNNEL
+ || connectionMode == HeadTracking::ConnectionMode::DIRECT_TO_SENSOR_SW);
+ } else {
+ ASSERT_EQ(requestedLatencyMode, AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_SOFTWARE);
+ }
+
+ // requested latency mode must be free after stopping the last spatialized track
+ mSpatializer->updateActiveTracks(0);
+ requestedLatencyMode = mSpatializer->getRequestedLatencyMode();
+ ASSERT_EQ(requestedLatencyMode, AUDIO_LATENCY_MODE_FREE);
+}
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index b748888..c9e9090 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -263,7 +263,7 @@
"liblog",
"libutils",
"libxml2",
- "camera_platform_flags_c_lib"
+ "camera_platform_flags_c_lib",
],
include_dirs: [
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index ebe771e..78c14b2 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -155,17 +155,18 @@
CameraService::CameraService(
std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper,
std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils) :
+ AttributionAndPermissionUtilsEncapsulator(attributionAndPermissionUtils == nullptr ?
+ std::make_shared<AttributionAndPermissionUtils>()
+ : attributionAndPermissionUtils),
mCameraServiceProxyWrapper(cameraServiceProxyWrapper == nullptr ?
std::make_shared<CameraServiceProxyWrapper>() : cameraServiceProxyWrapper),
- mAttributionAndPermissionUtils(attributionAndPermissionUtils == nullptr ?
- std::make_shared<AttributionAndPermissionUtils>(this)\
- : attributionAndPermissionUtils),
mEventLog(DEFAULT_EVENT_LOG_LENGTH),
mNumberOfCameras(0),
mNumberOfCamerasWithoutSystemCamera(0),
mSoundRef(0), mInitialized(false),
mAudioRestriction(hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_NONE) {
ALOGI("CameraService started (pid=%d)", getpid());
+ mAttributionAndPermissionUtils->setCameraService(this);
mServiceLockWrapper = std::make_shared<WaitableMutexWrapper>(&mServiceLock);
mMemFd = memfd_create(sFileName, MFD_ALLOW_SEALING);
if (mMemFd == -1) {
@@ -707,14 +708,6 @@
broadcastTorchModeStatus(cameraId, newStatus, systemCameraKind);
}
-bool CameraService::isAutomotiveDevice() const {
- return mAttributionAndPermissionUtils->isAutomotiveDevice();
-}
-
-bool CameraService::isAutomotivePrivilegedClient(int32_t uid) const {
- return mAttributionAndPermissionUtils->isAutomotivePrivilegedClient(uid);
-}
-
bool CameraService::isAutomotiveExteriorSystemCamera(const std::string& cam_id) const {
// Returns false if this is not an automotive device type.
if (!isAutomotiveDevice())
@@ -760,49 +753,6 @@
return true;
}
-static AttributionSourceState attributionSourceFromPidAndUid(int callingPid, int callingUid) {
- AttributionSourceState attributionSource{};
- attributionSource.pid = callingPid;
- attributionSource.uid = callingUid;
- return attributionSource;
-}
-
-bool CameraService::hasPermissionsForCamera(int callingPid, int callingUid) const {
- return hasPermissionsForCamera(std::string(), callingPid, callingUid);
-}
-
-bool CameraService::hasPermissionsForCamera(const std::string& cameraId, int callingPid,
- int callingUid) const {
- auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
- return mAttributionAndPermissionUtils->hasPermissionsForCamera(cameraId, attributionSource);
-}
-
-bool CameraService::hasPermissionsForSystemCamera(const std::string& cameraId, int callingPid,
- int callingUid, bool checkCameraPermissions) const {
- auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
- return mAttributionAndPermissionUtils->hasPermissionsForSystemCamera(
- cameraId, attributionSource, checkCameraPermissions);
-}
-
-bool CameraService::hasPermissionsForCameraHeadlessSystemUser(const std::string& cameraId,
- int callingPid, int callingUid) const {
- auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
- return mAttributionAndPermissionUtils->hasPermissionsForCameraHeadlessSystemUser(
- cameraId, attributionSource);
-}
-
-bool CameraService::hasPermissionsForCameraPrivacyAllowlist(int callingPid, int callingUid) const {
- auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
- return mAttributionAndPermissionUtils->hasPermissionsForCameraPrivacyAllowlist(
- attributionSource);
-}
-
-bool CameraService::hasPermissionsForOpenCloseListener(int callingPid, int callingUid) const {
- auto attributionSource = attributionSourceFromPidAndUid(callingPid, callingUid);
- return mAttributionAndPermissionUtils->hasPermissionsForOpenCloseListener(
- attributionSource);
-}
-
Status CameraService::getNumberOfCameras(int32_t type, int32_t* numCameras) {
ATRACE_CALL();
Mutex::Autolock l(mServiceLock);
@@ -1353,6 +1303,7 @@
int32_t* torchStrength) {
ATRACE_CALL();
Mutex::Autolock l(mServiceLock);
+
const std::string cameraId = resolveCameraId(
unresolvedCameraId, CameraThreadState::getCallingUid());
if (!mInitialized) {
@@ -1681,16 +1632,6 @@
return STATUS_ERROR(ERROR_INVALID_OPERATION, "Unable to initialize legacy parameters");
}
-// Can camera service trust the caller based on the calling UID?
-bool CameraService::isTrustedCallingUid(uid_t uid) const {
- return mAttributionAndPermissionUtils->isTrustedCallingUid(uid);
-}
-
-status_t CameraService::getUidForPackage(const std::string &packageName, int userId,
- /*inout*/uid_t& uid, int err) const {
- return mAttributionAndPermissionUtils->getUidForPackage(packageName, userId, uid, err);
-}
-
Status CameraService::validateConnectLocked(const std::string& cameraId,
const std::string& clientName8, /*inout*/int& clientUid, /*inout*/int& clientPid,
/*out*/int& originalClientPid) const {
@@ -1755,7 +1696,7 @@
"Untrusted caller (calling PID %d, UID %d) trying to "
"forward camera access to camera %s for client %s (PID %d, UID %d)",
callingPid, callingUid, cameraId.c_str(),
- clientName.c_str(), clientUid, clientPid);
+ clientName.c_str(), clientPid, clientUid);
}
// Check if we can trust clientPid
@@ -1768,7 +1709,7 @@
"Untrusted caller (calling PID %d, UID %d) trying to "
"forward camera access to camera %s for client %s (PID %d, UID %d)",
callingPid, callingUid, cameraId.c_str(),
- clientName.c_str(), clientUid, clientPid);
+ clientName.c_str(), clientPid, clientUid);
}
if (shouldRejectSystemCameraConnection(cameraId)) {
@@ -1788,13 +1729,14 @@
// If it's not calling from cameraserver, check the permission if the
// device isn't a system only camera (shouldRejectSystemCameraConnection already checks for
// android.permission.SYSTEM_CAMERA for system only camera devices).
- bool checkPermissionForCamera = hasPermissionsForCamera(cameraId, clientPid, clientUid);
+ bool checkPermissionForCamera =
+ hasPermissionsForCamera(cameraId, clientPid, clientUid, clientName);
if (callingPid != getpid() &&
(deviceKind != SystemCameraKind::SYSTEM_ONLY_CAMERA) && !checkPermissionForCamera) {
ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
"Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" without camera permission",
- clientName.c_str(), clientUid, clientPid, cameraId.c_str());
+ clientName.c_str(), clientPid, clientUid, cameraId.c_str());
}
// Make sure the UID is in an active state to use the camera
@@ -1805,7 +1747,7 @@
return STATUS_ERROR_FMT(ERROR_DISABLED,
"Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" from background ("
"calling UID %d proc state %" PRId32 ")",
- clientName.c_str(), clientUid, clientPid, cameraId.c_str(),
+ clientName.c_str(), clientPid, clientUid, cameraId.c_str(),
callingUid, procState);
}
@@ -1818,7 +1760,7 @@
ALOGE("Access Denial: cannot use the camera when sensor privacy is enabled");
return STATUS_ERROR_FMT(ERROR_DISABLED,
"Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" when sensor privacy "
- "is enabled", clientName.c_str(), clientUid, clientPid, cameraId.c_str());
+ "is enabled", clientName.c_str(), clientPid, clientUid, cameraId.c_str());
}
// Only use passed in clientPid to check permission. Use calling PID as the client PID that's
@@ -1844,14 +1786,14 @@
// If the System User tries to access the camera when the device is running in
// headless system user mode, ensure that client has the required permission
// CAMERA_HEADLESS_SYSTEM_USER.
- if (mAttributionAndPermissionUtils->isHeadlessSystemUserMode()
+ if (isHeadlessSystemUserMode()
&& (clientUserId == USER_SYSTEM)
&& !hasPermissionsForCameraHeadlessSystemUser(cameraId, callingPid, callingUid)) {
ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
"Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" as Headless System \
User without camera headless system user permission",
- clientName.c_str(), clientUid, clientPid, cameraId.c_str());
+ clientName.c_str(), clientPid, clientUid, cameraId.c_str());
}
}
@@ -2212,7 +2154,7 @@
}
// (1) Cameraserver trying to connect, accept.
- if (mAttributionAndPermissionUtils->isCallerCameraServerNotDelegating()) {
+ if (isCallerCameraServerNotDelegating()) {
return false;
}
// (2)
@@ -2249,16 +2191,17 @@
sp<CameraDeviceClient> client = nullptr;
std::string clientPackageNameAdj = clientPackageName;
int callingPid = CameraThreadState::getCallingPid();
+ int callingUid = CameraThreadState::getCallingUid();
bool systemNativeClient = false;
if (doesClientHaveSystemUid() && (clientPackageNameAdj.size() == 0)) {
- std::string systemClient =
- fmt::sprintf("client.pid<%d>", CameraThreadState::getCallingPid());
+ std::string systemClient = fmt::sprintf("client.pid<%d>", callingPid);
clientPackageNameAdj = systemClient;
systemNativeClient = true;
}
+
const std::string cameraId = resolveCameraId(
unresolvedCameraId,
- CameraThreadState::getCallingUid(),
+ callingUid,
clientPackageNameAdj);
if (oomScoreOffset < 0) {
@@ -2271,7 +2214,6 @@
}
userid_t clientUserId = multiuser_get_user_id(clientUid);
- int callingUid = CameraThreadState::getCallingUid();
if (clientUid == USE_CALLING_UID) {
clientUserId = multiuser_get_user_id(callingUid);
}
@@ -2288,8 +2230,8 @@
// enforce system camera permissions
if (oomScoreOffset > 0
&& !hasPermissionsForSystemCamera(cameraId, callingPid,
- CameraThreadState::getCallingUid())
- && !isTrustedCallingUid(CameraThreadState::getCallingUid())) {
+ callingUid)
+ && !isTrustedCallingUid(callingUid)) {
std::string msg = fmt::sprintf("Cannot change the priority of a client %s pid %d for "
"camera id %s without SYSTEM_CAMERA permissions",
clientPackageNameAdj.c_str(), callingPid, cameraId.c_str());
@@ -2329,8 +2271,8 @@
const auto& mActivityManager = getActivityManager();
if (mActivityManager) {
mActivityManager->logFgsApiBegin(LOG_FGS_CAMERA_API,
- CameraThreadState::getCallingUid(),
- CameraThreadState::getCallingPid());
+ callingUid,
+ callingPid);
}
return ret;
}
@@ -2353,12 +2295,8 @@
return true;
} else if (mSensorPrivacyPolicy->getCameraPrivacyState() == SensorPrivacyManager::DISABLED) {
return false;
- } else if ((mSensorPrivacyPolicy->getCameraPrivacyState()
- == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_HELPFUL_APPS) ||
- (mSensorPrivacyPolicy->getCameraPrivacyState()
- == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_REQUIRED_APPS) ||
- (mSensorPrivacyPolicy->getCameraPrivacyState() ==
- SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_APPS)) {
+ } else if (mSensorPrivacyPolicy->getCameraPrivacyState()
+ == SensorPrivacyManager::ENABLED_EXCEPT_ALLOWLISTED_APPS) {
if (hasPermissionsForCameraPrivacyAllowlist(callingPid, callingUid)) {
return false;
} else {
@@ -4015,7 +3953,7 @@
const std::optional<std::string>& clientFeatureId, const std::string& cameraIdStr,
int cameraFacing, int sensorOrientation, int clientPid, uid_t clientUid,
int servicePid, bool overrideToPortrait):
- mAttributionAndPermissionUtils(attributionAndPermissionUtils),
+ AttributionAndPermissionUtilsEncapsulator(attributionAndPermissionUtils),
mDestructionStarted(false),
mCameraIdStr(cameraIdStr), mCameraFacing(cameraFacing), mOrientation(sensorOrientation),
mClientPackageName(clientPackageName), mSystemNativeClient(nativeClient),
@@ -4048,7 +3986,7 @@
mAppOpsManager = std::make_unique<AppOpsManager>();
}
- mUidIsTrusted = mAttributionAndPermissionUtils->isTrustedCallingUid(mClientUid);
+ mUidIsTrusted = isTrustedCallingUid(mClientUid);
}
CameraService::BasicClient::~BasicClient() {
@@ -4787,7 +4725,7 @@
}
hasCameraPrivacyFeature(); // Called so the result is cached
mSpm.addSensorPrivacyListener(this);
- if (mAttributionAndPermissionUtils->isAutomotiveDevice()) {
+ if (isAutomotiveDevice()) {
mSpm.addToggleSensorPrivacyListener(this);
}
mSensorPrivacyEnabled = mSpm.isSensorPrivacyEnabled();
@@ -4827,7 +4765,7 @@
void CameraService::SensorPrivacyPolicy::unregisterSelf() {
Mutex::Autolock _l(mSensorPrivacyLock);
mSpm.removeSensorPrivacyListener(this);
- if (mAttributionAndPermissionUtils->isAutomotiveDevice()) {
+ if (isAutomotiveDevice()) {
mSpm.removeToggleSensorPrivacyListener(this);
}
mSpm.unlinkToDeath(this);
@@ -4903,9 +4841,7 @@
// if sensor privacy is enabled then block all clients from accessing the camera
if (state == SensorPrivacyManager::ENABLED) {
service->blockAllClients();
- } else if ((state == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_APPS)
- || (state == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_HELPFUL_APPS)
- || (state == SensorPrivacyManager::AUTOMOTIVE_DRIVER_ASSISTANCE_REQUIRED_APPS)) {
+ } else if (state == SensorPrivacyManager::ENABLED_EXCEPT_ALLOWLISTED_APPS) {
service->blockPrivacyEnabledClients();
}
return binder::Status::ok();
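
The constructor reshuffle at the top of this file breaks an initialization cycle: AttributionAndPermissionUtils used to receive the CameraService in its constructor, but the encapsulator base class must now be constructed before the CameraService body runs, so the back-reference is installed afterwards via setCameraService(this). A standalone sketch of this two-phase wiring (a raw pointer stands in for the wp<> used in the tree):

#include <memory>

class Service;

class Utils {
  public:
    // Phase 2: the service installs the back-reference after construction.
    void setService(Service* service) { mService = service; }
  private:
    Service* mService = nullptr;  // the tree uses wp<CameraService> instead
};

class Service {
  public:
    // Phase 1: the utils object no longer needs the service to be built,
    // so it can be defaulted or injected in the initializer list.
    explicit Service(std::shared_ptr<Utils> utils = nullptr)
        : mUtils(utils ? std::move(utils) : std::make_shared<Utils>()) {
        mUtils->setService(this);  // body runs after all members exist
    }
  private:
    std::shared_ptr<Utils> mUtils;
};

int main() { Service service; }
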
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 11cf1a1..1a887a1 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -78,7 +78,8 @@
public virtual ::android::hardware::BnCameraService,
public virtual IBinder::DeathRecipient,
public virtual CameraProviderManager::StatusListener,
- public virtual IServiceManager::LocalRegistrationCallback
+ public virtual IServiceManager::LocalRegistrationCallback,
+ public AttributionAndPermissionUtilsEncapsulator
{
friend class BinderService<CameraService>;
friend class CameraOfflineSessionClient;
@@ -317,13 +318,6 @@
// Shared utilities
static binder::Status filterGetInfoErrorCode(status_t err);
- bool isAutomotiveDevice() const;
-
- /**
- * Returns true if the client has uid AID_AUTOMOTIVE_EVS and the device is an automotive device.
- */
- bool isAutomotivePrivilegedClient(int32_t uid) const;
-
/**
* Returns true if the device is an automotive device and cameraId is system
* only camera which has characteristic AUTOMOTIVE_LOCATION value as either
@@ -335,7 +329,9 @@
/////////////////////////////////////////////////////////////////////
// CameraClient functionality
- class BasicClient : public virtual RefBase {
+ class BasicClient :
+ public virtual RefBase,
+ public AttributionAndPermissionUtilsEncapsulator {
friend class CameraService;
public:
virtual status_t initialize(sp<CameraProviderManager> manager,
@@ -460,8 +456,6 @@
virtual ~BasicClient();
- std::shared_ptr<AttributionAndPermissionUtils> mAttributionAndPermissionUtils;
-
// the instance is in the middle of destruction. When this is set,
// the instance should not be accessed from callback.
// CameraService's mClientLock should be acquired to access this.
@@ -681,20 +675,6 @@
return activityManager;
}
- bool hasPermissionsForCamera(int callingPid, int callingUid) const;
-
- bool hasPermissionsForCamera(const std::string& cameraId, int callingPid, int callingUid) const;
-
- bool hasPermissionsForSystemCamera(const std::string& cameraId, int callingPid, int callingUid,
- bool checkCameraPermissions = true) const;
-
- bool hasPermissionsForCameraHeadlessSystemUser(const std::string& cameraId, int callingPid,
- int callingUid) const;
-
- bool hasPermissionsForCameraPrivacyAllowlist(int callingPid, int callingUid) const;
-
- bool hasPermissionsForOpenCloseListener(int callingPid, int callingUid) const;
-
/**
* Typesafe version of device status, containing both the HAL-layer and the service interface-
* layer values.
@@ -879,12 +859,13 @@
// prevented from accessing the camera.
class SensorPrivacyPolicy : public hardware::BnSensorPrivacyListener,
public virtual IBinder::DeathRecipient,
- public virtual IServiceManager::LocalRegistrationCallback {
+ public virtual IServiceManager::LocalRegistrationCallback,
+ public AttributionAndPermissionUtilsEncapsulator {
public:
explicit SensorPrivacyPolicy(wp<CameraService> service,
std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils)
- : mService(service),
- mAttributionAndPermissionUtils(attributionAndPermissionUtils),
+ : AttributionAndPermissionUtilsEncapsulator(attributionAndPermissionUtils),
+ mService(service),
mSensorPrivacyEnabled(false),
mCameraPrivacyState(SensorPrivacyManager::DISABLED), mRegistered(false) {}
@@ -909,7 +890,6 @@
private:
SensorPrivacyManager mSpm;
wp<CameraService> mService;
- std::shared_ptr<AttributionAndPermissionUtils> mAttributionAndPermissionUtils;
Mutex mSensorPrivacyLock;
bool mSensorPrivacyEnabled;
int mCameraPrivacyState;
@@ -924,7 +904,6 @@
sp<SensorPrivacyPolicy> mSensorPrivacyPolicy;
std::shared_ptr<CameraServiceProxyWrapper> mCameraServiceProxyWrapper;
- std::shared_ptr<AttributionAndPermissionUtils> mAttributionAndPermissionUtils;
// Delay-load the Camera HAL module
virtual void onFirstRef();
@@ -937,11 +916,6 @@
void addStates(const std::string& id);
void removeStates(const std::string& id);
- bool isTrustedCallingUid(uid_t uid) const;
-
- status_t getUidForPackage(const std::string &packageName, int userId,
- /*inout*/uid_t& uid, int err) const;
-
// Check if we can connect, before we acquire the service lock.
// The returned originalClientPid is the PID of the original process that wants to connect to
// camera.
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index ac4b039..fa063b8 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -1060,7 +1060,7 @@
static const nsecs_t kRequestTimeout = 50e6; // 50 ms
// TODO: does this need to be adjusted for long exposure requests?
- static const nsecs_t kRequestSubmitTimeout = 200e6; // 200 ms
+ static const nsecs_t kRequestSubmitTimeout = 500e6; // 500 ms
// Used to prepare a batch of requests.
struct NextRequest {
diff --git a/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.h b/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.h
index dc4cfb1..7f4a1bd 100644
--- a/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.h
+++ b/services/camera/libcameraservice/utils/AttributionAndPermissionUtils.h
@@ -33,10 +33,13 @@
*/
class AttributionAndPermissionUtils {
public:
- AttributionAndPermissionUtils(wp<CameraService> cameraService) : mCameraService(cameraService)
- {}
+ AttributionAndPermissionUtils() { }
virtual ~AttributionAndPermissionUtils() {}
+ void setCameraService(wp<CameraService> cameraService) {
+ mCameraService = cameraService;
+ }
+
/**
* Pre-grants the permission if the attribution source uid is for an automotive
* privileged client. Otherwise uses system service permission checker to check
@@ -49,10 +52,18 @@
virtual bool checkPermissionForPreflight(const std::string &cameraId,
const std::string &permission, const AttributionSourceState& attributionSource,
const std::string& message, int32_t attributedOpCode);
+
+ // Can camera service trust the caller based on the calling UID?
virtual bool isTrustedCallingUid(uid_t uid);
+
virtual bool isAutomotiveDevice();
virtual bool isHeadlessSystemUserMode();
+
+ /**
+ * Returns true if the client has uid AID_AUTOMOTIVE_EVS and the device is an automotive device.
+ */
virtual bool isAutomotivePrivilegedClient(int32_t uid);
+
virtual status_t getUidForPackage(const std::string &packageName, int userId,
/*inout*/uid_t& uid, int err);
virtual bool isCallerCameraServerNotDelegating();
@@ -86,6 +97,107 @@
const AttributionSourceState &attributionSource);
};
+/**
+ * Class to be inherited by classes encapsulating AttributionAndPermissionUtils. Provides an
+ * additional utility layer above AttributionAndPermissionUtils calls, and avoids verbosity
+ * in the encapsulating class's methods.
+ */
+class AttributionAndPermissionUtilsEncapsulator {
+protected:
+ std::shared_ptr<AttributionAndPermissionUtils> mAttributionAndPermissionUtils;
+
+public:
+ AttributionAndPermissionUtilsEncapsulator(
+ std::shared_ptr<AttributionAndPermissionUtils> attributionAndPermissionUtils)
+ : mAttributionAndPermissionUtils(attributionAndPermissionUtils) { }
+
+ static AttributionSourceState buildAttributionSource(int callingPid, int callingUid) {
+ AttributionSourceState attributionSource{};
+ attributionSource.pid = callingPid;
+ attributionSource.uid = callingUid;
+ return attributionSource;
+ }
+
+ static AttributionSourceState buildAttributionSource(int callingPid, int callingUid,
+ const std::string& packageName) {
+ AttributionSourceState attributionSource = buildAttributionSource(callingPid, callingUid);
+ attributionSource.packageName = packageName;
+ return attributionSource;
+ }
+
+ bool hasPermissionsForCamera(int callingPid, int callingUid) const {
+ return hasPermissionsForCamera(std::string(), callingPid, callingUid);
+ }
+
+ bool hasPermissionsForCamera(int callingPid, int callingUid,
+ const std::string& packageName) const {
+ return hasPermissionsForCamera(std::string(), callingPid, callingUid, packageName);
+ }
+
+ bool hasPermissionsForCamera(const std::string& cameraId, int callingPid,
+ int callingUid) const {
+ auto attributionSource = buildAttributionSource(callingPid, callingUid);
+ return mAttributionAndPermissionUtils->hasPermissionsForCamera(cameraId, attributionSource);
+ }
+
+ bool hasPermissionsForCamera(const std::string& cameraId, int callingPid, int callingUid,
+ const std::string& packageName) const {
+ auto attributionSource = buildAttributionSource(callingPid, callingUid, packageName);
+ return mAttributionAndPermissionUtils->hasPermissionsForCamera(cameraId, attributionSource);
+ }
+
+ bool hasPermissionsForSystemCamera(const std::string& cameraId, int callingPid, int callingUid,
+ bool checkCameraPermissions = true) const {
+ auto attributionSource = buildAttributionSource(callingPid, callingUid);
+ return mAttributionAndPermissionUtils->hasPermissionsForSystemCamera(
+ cameraId, attributionSource, checkCameraPermissions);
+ }
+
+ bool hasPermissionsForCameraHeadlessSystemUser(const std::string& cameraId, int callingPid,
+ int callingUid) const {
+ auto attributionSource = buildAttributionSource(callingPid, callingUid);
+ return mAttributionAndPermissionUtils->hasPermissionsForCameraHeadlessSystemUser(
+ cameraId, attributionSource);
+ }
+
+ bool hasPermissionsForCameraPrivacyAllowlist(int callingPid, int callingUid) const {
+ auto attributionSource = buildAttributionSource(callingPid, callingUid);
+ return mAttributionAndPermissionUtils->hasPermissionsForCameraPrivacyAllowlist(
+ attributionSource);
+ }
+
+ bool hasPermissionsForOpenCloseListener(int callingPid, int callingUid) const {
+ auto attributionSource = buildAttributionSource(callingPid, callingUid);
+ return mAttributionAndPermissionUtils->hasPermissionsForOpenCloseListener(
+ attributionSource);
+ }
+
+ bool isAutomotiveDevice() const {
+ return mAttributionAndPermissionUtils->isAutomotiveDevice();
+ }
+
+ bool isAutomotivePrivilegedClient(int32_t uid) const {
+ return mAttributionAndPermissionUtils->isAutomotivePrivilegedClient(uid);
+ }
+
+ bool isTrustedCallingUid(uid_t uid) const {
+ return mAttributionAndPermissionUtils->isTrustedCallingUid(uid);
+ }
+
+ bool isHeadlessSystemUserMode() const {
+ return mAttributionAndPermissionUtils->isHeadlessSystemUserMode();
+ }
+
+ status_t getUidForPackage(const std::string &packageName, int userId,
+ /*inout*/uid_t& uid, int err) const {
+ return mAttributionAndPermissionUtils->getUidForPackage(packageName, userId, uid, err);
+ }
+
+ bool isCallerCameraServerNotDelegating() const {
+ return mAttributionAndPermissionUtils->isCallerCameraServerNotDelegating();
+ }
+};
+
} // namespace android
#endif // ANDROID_SERVERS_CAMERA_ATTRIBUTION_AND_PERMISSION_UTILS_H
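
For context, a consumer opts in by inheriting the encapsulator, forwarding the shared utils in its initializer list, and calling the inherited helpers directly instead of dereferencing a member pointer. A hypothetical client sketch (only the base class and its methods come from the header above):

#include <memory>
#include <string>

#include "AttributionAndPermissionUtils.h"

namespace android {

// Hypothetical consumer: inherits the helpers instead of holding the pointer.
class MyPolicy : public AttributionAndPermissionUtilsEncapsulator {
  public:
    explicit MyPolicy(std::shared_ptr<AttributionAndPermissionUtils> utils)
        : AttributionAndPermissionUtilsEncapsulator(utils) {}

    bool canOpenCamera(const std::string& cameraId, int pid, int uid) const {
        // The inherited helper builds the AttributionSourceState internally.
        return hasPermissionsForCamera(cameraId, pid, uid);
    }
};

}  // namespace android
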
diff --git a/services/camera/virtualcamera/TEST_MAPPING b/services/camera/virtualcamera/TEST_MAPPING
index 25fca73..e976704 100644
--- a/services/camera/virtualcamera/TEST_MAPPING
+++ b/services/camera/virtualcamera/TEST_MAPPING
@@ -9,7 +9,8 @@
{
"exclude-annotation": "androidx.test.filters.FlakyTest"
}
- ]
+ ],
+ "keywords": ["primary-device"]
}
]
}
diff --git a/services/camera/virtualcamera/VirtualCameraDevice.cc b/services/camera/virtualcamera/VirtualCameraDevice.cc
index e633737..ddb245a 100644
--- a/services/camera/virtualcamera/VirtualCameraDevice.cc
+++ b/services/camera/virtualcamera/VirtualCameraDevice.cc
@@ -81,8 +81,6 @@
constexpr MetadataBuilder::ControlRegion kDefaultEmptyControlRegion{};
-constexpr float kAspectRatioEpsilon = 0.05;
-
const std::array<Resolution, 5> kStandardJpegThumbnailSizes{
Resolution(176, 144), Resolution(240, 144), Resolution(256, 144),
Resolution(240, 160), Resolution(240, 180)};
@@ -91,14 +89,15 @@
PixelFormat::IMPLEMENTATION_DEFINED, PixelFormat::YCBCR_420_888,
PixelFormat::BLOB};
-bool isApproximatellySameAspectRatio(const Resolution r1, const Resolution r2) {
- float aspectRatio1 =
- static_cast<float>(r1.width) / static_cast<float>(r1.height);
- float aspectRatio2 =
- static_cast<float>(r2.width) / static_cast<float>(r2.height);
-
- return abs(aspectRatio1 - aspectRatio2) < kAspectRatioEpsilon;
-}
+// The resolutions below will be used to extend the set of supported output
+// resolutions. All resolutions with a lower pixel count and the same aspect
+// ratio as some supported input resolution will be added to the set of
+// supported output resolutions.
+const std::array<Resolution, 10> kOutputResolutions{
+ Resolution(320, 240), Resolution(640, 360), Resolution(640, 480),
+ Resolution(720, 480), Resolution(720, 576), Resolution(800, 600),
+ Resolution(1024, 576), Resolution(1280, 720), Resolution(1280, 960),
+ Resolution(1280, 1080),
+};
std::vector<Resolution> getSupportedJpegThumbnailSizes(
const std::vector<SupportedStreamConfiguration>& configs) {
@@ -125,9 +124,9 @@
kOutputFormats.end();
}
-std::vector<MetadataBuilder::FpsRange> fpsRangesForInputConfig(
+std::vector<FpsRange> fpsRangesForInputConfig(
const std::vector<SupportedStreamConfiguration>& configs) {
- std::set<MetadataBuilder::FpsRange> availableRanges;
+ std::set<FpsRange> availableRanges;
for (const SupportedStreamConfiguration& config : configs) {
availableRanges.insert({.minFps = kMinFps, .maxFps = config.maxFps});
@@ -142,8 +141,7 @@
availableRanges.insert({.minFps = 30, .maxFps = 30});
}
- return std::vector<MetadataBuilder::FpsRange>(availableRanges.begin(),
- availableRanges.end());
+ return std::vector<FpsRange>(availableRanges.begin(), availableRanges.end());
}
std::optional<Resolution> getMaxResolution(
@@ -180,6 +178,36 @@
}
}
+ std::map<Resolution, int> additionalResolutionToMaxFpsMap;
+ // Add additional resolutions we can support by downscaling input streams
+ // with the same aspect ratio.
+ for (const Resolution& outputResolution : kOutputResolutions) {
+ for (const auto& [resolution, maxFps] : resolutionToMaxFpsMap) {
+ if (resolutionToMaxFpsMap.find(outputResolution) !=
+ resolutionToMaxFpsMap.end()) {
+ // Resolution is already in the map, skip it.
+ continue;
+ }
+
+ if (outputResolution < resolution &&
+ isApproximatellySameAspectRatio(outputResolution, resolution)) {
+ // Lower resolution with the same aspect ratio; we can achieve this by
+ // downscaling, so add it to the map.
+ ALOGD(
+ "Extending set of output resolutions with %dx%d which has same "
+ "aspect ratio as supported input %dx%d.",
+ outputResolution.width, outputResolution.height, resolution.width,
+ resolution.height);
+ additionalResolutionToMaxFpsMap[outputResolution] = maxFps;
+ break;
+ }
+ }
+ }
+
+ // Add all resolutions we can achieve by downscaling to the map.
+ resolutionToMaxFpsMap.insert(additionalResolutionToMaxFpsMap.begin(),
+ additionalResolutionToMaxFpsMap.end());
+
return resolutionToMaxFpsMap;
}
@@ -272,14 +300,41 @@
ANDROID_JPEG_THUMBNAIL_QUALITY,
ANDROID_NOISE_REDUCTION_MODE,
ANDROID_STATISTICS_FACE_DETECT_MODE})
- .setAvailableResultKeys(
- {ANDROID_COLOR_CORRECTION_ABERRATION_MODE, ANDROID_CONTROL_AE_MODE,
- ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, ANDROID_CONTROL_AF_MODE,
- ANDROID_CONTROL_AWB_MODE, ANDROID_CONTROL_EFFECT_MODE,
- ANDROID_CONTROL_MODE, ANDROID_FLASH_MODE, ANDROID_FLASH_STATE,
- ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, ANDROID_JPEG_QUALITY,
- ANDROID_JPEG_THUMBNAIL_QUALITY, ANDROID_LENS_FOCAL_LENGTH,
- ANDROID_SENSOR_TIMESTAMP, ANDROID_NOISE_REDUCTION_MODE})
+ .setAvailableResultKeys({
+ ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+ ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+ ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+ ANDROID_CONTROL_AE_LOCK,
+ ANDROID_CONTROL_AE_MODE,
+ ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+ ANDROID_CONTROL_AE_STATE,
+ ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ ANDROID_CONTROL_AF_MODE,
+ ANDROID_CONTROL_AF_STATE,
+ ANDROID_CONTROL_AF_TRIGGER,
+ ANDROID_CONTROL_AWB_LOCK,
+ ANDROID_CONTROL_AWB_MODE,
+ ANDROID_CONTROL_AWB_STATE,
+ ANDROID_CONTROL_CAPTURE_INTENT,
+ ANDROID_CONTROL_EFFECT_MODE,
+ ANDROID_CONTROL_MODE,
+ ANDROID_CONTROL_SCENE_MODE,
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+ ANDROID_STATISTICS_FACE_DETECT_MODE,
+ ANDROID_FLASH_MODE,
+ ANDROID_FLASH_STATE,
+ ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+ ANDROID_JPEG_QUALITY,
+ ANDROID_JPEG_THUMBNAIL_QUALITY,
+ ANDROID_LENS_FOCAL_LENGTH,
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+ ANDROID_NOISE_REDUCTION_MODE,
+ ANDROID_REQUEST_PIPELINE_DEPTH,
+ ANDROID_SENSOR_TIMESTAMP,
+ ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE,
+ ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
+ ANDROID_STATISTICS_SCENE_FLICKER,
+ })
.setAvailableCapabilities(
{ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE});
@@ -401,6 +456,22 @@
return false;
}
+ const std::vector<Stream>& streams = streamConfiguration.streams;
+
+ Resolution firstStreamResolution(streams[0].width, streams[0].height);
+ auto isSameAspectRatioAsFirst = [firstStreamResolution](const Stream& stream) {
+ return isApproximatellySameAspectRatio(
+ firstStreamResolution, Resolution(stream.width, stream.height));
+ };
+ if (!std::all_of(streams.begin(), streams.end(), isSameAspectRatioAsFirst)) {
+ ALOGW(
+ "%s: Requested streams do not have same aspect ratio. Different aspect "
+ "ratios are currently "
+ "not supported by virtual camera. Stream configuration: %s",
+ __func__, streamConfiguration.toString().c_str());
+ return false;
+ }
+
int numberOfProcessedStreams = 0;
int numberOfStallStreams = 0;
for (const Stream& stream : streamConfiguration.streams) {
@@ -423,9 +494,13 @@
numberOfProcessedStreams++;
}
+ Resolution requestedResolution(stream.width, stream.height);
auto matchesSupportedInputConfig =
- [&stream](const SupportedStreamConfiguration& config) {
- return stream.width == config.width && stream.height == config.height;
+ [requestedResolution](const SupportedStreamConfiguration& config) {
+ Resolution supportedInputResolution(config.width, config.height);
+ return requestedResolution <= supportedInputResolution &&
+ isApproximatellySameAspectRatio(requestedResolution,
+ supportedInputResolution);
};
if (std::none_of(mSupportedInputConfigurations.begin(),
mSupportedInputConfigurations.end(),
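
The local helper isApproximatellySameAspectRatio (epsilon 0.05) is removed here yet still called below and in VirtualCameraSession.cc, so it has evidently moved to a shared util. A standalone sketch of the acceptance rule used to extend the output set: a candidate is added when some supported input has an equal or higher pixel count and an approximately equal aspect ratio (illustrative names):

#include <cmath>
#include <cstdio>
#include <utility>
#include <vector>

using Res = std::pair<int, int>;  // {width, height}

bool roughlySameAspect(Res a, Res b, float eps = 0.05f) {
    return std::fabs(static_cast<float>(a.first) / a.second -
                     static_cast<float>(b.first) / b.second) < eps;
}

int main() {
    const std::vector<Res> inputs = {{1920, 1080}};
    const std::vector<Res> candidates = {{1280, 720}, {640, 480}, {640, 360}};
    for (Res c : candidates) {
        for (Res in : inputs) {
            const bool fewerPixels = c.first * c.second < in.first * in.second;
            if (fewerPixels && roughlySameAspect(c, in)) {
                // 1280x720 and 640x360 pass (16:9); 640x480 is rejected.
                std::printf("can downscale %dx%d -> %dx%d\n",
                            in.first, in.second, c.first, c.second);
            }
        }
    }
}
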
diff --git a/services/camera/virtualcamera/VirtualCameraDevice.h b/services/camera/virtualcamera/VirtualCameraDevice.h
index c274dc9..4c50041 100644
--- a/services/camera/virtualcamera/VirtualCameraDevice.h
+++ b/services/camera/virtualcamera/VirtualCameraDevice.h
@@ -24,6 +24,7 @@
#include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
#include "aidl/android/companion/virtualcamera/VirtualCameraConfiguration.h"
#include "aidl/android/hardware/camera/device/BnCameraDevice.h"
+#include "system/camera_metadata.h"
#include "util/Util.h"
namespace android {
@@ -121,6 +122,9 @@
// Default JPEG compression quality.
static constexpr uint8_t kDefaultJpegQuality = 80;
+ static constexpr camera_metadata_enum_android_control_capture_intent_t
+ kDefaultCaptureIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+
private:
std::shared_ptr<VirtualCameraDevice> sharedFromThis();
diff --git a/services/camera/virtualcamera/VirtualCameraRenderThread.cc b/services/camera/virtualcamera/VirtualCameraRenderThread.cc
index 8bd8c9b..6e89b5f 100644
--- a/services/camera/virtualcamera/VirtualCameraRenderThread.cc
+++ b/services/camera/virtualcamera/VirtualCameraRenderThread.cc
@@ -92,30 +92,62 @@
const std::chrono::nanoseconds timestamp,
const RequestSettings& requestSettings,
const Resolution reportedSensorSize) {
- std::unique_ptr<CameraMetadata> metadata =
+ // All of the keys used in the response need to be referenced in
+ // availableResultKeys in CameraCharacteristics (see initCameraCharacteristics
+ // in VirtualCameraDevice.cc).
+ MetadataBuilder builder =
MetadataBuilder()
.setAberrationCorrectionMode(
ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF)
+ .setControlAeAvailableAntibandingModes(
+ {ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF})
+ .setControlAeAntibandingMode(ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF)
+ .setControlAeExposureCompensation(0)
+ .setControlAeLockAvailable(false)
+ .setControlAeLock(ANDROID_CONTROL_AE_LOCK_OFF)
.setControlAeMode(ANDROID_CONTROL_AE_MODE_ON)
.setControlAePrecaptureTrigger(
ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE)
+ .setControlAeState(ANDROID_CONTROL_AE_STATE_INACTIVE)
.setControlAfMode(ANDROID_CONTROL_AF_MODE_OFF)
+ .setControlAfTrigger(ANDROID_CONTROL_AF_TRIGGER_IDLE)
+ .setControlAfState(ANDROID_CONTROL_AF_STATE_INACTIVE)
.setControlAwbMode(ANDROID_CONTROL_AWB_MODE_AUTO)
+ .setControlAwbLock(ANDROID_CONTROL_AWB_LOCK_OFF)
+ .setControlAwbState(ANDROID_CONTROL_AWB_STATE_INACTIVE)
+ .setControlCaptureIntent(requestSettings.captureIntent)
.setControlEffectMode(ANDROID_CONTROL_EFFECT_MODE_OFF)
.setControlMode(ANDROID_CONTROL_MODE_AUTO)
+ .setControlSceneMode(ANDROID_CONTROL_SCENE_MODE_DISABLED)
+ .setControlVideoStabilizationMode(
+ ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF)
.setCropRegion(0, 0, reportedSensorSize.width,
reportedSensorSize.height)
.setFaceDetectMode(ANDROID_STATISTICS_FACE_DETECT_MODE_OFF)
.setFlashState(ANDROID_FLASH_STATE_UNAVAILABLE)
+ .setFlashMode(ANDROID_FLASH_MODE_OFF)
.setFocalLength(VirtualCameraDevice::kFocalLength)
.setJpegQuality(requestSettings.jpegQuality)
.setJpegThumbnailSize(requestSettings.thumbnailResolution.width,
requestSettings.thumbnailResolution.height)
.setJpegThumbnailQuality(requestSettings.thumbnailJpegQuality)
+ .setLensOpticalStabilizationMode(
+ ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF)
.setNoiseReductionMode(ANDROID_NOISE_REDUCTION_MODE_OFF)
.setPipelineDepth(kPipelineDepth)
.setSensorTimestamp(timestamp)
- .build();
+ .setStatisticsHotPixelMapMode(
+ ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF)
+ .setStatisticsLensShadingMapMode(
+ ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF)
+ .setStatisticsSceneFlicker(ANDROID_STATISTICS_SCENE_FLICKER_NONE);
+
+ if (requestSettings.fpsRange.has_value()) {
+ builder.setControlAeTargetFpsRange(requestSettings.fpsRange.value());
+ }
+
+ std::unique_ptr<CameraMetadata> metadata = builder.build();
+
if (metadata == nullptr) {
ALOGE("%s: Failed to build capture result metadata", __func__);
return CameraMetadata();
@@ -581,37 +613,36 @@
std::shared_ptr<AHardwareBuffer> inHwBuffer = framebuffer->getHardwareBuffer();
GraphicBuffer* gBuffer = GraphicBuffer::fromAHardwareBuffer(inHwBuffer.get());
- std::optional<size_t> compressedSize;
- if (gBuffer != nullptr) {
- if (gBuffer->getPixelFormat() != HAL_PIXEL_FORMAT_YCbCr_420_888) {
- // This should never happen since we're allocating the temporary buffer
- // with YUV420 layout above.
- ALOGE("%s: Cannot compress non-YUV buffer (pixelFormat %d)", __func__,
- gBuffer->getPixelFormat());
- return cameraStatus(Status::INTERNAL_ERROR);
- }
-
- YCbCrLockGuard yCbCrLock(inHwBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN);
- if (yCbCrLock.getStatus() != OK) {
- return cameraStatus(Status::INTERNAL_ERROR);
- }
-
- std::vector<uint8_t> app1ExifData =
- createExif(Resolution(stream->width, stream->height),
- createThumbnail(requestSettings.thumbnailResolution,
- requestSettings.thumbnailJpegQuality));
- compressedSize = compressJpeg(
- gBuffer->getWidth(), gBuffer->getHeight(), requestSettings.jpegQuality,
- *yCbCrLock, app1ExifData, stream->bufferSize - sizeof(CameraBlob),
- (*planesLock).planes[0].data);
- } else {
- std::vector<uint8_t> app1ExifData =
- createExif(Resolution(stream->width, stream->height));
- compressedSize = compressBlackJpeg(
- stream->width, stream->height, requestSettings.jpegQuality, app1ExifData,
- stream->bufferSize - sizeof(CameraBlob), (*planesLock).planes[0].data);
+ if (gBuffer == nullptr) {
+ ALOGE(
+ "%s: Encountered invalid temporary buffer while rendering JPEG "
+ "into BLOB stream",
+ __func__);
+ return cameraStatus(Status::INTERNAL_ERROR);
}
+ if (gBuffer->getPixelFormat() != HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ // This should never happen since we're allocating the temporary buffer
+ // with YUV420 layout above.
+ ALOGE("%s: Cannot compress non-YUV buffer (pixelFormat %d)", __func__,
+ gBuffer->getPixelFormat());
+ return cameraStatus(Status::INTERNAL_ERROR);
+ }
+
+ YCbCrLockGuard yCbCrLock(inHwBuffer, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN);
+ if (yCbCrLock.getStatus() != OK) {
+ return cameraStatus(Status::INTERNAL_ERROR);
+ }
+
+ std::vector<uint8_t> app1ExifData =
+ createExif(Resolution(stream->width, stream->height),
+ createThumbnail(requestSettings.thumbnailResolution,
+ requestSettings.thumbnailJpegQuality));
+ std::optional<size_t> compressedSize = compressJpeg(
+ gBuffer->getWidth(), gBuffer->getHeight(), requestSettings.jpegQuality,
+ *yCbCrLock, app1ExifData, stream->bufferSize - sizeof(CameraBlob),
+ (*planesLock).planes[0].data);
+
if (!compressedSize.has_value()) {
ALOGE("%s: Failed to compress JPEG image", __func__);
return cameraStatus(Status::INTERNAL_ERROR);
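
The JPEG rewrite above is a pure guard-clause refactor: the null gBuffer case becomes an early return, so the compression happy path reads straight through without an else arm. A minimal sketch of the shape:

#include <cstdio>
#include <optional>

std::optional<int> compress(const int* buffer, bool isYuv) {
    if (buffer == nullptr) {  // guard: invalid input, fail fast
        std::fprintf(stderr, "invalid buffer\n");
        return std::nullopt;
    }
    if (!isYuv) {             // guard: unsupported pixel layout
        std::fprintf(stderr, "non-YUV buffer\n");
        return std::nullopt;
    }
    return *buffer / 2;       // happy path, no nesting
}

int main() {
    int pixels = 42;
    if (auto size = compress(&pixels, true)) {
        std::printf("compressed size: %d\n", *size);
    }
}
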
diff --git a/services/camera/virtualcamera/VirtualCameraRenderThread.h b/services/camera/virtualcamera/VirtualCameraRenderThread.h
index 86dad0b..e8e0bd4 100644
--- a/services/camera/virtualcamera/VirtualCameraRenderThread.h
+++ b/services/camera/virtualcamera/VirtualCameraRenderThread.h
@@ -33,6 +33,7 @@
#include "util/EglFramebuffer.h"
#include "util/EglProgram.h"
#include "util/EglSurfaceTexture.h"
+#include "util/MetadataUtil.h"
#include "util/Util.h"
namespace android {
@@ -58,6 +59,9 @@
int jpegQuality = VirtualCameraDevice::kDefaultJpegQuality;
Resolution thumbnailResolution = Resolution(0, 0);
int thumbnailJpegQuality = VirtualCameraDevice::kDefaultJpegQuality;
+ std::optional<FpsRange> fpsRange = {};
+ camera_metadata_enum_android_control_capture_intent_t captureIntent =
+ VirtualCameraDevice::kDefaultCaptureIntent;
};
// Represents single capture request to fill set of buffers.
diff --git a/services/camera/virtualcamera/VirtualCameraSession.cc b/services/camera/virtualcamera/VirtualCameraSession.cc
index d972609..a0bb72e 100644
--- a/services/camera/virtualcamera/VirtualCameraSession.cc
+++ b/services/camera/virtualcamera/VirtualCameraSession.cc
@@ -21,6 +21,7 @@
#include <algorithm>
#include <atomic>
#include <chrono>
+#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
@@ -39,6 +40,7 @@
#include "VirtualCameraDevice.h"
#include "VirtualCameraRenderThread.h"
#include "VirtualCameraStream.h"
+#include "aidl/android/companion/virtualcamera/SupportedStreamConfiguration.h"
#include "aidl/android/hardware/camera/common/Status.h"
#include "aidl/android/hardware/camera/device/BufferCache.h"
#include "aidl/android/hardware/camera/device/BufferStatus.h"
@@ -48,6 +50,7 @@
#include "aidl/android/hardware/camera/device/NotifyMsg.h"
#include "aidl/android/hardware/camera/device/RequestTemplate.h"
#include "aidl/android/hardware/camera/device/ShutterMsg.h"
+#include "aidl/android/hardware/camera/device/Stream.h"
#include "aidl/android/hardware/camera/device/StreamBuffer.h"
#include "aidl/android/hardware/camera/device/StreamConfiguration.h"
#include "aidl/android/hardware/camera/device/StreamRotation.h"
@@ -106,9 +109,6 @@
// Maximum number of buffers to use per single stream.
constexpr size_t kMaxStreamBuffers = 2;
-constexpr int32_t kDefaultJpegQuality = 80;
-constexpr int32_t kDefaultJpegThumbnailQuality = 70;
-
// Thumbnail size (0,0) corresponds to disabling the thumbnail.
const Resolution kDefaultJpegThumbnailSize(0, 0);
@@ -148,7 +148,7 @@
.setControlMode(ANDROID_CONTROL_MODE_AUTO)
.setControlAeMode(ANDROID_CONTROL_AE_MODE_ON)
.setControlAeExposureCompensation(0)
- .setControlAeTargetFpsRange(maxFps, maxFps)
+ .setControlAeTargetFpsRange(FpsRange{maxFps, maxFps})
.setControlAeAntibandingMode(ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO)
.setControlAePrecaptureTrigger(
ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE)
@@ -204,6 +204,55 @@
}));
}
+Resolution resolutionFromStream(const Stream& stream) {
+ return Resolution(stream.width, stream.height);
+}
+
+Resolution resolutionFromInputConfig(
+ const SupportedStreamConfiguration& inputConfig) {
+ return Resolution(inputConfig.width, inputConfig.height);
+}
+
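+// Picks the supported input configuration that can cover all requested output
+// streams, or returns an empty optional when none qualifies.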
+std::optional<SupportedStreamConfiguration> pickInputConfigurationForStreams(
+ const std::vector<Stream>& requestedStreams,
+ const std::vector<SupportedStreamConfiguration>& supportedInputConfigs) {
+ Stream maxResolutionStream = getHighestResolutionStream(requestedStreams);
+ Resolution maxResolution = resolutionFromStream(maxResolutionStream);
+
+ // Find the best fitting input to satisfy all requested streams:
+ // best fitting => an input with the same or higher resolution, the same
+ // aspect ratio, and the lowest pixel count difference.
+ auto isBetterInputConfig = [maxResolution](
+ const SupportedStreamConfiguration& configA,
+ const SupportedStreamConfiguration& configB) {
+ int maxResPixelCount = maxResolution.width * maxResolution.height;
+ int pixelCountDiffA =
+ std::abs((configA.width * configA.height) - maxResPixelCount);
+ int pixelCountDiffB =
+ std::abs((configB.width * configB.height) - maxResPixelCount);
+
+ return pixelCountDiffA < pixelCountDiffB;
+ };
+
+ std::optional<SupportedStreamConfiguration> bestConfig;
+ for (const SupportedStreamConfiguration& inputConfig : supportedInputConfigs) {
+ Resolution inputConfigResolution = resolutionFromInputConfig(inputConfig);
+ if (inputConfigResolution < maxResolution ||
+ !isApproximatellySameAspectRatio(inputConfigResolution, maxResolution)) {
+ // Skip inputs with a lower resolution (we don't want to upscale) or a
+ // different aspect ratio.
+ continue;
+ }
+
+ if (!bestConfig.has_value() ||
+ isBetterInputConfig(inputConfig, bestConfig.value())) {
+ bestConfig = inputConfig;
+ }
+ }
+
+ return bestConfig;
+}
+
RequestSettings createSettingsFromMetadata(const CameraMetadata& metadata) {
return RequestSettings{
.jpegQuality = getJpegQuality(metadata).value_or(
@@ -211,7 +260,10 @@
.thumbnailResolution =
getJpegThumbnailSize(metadata).value_or(Resolution(0, 0)),
.thumbnailJpegQuality = getJpegThumbnailQuality(metadata).value_or(
- VirtualCameraDevice::kDefaultJpegQuality)};
+ VirtualCameraDevice::kDefaultJpegQuality),
+ .fpsRange = getFpsRange(metadata),
+ .captureIntent = getCaptureIntent(metadata).value_or(
+ ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW)};
}
} // namespace
@@ -279,15 +331,13 @@
halStreams.clear();
halStreams.resize(in_requestedConfiguration.streams.size());
- sp<Surface> inputSurface = nullptr;
- int inputWidth;
- int inputHeight;
-
if (!virtualCamera->isStreamCombinationSupported(in_requestedConfiguration)) {
ALOGE("%s: Requested stream configuration is not supported", __func__);
return cameraStatus(Status::ILLEGAL_ARGUMENT);
}
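+ // Input configuration chosen to back all requested output streams.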
+ sp<Surface> inputSurface = nullptr;
+ std::optional<SupportedStreamConfiguration> inputConfig;
{
std::lock_guard<std::mutex> lock(mLock);
for (int i = 0; i < in_requestedConfiguration.streams.size(); ++i) {
@@ -297,14 +347,20 @@
}
}
- Stream maxResStream = getHighestResolutionStream(streams);
- inputWidth = maxResStream.width;
- inputHeight = maxResStream.height;
+ inputConfig = pickInputConfigurationForStreams(
+ streams, virtualCamera->getInputConfigs());
+ if (!inputConfig.has_value()) {
+ ALOGE(
+ "%s: Failed to pick any input configuration for stream configuration "
+ "request: %s",
+ __func__, in_requestedConfiguration.toString().c_str());
+ return cameraStatus(Status::ILLEGAL_ARGUMENT);
+ }
if (mRenderThread == nullptr) {
// If there's no client callback, start camera in test mode.
const bool testMode = mVirtualCameraClientCallback == nullptr;
mRenderThread = std::make_unique<VirtualCameraRenderThread>(
- mSessionContext, Resolution(inputWidth, inputHeight),
+ mSessionContext, resolutionFromInputConfig(*inputConfig),
virtualCamera->getMaxInputResolution(), mCameraDeviceCallback,
testMode);
mRenderThread->start();
@@ -318,7 +374,7 @@
// create single texture.
mVirtualCameraClientCallback->onStreamConfigured(
/*streamId=*/0, aidl::android::view::Surface(inputSurface.get()),
- inputWidth, inputHeight, Format::YUV_420_888);
+ inputConfig->width, inputConfig->height, inputConfig->pixelFormat);
}
return ndk::ScopedAStatus::ok();
diff --git a/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc b/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc
index 9146d8a..ad9d83b 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraDeviceTest.cc
@@ -55,6 +55,10 @@
camera_metadata_enum_android_scaler_available_stream_configurations_t;
constexpr int kCameraId = 42;
+constexpr int kQvgaWidth = 320;
+constexpr int kQvgaHeight = 240;
+constexpr int k360pWidth = 640;
+constexpr int k360pHeight = 360;
constexpr int kVgaWidth = 640;
constexpr int kVgaHeight = 480;
constexpr int kHdWidth = 1280;
@@ -79,7 +83,8 @@
const int width;
const int height;
const int pixelFormat;
- const metadata_stream_t streamConfiguration;
+ const metadata_stream_t streamConfiguration =
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT;
};
bool operator==(const AvailableStreamConfiguration& a,
@@ -173,24 +178,33 @@
.lensFacing = LensFacing::FRONT},
.expectedAvailableStreamConfigs =
{AvailableStreamConfiguration{
- .width = kVgaWidth,
- .height = kVgaHeight,
- .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
- .streamConfiguration =
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+ .width = kQvgaWidth,
+ .height = kQvgaHeight,
+ .pixelFormat =
+ ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
+ AvailableStreamConfiguration{
+ .width = kQvgaWidth,
+ .height = kQvgaHeight,
+ .pixelFormat =
+ ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+ AvailableStreamConfiguration{
+ .width = kQvgaWidth,
+ .height = kQvgaHeight,
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
AvailableStreamConfiguration{
.width = kVgaWidth,
.height = kVgaHeight,
.pixelFormat =
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
- .streamConfiguration =
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+ ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
AvailableStreamConfiguration{
.width = kVgaWidth,
.height = kVgaHeight,
- .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
- .streamConfiguration =
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT}}},
+ .pixelFormat =
+ ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+ AvailableStreamConfiguration{
+ .width = kVgaWidth,
+ .height = kVgaHeight,
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB}}},
VirtualCameraConfigTestParam{
.inputConfig =
VirtualCameraConfiguration{
@@ -210,43 +224,70 @@
.lensFacing = LensFacing::BACK},
.expectedAvailableStreamConfigs = {
AvailableStreamConfiguration{
+ .width = kQvgaWidth,
+ .height = kQvgaHeight,
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
+ AvailableStreamConfiguration{
+ .width = kQvgaWidth,
+ .height = kQvgaHeight,
+ .pixelFormat =
+ ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+ AvailableStreamConfiguration{
+ .width = kQvgaWidth,
+ .height = kQvgaHeight,
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
+ AvailableStreamConfiguration{
+ .width = k360pWidth,
+ .height = k360pHeight,
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
+ AvailableStreamConfiguration{
+ .width = k360pWidth,
+ .height = k360pHeight,
+ .pixelFormat =
+ ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+ AvailableStreamConfiguration{
+ .width = k360pWidth,
+ .height = k360pHeight,
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
+ AvailableStreamConfiguration{
.width = kVgaWidth,
.height = kVgaHeight,
- .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
- .streamConfiguration =
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
AvailableStreamConfiguration{
.width = kVgaWidth,
.height = kVgaHeight,
.pixelFormat =
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
- .streamConfiguration =
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+ ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
AvailableStreamConfiguration{
.width = kVgaWidth,
.height = kVgaHeight,
- .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
- .streamConfiguration =
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
+ AvailableStreamConfiguration{
+ .width = 1024,
+ .height = 576,
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
+ AvailableStreamConfiguration{
+ .width = 1024,
+ .height = 576,
+ .pixelFormat =
+ ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
+ AvailableStreamConfiguration{
+ .width = 1024,
+ .height = 576,
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB},
AvailableStreamConfiguration{
.width = kHdWidth,
.height = kHdHeight,
- .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
- .streamConfiguration =
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888},
AvailableStreamConfiguration{
.width = kHdWidth,
.height = kHdHeight,
.pixelFormat =
- ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED,
- .streamConfiguration =
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT},
+ ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED},
AvailableStreamConfiguration{
.width = kHdWidth,
.height = kHdHeight,
- .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
- .streamConfiguration =
- ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT}}}));
+ .pixelFormat = ANDROID_SCALER_AVAILABLE_FORMATS_BLOB}}}));
class VirtualCameraDeviceTest : public ::testing::Test {
public:
diff --git a/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc b/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
index 1af8b80..5f313a0 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
@@ -37,6 +37,8 @@
namespace virtualcamera {
namespace {
+constexpr int kQvgaWidth = 320;
+constexpr int kQvgaHeight = 240;
constexpr int kVgaWidth = 640;
constexpr int kVgaHeight = 480;
constexpr int kSvgaWidth = 800;
@@ -104,32 +106,13 @@
MOCK_METHOD(ndk::ScopedAStatus, onStreamClosed, (int), (override));
};
-class VirtualCameraSessionTest : public ::testing::Test {
+class VirtualCameraSessionTestBase : public ::testing::Test {
public:
- void SetUp() override {
+ virtual void SetUp() override {
mMockCameraDeviceCallback =
ndk::SharedRefBase::make<MockCameraDeviceCallback>();
mMockVirtualCameraClientCallback =
ndk::SharedRefBase::make<MockVirtualCameraCallback>();
- mVirtualCameraDevice = ndk::SharedRefBase::make<VirtualCameraDevice>(
- kCameraId,
- VirtualCameraConfiguration{
- .supportedStreamConfigs = {SupportedStreamConfiguration{
- .width = kVgaWidth,
- .height = kVgaHeight,
- .pixelFormat = Format::YUV_420_888,
- .maxFps = kMaxFps},
- SupportedStreamConfiguration{
- .width = kSvgaWidth,
- .height = kSvgaHeight,
- .pixelFormat = Format::YUV_420_888,
- .maxFps = kMaxFps}},
- .virtualCameraCallback = nullptr,
- .sensorOrientation = SensorOrientation::ORIENTATION_0,
- .lensFacing = LensFacing::FRONT});
- mVirtualCameraSession = ndk::SharedRefBase::make<VirtualCameraSession>(
- mVirtualCameraDevice, mMockCameraDeviceCallback,
- mMockVirtualCameraClientCallback);
// Explicitly defining default actions below to prevent gmock from
// default-constructing ndk::ScopedAStatus, because default-constructed
@@ -155,6 +138,35 @@
protected:
std::shared_ptr<MockCameraDeviceCallback> mMockCameraDeviceCallback;
std::shared_ptr<MockVirtualCameraCallback> mMockVirtualCameraClientCallback;
+};
+
+class VirtualCameraSessionTest : public VirtualCameraSessionTestBase {
+ public:
+ void SetUp() override {
+ VirtualCameraSessionTestBase::SetUp();
+
+ mVirtualCameraDevice = ndk::SharedRefBase::make<VirtualCameraDevice>(
+ kCameraId,
+ VirtualCameraConfiguration{
+ .supportedStreamConfigs = {SupportedStreamConfiguration{
+ .width = kVgaWidth,
+ .height = kVgaHeight,
+ .pixelFormat = Format::YUV_420_888,
+ .maxFps = kMaxFps},
+ SupportedStreamConfiguration{
+ .width = kSvgaWidth,
+ .height = kSvgaHeight,
+ .pixelFormat = Format::YUV_420_888,
+ .maxFps = kMaxFps}},
+ .virtualCameraCallback = mMockVirtualCameraClientCallback,
+ .sensorOrientation = SensorOrientation::ORIENTATION_0,
+ .lensFacing = LensFacing::FRONT});
+ mVirtualCameraSession = ndk::SharedRefBase::make<VirtualCameraSession>(
+ mVirtualCameraDevice, mMockCameraDeviceCallback,
+ mMockVirtualCameraClientCallback);
+ }
+
+ protected:
std::shared_ptr<VirtualCameraDevice> mVirtualCameraDevice;
std::shared_ptr<VirtualCameraSession> mVirtualCameraSession;
};
@@ -272,6 +284,97 @@
Eq(static_cast<int32_t>(Status::ILLEGAL_ARGUMENT)));
}
+TEST_F(VirtualCameraSessionTest, ConfigureWithDifferentAspectRatioFails) {
+ StreamConfiguration streamConfiguration;
+ streamConfiguration.streams = {
+ createStream(kStreamId, kVgaWidth, kVgaHeight, PixelFormat::YCBCR_420_888),
+ createStream(kSecondStreamId, kVgaHeight, kVgaWidth,
+ PixelFormat::YCBCR_420_888)};
+
+ std::vector<HalStream> halStreams;
+
+ // Expect the configuration attempt to fail with the ILLEGAL_ARGUMENT service
+ // specific code, since no single input can match both aspect ratios.
+ EXPECT_THAT(
+ mVirtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+ .getServiceSpecificError(),
+ Eq(static_cast<int32_t>(Status::ILLEGAL_ARGUMENT)));
+}
+
+class VirtualCameraSessionInputChoiceTest : public VirtualCameraSessionTestBase {
+ public:
+ std::shared_ptr<VirtualCameraSession> createSession(
+ const std::vector<SupportedStreamConfiguration>& supportedInputConfigs) {
+ mVirtualCameraDevice = ndk::SharedRefBase::make<VirtualCameraDevice>(
+ kCameraId, VirtualCameraConfiguration{
+ .supportedStreamConfigs = supportedInputConfigs,
+ .virtualCameraCallback = mMockVirtualCameraClientCallback,
+ .sensorOrientation = SensorOrientation::ORIENTATION_0,
+ .lensFacing = LensFacing::FRONT});
+ return ndk::SharedRefBase::make<VirtualCameraSession>(
+ mVirtualCameraDevice, mMockCameraDeviceCallback,
+ mMockVirtualCameraClientCallback);
+ }
+
+ protected:
+ std::shared_ptr<VirtualCameraDevice> mVirtualCameraDevice;
+};
+
+TEST_F(VirtualCameraSessionInputChoiceTest,
+ configureChoosesCorrectInputStreamForDownsampledOutput) {
+ // Create camera configured to support SVGA YUV input and QVGA RGBA input.
+ auto virtualCameraSession = createSession(
+ {SupportedStreamConfiguration{.width = kSvgaWidth,
+ .height = kSvgaHeight,
+ .pixelFormat = Format::YUV_420_888,
+ .maxFps = kMaxFps},
+ SupportedStreamConfiguration{.width = kQvgaWidth,
+ .height = kQvgaHeight,
+ .pixelFormat = Format::RGBA_8888,
+ .maxFps = kMaxFps}});
+
+ // Configure VGA stream. Expect SVGA input to be chosen to downscale from.
+ StreamConfiguration streamConfiguration;
+ streamConfiguration.streams = {createStream(
+ kStreamId, kVgaWidth, kVgaHeight, PixelFormat::IMPLEMENTATION_DEFINED)};
+ std::vector<HalStream> halStreams;
+
+ // Expect the SVGA input to be reported to the client callback and the
+ // configuration to succeed.
+ EXPECT_CALL(*mMockVirtualCameraClientCallback,
+ onStreamConfigured(kStreamId, _, kSvgaWidth, kSvgaHeight,
+ Format::YUV_420_888));
+ EXPECT_TRUE(
+ virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+ .isOk());
+}
+
+TEST_F(VirtualCameraSessionInputChoiceTest,
+ configureChoosesCorrectInputStreamForMatchingResolution) {
+ // Create camera configured to support SVGA YUV input and QVGA RGBA input.
+ auto virtualCameraSession = createSession(
+ {SupportedStreamConfiguration{.width = kSvgaWidth,
+ .height = kSvgaHeight,
+ .pixelFormat = Format::YUV_420_888,
+ .maxFps = kMaxFps},
+ SupportedStreamConfiguration{.width = kQvgaWidth,
+ .height = kQvgaHeight,
+ .pixelFormat = Format::RGBA_8888,
+ .maxFps = kMaxFps}});
+
+ // Configure QVGA stream. Expect the QVGA input to be chosen, since it
+ // matches the requested resolution exactly.
+ StreamConfiguration streamConfiguration;
+ streamConfiguration.streams = {createStream(
+ kStreamId, kQvgaWidth, kQvgaHeight, PixelFormat::IMPLEMENTATION_DEFINED)};
+ std::vector<HalStream> halStreams;
+
+ // Expect the QVGA input to be reported to the client callback and the
+ // configuration to succeed.
+ EXPECT_CALL(*mMockVirtualCameraClientCallback,
+ onStreamConfigured(kStreamId, _, kQvgaWidth, kQvgaHeight,
+ Format::RGBA_8888));
+ EXPECT_TRUE(
+ virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+ .isOk());
+}
+
} // namespace
} // namespace virtualcamera
} // namespace companion
diff --git a/services/camera/virtualcamera/util/JpegUtil.cc b/services/camera/virtualcamera/util/JpegUtil.cc
index 98f2448..8569eff 100644
--- a/services/camera/virtualcamera/util/JpegUtil.cc
+++ b/services/camera/virtualcamera/util/JpegUtil.cc
@@ -153,18 +153,6 @@
return compress(yLines, cbLines, crLines);
}
- std::optional<size_t> compressBlackImage() {
- // We only really need to prepare one scanline for Y and one shared scanline
- // for Cb & Cr.
- std::vector<uint8_t> yLine(mWidth, 0);
- std::vector<uint8_t> chromaLine(mWidth / 2, 0xff / 2);
-
- std::vector<JSAMPROW> yLines(mHeight, yLine.data());
- std::vector<JSAMPROW> cLines(mHeight / 2, chromaLine.data());
-
- return compress(yLines, cLines, cLines);
- }
-
private:
void setSuccess(const boolean success) {
mSuccess = success;
@@ -279,17 +267,6 @@
return context.compress(ycbcr);
}
-std::optional<size_t> compressBlackJpeg(const int width, const int height,
- const int quality,
- const std::vector<uint8_t>& app1ExifData,
- size_t outBufferSize, void* outBuffer) {
- LibJpegContext context(width, height, quality, outBufferSize, outBuffer);
- if (!app1ExifData.empty()) {
- context.setApp1Data(app1ExifData.data(), app1ExifData.size());
- }
- return context.compressBlackImage();
-}
-
} // namespace virtualcamera
} // namespace companion
} // namespace android
diff --git a/services/camera/virtualcamera/util/JpegUtil.h b/services/camera/virtualcamera/util/JpegUtil.h
index e64fb4f..83ed74b 100644
--- a/services/camera/virtualcamera/util/JpegUtil.h
+++ b/services/camera/virtualcamera/util/JpegUtil.h
@@ -17,10 +17,8 @@
#ifndef ANDROID_COMPANION_VIRTUALCAMERA_JPEGUTIL_H
#define ANDROID_COMPANION_VIRTUALCAMERA_JPEGUTIL_H
-#include <memory>
#include <optional>
-#include "android/hardware_buffer.h"
#include "system/graphics.h"
namespace android {
@@ -43,20 +41,6 @@
const std::vector<uint8_t>& app1ExifData,
size_t outBufferSize, void* outBuffer);
-// Jpeg-compress all-black image into the output buffer.
-// * width - width of the image
-// * heigh - height of the image
-// * quality - 0-100, higher number corresponds to higher quality.
-// * app1ExifData - vector containing data to be included in APP1
-// segment. Can be empty.
-// * outBufferSize - capacity of the output buffer.
-// * outBuffer - output buffer to write compressed data into.
-// Returns size of compressed data if the compression was successful,
-// empty optional otherwise.
-std::optional<size_t> compressBlackJpeg(int width, int height, int quality,
- const std::vector<uint8_t>& app1ExifData,
- size_t outBufferSize, void* outBuffer);
-
} // namespace virtualcamera
} // namespace companion
} // namespace android
diff --git a/services/camera/virtualcamera/util/MetadataUtil.cc b/services/camera/virtualcamera/util/MetadataUtil.cc
index e3d9e28..119260f 100644
--- a/services/camera/virtualcamera/util/MetadataUtil.cc
+++ b/services/camera/virtualcamera/util/MetadataUtil.cc
@@ -23,6 +23,7 @@
#include <cstdint>
#include <iterator>
#include <memory>
+#include <optional>
#include <utility>
#include <variant>
#include <vector>
@@ -59,7 +60,7 @@
} // namespace
MetadataBuilder& MetadataBuilder::setSupportedHardwareLevel(
- camera_metadata_enum_android_info_supported_hardware_level_t hwLevel) {
+ const camera_metadata_enum_android_info_supported_hardware_level_t hwLevel) {
mEntryMap[ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL] =
asVectorOf<uint8_t>(hwLevel);
return *this;
@@ -86,7 +87,7 @@
}
MetadataBuilder& MetadataBuilder::setLensFacing(
- camera_metadata_enum_android_lens_facing lensFacing) {
+ const camera_metadata_enum_android_lens_facing lensFacing) {
mEntryMap[ANDROID_LENS_FACING] = asVectorOf<uint8_t>(lensFacing);
return *this;
}
@@ -175,6 +176,12 @@
return *this;
}
+MetadataBuilder& MetadataBuilder::setControlSceneMode(
+ const camera_metadata_enum_android_control_scene_mode sceneMode) {
+ mEntryMap[ANDROID_CONTROL_SCENE_MODE] = asVectorOf<uint8_t>(sceneMode);
+ return *this;
+}
+
MetadataBuilder& MetadataBuilder::setControlAvailableEffects(
const std::vector<camera_metadata_enum_android_control_effect_mode>&
availableEffects) {
@@ -198,6 +205,14 @@
return *this;
}
+MetadataBuilder& MetadataBuilder::setControlVideoStabilizationMode(
+ const camera_metadata_enum_android_control_video_stabilization_mode
+ stabilizationMode) {
+ mEntryMap[ANDROID_CONTROL_VIDEO_STABILIZATION_MODE] =
+ asVectorOf<uint8_t>(stabilizationMode);
+ return *this;
+}
+
MetadataBuilder& MetadataBuilder::setControlAfAvailableModes(
const std::vector<camera_metadata_enum_android_control_af_mode_t>&
availableModes) {
@@ -212,6 +227,12 @@
return *this;
}
+MetadataBuilder& MetadataBuilder::setControlAfState(
+ const camera_metadata_enum_android_control_af_state afState) {
+ mEntryMap[ANDROID_CONTROL_AF_STATE] = asVectorOf<uint8_t>(afState);
+ return *this;
+}
+
// See ANDROID_CONTROL_AF_TRIGGER_MODE in CameraMetadataTag.aidl.
MetadataBuilder& MetadataBuilder::setControlAfTrigger(
const camera_metadata_enum_android_control_af_trigger_t trigger) {
@@ -232,14 +253,14 @@
}
MetadataBuilder& MetadataBuilder::setControlAeTargetFpsRange(
- const int32_t minFps, const int32_t maxFps) {
+ const FpsRange fpsRange) {
mEntryMap[ANDROID_CONTROL_AE_TARGET_FPS_RANGE] =
- std::vector<int32_t>({minFps, maxFps});
+ std::vector<int32_t>({fpsRange.minFps, fpsRange.maxFps});
return *this;
}
MetadataBuilder& MetadataBuilder::setControlAeMode(
- camera_metadata_enum_android_control_ae_mode_t mode) {
+ const camera_metadata_enum_android_control_ae_mode_t mode) {
mEntryMap[ANDROID_CONTROL_AE_MODE] = asVectorOf<uint8_t>(mode);
return *this;
}
@@ -277,6 +298,12 @@
return *this;
}
+MetadataBuilder& MetadataBuilder::setControlAwbState(
+ const camera_metadata_enum_android_control_awb_state awbState) {
+ mEntryMap[ANDROID_CONTROL_AWB_STATE] = asVectorOf<uint8_t>(awbState);
+ return *this;
+}
+
MetadataBuilder& MetadataBuilder::setControlAwbLockAvailable(
const bool awbLockAvailable) {
const uint8_t lockAvailable = awbLockAvailable
@@ -287,6 +314,12 @@
return *this;
}
+MetadataBuilder& MetadataBuilder::setControlAwbLock(
+ const camera_metadata_enum_android_control_awb_lock awbLock) {
+ mEntryMap[ANDROID_CONTROL_AWB_LOCK] = asVectorOf<uint8_t>(awbLock);
+ return *this;
+}
+
MetadataBuilder& MetadataBuilder::setControlAeAvailableAntibandingModes(
const std::vector<camera_metadata_enum_android_control_ae_antibanding_mode_t>&
antibandingModes) {
@@ -313,6 +346,12 @@
return *this;
}
+MetadataBuilder& MetadataBuilder::setControlAeLock(
+ const camera_metadata_enum_android_control_ae_lock aeLock) {
+ mEntryMap[ANDROID_CONTROL_AE_LOCK] = asVectorOf<uint8_t>(aeLock);
+ return *this;
+}
+
MetadataBuilder& MetadataBuilder::setControlAeRegions(
const std::vector<ControlRegion>& aeRegions) {
std::vector<int32_t> regions;
@@ -421,7 +460,7 @@
}
MetadataBuilder& MetadataBuilder::setSyncMaxLatency(
- camera_metadata_enum_android_sync_max_latency latency) {
+ const camera_metadata_enum_android_sync_max_latency latency) {
mEntryMap[ANDROID_SYNC_MAX_LATENCY] = asVectorOf<int32_t>(latency);
return *this;
}
@@ -506,7 +545,7 @@
}
MetadataBuilder& MetadataBuilder::setNoiseReductionMode(
- camera_metadata_enum_android_noise_reduction_mode noiseReductionMode) {
+ const camera_metadata_enum_android_noise_reduction_mode noiseReductionMode) {
mEntryMap[ANDROID_NOISE_REDUCTION_MODE] =
asVectorOf<uint8_t>(noiseReductionMode);
return *this;
@@ -585,6 +624,43 @@
return *this;
}
+MetadataBuilder& MetadataBuilder::setControlAeState(
+ const camera_metadata_enum_android_control_ae_state aeState) {
+ mEntryMap[ANDROID_CONTROL_AE_STATE] = asVectorOf<uint8_t>(aeState);
+ return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setStatisticsSceneFlicker(
+ const camera_metadata_enum_android_statistics_scene_flicker sceneFlicker) {
+ mEntryMap[ANDROID_STATISTICS_SCENE_FLICKER] =
+ asVectorOf<uint8_t>(sceneFlicker);
+ return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setStatisticsHotPixelMapMode(
+ const camera_metadata_enum_android_statistics_hot_pixel_map_mode
+ hotPixelMapMode) {
+ mEntryMap[ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE] =
+ asVectorOf<uint8_t>(hotPixelMapMode);
+ return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setStatisticsLensShadingMapMode(
+ const camera_metadata_enum_android_statistics_lens_shading_map_mode
+ lensShadingMapMode) {
+ mEntryMap[ANDROID_STATISTICS_LENS_SHADING_MAP_MODE] =
+ asVectorOf<uint8_t>(lensShadingMapMode);
+ return *this;
+}
+
+MetadataBuilder& MetadataBuilder::setLensOpticalStabilizationMode(
+ const camera_metadata_enum_android_lens_optical_stabilization_mode_t
+ opticalStabilizationMode) {
+ mEntryMap[ANDROID_LENS_OPTICAL_STABILIZATION_MODE] =
+ asVectorOf<uint8_t>(opticalStabilizationMode);
+ return *this;
+}
+
MetadataBuilder& MetadataBuilder::setAvailableRequestKeys(
const std::vector<int32_t>& keys) {
mEntryMap[ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS] = keys;
@@ -724,6 +800,38 @@
return thumbnailSizes;
}
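+// Returns ANDROID_CONTROL_AE_TARGET_FPS_RANGE from the request metadata, if
+// present and well-formed.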
+std::optional<FpsRange> getFpsRange(
+ const aidl::android::hardware::camera::device::CameraMetadata& cameraMetadata) {
+ auto metadata =
+ reinterpret_cast<const camera_metadata_t*>(cameraMetadata.metadata.data());
+
+ camera_metadata_ro_entry_t entry;
+ if (find_camera_metadata_ro_entry(
+ metadata, ANDROID_CONTROL_AE_TARGET_FPS_RANGE, &entry) != OK ||
+ entry.count != 2) {
+ return {};
+ }
+
+ FpsRange range{.minFps = entry.data.i32[0], .maxFps = entry.data.i32[1]};
+ return range;
+}
+
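+// Returns ANDROID_CONTROL_CAPTURE_INTENT from the request metadata, if present.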
+std::optional<camera_metadata_enum_android_control_capture_intent>
+getCaptureIntent(const aidl::android::hardware::camera::device::CameraMetadata&
+ cameraMetadata) {
+ auto metadata =
+ reinterpret_cast<const camera_metadata_t*>(cameraMetadata.metadata.data());
+
+ camera_metadata_ro_entry_t entry;
+ if (find_camera_metadata_ro_entry(metadata, ANDROID_CONTROL_CAPTURE_INTENT,
+ &entry) != OK) {
+ return {};
+ }
+
+ return static_cast<camera_metadata_enum_android_control_capture_intent>(
+ entry.data.u8[0]);
+}
+
} // namespace virtualcamera
} // namespace companion
} // namespace android
diff --git a/services/camera/virtualcamera/util/MetadataUtil.h b/services/camera/virtualcamera/util/MetadataUtil.h
index b4d60cb..f6848cd 100644
--- a/services/camera/virtualcamera/util/MetadataUtil.h
+++ b/services/camera/virtualcamera/util/MetadataUtil.h
@@ -59,16 +59,6 @@
int32_t weight = 0;
};
- struct FpsRange {
- int32_t minFps;
- int32_t maxFps;
-
- bool operator<(const FpsRange& other) const {
- return maxFps == other.maxFps ? minFps < other.minFps
- : maxFps < other.maxFps;
- }
- };
-
MetadataBuilder() = default;
~MetadataBuilder() = default;
@@ -193,6 +183,10 @@
const std::vector<camera_metadata_enum_android_control_scene_mode>&
availableSceneModes);
+ // See ANDROID_CONTROL_SCENE_MODE in CameraMetadataTag.aidl.
+ MetadataBuilder& setControlSceneMode(
+ camera_metadata_enum_android_control_scene_mode sceneMode);
+
// See ANDROID_CONTROL_AVAILABLE_EFFECTS in CameraMetadataTag.aidl.
MetadataBuilder& setControlAvailableEffects(
const std::vector<camera_metadata_enum_android_control_effect_mode>&
@@ -202,12 +196,17 @@
MetadataBuilder& setControlEffectMode(
camera_metadata_enum_android_control_effect_mode_t effectMode);
- // See ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES
+ // See ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES in CameraMetadataTag.aidl.
MetadataBuilder& setControlAvailableVideoStabilizationModes(
const std::vector<
camera_metadata_enum_android_control_video_stabilization_mode_t>&
videoStabilizationModes);
+ // See ANDROID_CONTROL_VIDEO_STABILIZATION_MODE in CameraMetadataTag.aidl.
+ MetadataBuilder& setControlVideoStabilizationMode(
+ camera_metadata_enum_android_control_video_stabilization_mode
+ stabilizationMode);
+
// See CONTROL_AE_AVAILABLE_ANTIBANDING_MODES in CameraCharacteristics.java.
MetadataBuilder& setControlAeAvailableAntibandingModes(
const std::vector<camera_metadata_enum_android_control_ae_antibanding_mode_t>&
@@ -256,7 +255,7 @@
const std::vector<FpsRange>& fpsRanges);
// See ANDROID_CONTROL_AE_TARGET_FPS_RANGE in CaptureRequest.java.
- MetadataBuilder& setControlAeTargetFpsRange(int32_t min, int32_t max);
+ MetadataBuilder& setControlAeTargetFpsRange(FpsRange fpsRange);
// See ANDROID_CONTROL_CAPTURE_INTENT in CameraMetadataTag.aidl.
MetadataBuilder& setControlCaptureIntent(
@@ -278,9 +277,21 @@
// See CONTROL_AWB_LOCK_AVAILABLE in CameraMetadataTag.aidl.
MetadataBuilder& setControlAwbLockAvailable(bool awbLockAvailable);
+ // See CONTROL_AWB_LOCK in CameraMetadataTag.aidl.
+ MetadataBuilder& setControlAwbLock(
+ camera_metadata_enum_android_control_awb_lock awbLock);
+
// See CONTROL_AE_LOCK_AVAILABLE in CameraMetadataTag.aidl.
MetadataBuilder& setControlAeLockAvailable(bool aeLockAvailable);
+ // See CONTROL_AE_LOCK in CameraMetadataTag.aidl.
+ MetadataBuilder& setControlAeLock(
+ camera_metadata_enum_android_control_ae_lock aeLock);
+
+ // See CONTROL_AE_STATE in CameraMetadataTag.aidl.
+ MetadataBuilder& setControlAeState(
+ camera_metadata_enum_android_control_ae_state aeState);
+
// See ANDROID_CONTROL_AE_REGIONS in CameraMetadataTag.aidl.
MetadataBuilder& setControlAeRegions(
const std::vector<ControlRegion>& aeRegions);
@@ -289,6 +300,10 @@
MetadataBuilder& setControlAwbRegions(
const std::vector<ControlRegion>& awbRegions);
+ // See ANDROID_CONTROL_AWB_STATE in CameraMetadataTag.aidl.
+ MetadataBuilder& setControlAwbState(
+ camera_metadata_enum_android_control_awb_state awbState);
+
// See ANDROID_SCALER_CROP_REGION in CaptureRequest.java.
MetadataBuilder& setCropRegion(int32_t x, int32_t y, int32_t width,
int32_t height);
@@ -297,6 +312,10 @@
MetadataBuilder& setControlAfRegions(
const std::vector<ControlRegion>& afRegions);
+ // See ANDROID_CONTROL_AF_STATE in CameraMetadataTag.aidl.
+ MetadataBuilder& setControlAfState(
+ camera_metadata_enum_android_control_af_state afState);
+
// The size of the compressed JPEG image, in bytes.
//
// See ANDROID_JPEG_SIZE in CameraMetadataTag.aidl.
@@ -342,6 +361,24 @@
// See ANDROID_CONTROL_ZOOM_RATIO_RANGE in CameraMetadataTag.aidl.
MetadataBuilder& setControlZoomRatioRange(float min, float max);
+ // See ANDROID_STATISTICS_SCENE_FLICKER in CameraMetadataTag.aidl.
+ MetadataBuilder& setStatisticsSceneFlicker(
+ camera_metadata_enum_android_statistics_scene_flicker sceneFlicker);
+
+ // See ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE in CameraMetadataTag.aidl.
+ MetadataBuilder& setStatisticsHotPixelMapMode(
+ camera_metadata_enum_android_statistics_hot_pixel_map_mode mode);
+
+ // See ANDROID_STATISTICS_LENS_SHADING_MAP_MODE in CameraMetadataTag.aidl.
+ MetadataBuilder& setStatisticsLensShadingMapMode(
+ camera_metadata_enum_android_statistics_lens_shading_map_mode
+ lensShadingMapMode);
+
+ // See ANDROID_LENS_OPTICAL_STABILIZATION_MODE in CameraMetadataTag.aidl.
+ MetadataBuilder& setLensOpticalStabilizationMode(
+ camera_metadata_enum_android_lens_optical_stabilization_mode_t
+ opticalStabilizationMode);
+
// See ANDROID_REQUEST_AVAILABLE_CAPABILITIES in CameraMetadataTag.aidl.
MetadataBuilder& setAvailableRequestCapabilities(
const std::vector<
@@ -410,6 +447,12 @@
std::vector<Resolution> getJpegAvailableThumbnailSizes(
const aidl::android::hardware::camera::device::CameraMetadata& metadata);
+std::optional<FpsRange> getFpsRange(
+ const aidl::android::hardware::camera::device::CameraMetadata& metadata);
+
+std::optional<camera_metadata_enum_android_control_capture_intent> getCaptureIntent(
+ const aidl::android::hardware::camera::device::CameraMetadata& metadata);
+
} // namespace virtualcamera
} // namespace companion
} // namespace android
diff --git a/services/camera/virtualcamera/util/Util.h b/services/camera/virtualcamera/util/Util.h
index d5b0b1f..f08eb1c 100644
--- a/services/camera/virtualcamera/util/Util.h
+++ b/services/camera/virtualcamera/util/Util.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_COMPANION_VIRTUALCAMERA_UTIL_H
#define ANDROID_COMPANION_VIRTUALCAMERA_UTIL_H
+#include <cmath>
#include <cstdint>
#include <memory>
@@ -129,6 +130,10 @@
: pixCount < otherPixCount;
}
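+ // True for equal dimensions or a strictly smaller pixel count (see operator<).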
+ bool operator<=(const Resolution& other) const {
+ return *this == other || *this < other;
+ }
+
bool operator==(const Resolution& other) const {
return width == other.width && height == other.height;
}
@@ -137,6 +142,27 @@
int height = 0;
};
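+// Frame rate range, as carried by ANDROID_CONTROL_AE_TARGET_FPS_RANGE.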
+struct FpsRange {
+ int32_t minFps;
+ int32_t maxFps;
+
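+ // Orders primarily by maxFps, breaking ties by minFps.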
+ bool operator<(const FpsRange& other) const {
+ return maxFps == other.maxFps ? minFps < other.minFps
+ : maxFps < other.maxFps;
+ }
+};
+
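+// Returns true when the two aspect ratios differ by less than
+// kAspectRatioEpsilon, e.g. 640x360 and 1280x720 both map to ~1.78.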
+inline bool isApproximatellySameAspectRatio(const Resolution r1,
+ const Resolution r2) {
+ static constexpr float kAspectRatioEpsilon = 0.05;
+ float aspectRatio1 =
+ static_cast<float>(r1.width) / static_cast<float>(r1.height);
+ float aspectRatio2 =
+ static_cast<float>(r2.width) / static_cast<float>(r2.height);
+
+ return std::abs(aspectRatio1 - aspectRatio2) < kAspectRatioEpsilon;
+}
+
std::ostream& operator<<(std::ostream& os, const Resolution& resolution);
} // namespace virtualcamera