Merge "Add dependency on libdl_android"
diff --git a/camera/ndk/NdkCameraDevice.cpp b/camera/ndk/NdkCameraDevice.cpp
index 09b85d5..691996b 100644
--- a/camera/ndk/NdkCameraDevice.cpp
+++ b/camera/ndk/NdkCameraDevice.cpp
@@ -287,3 +287,16 @@
}
return device->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
+
+EXPORT
+camera_status_t ACameraDevice_isSessionConfigurationSupported(
+ const ACameraDevice* device,
+ const ACaptureSessionOutputContainer* sessionOutputContainer) {
+ ATRACE_CALL();
+ if (device == nullptr || sessionOutputContainer == nullptr) {
+ ALOGE("%s: Error: invalid input: device %p, sessionOutputContainer %p",
+ __FUNCTION__, device, sessionOutputContainer);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ return device->isSessionConfigurationSupported(sessionOutputContainer);
+}
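
[Reviewer note] A minimal usage sketch of the new NDK entry point (illustrative only; it assumes an already-opened device and an ACaptureSessionOutputContainer populated the same way as for ACameraDevice_createCaptureSession):

#include <camera/NdkCameraDevice.h>
#include <camera/NdkCameraError.h>

// Returns true when it is worth attempting ACameraDevice_createCaptureSession
// with this output container.
static bool canConfigureSession(ACameraDevice* device,
                                ACaptureSessionOutputContainer* outputs) {
    camera_status_t ret =
            ACameraDevice_isSessionConfigurationSupported(device, outputs);
    if (ret == ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
        // The query itself is not supported on this device; fall back to
        // attempting session creation directly.
        return true;
    }
    return ret == ACAMERA_OK;
}
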
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 5e4fcd0..c9db01e 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -227,6 +227,55 @@
return ACAMERA_OK;
}
+camera_status_t CameraDevice::isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+ Mutex::Autolock _l(mDeviceLock);
+ camera_status_t ret = checkCameraClosedOrErrorLocked();
+ if (ret != ACAMERA_OK) {
+ return ret;
+ }
+
+ SessionConfiguration sessionConfiguration(0 /*inputWidth*/, 0 /*inputHeight*/,
+ -1 /*inputFormat*/, CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE);
+ for (const auto& output : sessionOutputContainer->mOutputs) {
+ sp<IGraphicBufferProducer> iGBP(nullptr);
+ ret = getIGBPfromAnw(output.mWindow, iGBP);
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera device %s failed to extract graphic producer from native window",
+ getId());
+ return ret;
+ }
+
+ String16 physicalId16(output.mPhysicalCameraId.c_str());
+ OutputConfiguration outConfig(iGBP, output.mRotation, physicalId16,
+ OutputConfiguration::INVALID_SET_ID, true);
+
+ for (auto& anw : output.mSharedWindows) {
+ ret = getIGBPfromAnw(anw, iGBP);
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera device %s failed to extract graphic producer from native window",
+ getId());
+ return ret;
+ }
+ outConfig.addGraphicProducer(iGBP);
+ }
+
+ sessionConfiguration.addOutputConfiguration(outConfig);
+ }
+
+ bool supported = false;
+ binder::Status remoteRet = mRemote->isSessionConfigurationSupported(
+ sessionConfiguration, &supported);
+ if (remoteRet.serviceSpecificErrorCode() ==
+ hardware::ICameraService::ERROR_INVALID_OPERATION) {
+ return ACAMERA_ERROR_UNSUPPORTED_OPERATION;
+ } else if (!remoteRet.isOk()) {
+ return ACAMERA_ERROR_UNKNOWN;
+ } else {
+ return supported ? ACAMERA_OK : ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
+ }
+}
+
camera_status_t CameraDevice::updateOutputConfigurationLocked(ACaptureSessionOutput *output) {
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {
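
[Reviewer note] Result mapping implemented by isSessionConfigurationSupported() above, for quick reference (derived directly from the code in this hunk):

// service returns ERROR_INVALID_OPERATION      -> ACAMERA_ERROR_UNSUPPORTED_OPERATION
// binder/transport failure (!remoteRet.isOk()) -> ACAMERA_ERROR_UNKNOWN
// supported == true                            -> ACAMERA_OK
// supported == false                           -> ACAMERA_ERROR_STREAM_CONFIGURE_FAIL
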
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 103efd5..56741ce 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -35,6 +35,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <camera/CaptureResult.h>
#include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/SessionConfiguration.h>
#include <camera/camera2/CaptureRequest.h>
#include <camera/NdkCameraManager.h>
@@ -77,6 +78,9 @@
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session);
+ camera_status_t isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const;
+
// Callbacks from camera service
class ServiceCallback : public hardware::camera2::BnCameraDeviceCallbacks {
public:
@@ -369,6 +373,11 @@
return mDevice->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
+ camera_status_t isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+ return mDevice->isSessionConfigurationSupported(sessionOutputContainer);
+ }
+
/***********************
* Device interal APIs *
***********************/
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index cedf83a..bc544e3 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -845,6 +845,43 @@
const ACameraIdList* physicalIdList,
/*out*/ACaptureRequest** request) __INTRODUCED_IN(29);
+/**
+ * Check whether a particular {@link ACaptureSessionOutputContainer} is supported by
+ * the camera device.
+ *
+ * <p>This method performs a runtime check of a given {@link
+ * ACaptureSessionOutputContainer}. The result confirms whether or not the
+ * passed CaptureSession outputs can be successfully used to create a camera
+ * capture session using {@link ACameraDevice_createCaptureSession}.</p>
+ *
+ * <p>This method can be called at any point before, during, and after an active
+ * capture session. It must not impact normal camera behavior in any way and
+ * must complete significantly faster than creating a capture session.</p>
+ *
+ * <p>Although this method is faster than creating a new capture session, it is not intended
+ * to be used for exploring the entire space of supported stream combinations.</p>
+ *
+ * @param device the camera device of interest
+ * @param sessionOutputContainer the {@link ACaptureSessionOutputContainer} of
+ * interest.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the given {@link ACaptureSessionOutputContainer}
+ * is supported by the camera device.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device or sessionOutputContainer
+ *             is NULL.</li>
+ *         <li>{@link ACAMERA_ERROR_STREAM_CONFIGURE_FAIL} if the given
+ *             {@link ACaptureSessionOutputContainer} is not supported by the
+ *             camera device.</li>
+ *         <li>{@link ACAMERA_ERROR_UNSUPPORTED_OPERATION} if the query operation is not
+ *             supported by the camera device.</li></ul>
+ */
+camera_status_t ACameraDevice_isSessionConfigurationSupported(
+ const ACameraDevice* device,
+ const ACaptureSessionOutputContainer* sessionOutputContainer) __INTRODUCED_IN(29);
+
#endif /* __ANDROID_API__ >= 29 */
__END_DECLS
diff --git a/camera/ndk/include/camera/NdkCameraError.h b/camera/ndk/include/camera/NdkCameraError.h
index 6b58155..fc618ee 100644
--- a/camera/ndk/include/camera/NdkCameraError.h
+++ b/camera/ndk/include/camera/NdkCameraError.h
@@ -106,7 +106,8 @@
/**
* Camera device does not support the stream configuration provided by application in
- * {@link ACameraDevice_createCaptureSession}.
+ * {@link ACameraDevice_createCaptureSession} or {@link
+ * ACameraDevice_isSessionConfigurationSupported}.
*/
ACAMERA_ERROR_STREAM_CONFIGURE_FAIL = ACAMERA_ERROR_BASE - 9,
@@ -130,6 +131,11 @@
* The application does not have permission to open camera.
*/
ACAMERA_ERROR_PERMISSION_DENIED = ACAMERA_ERROR_BASE - 13,
+
+ /**
+ * The operation is not supported by the camera device.
+ */
+ ACAMERA_ERROR_UNSUPPORTED_OPERATION = ACAMERA_ERROR_BASE - 14,
} camera_status_t;
#endif /* __ANDROID_API__ >= 24 */
diff --git a/camera/ndk/libcamera2ndk.map.txt b/camera/ndk/libcamera2ndk.map.txt
index 946a98e..b6f1553 100644
--- a/camera/ndk/libcamera2ndk.map.txt
+++ b/camera/ndk/libcamera2ndk.map.txt
@@ -14,6 +14,7 @@
ACameraDevice_createCaptureRequest_withPhysicalIds; # introduced=29
ACameraDevice_createCaptureSession;
ACameraDevice_createCaptureSessionWithSessionParameters; # introduced=28
+ ACameraDevice_isSessionConfigurationSupported; # introduced=29
ACameraDevice_getId;
ACameraManager_create;
ACameraManager_delete;
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index a38a31e..d7d774b 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -38,6 +38,7 @@
using HCameraMetadata = frameworks::cameraservice::device::V2_0::CameraMetadata;
using OutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
+using SessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
using hardware::Void;
// Static member definitions
@@ -216,6 +217,47 @@
return ACAMERA_OK;
}
+camera_status_t CameraDevice::isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+ Mutex::Autolock _l(mDeviceLock);
+ camera_status_t ret = checkCameraClosedOrErrorLocked();
+ if (ret != ACAMERA_OK) {
+ return ret;
+ }
+
+ SessionConfiguration sessionConfig;
+ sessionConfig.inputWidth = 0;
+ sessionConfig.inputHeight = 0;
+ sessionConfig.inputFormat = -1;
+ sessionConfig.operationMode = StreamConfigurationMode::NORMAL_MODE;
+ sessionConfig.outputStreams.resize(sessionOutputContainer->mOutputs.size());
+ size_t index = 0;
+ for (const auto& output : sessionOutputContainer->mOutputs) {
+ sessionConfig.outputStreams[index].rotation = utils::convertToHidl(output.mRotation);
+ sessionConfig.outputStreams[index].windowGroupId = -1;
+ sessionConfig.outputStreams[index].windowHandles.resize(output.mSharedWindows.size() + 1);
+ sessionConfig.outputStreams[index].windowHandles[0] = output.mWindow;
+ sessionConfig.outputStreams[index].physicalCameraId = output.mPhysicalCameraId;
+ index++;
+ }
+
+ bool configSupported = false;
+ Status status = Status::NO_ERROR;
+ auto remoteRet = mRemote->isSessionConfigurationSupported(sessionConfig,
+ [&status, &configSupported](auto s, auto supported) {
+ status = s;
+ configSupported = supported;
+ });
+
+ if (status == Status::INVALID_OPERATION) {
+ return ACAMERA_ERROR_UNSUPPORTED_OPERATION;
+ } else if (!remoteRet.isOk()) {
+ return ACAMERA_ERROR_UNKNOWN;
+ } else {
+ return configSupported ? ACAMERA_OK : ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
+ }
+}
+
void CameraDevice::addRequestSettingsMetadata(ACaptureRequest *aCaptureRequest,
sp<CaptureRequest> &req) {
CameraMetadata metadataCopy = aCaptureRequest->settings->getInternalData();
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 28092fd..47e6f56 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -101,6 +101,9 @@
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session);
+ camera_status_t isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const;
+
// Callbacks from camera service
class ServiceCallback : public ICameraDeviceCallback {
public:
@@ -397,6 +400,11 @@
return mDevice->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
+ camera_status_t isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+ return mDevice->isSessionConfigurationSupported(sessionOutputContainer);
+ }
+
/***********************
* Device interal APIs *
***********************/
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 2398922..c51f93b 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -121,6 +121,12 @@
cameraIdList.numCameras = idPointerList.size();
cameraIdList.cameraIds = idPointerList.data();
+ ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
+ if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
+ ALOGE("ACameraDevice_isSessionConfigurationSupported failed, ret=%d", ret);
+ return ret;
+ }
+
ret = ACameraDevice_createCaptureSession(mDevice, mOutputs, &mSessionCb, &mSession);
if (ret != AMEDIA_OK) {
ALOGE("ACameraDevice_createCaptureSession failed, ret=%d", ret);
diff --git a/media/codec2/components/base/Android.bp b/media/codec2/components/base/Android.bp
index 78a444b..f10835f 100644
--- a/media/codec2/components/base/Android.bp
+++ b/media/codec2/components/base/Android.bp
@@ -36,13 +36,18 @@
ldflags: ["-Wl,-Bsymbolic"],
}
+filegroup {
+ name: "codec2_soft_exports",
+ srcs: [ "exports.lds" ],
+}
+
// public dependency for software codec implementation
// to be used by code under media/codecs/* only as its stability is not guaranteed
cc_defaults {
name: "libcodec2_soft-defaults",
defaults: ["libcodec2-impl-defaults"],
vendor_available: true,
-
+ version_script: ":codec2_soft_exports",
export_shared_lib_headers: [
"libsfplugin_ccodec_utils",
],
diff --git a/media/codec2/components/base/exports.lds b/media/codec2/components/base/exports.lds
new file mode 100644
index 0000000..641bae8
--- /dev/null
+++ b/media/codec2/components/base/exports.lds
@@ -0,0 +1,7 @@
+{
+ global:
+ CreateCodec2Factory;
+ DestroyCodec2Factory;
+ local: *;
+};
+
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index 7045b6a..402d9aa 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -94,6 +94,20 @@
// matches limits in codec library
addParameter(
+ DefineParam(mBitrateMode, C2_PARAMKEY_BITRATE_MODE)
+ .withDefault(new C2StreamBitrateModeTuning::output(
+ 0u, C2Config::BITRATE_VARIABLE))
+ .withFields({
+ C2F(mBitrateMode, value).oneOf({
+ C2Config::BITRATE_CONST,
+ C2Config::BITRATE_VARIABLE,
+ C2Config::BITRATE_IGNORE})
+ })
+ .withSetter(
+ Setter<decltype(*mBitrateMode)>::StrictValueWithNoDeps)
+ .build());
+
+ addParameter(
DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
.withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
@@ -102,6 +116,20 @@
// matches levels allowed within codec library
addParameter(
+ DefineParam(mComplexity, C2_PARAMKEY_COMPLEXITY)
+ .withDefault(new C2StreamComplexityTuning::output(0u, 0))
+ .withFields({C2F(mComplexity, value).inRange(0, 10)})
+ .withSetter(Setter<decltype(*mComplexity)>::NonStrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mQuality, C2_PARAMKEY_QUALITY)
+ .withDefault(new C2StreamQualityTuning::output(0u, 80))
+ .withFields({C2F(mQuality, value).inRange(0, 100)})
+ .withSetter(Setter<decltype(*mQuality)>::NonStrictValueWithNoDeps)
+ .build());
+
+ addParameter(
DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
.withDefault(new C2StreamProfileLevelInfo::output(
0u, PROFILE_HEVC_MAIN, LEVEL_HEVC_MAIN_1))
@@ -287,12 +315,21 @@
std::shared_ptr<C2StreamFrameRateInfo::output> getFrameRate_l() const {
return mFrameRate;
}
+ std::shared_ptr<C2StreamBitrateModeTuning::output> getBitrateMode_l() const {
+ return mBitrateMode;
+ }
std::shared_ptr<C2StreamBitrateInfo::output> getBitrate_l() const {
return mBitrate;
}
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const {
return mRequestSync;
}
+ std::shared_ptr<C2StreamComplexityTuning::output> getComplexity_l() const {
+ return mComplexity;
+ }
+ std::shared_ptr<C2StreamQualityTuning::output> getQuality_l() const {
+ return mQuality;
+ }
private:
std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
@@ -304,6 +341,9 @@
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateModeTuning::output> mBitrateMode;
+ std::shared_ptr<C2StreamComplexityTuning::output> mComplexity;
+ std::shared_ptr<C2StreamQualityTuning::output> mQuality;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
};
@@ -387,6 +427,19 @@
work->workletsProcessed = 1u;
}
+static int getQpFromQuality(int quality) {
+ int qp;
+#define MIN_QP 4
+#define MAX_QP 50
+ /* Quality: 100 -> Qp : MIN_QP
+ * Quality: 0 -> Qp : MAX_QP
+ * Qp = ((MIN_QP - MAX_QP) * quality / 100) + MAX_QP;
+ */
+ qp = ((MIN_QP - MAX_QP) * quality / 100) + MAX_QP;
+ qp = std::min(qp, MAX_QP);
+ qp = std::max(qp, MIN_QP);
+ return qp;
+}
c2_status_t C2SoftHevcEnc::initEncParams() {
mCodecCtx = nullptr;
mNumCores = std::min(GetCPUCoreCount(), (size_t) CODEC_MAX_CORES);
@@ -416,9 +469,37 @@
mIvVideoColorFormat = IV_YUV_420P;
mEncParams.s_multi_thrd_prms.i4_max_num_cores = mNumCores;
mEncParams.s_out_strm_prms.i4_codec_profile = mHevcEncProfile;
- mEncParams.s_config_prms.i4_rate_control_mode = 2;
mEncParams.s_lap_prms.i4_rc_look_ahead_pics = 0;
+ switch (mBitrateMode->value) {
+ case C2Config::BITRATE_IGNORE:
+ mEncParams.s_config_prms.i4_rate_control_mode = 3;
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_frame_qp[0] =
+ getQpFromQuality(mQuality->value);
+ break;
+ case C2Config::BITRATE_CONST:
+ mEncParams.s_config_prms.i4_rate_control_mode = 5;
+ break;
+ case C2Config::BITRATE_VARIABLE:
+ [[fallthrough]];
+ default:
+ mEncParams.s_config_prms.i4_rate_control_mode = 2;
+ break;
+ }
+
+ if (mComplexity->value == 10) {
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P0;
+ } else if (mComplexity->value >= 8) {
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P2;
+ } else if (mComplexity->value >= 7) {
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P3;
+ } else if (mComplexity->value >= 5) {
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P4;
+ } else {
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P5;
+ }
+
return C2_OK;
}
@@ -447,11 +528,14 @@
{
IntfImpl::Lock lock = mIntf->lock();
mSize = mIntf->getSize_l();
+ mBitrateMode = mIntf->getBitrateMode_l();
mBitrate = mIntf->getBitrate_l();
mFrameRate = mIntf->getFrameRate_l();
mHevcEncProfile = mIntf->getProfile_l();
mHevcEncLevel = mIntf->getLevel_l();
mIDRInterval = mIntf->getSyncFramePeriod_l();
+ mComplexity = mIntf->getComplexity_l();
+ mQuality = mIntf->getQuality_l();
}
c2_status_t status = initEncParams();
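
[Reviewer note] A quick check of the quality-to-QP mapping added in getQpFromQuality() above, using the MIN_QP=4 / MAX_QP=50 bounds from this patch (C integer division):

// quality = 100 -> qp = ((4 - 50) * 100 / 100) + 50 = 4    (= MIN_QP)
// quality =  80 -> qp = ((4 - 50) *  80 / 100) + 50 = 14
// quality =   0 -> qp = ((4 - 50) *   0 / 100) + 50 = 50   (= MAX_QP)
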
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
index 9d90b95..8569a3e 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.h
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -77,6 +77,9 @@
std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateModeTuning::output> mBitrateMode;
+ std::shared_ptr<C2StreamComplexityTuning::output> mComplexity;
+ std::shared_ptr<C2StreamQualityTuning::output> mQuality;
#ifdef FILE_DUMP_ENABLE
char mInFile[200];
diff --git a/media/codec2/hidl/1.0/utils/Android.bp b/media/codec2/hidl/1.0/utils/Android.bp
index 0bb2418..97dde71 100644
--- a/media/codec2/hidl/1.0/utils/Android.bp
+++ b/media/codec2/hidl/1.0/utils/Android.bp
@@ -33,6 +33,7 @@
"android.hardware.media.bufferpool@2.0",
"android.hardware.media.c2@1.0",
"android.hardware.media.omx@1.0",
+ "android.hidl.safe_union@1.0",
"libbase",
"libcodec2",
"libcodec2_vndk",
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
index b9f3aa8..817d148 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
@@ -22,6 +22,7 @@
#include <android/hardware/media/bufferpool/2.0/types.h>
#include <android/hardware/media/c2/1.0/IComponentStore.h>
#include <android/hardware/media/c2/1.0/types.h>
+#include <android/hidl/safe_union/1.0/types.h>
#include <gui/IGraphicBufferProducer.h>
#include <C2Component.h>
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index 031e637..74320e7 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -120,9 +120,9 @@
return true;
}
-// C2FieldSupportedValues::range's type -> FieldSupportedValues::Range
+// C2FieldSupportedValues::range's type -> ValueRange
bool objcpy(
- FieldSupportedValues::Range* d,
+ ValueRange* d,
const decltype(C2FieldSupportedValues::range)& s) {
d->min = static_cast<PrimitiveValue>(s.min.u64);
d->max = static_cast<PrimitiveValue>(s.max.u64);
@@ -135,45 +135,45 @@
// C2FieldSupportedValues -> FieldSupportedValues
bool objcpy(FieldSupportedValues *d, const C2FieldSupportedValues &s) {
switch (s.type) {
- case C2FieldSupportedValues::EMPTY:
- d->type = FieldSupportedValues::Type::EMPTY;
- d->values.resize(0);
- break;
- case C2FieldSupportedValues::RANGE:
- d->type = FieldSupportedValues::Type::RANGE;
- if (!objcpy(&d->range, s.range)) {
- LOG(ERROR) << "Invalid C2FieldSupportedValues::range.";
- return false;
+ case C2FieldSupportedValues::EMPTY: {
+ d->empty(::android::hidl::safe_union::V1_0::Monostate{});
+ break;
}
- d->values.resize(0);
- break;
- default:
- switch (s.type) {
- case C2FieldSupportedValues::VALUES:
- d->type = FieldSupportedValues::Type::VALUES;
- break;
- case C2FieldSupportedValues::FLAGS:
- d->type = FieldSupportedValues::Type::FLAGS;
- break;
- default:
- LOG(DEBUG) << "Unrecognized C2FieldSupportedValues::type_t "
- << "with underlying value " << underlying_value(s.type)
- << ".";
- d->type = static_cast<FieldSupportedValues::Type>(s.type);
- if (!objcpy(&d->range, s.range)) {
+ case C2FieldSupportedValues::RANGE: {
+ ValueRange range{};
+ if (!objcpy(&range, s.range)) {
LOG(ERROR) << "Invalid C2FieldSupportedValues::range.";
+ d->range(range);
return false;
}
+ d->range(range);
+ break;
}
- copyVector<uint64_t>(&d->values, s.values);
+ case C2FieldSupportedValues::VALUES: {
+ hidl_vec<PrimitiveValue> values;
+ copyVector<uint64_t>(&values, s.values);
+ d->values(values);
+ break;
+ }
+ case C2FieldSupportedValues::FLAGS: {
+ hidl_vec<PrimitiveValue> flags;
+ copyVector<uint64_t>(&flags, s.values);
+ d->flags(flags);
+ break;
+ }
+ default:
+ LOG(DEBUG) << "Unrecognized C2FieldSupportedValues::type_t "
+ << "with underlying value " << underlying_value(s.type)
+ << ".";
+ return false;
}
return true;
}
-// FieldSupportedValues::Range -> C2FieldSupportedValues::range's type
+// ValueRange -> C2FieldSupportedValues::range's type
bool objcpy(
decltype(C2FieldSupportedValues::range)* d,
- const FieldSupportedValues::Range& s) {
+ const ValueRange& s) {
d->min.u64 = static_cast<uint64_t>(s.min);
d->max.u64 = static_cast<uint64_t>(s.max);
d->step.u64 = static_cast<uint64_t>(s.step);
@@ -184,37 +184,33 @@
// FieldSupportedValues -> C2FieldSupportedValues
bool objcpy(C2FieldSupportedValues *d, const FieldSupportedValues &s) {
- switch (s.type) {
- case FieldSupportedValues::Type::EMPTY:
- d->type = C2FieldSupportedValues::EMPTY;
- break;
- case FieldSupportedValues::Type::RANGE:
- d->type = C2FieldSupportedValues::RANGE;
- if (!objcpy(&d->range, s.range)) {
- LOG(ERROR) << "Invalid FieldSupportedValues::range.";
- return false;
+ switch (s.getDiscriminator()) {
+ case FieldSupportedValues::hidl_discriminator::empty: {
+ d->type = C2FieldSupportedValues::EMPTY;
+ break;
}
- d->values.resize(0);
- break;
- default:
- switch (s.type) {
- case FieldSupportedValues::Type::VALUES:
- d->type = C2FieldSupportedValues::VALUES;
- break;
- case FieldSupportedValues::Type::FLAGS:
- d->type = C2FieldSupportedValues::FLAGS;
- break;
- default:
- LOG(DEBUG) << "Unrecognized FieldSupportedValues::Type "
- << "with underlying value " << underlying_value(s.type)
- << ".";
- d->type = static_cast<C2FieldSupportedValues::type_t>(s.type);
- if (!objcpy(&d->range, s.range)) {
+ case FieldSupportedValues::hidl_discriminator::range: {
+ d->type = C2FieldSupportedValues::RANGE;
+ if (!objcpy(&d->range, s.range())) {
LOG(ERROR) << "Invalid FieldSupportedValues::range.";
return false;
}
+ d->values.resize(0);
+ break;
}
- copyVector<uint64_t>(&d->values, s.values);
+ case FieldSupportedValues::hidl_discriminator::values: {
+ d->type = C2FieldSupportedValues::VALUES;
+ copyVector<uint64_t>(&d->values, s.values());
+ break;
+ }
+ case FieldSupportedValues::hidl_discriminator::flags: {
+ d->type = C2FieldSupportedValues::FLAGS;
+ copyVector<uint64_t>(&d->values, s.flags());
+ break;
+ }
+ default:
+ LOG(WARNING) << "Unrecognized FieldSupportedValues::getDiscriminator()";
+ return false;
}
return true;
}
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
index 1f36270..d73b731 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
@@ -127,4 +127,12 @@
queueCondition.notify_all();
}
}
-}
\ No newline at end of file
+}
+
+// Returns the current time in microseconds
+int64_t getNowUs() {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+
+ return (int64_t)tv.tv_usec + tv.tv_sec * 1000000ll;
+}
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
index fca2902..c577dac 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
@@ -201,4 +201,6 @@
std::list<std::unique_ptr<C2Work>>& workQueue, bool& eos, bool& csd,
uint32_t& framesReceived);
+int64_t getNowUs();
+
#endif // MEDIA_C2_HIDL_TEST_COMMON_H
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp
index ec803d7..74548b5 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp
@@ -26,6 +26,32 @@
#include <VtsHalHidlTargetTestBase.h>
#include "media_c2_hidl_test_common.h"
+/* Timeouts for start(), stop(), reset(), release(), flush() and queue() are
+ * defined in hardware/interfaces/media/c2/1.0/IComponent.hal. 50ms of margin is
+ * added where the HAL timeout is 500ms, and 1ms of margin where it is 1ms/5ms.
+ * All timeouts below are expressed in microseconds.
+ */
+#define START_TIME_OUT 550000
+#define STOP_TIME_OUT 550000
+#define RESET_TIME_OUT 550000
+#define RELEASE_TIME_OUT 550000
+#define FLUSH_TIME_OUT 6000
+#define QUEUE_TIME_OUT 2000
+
+// Timeouts for config(), query() and querySupportedParams() are defined in
+// hardware/interfaces/media/c2/1.0/IConfigurable.hal.
+#define CONFIG_TIME_OUT 6000
+#define QUERY_TIME_OUT 6000
+#define QUERYSUPPORTEDPARAMS_TIME_OUT 2000
+
+#define CHECK_TIMEOUT(timeConsumed, TIME_OUT, FuncName) \
+ if (timeConsumed > TIME_OUT) { \
+ ALOGW( \
+ "TIMED_OUT %s timeConsumed=%" PRId64 " us is " \
+ "greater than threshold %d us", \
+ FuncName, timeConsumed, TIME_OUT); \
+ }
+
static ComponentTestEnvironment* gEnv = nullptr;
namespace {
@@ -244,6 +270,93 @@
std::make_pair(C2FrameData::FLAG_CODEC_CONFIG, false),
std::make_pair(C2FrameData::FLAG_END_OF_STREAM, false)));
+// Test API call timeouts
+TEST_F(Codec2ComponentHidlTest, Timeout) {
+ ALOGV("Timeout Test");
+ c2_status_t err = C2_OK;
+
+ int64_t startTime = getNowUs();
+ err = mComponent->start();
+ int64_t timeConsumed = getNowUs() - startTime;
+ CHECK_TIMEOUT(timeConsumed, START_TIME_OUT, "start()");
+ ALOGV("mComponent->start() timeConsumed=%" PRId64 " us", timeConsumed);
+ ASSERT_EQ(err, C2_OK);
+
+ startTime = getNowUs();
+ err = mComponent->reset();
+ timeConsumed = getNowUs() - startTime;
+ CHECK_TIMEOUT(timeConsumed, RESET_TIME_OUT, "reset()");
+ ALOGV("mComponent->reset() timeConsumed=%" PRId64 " us", timeConsumed);
+ ASSERT_EQ(err, C2_OK);
+
+ err = mComponent->start();
+ ASSERT_EQ(err, C2_OK);
+
+ // Query supported params by the component
+ std::vector<std::shared_ptr<C2ParamDescriptor>> params;
+ startTime = getNowUs();
+    err = mComponent->querySupportedParams(&params);
+ timeConsumed = getNowUs() - startTime;
+ CHECK_TIMEOUT(timeConsumed, QUERYSUPPORTEDPARAMS_TIME_OUT,
+ "querySupportedParams()");
+ ALOGV("mComponent->querySupportedParams() timeConsumed=%" PRId64 " us",
+ timeConsumed);
+ ASSERT_EQ(err, C2_OK);
+
+ std::vector<std::unique_ptr<C2Param>> queried;
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ // Query and config all the supported params
+ for (std::shared_ptr<C2ParamDescriptor> p : params) {
+ startTime = getNowUs();
+ err = mComponent->query({}, {p->index()}, C2_DONT_BLOCK, &queried);
+ timeConsumed = getNowUs() - startTime;
+ CHECK_TIMEOUT(timeConsumed, QUERY_TIME_OUT, "query()");
+ EXPECT_NE(queried.size(), 0u);
+ EXPECT_EQ(err, C2_OK);
+ ALOGV("mComponent->query() for %s timeConsumed=%" PRId64 " us",
+ p->name().c_str(), timeConsumed);
+
+ startTime = getNowUs();
+ err = mComponent->config({queried[0].get()}, C2_DONT_BLOCK, &failures);
+ timeConsumed = getNowUs() - startTime;
+ CHECK_TIMEOUT(timeConsumed, CONFIG_TIME_OUT, "config()");
+ ASSERT_EQ(err, C2_OK);
+ ASSERT_EQ(failures.size(), 0u);
+ ALOGV("mComponent->config() for %s timeConsumed=%" PRId64 " us",
+ p->name().c_str(), timeConsumed);
+ }
+
+ std::list<std::unique_ptr<C2Work>> workList;
+ startTime = getNowUs();
+ err = mComponent->queue(&workList);
+ timeConsumed = getNowUs() - startTime;
+ ALOGV("mComponent->queue() timeConsumed=%" PRId64 " us", timeConsumed);
+ CHECK_TIMEOUT(timeConsumed, QUEUE_TIME_OUT, "queue()");
+ ASSERT_EQ(err, C2_OK);
+
+ startTime = getNowUs();
+ err = mComponent->flush(C2Component::FLUSH_COMPONENT, &workList);
+ timeConsumed = getNowUs() - startTime;
+ ALOGV("mComponent->flush() timeConsumed=%" PRId64 " us", timeConsumed);
+ CHECK_TIMEOUT(timeConsumed, FLUSH_TIME_OUT, "flush()");
+ ASSERT_EQ(err, C2_OK);
+
+ startTime = getNowUs();
+ err = mComponent->stop();
+ timeConsumed = getNowUs() - startTime;
+ ALOGV("mComponent->stop() timeConsumed=%" PRId64 " us", timeConsumed);
+ CHECK_TIMEOUT(timeConsumed, STOP_TIME_OUT, "stop()");
+ ASSERT_EQ(err, C2_OK);
+
+ startTime = getNowUs();
+ err = mComponent->release();
+ timeConsumed = getNowUs() - startTime;
+ ALOGV("mComponent->release() timeConsumed=%" PRId64 " us", timeConsumed);
+ CHECK_TIMEOUT(timeConsumed, RELEASE_TIME_OUT, "release()");
+ ASSERT_EQ(err, C2_OK);
+
+}
+
} // anonymous namespace
// TODO: Add test for Invalid work,
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index 03d859a..962df0f 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -272,19 +272,14 @@
work->input.buffers.clear();
if (block) {
std::shared_ptr<C2Buffer> c2Buffer(
- // TODO: fence
new Buffer2D(block->share(
- C2Rect(block->width(), block->height()), ::C2Fence())),
- [buffer, source = getSource()](C2Buffer *ptr) {
- delete ptr;
- // TODO: fence
- (void)source->onInputBufferEmptied(buffer, -1);
- });
+ C2Rect(block->width(), block->height()), ::C2Fence())));
work->input.buffers.push_back(c2Buffer);
}
work->worklets.clear();
work->worklets.emplace_back(new C2Worklet);
std::list<std::unique_ptr<C2Work>> items;
+ uint64_t index = work->input.ordinal.frameIndex.peeku();
items.push_back(std::move(work));
c2_status_t err = comp->queue(&items);
@@ -292,6 +287,7 @@
return UNKNOWN_ERROR;
}
+ (void)mBufferIdsInUse.emplace(index, buffer);
return OK;
}
@@ -326,4 +322,18 @@
mHeight = height;
}
+void C2OMXNode::onInputBufferDone(c2_cntr64_t index) {
+ if (!mBufferSource) {
+ ALOGD("Buffer source not set (index=%llu)", index.peekull());
+ return;
+ }
+ auto it = mBufferIdsInUse.find(index.peeku());
+ if (it == mBufferIdsInUse.end()) {
+ ALOGV("Untracked input index %llu (maybe already removed)", index.peekull());
+ return;
+ }
+ (void)mBufferSource->onInputBufferEmptied(it->second, -1);
+ (void)mBufferIdsInUse.erase(it);
+}
+
} // namespace android
diff --git a/media/codec2/sfplugin/C2OMXNode.h b/media/codec2/sfplugin/C2OMXNode.h
index b5a815e..b7bd696 100644
--- a/media/codec2/sfplugin/C2OMXNode.h
+++ b/media/codec2/sfplugin/C2OMXNode.h
@@ -75,9 +75,23 @@
OMX_INDEXTYPE *index) override;
status_t dispatchMessage(const omx_message &msg) override;
+ /**
+ * Returns underlying IOMXBufferSource object.
+     * Returns the underlying IOMXBufferSource object.
sp<IOMXBufferSource> getSource();
+
+ /**
+ * Configure the frame size.
+ */
void setFrameSize(uint32_t width, uint32_t height);
+ /**
+ * Clean up work item reference.
+ *
+ * \param index input work index
+ */
+ void onInputBufferDone(c2_cntr64_t index);
+
private:
std::weak_ptr<Codec2Client::Component> mComp;
sp<IOMXBufferSource> mBufferSource;
@@ -96,6 +110,8 @@
bool mFirstInputFrame; // true for first input
c2_cntr64_t mPrevInputTimestamp; // input timestamp for previous frame
c2_cntr64_t mPrevCodecTimestamp; // adjusted (codec) timestamp for previous frame
+
+ std::map<uint64_t, buffer_id> mBufferIdsInUse;
};
} // namespace android
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 5f60378..8474ce8 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -370,6 +370,10 @@
return err;
}
+ void onInputBufferDone(c2_cntr64_t index) override {
+ mNode->onInputBufferDone(index);
+ }
+
private:
sp<BGraphicBufferSource> mSource;
sp<C2OMXNode> mNode;
@@ -742,10 +746,21 @@
return BAD_VALUE;
}
if ((config->mDomain & Config::IS_ENCODER) && (config->mDomain & Config::IS_VIDEO)) {
- if (!msg->findInt32(KEY_BIT_RATE, &i32)
- && !msg->findFloat(KEY_BIT_RATE, &flt)) {
- ALOGD("bitrate is missing, which is required for video encoders.");
- return BAD_VALUE;
+ C2Config::bitrate_mode_t mode = C2Config::BITRATE_VARIABLE;
+ if (msg->findInt32(KEY_BITRATE_MODE, &i32)) {
+ mode = (C2Config::bitrate_mode_t) i32;
+ }
+ if (mode == BITRATE_MODE_CQ) {
+ if (!msg->findInt32(KEY_QUALITY, &i32)) {
+ ALOGD("quality is missing, which is required for video encoders in CQ.");
+ return BAD_VALUE;
+ }
+ } else {
+ if (!msg->findInt32(KEY_BIT_RATE, &i32)
+ && !msg->findFloat(KEY_BIT_RATE, &flt)) {
+ ALOGD("bitrate is missing, which is required for video encoders.");
+ return BAD_VALUE;
+ }
}
if (!msg->findInt32(KEY_I_FRAME_INTERVAL, &i32)
&& !msg->findFloat(KEY_I_FRAME_INTERVAL, &flt)) {
@@ -1572,6 +1587,13 @@
void CCodec::onInputBufferDone(uint64_t frameIndex, size_t arrayIndex) {
mChannel->onInputBufferDone(frameIndex, arrayIndex);
+ if (arrayIndex == 0) {
+ // We always put no more than one buffer per work, if we use an input surface.
+ Mutexed<Config>::Locked config(mConfig);
+ if (config->mInputSurface) {
+ config->mInputSurface->onInputBufferDone(frameIndex);
+ }
+ }
}
void CCodec::onMessageReceived(const sp<AMessage> &msg) {
@@ -1704,6 +1726,9 @@
++stream;
}
}
+ if (config->mInputSurface) {
+ config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
+ }
mChannel->onWorkDone(
std::move(work), changed ? config->mOutputFormat : nullptr,
initData.hasChanged() ? initData.update().get() : nullptr);
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index a6fa333..d1fa920 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1525,6 +1525,7 @@
mPending.splice(mPending.end(), mStash);
mDepth = depth;
}
+
void CCodecBufferChannel::ReorderStash::setKey(C2Config::ordinal_key_t key) {
mPending.splice(mPending.end(), mStash);
mKey = key;
@@ -1547,13 +1548,25 @@
int64_t timestamp,
int32_t flags,
const C2WorkOrdinalStruct &ordinal) {
- auto it = mStash.begin();
- for (; it != mStash.end(); ++it) {
- if (less(ordinal, it->ordinal)) {
- break;
+ bool eos = flags & MediaCodec::BUFFER_FLAG_EOS;
+ if (!buffer && eos) {
+ // TRICKY: we may be violating ordering of the stash here. Because we
+ // don't expect any more emplace() calls after this, the ordering should
+ // not matter.
+ mStash.emplace_back(buffer, timestamp, flags, ordinal);
+ } else {
+ flags = flags & ~MediaCodec::BUFFER_FLAG_EOS;
+ auto it = mStash.begin();
+ for (; it != mStash.end(); ++it) {
+ if (less(ordinal, it->ordinal)) {
+ break;
+ }
+ }
+ mStash.emplace(it, buffer, timestamp, flags, ordinal);
+ if (eos) {
+ mStash.back().flags = mStash.back().flags | MediaCodec::BUFFER_FLAG_EOS;
}
}
- mStash.emplace(it, buffer, timestamp, flags, ordinal);
while (!mStash.empty() && mStash.size() > mDepth) {
mPending.push_back(mStash.front());
mStash.pop_front();
@@ -2483,6 +2496,7 @@
bool post = true;
if (!configs->empty()) {
sp<ABuffer> config = configs->front();
+ configs->pop_front();
if (buffer->capacity() >= config->size()) {
memcpy(buffer->base(), config->data(), config->size());
buffer->setRange(0, config->size());
@@ -2814,8 +2828,9 @@
outBuffer->meta()->setInt64("timeUs", entry.timestamp);
outBuffer->meta()->setInt32("flags", entry.flags);
- ALOGV("[%s] sendOutputBuffers: out buffer index = %zu [%p] => %p + %zu",
- mName, index, outBuffer.get(), outBuffer->data(), outBuffer->size());
+ ALOGV("[%s] sendOutputBuffers: out buffer index = %zu [%p] => %p + %zu (%lld)",
+ mName, index, outBuffer.get(), outBuffer->data(), outBuffer->size(),
+ (long long)entry.timestamp);
mCallback->onOutputBufferAvailable(index, outBuffer);
}
}
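
[Reviewer note] An illustrative walk-through of the ReorderStash::emplace() change above (timestamps are made up; assume the reorder depth is large enough that nothing is moved to mPending):

// stash before the call, ordered by ordinal:  [ts=10] [ts=20] [ts=40]
// emplace(buffer, ts=30, flags=EOS):
//   - EOS is stripped from the new entry, which is inserted in ordinal order
//   - EOS is then re-attached to whatever entry is currently last in the stash
// stash after the call:                       [ts=10] [ts=20] [ts=30] [ts=40, EOS]
// A standalone EOS (null buffer) is simply appended at the end instead.
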
diff --git a/media/codec2/sfplugin/InputSurfaceWrapper.h b/media/codec2/sfplugin/InputSurfaceWrapper.h
index d9c4eec..8341fd5 100644
--- a/media/codec2/sfplugin/InputSurfaceWrapper.h
+++ b/media/codec2/sfplugin/InputSurfaceWrapper.h
@@ -98,6 +98,13 @@
mDataSpace = dataSpace;
}
+ /**
+ * Clean up C2Work related references if necessary. No-op by default.
+ *
+ * \param index index of input work.
+ */
+ virtual void onInputBufferDone(c2_cntr64_t /* index */) {}
+
protected:
android_dataspace mDataSpace;
};
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 6da131f..d62944a 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -589,6 +589,21 @@
bool mIsHdr10Plus;
};
+struct Av1ProfileLevelMapper : ProfileLevelMapperHelper {
+ virtual bool simpleMap(C2Config::level_t from, int32_t *to) {
+ return sAv1Levels.map(from, to);
+ }
+ virtual bool simpleMap(int32_t from, C2Config::level_t *to) {
+ return sAv1Levels.map(from, to);
+ }
+ virtual bool simpleMap(C2Config::profile_t from, int32_t *to) {
+ return sAv1Profiles.map(from, to);
+ }
+ virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
+ return sAv1Profiles.map(from, to);
+ }
+};
+
} // namespace
// static
@@ -613,6 +628,8 @@
return std::make_shared<Vp8ProfileLevelMapper>();
} else if (mediaType == MIMETYPE_VIDEO_VP9) {
return std::make_shared<Vp9ProfileLevelMapper>();
+ } else if (mediaType == MIMETYPE_VIDEO_AV1) {
+ return std::make_shared<Av1ProfileLevelMapper>();
}
return nullptr;
}
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 20cc643..d6d24c1 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -1336,6 +1336,13 @@
mReader = NULL;
delete mDataSource;
+
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ TrackInfo *info = &mTracks.editItemAt(i);
+ if (info->mMeta) {
+ AMediaFormat_delete(info->mMeta);
+ }
+ }
}
size_t MatroskaExtractor::countTracks() {
@@ -1808,6 +1815,8 @@
void MatroskaExtractor::addTracks() {
const mkvparser::Tracks *tracks = mSegment->GetTracks();
+ AMediaFormat *meta = nullptr;
+
for (size_t index = 0; index < tracks->GetTracksCount(); ++index) {
const mkvparser::Track *track = tracks->GetTrackByIndex(index);
@@ -1832,7 +1841,11 @@
enum { VIDEO_TRACK = 1, AUDIO_TRACK = 2 };
- AMediaFormat *meta = AMediaFormat_new();
+ if (meta) {
+ AMediaFormat_clear(meta);
+ } else {
+ meta = AMediaFormat_new();
+ }
status_t err = OK;
int32_t nalSize = -1;
@@ -2067,21 +2080,26 @@
long long durationNs = mSegment->GetDuration();
AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_DURATION, (durationNs + 500) / 1000);
+ const char *mimetype = "";
+ if (!AMediaFormat_getString(meta, AMEDIAFORMAT_KEY_MIME, &mimetype)) {
+ // do not add this track to the track list
+ ALOGW("ignoring track with unknown mime");
+ continue;
+ }
+
mTracks.push();
size_t n = mTracks.size() - 1;
TrackInfo *trackInfo = &mTracks.editItemAt(n);
initTrackInfo(track, meta, trackInfo);
trackInfo->mNalLengthSize = nalSize;
- const char *mimetype = "";
- AMediaFormat_getString(meta, AMEDIAFORMAT_KEY_MIME, &mimetype);
-
if ((!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) ||
(!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_AVC) && isSetCsdFrom1stFrame)) {
// Attempt to recover from AVC track without codec private data
err = synthesizeAVCC(trackInfo, n);
if (err != OK) {
mTracks.pop();
+ continue;
}
} else if ((!strcmp("V_MPEG2", codecID) && codecPrivateSize == 0) ||
(!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_MPEG2) && isSetCsdFrom1stFrame)) {
@@ -2089,6 +2107,7 @@
err = synthesizeMPEG2(trackInfo, n);
if (err != OK) {
mTracks.pop();
+ continue;
}
} else if ((!strcmp("V_MPEG4/ISO/ASP", codecID) && codecPrivateSize == 0) ||
(!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_MPEG4) && isSetCsdFrom1stFrame) ||
@@ -2099,9 +2118,14 @@
err = synthesizeMPEG4(trackInfo, n);
if (err != OK) {
mTracks.pop();
+ continue;
}
}
-
+ // the TrackInfo owns the metadata now
+ meta = nullptr;
+ }
+ if (meta) {
+ AMediaFormat_delete(meta);
}
}
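
[Reviewer note] AMediaFormat ownership after this change, sketched as comments (all names from the patch):

// addTracks(): a single AMediaFormat is allocated lazily, cleared and reused across
// iterations whenever a candidate track is dropped (the new 'continue' paths), and
// handed off to the surviving TrackInfo, after which 'meta' is reset to nullptr so the
// trailing AMediaFormat_delete() only frees a leftover allocation.
// ~TrackInfo() no longer deletes mMeta; ~MatroskaExtractor() now frees each track's mMeta.
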
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/MatroskaExtractor.h
index d53d9e3..99fad17 100644
--- a/media/extractors/mkv/MatroskaExtractor.h
+++ b/media/extractors/mkv/MatroskaExtractor.h
@@ -61,10 +61,8 @@
TrackInfo() {
mMeta = NULL;
}
+
~TrackInfo() {
- if (mMeta) {
- AMediaFormat_delete(mMeta);
- }
}
unsigned long mTrackNum;
bool mEncrypted;
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 22819cb..5ff1c59 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -3675,8 +3675,10 @@
void *tmpData;
size_t tmpDataSize;
+ const char *s;
if (size >= 8 && metadataKey &&
- !AMediaFormat_getBuffer(mFileMetaData, metadataKey, &tmpData, &tmpDataSize)) {
+ !AMediaFormat_getBuffer(mFileMetaData, metadataKey, &tmpData, &tmpDataSize) &&
+ !AMediaFormat_getString(mFileMetaData, metadataKey, &s)) {
if (!strcmp(metadataKey, "albumart")) {
AMediaFormat_setBuffer(mFileMetaData, metadataKey,
buffer + 8, size - 8);
@@ -3918,10 +3920,9 @@
};
static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
- void *tmpData;
- size_t tmpDataSize;
for (size_t i = 0; i < kNumMapEntries; ++i) {
- if (!AMediaFormat_getBuffer(mFileMetaData, kMap[i].key, &tmpData, &tmpDataSize)) {
+ const char *ss;
+ if (!AMediaFormat_getString(mFileMetaData, kMap[i].key, &ss)) {
ID3::Iterator *it = new ID3::Iterator(id3, kMap[i].tag1);
if (it->done()) {
delete it;
@@ -5318,7 +5319,9 @@
}
int32_t MPEG4Source::parseHEVCLayerId(const uint8_t *data, size_t size) {
- CHECK(data != nullptr && size >= (mNALLengthSize + 2));
+ if (data == nullptr || size < mNALLengthSize + 2) {
+ return -1;
+ }
// HEVC NAL-header (16-bit)
// 1 6 6 3
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index d99493d..b63ae6b 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -1280,7 +1280,7 @@
//ALOGI("comment #%d: '%s'", i + 1, mVc.user_comments[i]);
}
- AMediaFormat_getInt32(mFileMeta, "haptic", &mHapticChannelCount);
+ AMediaFormat_getInt32(mFileMeta, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, &mHapticChannelCount);
}
void MyOggExtractor::setChannelMask(int channelCount) {
@@ -1297,6 +1297,8 @@
const audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(
audioChannelCount) | hapticChannelMask;
AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK, channelMask);
+ AMediaFormat_setInt32(
+ mMeta, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, mHapticChannelCount);
}
} else {
AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK,
diff --git a/media/libaudioclient/AudioPolicy.cpp b/media/libaudioclient/AudioPolicy.cpp
index 3ab38cd..65e797f 100644
--- a/media/libaudioclient/AudioPolicy.cpp
+++ b/media/libaudioclient/AudioPolicy.cpp
@@ -97,6 +97,7 @@
mDeviceType = (audio_devices_t) parcel->readInt32();
mDeviceAddress = parcel->readString8();
mCbFlags = (uint32_t)parcel->readInt32();
+ mAllowPrivilegedPlaybackCapture = parcel->readBool();
size_t size = (size_t)parcel->readInt32();
if (size > MAX_CRITERIA_PER_MIX) {
size = MAX_CRITERIA_PER_MIX;
@@ -120,6 +121,7 @@
parcel->writeInt32(mDeviceType);
parcel->writeString8(mDeviceAddress);
parcel->writeInt32(mCbFlags);
+ parcel->writeBool(mAllowPrivilegedPlaybackCapture);
size_t size = mCriteria.size();
if (size > MAX_CRITERIA_PER_MIX) {
size = MAX_CRITERIA_PER_MIX;
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 5851533..f07be46 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -410,8 +410,8 @@
}
// Call these directly because we are already holding the lock.
- mAudioRecord->setMicrophoneDirection(mSelectedMicDirection);
- mAudioRecord->setMicrophoneFieldDimension(mSelectedMicFieldDimension);
+ mAudioRecord->setPreferredMicrophoneDirection(mSelectedMicDirection);
+ mAudioRecord->setPreferredMicrophoneFieldDimension(mSelectedMicFieldDimension);
if (status != NO_ERROR) {
mActive = false;
@@ -1381,7 +1381,7 @@
return mAudioRecord->getActiveMicrophones(activeMicrophones).transactionError();
}
-status_t AudioRecord::setMicrophoneDirection(audio_microphone_direction_t direction)
+status_t AudioRecord::setPreferredMicrophoneDirection(audio_microphone_direction_t direction)
{
AutoMutex lock(mLock);
if (mSelectedMicDirection == direction) {
@@ -1394,11 +1394,11 @@
// the internal AudioRecord hasn't be created yet, so just stash the attribute.
return OK;
} else {
- return mAudioRecord->setMicrophoneDirection(direction).transactionError();
+ return mAudioRecord->setPreferredMicrophoneDirection(direction).transactionError();
}
}
-status_t AudioRecord::setMicrophoneFieldDimension(float zoom) {
+status_t AudioRecord::setPreferredMicrophoneFieldDimension(float zoom) {
AutoMutex lock(mLock);
if (mSelectedMicFieldDimension == zoom) {
// NOP
@@ -1410,7 +1410,7 @@
// the internal AudioRecord hasn't be created yet, so just stash the attribute.
return OK;
} else {
- return mAudioRecord->setMicrophoneFieldDimension(zoom).transactionError();
+ return mAudioRecord->setPreferredMicrophoneFieldDimension(zoom).transactionError();
}
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
index cf9c7f4..ecf58b6 100644
--- a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
@@ -39,9 +39,9 @@
/* Set the microphone direction (for processing).
*/
- void setMicrophoneDirection(int /*audio_microphone_direction_t*/ direction);
+ void setPreferredMicrophoneDirection(int /*audio_microphone_direction_t*/ direction);
/* Set the microphone zoom (for processing).
*/
- void setMicrophoneFieldDimension(float zoom);
+ void setPreferredMicrophoneFieldDimension(float zoom);
}
diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
index bf8d627..4b94c12 100644
--- a/media/libaudioclient/include/media/AudioPolicy.h
+++ b/media/libaudioclient/include/media/AudioPolicy.h
@@ -114,6 +114,8 @@
audio_devices_t mDeviceType;
String8 mDeviceAddress;
uint32_t mCbFlags; // flags indicating which callbacks to use, see kCbFlag*
+ /** Ignore the AUDIO_FLAG_NO_MEDIA_PROJECTION */
+ bool mAllowPrivilegedPlaybackCapture = false;
};
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index b4ddb69..9c81bb7 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -542,11 +542,11 @@
/* Set the Microphone direction (for processing purposes).
*/
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
/* Set the Microphone zoom factor (for processing purposes).
*/
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
/* Get the unique port ID assigned to this AudioRecord instance by audio policy manager.
* The ID is unique across all audioserver clients and can change during the life cycle
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index e396cf3..6c8e6a4 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -855,25 +855,26 @@
#endif
#if MAJOR_VERSION < 5
-status_t StreamInHalHidl::setMicrophoneDirection(audio_microphone_direction_t direction __unused) {
+status_t StreamInHalHidl::setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction __unused) {
if (mStream == 0) return NO_INIT;
return INVALID_OPERATION;
}
-status_t StreamInHalHidl::setMicrophoneFieldDimension(float zoom __unused) {
+status_t StreamInHalHidl::setPreferredMicrophoneFieldDimension(float zoom __unused) {
if (mStream == 0) return NO_INIT;
return INVALID_OPERATION;
}
#else
-status_t StreamInHalHidl::setMicrophoneDirection(audio_microphone_direction_t direction) {
+status_t StreamInHalHidl::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
if (!mStream) return NO_INIT;
- return processReturn("setMicrophoneDirection",
- mStream->setMicrophoneDirection(static_cast<MicrophoneDirection>(direction)));
+ return processReturn("setPreferredMicrophoneDirection",
+ mStream->setMicrophoneDirection(static_cast<MicrophoneDirection>(direction)));
}
-status_t StreamInHalHidl::setMicrophoneFieldDimension(float zoom) {
+status_t StreamInHalHidl::setPreferredMicrophoneFieldDimension(float zoom) {
if (!mStream) return NO_INIT;
- return processReturn("setMicrophoneFieldDimension",
+ return processReturn("setPreferredMicrophoneFieldDimension",
mStream->setMicrophoneFieldDimension(zoom));
}
#endif
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 9ac1067..f587889 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -221,10 +221,11 @@
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
// Set microphone direction (for processing)
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) override;
+ virtual status_t setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction) override;
// Set microphone zoom (for processing)
- virtual status_t setMicrophoneFieldDimension(float zoom) override;
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom) override;
// Called when the metadata of the stream's sink has been changed.
status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index fcb809b..7d5ce05 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -369,20 +369,21 @@
#endif
#if MAJOR_VERSION < 5
-status_t StreamInHalLocal::setMicrophoneDirection(audio_microphone_direction_t direction __unused) {
+status_t StreamInHalLocal::setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction __unused) {
return INVALID_OPERATION;
}
-status_t StreamInHalLocal::setMicrophoneFieldDimension(float zoom __unused) {
+status_t StreamInHalLocal::setPreferredMicrophoneFieldDimension(float zoom __unused) {
return INVALID_OPERATION;
}
#else
-status_t StreamInHalLocal::setMicrophoneDirection(audio_microphone_direction_t direction) {
+status_t StreamInHalLocal::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
if (mStream->set_microphone_direction == NULL) return INVALID_OPERATION;
return mStream->set_microphone_direction(mStream, direction);
}
-status_t StreamInHalLocal::setMicrophoneFieldDimension(float zoom) {
+status_t StreamInHalLocal::setPreferredMicrophoneFieldDimension(float zoom) {
if (mStream->set_microphone_field_dimension == NULL) return INVALID_OPERATION;
return mStream->set_microphone_field_dimension(mStream, zoom);
@@ -391,3 +392,5 @@
} // namespace CPP_VERSION
} // namespace android
+
+
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index 3d6c50e..34f2bd8 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -205,10 +205,10 @@
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
// Sets microphone direction (for processing)
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
// Sets microphone zoom (for processing)
- virtual status_t setMicrophoneFieldDimension(float zoom);
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
// Called when the metadata of the stream's sink has been changed.
status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index ed8282f..6c3b21c 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -180,10 +180,10 @@
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
// Set direction for capture processing
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t) = 0;
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t) = 0;
// Set zoom factor for capture stream
- virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
struct SinkMetadata {
std::vector<record_track_metadata_t> tracks;
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 7a32d3f..d150f18 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -132,6 +132,9 @@
shared_libs: [
"liblog",
],
+ header_libs: [
+ "libhardware_headers"
+ ],
cflags: [
"-fvisibility=hidden",
"-DBUILD_FLOAT",
diff --git a/media/libeffects/lvm/lib/Bundle/lib/LVM.h b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
index 83ecae1..5082a53 100644
--- a/media/libeffects/lvm/lib/Bundle/lib/LVM.h
+++ b/media/libeffects/lvm/lib/Bundle/lib/LVM.h
@@ -298,6 +298,7 @@
LVM_PSA_DecaySpeed_en PSA_PeakDecayRate; /* Peak value decay rate*/
#ifdef SUPPORT_MC
LVM_INT32 NrChannels;
+ LVM_INT32 ChMask;
#endif
} LVM_ControlParams_t;
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
index 62b4c73..1d95342 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Control.c
@@ -93,6 +93,7 @@
#ifdef SUPPORT_MC
pInstance->Params.NrChannels = pParams->NrChannels;
+ pInstance->Params.ChMask = pParams->ChMask;
#endif
/*
* Cinema Sound parameters
@@ -584,6 +585,7 @@
#ifdef SUPPORT_MC
pInstance->NrChannels = LocalParams.NrChannels;
+ pInstance->ChMask = LocalParams.ChMask;
#endif
/* Clear all internal data if format change*/
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
index 19d1532..cdd3134 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Private.h
@@ -291,6 +291,7 @@
#ifdef SUPPORT_MC
LVM_INT16 NrChannels;
+ LVM_INT32 ChMask;
#endif
} LVM_Instance_t;
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
index 94ba278..8d30a61 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Process.c
@@ -21,6 +21,7 @@
/* Includes */
/* */
/****************************************************************************************/
+#include <system/audio.h>
#include "LVM_Private.h"
#include "VectorArithmetic.h"
@@ -67,6 +68,7 @@
LVM_ReturnStatus_en Status;
#ifdef SUPPORT_MC
LVM_INT32 NrChannels = pInstance->NrChannels;
+ LVM_INT32 ChMask = pInstance->ChMask;
#define NrFrames SampleCount // alias for clarity
#endif
@@ -119,6 +121,7 @@
#ifdef SUPPORT_MC
/* Update the local variable NrChannels from pInstance->NrChannels value */
NrChannels = pInstance->NrChannels;
+ ChMask = pInstance->ChMask;
#endif
if(Status != LVM_SUCCESS)
@@ -140,6 +143,7 @@
pToProcess = pOutData;
#ifdef SUPPORT_MC
NrChannels = 2;
+ ChMask = AUDIO_CHANNEL_OUT_STEREO;
#endif
}
@@ -254,18 +258,24 @@
}
#ifdef SUPPORT_MC
- /* TODO - Multichannel support to be added */
- if (NrChannels == 2)
+ /*
+ * Volume balance
+ */
+ LVC_MixSoft_1St_MC_float_SAT(&pInstance->VC_BalanceMix,
+ pProcessed,
+ pProcessed,
+ NrFrames,
+ NrChannels,
+ ChMask);
+#else
+ /*
+ * Volume balance
+ */
+ LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
+ pProcessed,
+ pProcessed,
+ SampleCount);
#endif
- {
- /*
- * Volume balance
- */
- LVC_MixSoft_1St_2i_D16C31_SAT(&pInstance->VC_BalanceMix,
- pProcessed,
- pProcessed,
- SampleCount);
- }
/*
* Perform Parametric Spectum Analysis
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.c
index eb5755e..db76cd1 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixHard_1St_2i_D16C31_SAT.c
@@ -59,6 +59,31 @@
}
+#ifdef SUPPORT_MC
+void LVC_Core_MixHard_1St_MC_float_SAT (Mix_Private_FLOAT_st **ptrInstance,
+ const LVM_FLOAT *src,
+ LVM_FLOAT *dst,
+ LVM_INT16 NrFrames,
+ LVM_INT16 NrChannels)
+{
+ LVM_FLOAT Temp;
+ LVM_INT16 ii, jj;
+ for (ii = NrFrames; ii != 0; ii--)
+ {
+ for (jj = 0; jj < NrChannels; jj++)
+ {
+ Mix_Private_FLOAT_st *pInstance1 = (Mix_Private_FLOAT_st *)(ptrInstance[jj]);
+ Temp = ((LVM_FLOAT)*(src++) * (LVM_FLOAT)pInstance1->Current);
+ if (Temp > 1.0f)
+ *dst++ = 1.0f;
+ else if (Temp < -1.0f)
+ *dst++ = -1.0f;
+ else
+ *dst++ = (LVM_FLOAT)Temp;
+ }
+ }
+}
+#endif
#else
void LVC_Core_MixHard_1St_2i_D16C31_SAT( LVMixer3_st *ptrInstance1,
LVMixer3_st *ptrInstance2,
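
The new LVC_Core_MixHard_1St_MC_float_SAT above applies each channel's current gain and saturates the result to [-1.0, 1.0]. A minimal standalone sketch of that saturating gain stage, assuming interleaved float samples; gains[] is an illustrative stand-in for ptrInstance[ch]->Current and is not part of the patch:

    // Sketch only: per-channel gain with hard saturation, mirroring the loop above.
    static void hardMixSat(const float *src, float *dst, const float *gains,
                           int nrFrames, int nrChannels) {
        for (int i = 0; i < nrFrames; ++i) {
            for (int ch = 0; ch < nrChannels; ++ch) {
                float v = *src++ * gains[ch];
                if (v > 1.0f) v = 1.0f;          // clamp positive peaks
                else if (v < -1.0f) v = -1.0f;   // clamp negative peaks
                *dst++ = v;
            }
        }
    }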
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.c b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.c
index 656a117..56b5dae 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Core_MixSoft_1St_2i_D16C31_WRA.c
@@ -146,6 +146,51 @@
pInstanceR->Current = CurrentR;
}
+#ifdef SUPPORT_MC
+void LVC_Core_MixSoft_1St_MC_float_WRA (Mix_Private_FLOAT_st **ptrInstance,
+ const LVM_FLOAT *src,
+ LVM_FLOAT *dst,
+ LVM_INT16 NrFrames,
+ LVM_INT16 NrChannels)
+{
+ LVM_INT32 ii, ch;
+ LVM_FLOAT Temp = 0.0f;
+ LVM_FLOAT tempCurrent[NrChannels];
+ for (ch = 0; ch < NrChannels; ch++)
+ {
+ tempCurrent[ch] = ptrInstance[ch]->Current;
+ }
+ for (ii = NrFrames; ii > 0; ii--)
+ {
+ for (ch = 0; ch < NrChannels; ch++)
+ {
+ Mix_Private_FLOAT_st *pInstance = ptrInstance[ch];
+ const LVM_FLOAT Delta = pInstance->Delta;
+ LVM_FLOAT Current = tempCurrent[ch];
+ const LVM_FLOAT Target = pInstance->Target;
+ if (Current < Target)
+ {
+ ADD2_SAT_FLOAT(Current, Delta, Temp);
+ Current = Temp;
+ if (Current > Target)
+ Current = Target;
+ }
+ else
+ {
+ Current -= Delta;
+ if (Current < Target)
+ Current = Target;
+ }
+ *dst++ = *src++ * Current;
+ tempCurrent[ch] = Current;
+ }
+ }
+ for (ch = 0; ch < NrChannels; ch++)
+ {
+ ptrInstance[ch]->Current = tempCurrent[ch];
+ }
+}
+#endif
#else
void LVC_Core_MixSoft_1St_2i_D16C31_WRA( LVMixer3_st *ptrInstance1,
LVMixer3_st *ptrInstance2,
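
LVC_Core_MixSoft_1St_MC_float_WRA above ramps each channel's gain from Current toward Target by Delta once per frame before applying it, caching the per-channel gains in tempCurrent[] so the instance is written back only once. A minimal sketch of the ramp step under those assumptions; gain, target and step are illustrative names for the Mix_Private_FLOAT_st fields:

    // Sketch only: move the gain one step toward the target, never overshooting,
    // then apply it to the sample.
    static inline float rampAndApply(float sample, float &gain, float target, float step) {
        if (gain < target) {
            gain += step;
            if (gain > target) gain = target;
        } else {
            gain -= step;
            if (gain < target) gain = target;
        }
        return sample * gain;
    }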
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.c b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.c
index bd5a925..a4682d3 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_MixSoft_1St_2i_D16C31_SAT.c
@@ -19,6 +19,8 @@
INCLUDE FILES
***********************************************************************************/
+#include <system/audio.h>
+
#include "LVC_Mixer_Private.h"
#include "VectorArithmetic.h"
#include "ScalarArithmetic.h"
@@ -30,10 +32,207 @@
#define TRUE 1
#define FALSE 0
+#define ARRAY_SIZE(a) ((sizeof(a)) / (sizeof(*(a))))
+
/**********************************************************************************
FUNCTION LVC_MixSoft_1St_2i_D16C31_SAT
***********************************************************************************/
#ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+/* This threshold is used to decide on the processing to be applied on
+ * front center and back center channels
+ */
+#define LVM_VOL_BAL_THR (0.000016f)
+void LVC_MixSoft_1St_MC_float_SAT (LVMixer3_2St_FLOAT_st *ptrInstance,
+ const LVM_FLOAT *src,
+ LVM_FLOAT *dst,
+ LVM_INT16 NrFrames,
+ LVM_INT32 NrChannels,
+ LVM_INT32 ChMask)
+{
+ char HardMixing = TRUE;
+ LVM_FLOAT TargetGain;
+ Mix_Private_FLOAT_st Target_lfe = {LVM_MAXFLOAT, LVM_MAXFLOAT, LVM_MAXFLOAT};
+ Mix_Private_FLOAT_st Target_ctr = {LVM_MAXFLOAT, LVM_MAXFLOAT, LVM_MAXFLOAT};
+ Mix_Private_FLOAT_st *pInstance1 = \
+ (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[0].PrivateParams);
+ Mix_Private_FLOAT_st *pInstance2 = \
+ (Mix_Private_FLOAT_st *)(ptrInstance->MixerStream[1].PrivateParams);
+ Mix_Private_FLOAT_st *pMixPrivInst[4] = {pInstance1, pInstance2, &Target_ctr, &Target_lfe};
+ Mix_Private_FLOAT_st *pInstance[NrChannels];
+
+ if (audio_channel_mask_get_representation(ChMask)
+ == AUDIO_CHANNEL_REPRESENTATION_INDEX)
+ {
+ for (int i = 0; i < 2; i++)
+ {
+ pInstance[i] = pMixPrivInst[i];
+ }
+ for (int i = 2; i < NrChannels; i++)
+ {
+ pInstance[i] = pMixPrivInst[2];
+ }
+ }
+ else
+ {
+ // TODO: Combine with system/media/audio_utils/Balance.cpp
+ // Constants in system/media/audio/include/system/audio-base.h
+ // 'mixInstIdx' is used to map the appropriate mixer instance for each channel.
+ const int mixInstIdx[] = {
+ 0, // AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1u,
+ 1, // AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2u,
+ 2, // AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4u,
+ 3, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8u,
+ 0, // AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10u,
+ 1, // AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20u,
+ 0, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40u,
+ 1, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+ 2, // AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100u,
+ 0, // AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200u,
+ 1, // AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400u,
+ 2, // AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800u,
+ 0, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000u,
+ 2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000u,
+ 1, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000u,
+ 0, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000u,
+ 2, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000u,
+ 1, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000u,
+ 0, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT = 0x40000u,
+ 1, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT = 0x80000u
+ };
+ if (pInstance1->Target <= LVM_VOL_BAL_THR ||
+ pInstance2->Target <= LVM_VOL_BAL_THR)
+ {
+ Target_ctr.Target = 0.0f;
+ Target_ctr.Current = 0.0f;
+ Target_ctr.Delta = 0.0f;
+ }
+ const unsigned int idxArrSize = ARRAY_SIZE(mixInstIdx);
+ for (unsigned int i = 0, channel = ChMask; channel != 0; ++i)
+ {
+ const unsigned int idx = __builtin_ctz(channel);
+ if (idx < idxArrSize)
+ {
+ pInstance[i] = pMixPrivInst[mixInstIdx[idx]];
+ }
+ else
+ {
+ pInstance[i] = pMixPrivInst[2];
+ }
+ channel &= ~(1 << idx);
+ }
+ }
+
+ if (NrFrames <= 0) return;
+
+ /******************************************************************************
+ SOFT MIXING
+ *******************************************************************************/
+
+ if ((pInstance1->Current != pInstance1->Target) ||
+ (pInstance2->Current != pInstance2->Target))
+ {
+ // TODO: combine similar checks below.
+ if (pInstance1->Delta == LVM_MAXFLOAT
+ || Abs_Float(pInstance1->Current - pInstance1->Target) < pInstance1->Delta)
+ {
+ /* Difference is not significant anymore. Make them equal. */
+ pInstance1->Current = pInstance1->Target;
+ TargetGain = pInstance1->Target;
+ LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[0]), TargetGain);
+ }
+ else
+ {
+ /* Soft mixing has to be applied */
+ HardMixing = FALSE;
+ }
+
+ if (HardMixing == TRUE)
+ {
+ if (pInstance2->Delta == LVM_MAXFLOAT
+ || Abs_Float(pInstance2->Current - pInstance2->Target) < pInstance2->Delta)
+ {
+ /* Difference is not significant anymore. Make them equal. */
+ pInstance2->Current = pInstance2->Target;
+ TargetGain = pInstance2->Target;
+ LVC_Mixer_SetTarget(&(ptrInstance->MixerStream[1]), TargetGain);
+ }
+ else
+ {
+ /* Soft mixing has to be applied */
+ HardMixing = FALSE;
+ }
+ }
+
+ if (HardMixing == FALSE)
+ {
+ LVC_Core_MixSoft_1St_MC_float_WRA (&pInstance[0],
+ src, dst, NrFrames, NrChannels);
+ }
+ }
+
+ /******************************************************************************
+ HARD MIXING
+ *******************************************************************************/
+
+ if (HardMixing == TRUE)
+ {
+ if ((pInstance1->Target == LVM_MAXFLOAT) && (pInstance2->Target == LVM_MAXFLOAT))
+ {
+ if (src != dst)
+ {
+ Copy_Float(src, dst, NrFrames*NrChannels);
+ }
+ }
+ else
+ {
+ LVC_Core_MixHard_1St_MC_float_SAT(&(pInstance[0]),
+ src, dst, NrFrames, NrChannels);
+ }
+ }
+
+ /******************************************************************************
+ CALL BACK
+ *******************************************************************************/
+
+ if (ptrInstance->MixerStream[0].CallbackSet)
+ {
+ if (Abs_Float(pInstance1->Current - pInstance1->Target) < pInstance1->Delta)
+ {
+ pInstance1->Current = pInstance1->Target; /* Difference is not significant anymore. \
+ Make them equal. */
+ TargetGain = pInstance1->Target;
+ LVC_Mixer_SetTarget(&ptrInstance->MixerStream[0], TargetGain);
+ ptrInstance->MixerStream[0].CallbackSet = FALSE;
+ if (ptrInstance->MixerStream[0].pCallBack != 0)
+ {
+ (*ptrInstance->MixerStream[0].pCallBack) (\
+ ptrInstance->MixerStream[0].pCallbackHandle,
+ ptrInstance->MixerStream[0].pGeneralPurpose,
+ ptrInstance->MixerStream[0].CallbackParam);
+ }
+ }
+ }
+ if (ptrInstance->MixerStream[1].CallbackSet)
+ {
+ if (Abs_Float(pInstance2->Current - pInstance2->Target) < pInstance2->Delta)
+ {
+ pInstance2->Current = pInstance2->Target; /* Difference is not significant anymore.
+ Make them equal. */
+ TargetGain = pInstance2->Target;
+ LVC_Mixer_SetTarget(&ptrInstance->MixerStream[1], TargetGain);
+ ptrInstance->MixerStream[1].CallbackSet = FALSE;
+ if (ptrInstance->MixerStream[1].pCallBack != 0)
+ {
+ (*ptrInstance->MixerStream[1].pCallBack) (\
+ ptrInstance->MixerStream[1].pCallbackHandle,
+ ptrInstance->MixerStream[1].pGeneralPurpose,
+ ptrInstance->MixerStream[1].CallbackParam);
+ }
+ }
+ }
+}
+#endif
void LVC_MixSoft_1St_2i_D16C31_SAT( LVMixer3_2St_FLOAT_st *ptrInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst,
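
LVC_MixSoft_1St_MC_float_SAT above walks the set bits of ChMask with __builtin_ctz and routes each channel to one of four mixer instances (left, right, center, LFE) through the mixInstIdx table; index masks simply get left/right for the first two channels and the center instance for the rest. A condensed sketch of the positional-mask walk, with illustrative names (instForBit[] stands in for mixInstIdx[]):

    // Sketch only: assign a mixer-instance index to every channel present in a
    // positional channel mask, defaulting unknown bits to the center instance (2).
    static void mapChannelsToInstances(unsigned chMask, const int *instForBit,
                                       unsigned tableSize, int *instPerChannel) {
        unsigned remaining = chMask;
        for (unsigned i = 0; remaining != 0; ++i) {
            const unsigned bit = __builtin_ctz(remaining);   // lowest set channel bit
            instPerChannel[i] = (bit < tableSize) ? instForBit[bit] : 2;
            remaining &= ~(1u << bit);                       // clear it and continue
        }
    }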
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
index 7f18747..199d529 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer.h
@@ -224,6 +224,14 @@
/* Gain values should not be more that 1.0 */
/**********************************************************************************/
#ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+void LVC_MixSoft_1St_MC_float_SAT(LVMixer3_2St_FLOAT_st *pInstance,
+ const LVM_FLOAT *src,
+ LVM_FLOAT *dst, /* dst can be equal to src */
+ LVM_INT16 NrFrames,
+ LVM_INT32 NrChannels,
+ LVM_INT32 ChMask);
+#endif
void LVC_MixSoft_1St_2i_D16C31_SAT(LVMixer3_2St_FLOAT_st *pInstance,
const LVM_FLOAT *src,
LVM_FLOAT *dst, /* dst can be equal to src */
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
index f10094b..453a6a5 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_Private.h
@@ -116,6 +116,13 @@
/* Gain values should not be more that 1.0 */
/**********************************************************************************/
#ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+void LVC_Core_MixSoft_1St_MC_float_WRA(Mix_Private_FLOAT_st **ptrInstance,
+ const LVM_FLOAT *src,
+ LVM_FLOAT *dst,
+ LVM_INT16 NrFrames,
+ LVM_INT16 NrChannels);
+#endif
void LVC_Core_MixSoft_1St_2i_D16C31_WRA( LVMixer3_FLOAT_st *ptrInstance1,
LVMixer3_FLOAT_st *ptrInstance2,
const LVM_FLOAT *src,
@@ -136,6 +143,13 @@
/* Gain values should not be more that 1.0 */
/**********************************************************************************/
#ifdef BUILD_FLOAT
+#ifdef SUPPORT_MC
+void LVC_Core_MixHard_1St_MC_float_SAT(Mix_Private_FLOAT_st **ptrInstance,
+ const LVM_FLOAT *src,
+ LVM_FLOAT *dst,
+ LVM_INT16 NrFrames,
+ LVM_INT16 NrChannels);
+#endif
void LVC_Core_MixHard_1St_2i_D16C31_SAT( LVMixer3_FLOAT_st *ptrInstance1,
LVMixer3_FLOAT_st *ptrInstance2,
const LVM_FLOAT *src,
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index 1a874a3..5079634 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -36,6 +36,10 @@
"-csE -tE"
"-csE -eqE" "-tE -eqE"
"-csE -tE -bE -M -eqE"
+ "-tE -eqE -vcBal:96 -M"
+ "-tE -eqE -vcBal:-96 -M"
+ "-tE -eqE -vcBal:0 -M"
+ "-tE -eqE -bE -vcBal:30 -M"
)
fs_arr=(
@@ -56,26 +60,41 @@
# run multichannel effects at different configs, saving only the stereo channel
# pair.
+error_count=0
for flags in "${flags_arr[@]}"
do
for fs in ${fs_arr[*]}
do
- for ch in {1..8}
+ for chMask in {0..22}
do
adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw \
- -o:$testdir/sinesweep_$((ch))_$((fs)).raw -ch:$ch -fs:$fs $flags
+ -o:$testdir/sinesweep_$((chMask))_$((fs)).raw -chMask:$chMask -fs:$fs $flags
+
+ shell_ret=$?
+ if [ $shell_ret -ne 0 ]; then
+ echo "error: $shell_ret"
+ ((++error_count))
+ fi
+
# two channel files should be identical to higher channel
# computation (first 2 channels).
# Do not compare cases where -bE is in flags (due to mono computation)
- if [[ $flags != *"-bE"* ]] && [ "$ch" -gt 2 ]
+ if [[ $flags != *"-bE"* ]] && [[ "$chMask" -gt 1 ]]
then
- adb shell cmp $testdir/sinesweep_2_$((fs)).raw \
- $testdir/sinesweep_$((ch))_$((fs)).raw
- elif [[ $flags == *"-bE"* ]] && [ "$ch" -gt 2 ]
+ adb shell cmp $testdir/sinesweep_1_$((fs)).raw \
+ $testdir/sinesweep_$((chMask))_$((fs)).raw
+ elif [[ $flags == *"-bE"* ]] && [[ "$chMask" -gt 1 ]]
then
- adb shell $testdir/snr $testdir/sinesweep_2_$((fs)).raw \
- $testdir/sinesweep_$((ch))_$((fs)).raw -thr:90.308998
+ adb shell $testdir/snr $testdir/sinesweep_1_$((fs)).raw \
+ $testdir/sinesweep_$((chMask))_$((fs)).raw -thr:90.308998
+ fi
+
+ # both cmp and snr return EXIT_FAILURE on mismatch.
+ shell_ret=$?
+ if [ $shell_ret -ne 0 ]; then
+ echo "error: $shell_ret"
+ ((++error_count))
fi
done
@@ -83,3 +102,5 @@
done
adb shell rm -r $testdir
+echo "$error_count errors"
+exit $error_count
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index 416bdaa..5b58dd1 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -24,6 +24,7 @@
#include <audio_utils/channels.h>
#include <audio_utils/primitives.h>
#include <log/log.h>
+#include <system/audio.h>
#include "EffectBundle.h"
#include "LVM_Private.h"
@@ -76,6 +77,8 @@
struct lvmConfigParams_t {
int samplingFreq = 44100;
int nrChannels = 2;
+ int chMask = AUDIO_CHANNEL_OUT_STEREO;
+ int vcBal = 0;
int fChannels = 2;
bool monoMode = false;
int bassEffectLevel = 0;
@@ -87,9 +90,36 @@
LVM_Mode_en csEnable = LVM_MODE_OFF;
};
+constexpr audio_channel_mask_t lvmConfigChMask[] = {
+ AUDIO_CHANNEL_OUT_MONO,
+ AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_CHANNEL_OUT_2POINT1,
+ AUDIO_CHANNEL_OUT_2POINT0POINT2,
+ AUDIO_CHANNEL_OUT_QUAD,
+ AUDIO_CHANNEL_OUT_QUAD_BACK,
+ AUDIO_CHANNEL_OUT_QUAD_SIDE,
+ AUDIO_CHANNEL_OUT_SURROUND,
+ (1 << 4) - 1,
+ AUDIO_CHANNEL_OUT_2POINT1POINT2,
+ AUDIO_CHANNEL_OUT_3POINT0POINT2,
+ AUDIO_CHANNEL_OUT_PENTA,
+ (1 << 5) - 1,
+ AUDIO_CHANNEL_OUT_3POINT1POINT2,
+ AUDIO_CHANNEL_OUT_5POINT1,
+ AUDIO_CHANNEL_OUT_5POINT1_BACK,
+ AUDIO_CHANNEL_OUT_5POINT1_SIDE,
+ (1 << 6) - 1,
+ AUDIO_CHANNEL_OUT_6POINT1,
+ (1 << 7) - 1,
+ AUDIO_CHANNEL_OUT_5POINT1POINT2,
+ AUDIO_CHANNEL_OUT_7POINT1,
+ (1 << 8) - 1,
+};
+
+
void printUsage() {
printf("\nUsage: ");
- printf("\n <exceutable> -i:<input_file> -o:<out_file> [options]\n");
+ printf("\n <executable> -i:<input_file> -o:<out_file> [options]\n");
printf("\nwhere, \n <inputfile> is the input file name");
printf("\n on which LVM effects are applied");
printf("\n <outputfile> processed output file");
@@ -98,7 +128,34 @@
printf("\n -help (or) -h");
printf("\n Prints this usage information");
printf("\n");
- printf("\n -ch:<process_channels> (1 through 8)\n\n");
+ printf("\n -chMask:<channel_mask>\n");
+ printf("\n 0 - AUDIO_CHANNEL_OUT_MONO");
+ printf("\n 1 - AUDIO_CHANNEL_OUT_STEREO");
+ printf("\n 2 - AUDIO_CHANNEL_OUT_2POINT1");
+ printf("\n 3 - AUDIO_CHANNEL_OUT_2POINT0POINT2");
+ printf("\n 4 - AUDIO_CHANNEL_OUT_QUAD");
+ printf("\n 5 - AUDIO_CHANNEL_OUT_QUAD_BACK");
+ printf("\n 6 - AUDIO_CHANNEL_OUT_QUAD_SIDE");
+ printf("\n 7 - AUDIO_CHANNEL_OUT_SURROUND");
+ printf("\n 8 - canonical channel index mask for 4 ch: (1 << 4) - 1");
+ printf("\n 9 - AUDIO_CHANNEL_OUT_2POINT1POINT2");
+ printf("\n 10 - AUDIO_CHANNEL_OUT_3POINT0POINT2");
+ printf("\n 11 - AUDIO_CHANNEL_OUT_PENTA");
+ printf("\n 12 - canonical channel index mask for 5 ch: (1 << 5) - 1");
+ printf("\n 13 - AUDIO_CHANNEL_OUT_3POINT1POINT2");
+ printf("\n 14 - AUDIO_CHANNEL_OUT_5POINT1");
+ printf("\n 15 - AUDIO_CHANNEL_OUT_5POINT1_BACK");
+ printf("\n 16 - AUDIO_CHANNEL_OUT_5POINT1_SIDE");
+ printf("\n 17 - canonical channel index mask for 6 ch: (1 << 6) - 1");
+ printf("\n 18 - AUDIO_CHANNEL_OUT_6POINT1");
+ printf("\n 19 - canonical channel index mask for 7 ch: (1 << 7) - 1");
+ printf("\n 20 - AUDIO_CHANNEL_OUT_5POINT1POINT2");
+ printf("\n 21 - AUDIO_CHANNEL_OUT_7POINT1");
+ printf("\n 22 - canonical channel index mask for 8 ch: (1 << 8) - 1");
+ printf("\n default 0");
+ printf("\n -vcBal:<Left Right Balance control in dB [-96 to 96 dB]>");
+ printf("\n -ve values reduce Right channel while +ve value reduces Left channel");
+ printf("\n default 0");
printf("\n -fch:<file_channels> (1 through 8)\n\n");
printf("\n -M");
printf("\n Mono mode (force all input audio channels to be identical)");
@@ -298,6 +355,7 @@
params->OperatingMode = LVM_MODE_ON;
params->SampleRate = LVM_FS_44100;
params->SourceFormat = LVM_STEREO;
+ params->ChMask = AUDIO_CHANNEL_OUT_STEREO;
params->SpeakerType = LVM_HEADPHONES;
pContext->pBundledContext->SampleRate = LVM_FS_44100;
@@ -452,13 +510,13 @@
params->OperatingMode = LVM_MODE_ON;
params->SpeakerType = LVM_HEADPHONES;
- const int nrChannels = plvmConfigParams->nrChannels;
- params->NrChannels = nrChannels;
- if (nrChannels == 1) {
+ params->ChMask = plvmConfigParams->chMask;
+ params->NrChannels = plvmConfigParams->nrChannels;
+ if (params->NrChannels == 1) {
params->SourceFormat = LVM_MONO;
- } else if (nrChannels == 2) {
+ } else if (params->NrChannels == 2) {
params->SourceFormat = LVM_STEREO;
- } else if (nrChannels > 2 && nrChannels <= 8) { // FCC_2 FCC_8
+ } else if (params->NrChannels > 2 && params->NrChannels <= 8) { // FCC_2 FCC_8
params->SourceFormat = LVM_MULTICHANNEL;
} else {
return -EINVAL;
@@ -531,7 +589,7 @@
/* Volume Control parameters */
params->VC_EffectLevel = 0;
- params->VC_Balance = 0;
+ params->VC_Balance = plvmConfigParams->vcBal;
/* Treble Enhancement parameters */
params->TE_OperatingMode = plvmConfigParams->trebleEnable;
@@ -667,13 +725,21 @@
return -1;
}
lvmConfigParams.samplingFreq = samplingFreq;
- } else if (!strncmp(argv[i], "-ch:", 4)) {
- const int nrChannels = atoi(argv[i] + 4);
- if (nrChannels > 8 || nrChannels < 1) {
- printf("Error: Unsupported number of channels : %d\n", nrChannels);
+ } else if (!strncmp(argv[i], "-chMask:", 8)) {
+ const int chMaskConfigIdx = atoi(argv[i] + 8);
+ if (chMaskConfigIdx < 0 || (size_t)chMaskConfigIdx >= std::size(lvmConfigChMask)) {
+ ALOGE("\nError: Unsupported Channel Mask : %d\n", chMaskConfigIdx);
return -1;
}
- lvmConfigParams.nrChannels = nrChannels;
+ const audio_channel_mask_t chMask = lvmConfigChMask[chMaskConfigIdx];
+ lvmConfigParams.chMask = chMask;
+ lvmConfigParams.nrChannels = audio_channel_count_from_out_mask(chMask);
+ } else if (!strncmp(argv[i], "-vcBal:", 7)) {
+ const int vcBalance = atoi(argv[i] + 7);
+ if (vcBalance > 96 || vcBalance < -96) {
+ ALOGE("\nError: Unsupported volume balance value: %d\n", vcBalance);
+ }
+ lvmConfigParams.vcBal = vcBalance;
} else if (!strncmp(argv[i], "-fch:", 5)) {
const int fChannels = atoi(argv[i] + 5);
if (fChannels > 8 || fChannels < 1) {
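
With the new -chMask option, lvmtest takes an index into lvmConfigChMask and derives the channel count from the selected mask instead of accepting a raw channel count. A minimal sketch of that lookup, assuming a mask table like the one defined above and audio_channel_count_from_out_mask() from <system/audio.h>; the helper name is illustrative:

    #include <cstddef>
    #include <system/audio.h>

    // Sketch only: map a -chMask index to its channel mask and channel count,
    // e.g. index 14 (AUDIO_CHANNEL_OUT_5POINT1) yields 6 channels.
    static bool selectChannelConfig(const audio_channel_mask_t *table, size_t tableSize,
                                    int idx, audio_channel_mask_t *mask, int *count) {
        if (idx < 0 || (size_t)idx >= tableSize) return false;   // same range check as lvmtest
        *mask = table[idx];
        *count = (int)audio_channel_count_from_out_mask(*mask);
        return true;
    }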
diff --git a/media/libeffects/lvm/tests/snr.cpp b/media/libeffects/lvm/tests/snr.cpp
index 88110c0..885994c 100644
--- a/media/libeffects/lvm/tests/snr.cpp
+++ b/media/libeffects/lvm/tests/snr.cpp
@@ -84,6 +84,7 @@
printf("\nError: missing input/reference files\n");
return -1;
}
+ int ret = EXIT_SUCCESS;
auto sn = pcm_format == 0
? getSignalNoise<short>(finp, fref)
: getSignalNoise<float>(finp, fref);
@@ -92,6 +93,7 @@
// compare the measured snr value with threshold
if (snr < thr) {
printf("%.6f less than threshold %.6f\n", snr, thr);
+ ret = EXIT_FAILURE;
} else {
printf("%.6f\n", snr);
}
@@ -99,5 +101,5 @@
fclose(finp);
fclose(fref);
- return 0;
+ return ret;
}
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 0c6f8de..3a97905 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -1315,6 +1315,7 @@
#ifdef SUPPORT_MC
ActiveParams.NrChannels = NrChannels;
+ ActiveParams.ChMask = pConfig->inputCfg.channels;
#endif
LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index f283569..a354ce1 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -66,8 +66,8 @@
ENABLE_AUDIO_DEVICE_CALLBACK,
GET_ACTIVE_MICROPHONES,
GET_PORT_ID,
- SET_MICROPHONE_DIRECTION,
- SET_MICROPHONE_FIELD_DIMENSION
+ SET_PREFERRED_MICROPHONE_DIRECTION,
+ SET_PREFERRED_MICROPHONE_FIELD_DIMENSION
};
class BpMediaRecorder: public BpInterface<IMediaRecorder>
@@ -409,21 +409,21 @@
return status;
}
- status_t setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
Parcel data, reply;
data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
data.writeInt32(direction);
- status_t status = remote()->transact(SET_MICROPHONE_DIRECTION, data, &reply);
+ status_t status = remote()->transact(SET_PREFERRED_MICROPHONE_DIRECTION, data, &reply);
return status == NO_ERROR ? (status_t)reply.readInt32() : status;
}
- status_t setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+ status_t setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
Parcel data, reply;
data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
data.writeFloat(zoom);
- status_t status = remote()->transact(SET_MICROPHONE_FIELD_DIMENSION, data, &reply);
+ status_t status = remote()->transact(SET_PREFERRED_MICROPHONE_FIELD_DIMENSION, data, &reply);
return status == NO_ERROR ? (status_t)reply.readInt32() : status;
}
@@ -709,20 +709,20 @@
}
return NO_ERROR;
}
- case SET_MICROPHONE_DIRECTION: {
- ALOGV("SET_MICROPHONE_DIRECTION");
+ case SET_PREFERRED_MICROPHONE_DIRECTION: {
+ ALOGV("SET_PREFERRED_MICROPHONE_DIRECTION");
CHECK_INTERFACE(IMediaRecorder, data, reply);
int direction = data.readInt32();
- status_t status =
- setMicrophoneDirection(static_cast<audio_microphone_direction_t>(direction));
+ status_t status = setPreferredMicrophoneDirection(
+ static_cast<audio_microphone_direction_t>(direction));
reply->writeInt32(status);
return NO_ERROR;
}
- case SET_MICROPHONE_FIELD_DIMENSION: {
+ case SET_PREFERRED_MICROPHONE_FIELD_DIMENSION: {
ALOGV("SET_MICROPHONE_FIELD_DIMENSION");
CHECK_INTERFACE(IMediaRecorder, data, reply);
float zoom = data.readFloat();
- status_t status = setMicrophoneFieldDimension(zoom);
+ status_t status = setPreferredMicrophoneFieldDimension(zoom);
reply->writeInt32(status);
return NO_ERROR;
}
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
index ea0547c..c150407 100644
--- a/media/libmedia/NdkWrapper.cpp
+++ b/media/libmedia/NdkWrapper.cpp
@@ -65,6 +65,7 @@
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL,
AMEDIAFORMAT_KEY_GRID_COLUMNS,
AMEDIAFORMAT_KEY_GRID_ROWS,
+ AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT,
AMEDIAFORMAT_KEY_HEIGHT,
AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD,
AMEDIAFORMAT_KEY_IS_ADTS,
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index 469c5b6..5be78d1 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -392,7 +392,8 @@
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_BYPASS_MUTE),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_LOW_LATENCY),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_DEEP_BUFFER),
- MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_CAPTURE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_MEDIA_PROJECTION),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_SYSTEM_CAPTURE),
TERMINATOR
};
diff --git a/media/libmedia/include/media/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
index 0b09420..f9c557c 100644
--- a/media/libmedia/include/media/IMediaRecorder.h
+++ b/media/libmedia/include/media/IMediaRecorder.h
@@ -73,8 +73,8 @@
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) = 0;
- virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) = 0;
};
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
index 88282ac..a2dff31 100644
--- a/media/libmedia/include/media/MediaRecorderBase.h
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -72,8 +72,8 @@
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) = 0;
- virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) const = 0;
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index 8580437..2dd4b7f 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -264,8 +264,8 @@
status_t getRoutedDeviceId(audio_port_handle_t *deviceId);
status_t enableAudioDeviceCallback(bool enabled);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const;
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 6c59a29..4570af9 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -842,14 +842,14 @@
return mMediaRecorder->getActiveMicrophones(activeMicrophones);
}
-status_t MediaRecorder::setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
- return mMediaRecorder->setMicrophoneDirection(direction);
+status_t MediaRecorder::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
+ return mMediaRecorder->setPreferredMicrophoneDirection(direction);
}
-status_t MediaRecorder::setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
- return mMediaRecorder->setMicrophoneFieldDimension(zoom);
+status_t MediaRecorder::setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
+ return mMediaRecorder->setPreferredMicrophoneFieldDimension(zoom);
}
status_t MediaRecorder::getPortId(audio_port_handle_t *portId) const
diff --git a/media/libmedia/xsd/api/current.txt b/media/libmedia/xsd/api/current.txt
index 0924dd9..05e8a49 100644
--- a/media/libmedia/xsd/api/current.txt
+++ b/media/libmedia/xsd/api/current.txt
@@ -45,10 +45,17 @@
ctor public CamcorderProfiles();
method public int getCameraId();
method public java.util.List<media.profiles.EncoderProfile> getEncoderProfile();
+ method public java.util.List<media.profiles.CamcorderProfiles.ImageDecoding> getImageDecoding();
method public java.util.List<media.profiles.CamcorderProfiles.ImageEncoding> getImageEncoding();
method public void setCameraId(int);
}
+ public static class CamcorderProfiles.ImageDecoding {
+ ctor public CamcorderProfiles.ImageDecoding();
+ method public int getMemCap();
+ method public void setMemCap(int);
+ }
+
public static class CamcorderProfiles.ImageEncoding {
ctor public CamcorderProfiles.ImageEncoding();
method public int getQuality();
diff --git a/media/libmedia/xsd/media_profiles.xsd b/media/libmedia/xsd/media_profiles.xsd
index a9687b0..a02252a 100644
--- a/media/libmedia/xsd/media_profiles.xsd
+++ b/media/libmedia/xsd/media_profiles.xsd
@@ -42,6 +42,11 @@
<xs:attribute name="quality" type="xs:int"/>
</xs:complexType>
</xs:element>
+ <xs:element name="ImageDecoding" minOccurs="0" maxOccurs="unbounded">
+ <xs:complexType>
+ <xs:attribute name="memCap" type="xs:int"/>
+ </xs:complexType>
+ </xs:element>
</xs:sequence>
<xs:attribute name="cameraId" type="xs:int"/>
</xs:complexType>
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index d6628d9..9f4265b 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -538,18 +538,19 @@
return NO_INIT;
}
-status_t MediaRecorderClient::setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
+status_t MediaRecorderClient::setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
if (mRecorder != NULL) {
- return mRecorder->setMicrophoneDirection(direction);
+ return mRecorder->setPreferredMicrophoneDirection(direction);
}
return NO_INIT;
}
-status_t MediaRecorderClient::setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+status_t MediaRecorderClient::setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
if (mRecorder != NULL) {
- return mRecorder->setMicrophoneFieldDimension(zoom);
+ return mRecorder->setPreferredMicrophoneFieldDimension(zoom);
}
return NO_INIT;
}
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 8da718f..e698819 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -109,8 +109,8 @@
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones);
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- virtual status_t setMicrophoneFieldDimension(float zoom);
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) override;
private:
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 77777b8..63681fa 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -164,9 +164,12 @@
mAnalyticsItem->setInt32(kRecorderVideoIframeInterval, mIFramesIntervalSec);
// TBD mAudioSourceNode = 0;
// TBD mUse64BitFileOffset = false;
- mAnalyticsItem->setInt32(kRecorderMovieTimescale, mMovieTimeScale);
- mAnalyticsItem->setInt32(kRecorderAudioTimescale, mAudioTimeScale);
- mAnalyticsItem->setInt32(kRecorderVideoTimescale, mVideoTimeScale);
+ if (mMovieTimeScale != -1)
+ mAnalyticsItem->setInt32(kRecorderMovieTimescale, mMovieTimeScale);
+ if (mAudioTimeScale != -1)
+ mAnalyticsItem->setInt32(kRecorderAudioTimescale, mAudioTimeScale);
+ if (mVideoTimeScale != -1)
+ mAnalyticsItem->setInt32(kRecorderVideoTimescale, mVideoTimeScale);
// TBD mCameraId = 0;
// TBD mStartTimeOffsetMs = -1;
mAnalyticsItem->setInt32(kRecorderVideoProfile, mVideoEncoderProfile);
@@ -2210,7 +2213,7 @@
}
status_t StagefrightRecorder::getMetrics(Parcel *reply) {
- ALOGD("StagefrightRecorder::getMetrics");
+ ALOGV("StagefrightRecorder::getMetrics");
if (reply == NULL) {
ALOGE("Null pointer argument");
@@ -2274,20 +2277,20 @@
return NO_INIT;
}
-status_t StagefrightRecorder::setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
+status_t StagefrightRecorder::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
mSelectedMicDirection = direction;
if (mAudioSourceNode != 0) {
- return mAudioSourceNode->setMicrophoneDirection(direction);
+ return mAudioSourceNode->setPreferredMicrophoneDirection(direction);
}
return NO_INIT;
}
-status_t StagefrightRecorder::setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+status_t StagefrightRecorder::setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
mSelectedMicFieldDimension = zoom;
if (mAudioSourceNode != 0) {
- return mAudioSourceNode->setMicrophoneFieldDimension(zoom);
+ return mAudioSourceNode->setPreferredMicrophoneFieldDimension(zoom);
}
return NO_INIT;
}
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 236b19e..8bf083a 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -77,8 +77,8 @@
virtual void setAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- virtual status_t setMicrophoneFieldDimension(float zoom);
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const override;
private:
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index f00c895..cf1a6f1 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -569,7 +569,6 @@
mFps(-1.0),
mCaptureFps(-1.0),
mCreateInputBuffersSuspended(false),
- mLatency(0),
mTunneled(false),
mDescribeColorAspectsIndex((OMX_INDEXTYPE)0),
mDescribeHDRStaticInfoIndex((OMX_INDEXTYPE)0),
@@ -4425,12 +4424,13 @@
h264type.eProfile == OMX_VIDEO_AVCProfileHigh) {
h264type.nSliceHeaderSpacing = 0;
h264type.bUseHadamard = OMX_TRUE;
- h264type.nRefFrames = 2;
- h264type.nBFrames = mLatency == 0 ? 1 : std::min(1U, mLatency - 1);
-
- // disable B-frames until we have explicit settings for enabling the feature.
- h264type.nRefFrames = 1;
- h264type.nBFrames = 0;
+ int32_t maxBframes = 0;
+ (void)msg->findInt32(KEY_MAX_B_FRAMES, &maxBframes);
+ h264type.nBFrames = uint32_t(maxBframes);
+ if (mLatency && h264type.nBFrames > *mLatency) {
+ h264type.nBFrames = *mLatency;
+ }
+ h264type.nRefFrames = h264type.nBFrames == 0 ? 1 : 2;
h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
h264type.nAllowedPictureTypes =
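
The ACodec change above reads KEY_MAX_B_FRAMES ("max-bframes") from the configuring format, caps it by the configured latency when one has been set (mLatency is now a std::optional), and picks nRefFrames to match. A condensed sketch of that decision, with illustrative names:

    #include <cstdint>
    #include <optional>

    // Sketch only: derive B-frame and reference-frame counts from the requested
    // maximum and an optional latency cap, as the AVC setup above does.
    static void chooseBFrames(int32_t requestedMaxBFrames, std::optional<uint32_t> latency,
                              uint32_t *nBFrames, uint32_t *nRefFrames) {
        uint32_t b = requestedMaxBFrames > 0 ? uint32_t(requestedMaxBFrames) : 0u;
        if (latency && b > *latency) {
            b = *latency;                 // B-frames add reordering delay; honor the cap
        }
        *nBFrames = b;
        *nRefFrames = (b == 0) ? 1 : 2;   // B-frames need a second reference frame
    }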
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 5f86bd3..5194e03 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -510,18 +510,18 @@
return NO_INIT;
}
-status_t AudioSource::setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
+status_t AudioSource::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
if (mRecord != 0) {
- return mRecord->setMicrophoneDirection(direction);
+ return mRecord->setPreferredMicrophoneDirection(direction);
}
return NO_INIT;
}
-status_t AudioSource::setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+status_t AudioSource::setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
if (mRecord != 0) {
- return mRecord->setMicrophoneFieldDimension(zoom);
+ return mRecord->setPreferredMicrophoneFieldDimension(zoom);
}
return NO_INIT;
}
diff --git a/media/libstagefright/MetaDataUtils.cpp b/media/libstagefright/MetaDataUtils.cpp
index dbc287e..3f0bc7d 100644
--- a/media/libstagefright/MetaDataUtils.cpp
+++ b/media/libstagefright/MetaDataUtils.cpp
@@ -309,7 +309,6 @@
void parseVorbisComment(
AMediaFormat *fileMeta, const char *comment, size_t commentLength) {
// Haptic tag is only kept here as it will only be used in extractor to generate channel mask.
- const char* const haptic = "haptic";
struct {
const char *const mTag;
const char *mKey;
@@ -330,7 +329,7 @@
{ "LYRICIST", AMEDIAFORMAT_KEY_LYRICIST },
{ "METADATA_BLOCK_PICTURE", AMEDIAFORMAT_KEY_ALBUMART },
{ "ANDROID_LOOP", AMEDIAFORMAT_KEY_LOOP },
- { "ANDROID_HAPTIC", haptic },
+ { "ANDROID_HAPTIC", AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT },
};
for (size_t j = 0; j < sizeof(kMap) / sizeof(kMap[0]); ++j) {
@@ -346,12 +345,12 @@
if (!strcasecmp(&comment[tagLen + 1], "true")) {
AMediaFormat_setInt32(fileMeta, AMEDIAFORMAT_KEY_LOOP, 1);
}
- } else if (kMap[j].mKey == haptic) {
+ } else if (kMap[j].mKey == AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT) {
char *end;
errno = 0;
const int hapticChannelCount = strtol(&comment[tagLen + 1], &end, 10);
if (errno == 0) {
- AMediaFormat_setInt32(fileMeta, haptic, hapticChannelCount);
+ AMediaFormat_setInt32(fileMeta, kMap[j].mKey, hapticChannelCount);
} else {
ALOGE("Error(%d) when parsing haptic channel count", errno);
}
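
parseVorbisComment above now keys the haptic tag directly on AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT instead of a file-local string. A minimal sketch of the value parsing it relies on (strtol with an errno check), using hypothetical names:

    #include <cerrno>
    #include <cstdlib>

    // Sketch only: parse the numeric value of an "ANDROID_HAPTIC=<n>" comment,
    // rejecting values strtol could not convert.
    static bool parseHapticChannelCount(const char *value, int *outCount) {
        char *end = nullptr;
        errno = 0;
        const long count = strtol(value, &end, 10);
        if (errno != 0 || end == value) return false;   // out of range or no digits
        *outCount = (int)count;
        return true;
    }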
diff --git a/media/libstagefright/RemoteMediaExtractor.cpp b/media/libstagefright/RemoteMediaExtractor.cpp
index b0ce688..29c3a35 100644
--- a/media/libstagefright/RemoteMediaExtractor.cpp
+++ b/media/libstagefright/RemoteMediaExtractor.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "RemoteMediaExtractor"
#include <utils/Log.h>
+#include <binder/IPCThreadState.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/MediaAnalyticsItem.h>
#include <media/MediaSource.h>
@@ -51,6 +52,11 @@
if (MEDIA_LOG) {
mAnalyticsItem = MediaAnalyticsItem::create(kKeyExtractor);
+ // we're in the extractor service, we want to attribute to the app
+ // that invoked us.
+ int uid = IPCThreadState::self()->getCallingUid();
+ mAnalyticsItem->setUid(uid);
+
// track the container format (mpeg, aac, wvm, etc)
size_t ntracks = extractor->countTracks();
mAnalyticsItem->setCString(kExtractorFormat, extractor->name());
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 2aa9ed8..c7b2719 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -967,6 +967,11 @@
if (meta->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
msg->setInt32("pcm-encoding", pcmEncoding);
}
+
+ int32_t hapticChannelCount;
+ if (meta->findInt32(kKeyHapticChannelCount, &hapticChannelCount)) {
+ msg->setInt32("haptic-channel-count", hapticChannelCount);
+ }
}
int32_t maxInputSize;
@@ -1708,6 +1713,11 @@
if (msg->findInt32("pcm-encoding", &pcmEncoding)) {
meta->setInt32(kKeyPcmEncoding, pcmEncoding);
}
+
+ int32_t hapticChannelCount;
+ if (msg->findInt32("haptic-channel-count", &hapticChannelCount)) {
+ meta->setInt32(kKeyHapticChannelCount, hapticChannelCount);
+ }
}
int32_t maxInputSize;
diff --git a/media/libstagefright/codecs/aacdec/Android.bp b/media/libstagefright/codecs/aacdec/Android.bp
index 25628a2..e0bb5cd 100644
--- a/media/libstagefright/codecs/aacdec/Android.bp
+++ b/media/libstagefright/codecs/aacdec/Android.bp
@@ -29,12 +29,10 @@
static_libs: ["libFraunhoferAAC"],
+ defaults: ["omx_soft_libs"],
+
shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
"libcutils",
- "liblog",
],
compile_multilib: "32",
}
diff --git a/media/libstagefright/codecs/aacenc/Android.bp b/media/libstagefright/codecs/aacenc/Android.bp
index ec1151b..0d677fe 100644
--- a/media/libstagefright/codecs/aacenc/Android.bp
+++ b/media/libstagefright/codecs/aacenc/Android.bp
@@ -26,11 +26,7 @@
static_libs: ["libFraunhoferAAC"],
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
+
compile_multilib: "32",
}
diff --git a/media/libstagefright/codecs/amrnb/common/src/lsp.cpp b/media/libstagefright/codecs/amrnb/common/src/lsp.cpp
index 0e3f772..81d9cde 100644
--- a/media/libstagefright/codecs/amrnb/common/src/lsp.cpp
+++ b/media/libstagefright/codecs/amrnb/common/src/lsp.cpp
@@ -173,7 +173,7 @@
*st = NULL;
/* allocate memory */
- if ((s = (lspState *) malloc(sizeof(lspState))) == NULL)
+ if ((s = (lspState *) calloc(sizeof(lspState), 1)) == NULL)
{
/* fprintf(stderr, "lsp_init: can not malloc state structure\n"); */
return -1;
@@ -182,11 +182,13 @@
/* Initialize quantization state */
if (0 != Q_plsf_init(&s->qSt))
{
+ lsp_exit(&s);
return -1;
}
if (0 != lsp_reset(s))
{
+ lsp_exit(&s);
return -1;
}
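
lsp_init above now zero-allocates its state with calloc and calls lsp_exit on either failing sub-initialization so the partially constructed state is freed instead of leaked. A minimal sketch of that init-with-cleanup pattern, with hypothetical names:

    #include <cstdlib>

    struct State { int a; int b; };
    static int subInitA(State *) { return 0; }   // hypothetical sub-initializers
    static int subInitB(State *) { return 0; }
    static void stateExit(State **st) { free(*st); *st = nullptr; }

    // Sketch only: allocate zeroed, and tear down on any failed sub-init.
    static int stateInit(State **st) {
        State *s = (State *)calloc(1, sizeof(State));
        if (s == nullptr) return -1;
        if (subInitA(s) != 0 || subInitB(s) != 0) {
            stateExit(&s);        // release partial state instead of leaking it
            return -1;
        }
        *st = s;
        return 0;
    }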
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.bp b/media/libstagefright/codecs/amrnb/dec/Android.bp
index 880f161..f3b272b 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.bp
+++ b/media/libstagefright/codecs/amrnb/dec/Android.bp
@@ -101,11 +101,9 @@
"libstagefright_amrwbdec",
],
+ defaults: ["omx_soft_libs"],
+
shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
"libstagefright_amrnb_common",
],
compile_multilib: "32",
diff --git a/media/libstagefright/codecs/amrnb/dec/src/sp_dec.cpp b/media/libstagefright/codecs/amrnb/dec/src/sp_dec.cpp
index 2989b74..49cafff 100644
--- a/media/libstagefright/codecs/amrnb/dec/src/sp_dec.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/src/sp_dec.cpp
@@ -268,13 +268,7 @@
if (Decoder_amr_init(&s->decoder_amrState)
|| Post_Process_reset(&s->postHP_state))
{
- Speech_Decode_FrameState *tmp = s;
- /*
- * dereferencing type-punned pointer avoid
- * breaking strict-aliasing rules
- */
- void** tempVoid = (void**) tmp;
- GSMDecodeFrameExit(tempVoid);
+ free(s);
return (-1);
}
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.bp b/media/libstagefright/codecs/amrnb/enc/Android.bp
index 19fd4a8..1c8b511 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.bp
+++ b/media/libstagefright/codecs/amrnb/enc/Android.bp
@@ -110,11 +110,9 @@
static_libs: ["libstagefright_amrnbenc"],
+ defaults: ["omx_soft_libs"],
+
shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
"libstagefright_amrnb_common",
],
compile_multilib: "32",
diff --git a/media/libstagefright/codecs/amrwbenc/Android.bp b/media/libstagefright/codecs/amrwbenc/Android.bp
index b9d45c1..8327500 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/Android.bp
@@ -167,11 +167,9 @@
static_libs: ["libstagefright_amrwbenc"],
+ defaults: ["omx_soft_libs"],
+
shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
"libstagefright_enc_common",
],
compile_multilib: "32",
diff --git a/media/libstagefright/codecs/avcdec/Android.bp b/media/libstagefright/codecs/avcdec/Android.bp
index 8a34845..567bcca 100644
--- a/media/libstagefright/codecs/avcdec/Android.bp
+++ b/media/libstagefright/codecs/avcdec/Android.bp
@@ -22,12 +22,7 @@
"frameworks/native/include/media/openmax",
],
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
sanitize: {
misc_undefined: [
diff --git a/media/libstagefright/codecs/avcenc/Android.bp b/media/libstagefright/codecs/avcenc/Android.bp
index 6371828..0cd39e1 100644
--- a/media/libstagefright/codecs/avcenc/Android.bp
+++ b/media/libstagefright/codecs/avcenc/Android.bp
@@ -16,12 +16,7 @@
"frameworks/native/include/media/openmax",
],
- shared_libs: [
- "libstagefright_foundation",
- "libstagefright_omx",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
sanitize: {
misc_undefined: [
diff --git a/media/libstagefright/codecs/flac/dec/Android.bp b/media/libstagefright/codecs/flac/dec/Android.bp
index 3d4a44f..18a3f6b 100644
--- a/media/libstagefright/codecs/flac/dec/Android.bp
+++ b/media/libstagefright/codecs/flac/dec/Android.bp
@@ -28,12 +28,10 @@
cfi: true,
},
+ defaults: ["omx_soft_libs"],
+
shared_libs: [
- "liblog",
"libstagefright_flacdec",
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
],
compile_multilib: "32",
}
diff --git a/media/libstagefright/codecs/flac/enc/Android.bp b/media/libstagefright/codecs/flac/enc/Android.bp
index b32ab08..4149ccd 100644
--- a/media/libstagefright/codecs/flac/enc/Android.bp
+++ b/media/libstagefright/codecs/flac/enc/Android.bp
@@ -19,13 +19,7 @@
],
cfi: true,
},
-
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
header_libs: ["libbase_headers"],
static_libs: [
diff --git a/media/libstagefright/codecs/g711/dec/Android.bp b/media/libstagefright/codecs/g711/dec/Android.bp
index 7097688..c273179 100644
--- a/media/libstagefright/codecs/g711/dec/Android.bp
+++ b/media/libstagefright/codecs/g711/dec/Android.bp
@@ -12,12 +12,7 @@
"frameworks/native/include/media/openmax",
],
- shared_libs: [
- "libstagefright_foundation",
- "libstagefright_omx",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
cflags: ["-Werror"],
diff --git a/media/libstagefright/codecs/gsm/dec/Android.bp b/media/libstagefright/codecs/gsm/dec/Android.bp
index a973f70..3c5ebfe 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.bp
+++ b/media/libstagefright/codecs/gsm/dec/Android.bp
@@ -25,12 +25,7 @@
cfi: true,
},
- shared_libs: [
- "libstagefright_foundation",
- "libstagefright_omx",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
static_libs: ["libgsm"],
compile_multilib: "32",
diff --git a/media/libstagefright/codecs/hevcdec/Android.bp b/media/libstagefright/codecs/hevcdec/Android.bp
index 60fc446..cc91d53 100644
--- a/media/libstagefright/codecs/hevcdec/Android.bp
+++ b/media/libstagefright/codecs/hevcdec/Android.bp
@@ -30,12 +30,7 @@
cfi: true,
},
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
// We need this because the current asm generates the following link error:
// requires unsupported dynamic reloc R_ARM_REL32; recompile with -fPIC
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.bp b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
index 41141b1..0523143 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
@@ -91,12 +91,7 @@
static_libs: ["libstagefright_m4vh263dec"],
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
sanitize: {
misc_undefined: [
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index a8fcdd1..9893c6f 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -24,7 +24,6 @@
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/IOMX.h>
#include "mp4dec_api.h"
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.bp b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
index d4f7d50..d38f4b1 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
@@ -77,12 +77,7 @@
static_libs: ["libstagefright_m4vh263enc"],
- shared_libs: [
- "libstagefright_foundation",
- "libstagefright_omx",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
sanitize: {
misc_undefined: [
diff --git a/media/libstagefright/codecs/mp3dec/Android.bp b/media/libstagefright/codecs/mp3dec/Android.bp
index 2154f84..9173ed6 100644
--- a/media/libstagefright/codecs/mp3dec/Android.bp
+++ b/media/libstagefright/codecs/mp3dec/Android.bp
@@ -105,12 +105,7 @@
cfi: true,
},
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
static_libs: ["libstagefright_mp3dec"],
compile_multilib: "32",
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.bp b/media/libstagefright/codecs/mpeg2dec/Android.bp
index c655544..26e786e 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.bp
+++ b/media/libstagefright/codecs/mpeg2dec/Android.bp
@@ -20,12 +20,7 @@
"frameworks/native/include/media/openmax",
],
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
ldflags: ["-Wl,-Bsymbolic"],
diff --git a/media/libstagefright/codecs/on2/dec/Android.bp b/media/libstagefright/codecs/on2/dec/Android.bp
index 174f183..abd21d7 100644
--- a/media/libstagefright/codecs/on2/dec/Android.bp
+++ b/media/libstagefright/codecs/on2/dec/Android.bp
@@ -14,12 +14,7 @@
static_libs: ["libvpx"],
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
cflags: ["-Werror"],
diff --git a/media/libstagefright/codecs/on2/enc/Android.bp b/media/libstagefright/codecs/on2/enc/Android.bp
index 891a771..ea46bad 100644
--- a/media/libstagefright/codecs/on2/enc/Android.bp
+++ b/media/libstagefright/codecs/on2/enc/Android.bp
@@ -30,11 +30,7 @@
static_libs: ["libvpx"],
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
+
compile_multilib: "32",
}
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
index b4904bf..c5c2abf 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
@@ -23,8 +23,6 @@
#include <OMX_VideoExt.h>
#include <OMX_IndexExt.h>
-#include <hardware/gralloc.h>
-
#include "vpx/vpx_encoder.h"
#include "vpx/vpx_codec.h"
#include "vpx/vp8cx.h"
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
index 85df69a..308a9ac 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
@@ -23,8 +23,6 @@
#include <OMX_VideoExt.h>
#include <OMX_IndexExt.h>
-#include <hardware/gralloc.h>
-
#include "vpx/vpx_encoder.h"
#include "vpx/vpx_codec.h"
#include "vpx/vp8cx.h"
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index 263d134..7208d69 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -23,8 +23,6 @@
#include <OMX_VideoExt.h>
#include <OMX_IndexExt.h>
-#include <hardware/gralloc.h>
-
#include "vpx/vpx_encoder.h"
#include "vpx/vpx_codec.h"
#include "vpx/vp8cx.h"
diff --git a/media/libstagefright/codecs/opus/dec/Android.bp b/media/libstagefright/codecs/opus/dec/Android.bp
index afe459d..bfcae07 100644
--- a/media/libstagefright/codecs/opus/dec/Android.bp
+++ b/media/libstagefright/codecs/opus/dec/Android.bp
@@ -12,12 +12,10 @@
"frameworks/native/include/media/openmax",
],
+ defaults: ["omx_soft_libs"],
+
shared_libs: [
"libopus",
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
],
cflags: ["-Werror"],
diff --git a/media/libstagefright/codecs/raw/Android.bp b/media/libstagefright/codecs/raw/Android.bp
index f822445..1c23bad 100644
--- a/media/libstagefright/codecs/raw/Android.bp
+++ b/media/libstagefright/codecs/raw/Android.bp
@@ -24,11 +24,7 @@
cfi: true,
},
- shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
- ],
+ defaults: ["omx_soft_libs"],
+
compile_multilib: "32",
}
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.bp b/media/libstagefright/codecs/vorbis/dec/Android.bp
index a9265cb..2d1a922 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.bp
+++ b/media/libstagefright/codecs/vorbis/dec/Android.bp
@@ -12,12 +12,10 @@
"frameworks/native/include/media/openmax",
],
+ defaults: ["omx_soft_libs"],
+
shared_libs: [
"libvorbisidec",
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
- "liblog",
],
cflags: ["-Werror"],
diff --git a/media/libstagefright/codecs/xaacdec/Android.bp b/media/libstagefright/codecs/xaacdec/Android.bp
index 7392f1e..e49eb8f 100644
--- a/media/libstagefright/codecs/xaacdec/Android.bp
+++ b/media/libstagefright/codecs/xaacdec/Android.bp
@@ -24,12 +24,10 @@
static_libs: ["libxaacdec"],
+ defaults: ["omx_soft_libs"],
+
shared_libs: [
- "libstagefright_omx",
- "libstagefright_foundation",
- "libutils",
"libcutils",
- "liblog",
],
compile_multilib: "32",
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index e20174f..f785bfa 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -115,6 +115,9 @@
<Limit name="block-count" range="1-4096" /> <!-- max 512x512 -->
<Limit name="blocks-per-second" range="1-122880" />
<Limit name="bitrate" range="1-10000000" />
+ <Limit name="complexity" range="0-10" default="0" />
+ <Limit name="quality" range="0-100" default="80" />
+ <Feature name="bitrate-modes" value="VBR,CBR,CQ" />
</MediaCodec>
<MediaCodec name="c2.android.mpeg4.encoder" type="video/mp4v-es">
<Alias name="OMX.google.mpeg4.encoder" />
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index d153598..c62c2cd 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -2125,7 +2125,10 @@
size_t offset = 0;
while (offset < buffer->size()) {
const uint8_t *adtsHeader = buffer->data() + offset;
- CHECK_LT(offset + 5, buffer->size());
+ if (buffer->size() <= offset+5) {
+ ALOGV("buffer does not contain a complete header");
+ return ERROR_MALFORMED;
+ }
// non-const pointer for decryption if needed
uint8_t *adtsFrame = buffer->data() + offset;
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 9d46d2d..784fd36 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -286,7 +286,7 @@
double mFps;
double mCaptureFps;
bool mCreateInputBuffersSuspended;
- uint32_t mLatency;
+ std::optional<uint32_t> mLatency;
bool mTunneled;
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index 18e5f10..af04dad 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -70,8 +70,8 @@
status_t removeAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 2dca5c3..8b6944b 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -772,6 +772,7 @@
constexpr char KEY_LANGUAGE[] = "language";
constexpr char KEY_LATENCY[] = "latency";
constexpr char KEY_LEVEL[] = "level";
+constexpr char KEY_MAX_B_FRAMES[] = "max-bframes";
constexpr char KEY_MAX_BIT_RATE[] = "max-bitrate";
constexpr char KEY_MAX_FPS_TO_ENCODER[] = "max-fps-to-encoder";
constexpr char KEY_MAX_HEIGHT[] = "max-height";
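KEY_MAX_B_FRAMES ("max-bframes") joins the shared key list used by both the Java and native MediaCodec paths. A hedged sketch of an encoder client capping B-frames through an sp<AMessage> format (the value 1 is only an example):

    #include <media/stagefright/MediaCodecConstants.h>
    #include <media/stagefright/foundation/AMessage.h>

    // Limit the encoder to at most one consecutive B-frame.
    static void capBFrames(const android::sp<android::AMessage> &format) {
        format->setInt32(KEY_MAX_B_FRAMES, 1);
    }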
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 75fd0d9..8dc2dd5 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -62,8 +62,6 @@
kKeyAV1C = 'av1c', // raw data
kKeyThumbnailHVCC = 'thvc', // raw data
kKeyD263 = 'd263', // raw data
- kKeyVorbisInfo = 'vinf', // raw data
- kKeyVorbisBooks = 'vboo', // raw data
kKeyOpusHeader = 'ohdr', // raw data
kKeyOpusCodecDelay = 'ocod', // uint64_t (codec delay in ns)
kKeyOpusSeekPreRoll = 'ospr', // uint64_t (seek preroll in ns)
@@ -238,6 +236,8 @@
kKeyOpaqueCSD0 = 'csd0',
kKeyOpaqueCSD1 = 'csd1',
kKeyOpaqueCSD2 = 'csd2',
+
+ kKeyHapticChannelCount = 'hapC',
};
enum {
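The new kKeyHapticChannelCount ('hapC') key lets a track advertise how many haptic channels it carries. A minimal sketch of a producer tagging a track with it (an int32 value type is assumed):

    #include <media/stagefright/MetaDataBase.h>

    static void tagHapticChannels(android::MetaDataBase &meta, int32_t hapticChannels) {
        meta.setInt32(android::kKeyHapticChannelCount, hapticChannels);
    }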
diff --git a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
index e32f676..7d446ab 100644
--- a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
+++ b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
@@ -149,6 +149,11 @@
}
// ADTS header is included in the size
+ if (size < adtsHdrSize) {
+ ALOGV("processAAC: size (%zu) < adtsHdrSize (%zu)", size, adtsHdrSize);
+ android_errorWriteLog(0x534e4554, "128433933");
+ return;
+ }
size_t offset = adtsHdrSize;
size_t remainingBytes = size - adtsHdrSize;
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 4383004..b8f9aea 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -11,6 +11,8 @@
"OMXNodeInstance.cpp",
"OMXUtils.cpp",
"OmxGraphicBufferSource.cpp",
+ // TODO: remove the soft component code here and use
+ // libstagefright_omx_soft, once the partner build is fixed

"SimpleSoftOMXComponent.cpp",
"SoftOMXComponent.cpp",
"SoftOMXPlugin.cpp",
@@ -56,6 +58,7 @@
"libvndksupport",
"android.hardware.media.omx@1.0",
"android.hardware.graphics.bufferqueue@1.0",
+ //"libstagefright_omx_soft",
],
export_shared_lib_headers: [
@@ -81,6 +84,64 @@
},
}
+cc_defaults {
+ name: "omx_soft_libs",
+ shared_libs: [
+ "libutils",
+ "liblog",
+ "libstagefright_foundation",
+ "libstagefright_omx_soft",
+ ],
+}
+
+cc_library_shared {
+ name: "libstagefright_omx_soft",
+ vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
+
+ srcs: [
+ "SimpleSoftOMXComponent.cpp",
+ "SoftOMXComponent.cpp",
+ "SoftOMXPlugin.cpp",
+ "SoftVideoDecoderOMXComponent.cpp",
+ "SoftVideoEncoderOMXComponent.cpp",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ shared_libs: [
+ "libutils",
+ "liblog",
+ "libui",
+ "libstagefright_foundation",
+ ],
+
+ export_shared_lib_headers: [
+ "libstagefright_foundation",
+ "libutils",
+ "liblog",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-Wno-unused-parameter",
+ "-Wno-documentation",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "signed-integer-overflow",
+ "unsigned-integer-overflow",
+ ],
+ cfi: true,
+ },
+}
+
cc_library_shared {
name: "libstagefright_omx_utils",
vendor_available: true,
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index 2fbbb44..d75acda 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -559,7 +559,7 @@
if (nativeMeta.nFenceFd >= 0) {
sp<Fence> fence = new Fence(nativeMeta.nFenceFd);
nativeMeta.nFenceFd = -1;
- status_t err = fence->wait(IOMX::kFenceTimeoutMs);
+ status_t err = fence->wait(kFenceTimeoutMs);
if (err != OK) {
ALOGE("Timed out waiting on input fence");
return NULL;
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h
index 3ab6f88..79f0c77 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftOMXComponent.h
@@ -21,12 +21,15 @@
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AString.h>
#include <utils/RefBase.h>
-
+#include <utils/Log.h>
#include <OMX_Component.h>
namespace android {
struct SoftOMXComponent : public RefBase {
+ enum {
+ kFenceTimeoutMs = 1000
+ };
SoftOMXComponent(
const char *name,
const OMX_CALLBACKTYPE *callbacks,
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
index 3b381ce..d7c1658 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
@@ -23,7 +23,10 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AHandlerReflector.h>
#include <media/stagefright/foundation/ColorUtils.h>
-#include <media/IOMX.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/openmax/OMX_Video.h>
+#include <media/openmax/OMX_VideoExt.h>
+
#include <media/hardware/HardwareAPI.h>
#include <utils/RefBase.h>
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
index 2d6f31b..9cb72dd 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
@@ -18,7 +18,9 @@
#define SOFT_VIDEO_ENCODER_OMX_COMPONENT_H_
-#include <media/IOMX.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/openmax/OMX_Video.h>
+#include <media/openmax/OMX_VideoExt.h>
#include "SimpleSoftOMXComponent.h"
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index b0a303e..26e0884 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -177,8 +177,8 @@
const void *headerData3;
size_t headerSize1, headerSize2 = sizeof(headerData2), headerSize3;
- if (!md->findData(kKeyVorbisInfo, &type, &headerData1, &headerSize1)
- || !md->findData(kKeyVorbisBooks, &type, &headerData3, &headerSize3)) {
+ if (!md->findData(kKeyOpaqueCSD0, &type, &headerData1, &headerSize1)
+ || !md->findData(kKeyOpaqueCSD1, &type, &headerData3, &headerSize3)) {
ALOGE("Missing header format keys for vorbis track");
md->dumpToLog();
return NULL;
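With kKeyVorbisInfo/kKeyVorbisBooks gone, WebmWriter expects the Vorbis identification and codebook headers under the generic opaque-CSD keys. A hedged sketch of the producing side implied by this lookup (the type argument is shown as 0 for illustration):

    #include <media/stagefright/MetaDataBase.h>

    static void publishVorbisHeaders(android::MetaDataBase &meta,
                                     const void *infoHdr, size_t infoSize,
                                     const void *booksHdr, size_t booksSize) {
        meta.setData(android::kKeyOpaqueCSD0, 0 /* type */, infoHdr, infoSize);
        meta.setData(android::kKeyOpaqueCSD1, 0 /* type */, booksHdr, booksSize);
    }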
diff --git a/media/libstagefright/xmlparser/api/current.txt b/media/libstagefright/xmlparser/api/current.txt
index f5245c1..5443f2c 100644
--- a/media/libstagefright/xmlparser/api/current.txt
+++ b/media/libstagefright/xmlparser/api/current.txt
@@ -1,6 +1,12 @@
// Signature format: 2.0
package media.codecs {
+ public class Alias {
+ ctor public Alias();
+ method public String getName();
+ method public void setName(String);
+ }
+
public class Decoders {
ctor public Decoders();
method public java.util.List<media.codecs.MediaCodec> getMediaCodec();
@@ -23,6 +29,23 @@
method public void setValue(String);
}
+ public class Include {
+ ctor public Include();
+ method public String getHref();
+ method public void setHref(String);
+ }
+
+ public class Included {
+ ctor public Included();
+ method public media.codecs.Decoders getDecoders_optional();
+ method public media.codecs.Encoders getEncoders_optional();
+ method public java.util.List<media.codecs.Include> getInclude_optional();
+ method public media.codecs.Settings getSettings_optional();
+ method public void setDecoders_optional(media.codecs.Decoders);
+ method public void setEncoders_optional(media.codecs.Encoders);
+ method public void setSettings_optional(media.codecs.Settings);
+ }
+
public class Limit {
ctor public Limit();
method public String getIn();
@@ -47,12 +70,13 @@
public class MediaCodec {
ctor public MediaCodec();
- method public java.util.List<media.codecs.Feature> getFeature();
- method public java.util.List<media.codecs.Limit> getLimit();
+ method public java.util.List<media.codecs.Alias> getAlias_optional();
+ method public java.util.List<media.codecs.Feature> getFeature_optional();
+ method public java.util.List<media.codecs.Limit> getLimit_optional();
method public String getName();
- method public java.util.List<media.codecs.Quirk> getQuirk();
- method public java.util.List<media.codecs.Type> getType();
+ method public java.util.List<media.codecs.Quirk> getQuirk_optional();
method public String getType();
+ method public java.util.List<media.codecs.Type> getType_optional();
method public String getUpdate();
method public void setName(String);
method public void setType(String);
@@ -61,9 +85,13 @@
public class MediaCodecs {
ctor public MediaCodecs();
- method public java.util.List<media.codecs.Decoders> getDecoders();
- method public java.util.List<media.codecs.Encoders> getEncoders();
- method public java.util.List<media.codecs.Settings> getSettings();
+ method public media.codecs.Decoders getDecoders_optional();
+ method public media.codecs.Encoders getEncoders_optional();
+ method public java.util.List<media.codecs.Include> getInclude_optional();
+ method public media.codecs.Settings getSettings_optional();
+ method public void setDecoders_optional(media.codecs.Decoders);
+ method public void setEncoders_optional(media.codecs.Encoders);
+ method public void setSettings_optional(media.codecs.Settings);
}
public class Quirk {
@@ -89,6 +117,7 @@
public class Type {
ctor public Type();
+ method public java.util.List<media.codecs.Alias> getAlias();
method public java.util.List<media.codecs.Feature> getFeature();
method public java.util.List<media.codecs.Limit> getLimit();
method public String getName();
@@ -99,7 +128,8 @@
public class XmlParser {
ctor public XmlParser();
- method public static media.codecs.MediaCodecs read(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ method public static media.codecs.Included readIncluded(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ method public static media.codecs.MediaCodecs readMediaCodecs(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
method public static String readText(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
method public static void skip(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
}
diff --git a/media/libstagefright/xmlparser/media_codecs.xsd b/media/libstagefright/xmlparser/media_codecs.xsd
index 4faba87..77193a2 100644
--- a/media/libstagefright/xmlparser/media_codecs.xsd
+++ b/media/libstagefright/xmlparser/media_codecs.xsd
@@ -20,11 +20,22 @@
xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="MediaCodecs">
<xs:complexType>
- <xs:sequence>
- <xs:element name="Decoders" type="Decoders" maxOccurs="unbounded"/>
- <xs:element name="Encoders" type="Encoders" maxOccurs="unbounded"/>
- <xs:element name="Settings" type="Settings" maxOccurs="unbounded"/>
- </xs:sequence>
+ <xs:choice minOccurs="0" maxOccurs="unbounded">
+ <xs:element name="Include" type="Include" maxOccurs="unbounded"/>
+ <xs:element name="Settings" type="Settings"/>
+ <xs:element name="Decoders" type="Decoders"/>
+ <xs:element name="Encoders" type="Encoders"/>
+ </xs:choice>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="Included">
+ <xs:complexType>
+ <xs:choice minOccurs="0" maxOccurs="unbounded">
+ <xs:element name="Include" type="Include" maxOccurs="unbounded"/>
+ <xs:element name="Settings" type="Settings"/>
+ <xs:element name="Decoders" type="Decoders"/>
+ <xs:element name="Encoders" type="Encoders"/>
+ </xs:choice>
</xs:complexType>
</xs:element>
<xs:complexType name="Decoders">
@@ -43,12 +54,13 @@
</xs:sequence>
</xs:complexType>
<xs:complexType name="MediaCodec">
- <xs:sequence>
- <xs:element name="Quirk" type="Quirk" maxOccurs="unbounded"/>
- <xs:element name="Type" type="Type" maxOccurs="unbounded"/>
- <xs:element name="Limit" type="Limit" maxOccurs="unbounded"/>
- <xs:element name="Feature" type="Feature" maxOccurs="unbounded"/>
- </xs:sequence>
+ <xs:choice minOccurs="0" maxOccurs="unbounded">
+ <xs:element name="Quirk" type="Quirk" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Type" type="Type" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Alias" type="Alias" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
+ </xs:choice>
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="type" type="xs:string"/>
<xs:attribute name="update" type="xs:string"/>
@@ -58,12 +70,16 @@
</xs:complexType>
<xs:complexType name="Type">
<xs:sequence>
- <xs:element name="Limit" type="Limit" maxOccurs="unbounded"/>
- <xs:element name="Feature" type="Feature" maxOccurs="unbounded"/>
+ <xs:element name="Alias" type="Alias" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="update" type="xs:string"/>
</xs:complexType>
+ <xs:complexType name="Alias">
+ <xs:attribute name="name" type="xs:string"/>
+ </xs:complexType>
<xs:complexType name="Limit">
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="default" type="xs:string"/>
@@ -86,4 +102,7 @@
<xs:attribute name="value" type="xs:string"/>
<xs:attribute name="update" type="xs:string"/>
</xs:complexType>
+ <xs:complexType name="Include">
+ <xs:attribute name="href" type="xs:string"/>
+ </xs:complexType>
</xs:schema>
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index f4cc704..a4f5730 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -73,6 +73,7 @@
shared_libs: [
"android.hardware.graphics.bufferqueue@1.0",
"android.hidl.token@1.0-utils",
+ "libandroid_runtime_lazy",
"libbinder",
"libmedia",
"libmedia_omx",
@@ -93,12 +94,6 @@
"libmediandk_utils",
],
- required: [
- // libmediandk may be used by Java and non-Java things. When lower-level things use it,
- // they shouldn't have to take on the cost of loading libandroid_runtime.
- "libandroid_runtime",
- ],
-
export_include_dirs: ["include"],
export_shared_lib_headers: [
diff --git a/media/ndk/NdkMediaDataSource.cpp b/media/ndk/NdkMediaDataSource.cpp
index 0891f2a..7979c2f 100644
--- a/media/ndk/NdkMediaDataSource.cpp
+++ b/media/ndk/NdkMediaDataSource.cpp
@@ -23,7 +23,8 @@
#include <jni.h>
#include <unistd.h>
-#include <binder/IBinder.h>
+#include <android_runtime/AndroidRuntime.h>
+#include <android_util_Binder.h>
#include <cutils/properties.h>
#include <utils/Log.h>
#include <utils/StrongPointer.h>
@@ -39,67 +40,9 @@
#include "../../libstagefright/include/NuCachedSource2.h"
#include "NdkMediaDataSourceCallbacksPriv.h"
-#include <mutex> // std::call_once,once_flag
-#include <dlfcn.h> // dlopen
using namespace android;
-// load libandroid_runtime.so lazily.
-// A vendor process may use libmediandk but should not depend on libandroid_runtime.
-// TODO(jooyung): remove duplicate (b/125550121)
-// frameworks/native/libs/binder/ndk/ibinder_jni.cpp
-namespace {
-
-typedef JNIEnv* (*getJNIEnv_t)();
-typedef sp<IBinder> (*ibinderForJavaObject_t)(JNIEnv* env, jobject obj);
-
-getJNIEnv_t getJNIEnv_;
-ibinderForJavaObject_t ibinderForJavaObject_;
-
-std::once_flag mLoadFlag;
-
-void load() {
- std::call_once(mLoadFlag, []() {
- void* handle = dlopen("libandroid_runtime.so", RTLD_LAZY);
- if (handle == nullptr) {
- ALOGE("Could not open libandroid_runtime.");
- return;
- }
-
- getJNIEnv_ = reinterpret_cast<getJNIEnv_t>(
- dlsym(handle, "_ZN7android14AndroidRuntime9getJNIEnvEv"));
- if (getJNIEnv_ == nullptr) {
- ALOGE("Could not find AndroidRuntime::getJNIEnv.");
- // no return
- }
-
- ibinderForJavaObject_ = reinterpret_cast<ibinderForJavaObject_t>(
- dlsym(handle, "_ZN7android20ibinderForJavaObjectEP7_JNIEnvP8_jobject"));
- if (ibinderForJavaObject_ == nullptr) {
- ALOGE("Could not find ibinderForJavaObject.");
- // no return
- }
- });
-}
-
-JNIEnv* getJNIEnv() {
- load();
- if (getJNIEnv_ == nullptr) {
- return nullptr;
- }
- return (getJNIEnv_)();
-}
-
-sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj) {
- load();
- if (ibinderForJavaObject_ == nullptr) {
- return nullptr;
- }
- return (ibinderForJavaObject_)(env, obj);
-}
-
-} // namespace
-
struct AMediaDataSource {
void *userdata;
AMediaDataSourceReadAt readAt;
@@ -181,14 +124,9 @@
if (obj == NULL) {
return NULL;
}
- sp<IBinder> binder;
switch (version) {
case 1:
- binder = ibinderForJavaObject(env, obj);
- if (binder == NULL) {
- return NULL;
- }
- return interface_cast<IMediaHTTPService>(binder);
+ return interface_cast<IMediaHTTPService>(ibinderForJavaObject(env, obj));
case 2:
return new JMedia2HTTPService(env, obj);
default:
@@ -241,7 +179,7 @@
switch (version) {
case 1:
- env = getJNIEnv();
+ env = AndroidRuntime::getJNIEnv();
clazz = "android/media/MediaHTTPService";
method = "createHttpServiceBinderIfNecessary";
signature = "(Ljava/lang/String;)Landroid/os/IBinder;";
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index ed88cf3..51138c8 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -324,6 +324,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_GENRE = "genre";
EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLUMNS = "grid-cols";
EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
+EXPORT const char* AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT = "haptic-channel-count";
EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
EXPORT const char* AMEDIAFORMAT_KEY_HDR10_PLUS_INFO = "hdr10-plus-info";
EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
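A minimal sketch of an NDK client reading the new key off a track format (AMediaFormat_getInt32 returns false when the key is absent):

    #include <media/NdkMediaFormat.h>

    // Returns false when the track carries no haptic channels.
    static bool getHapticChannelCount(AMediaFormat *trackFormat, int32_t *outCount) {
        return AMediaFormat_getInt32(trackFormat, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, outCount);
    }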
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 259481d..fd43f36 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -209,6 +209,7 @@
extern const char* AMEDIAFORMAT_KEY_EXIF_SIZE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_FRAME_COUNT __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_GENRE __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_ICC_PROFILE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_IS_SYNC_FRAME __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_LOCATION __INTRODUCED_IN(29);
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 4725e9e..f666ad0 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -89,6 +89,7 @@
AMEDIAFORMAT_KEY_GENRE; # var introduced=29
AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
+ AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT; # var introduced=29
AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
AMEDIAFORMAT_KEY_HEIGHT; # var introduced=21
AMEDIAFORMAT_KEY_ICC_PROFILE; # var introduced=29
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 2fb24f5..cb681e0 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -130,6 +130,14 @@
return ok;
}
+bool captureMediaOutputAllowed(pid_t pid, uid_t uid) {
+ if (isAudioServerOrRootUid(uid)) return true;
+ static const String16 sCaptureMediaOutput("android.permission.CAPTURE_MEDIA_OUTPUT");
+ bool ok = PermissionCache::checkPermission(sCaptureMediaOutput, pid, uid);
+ if (!ok) ALOGE("Request requires android.permission.CAPTURE_MEDIA_OUTPUT");
+ return ok;
+}
+
bool captureHotwordAllowed(pid_t pid, uid_t uid) {
// CAPTURE_AUDIO_HOTWORD permission implies RECORD_AUDIO permission
bool ok = recordingAllowed(String16(""), pid, uid);
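The new helper is used further down in this patch to gate privileged playback-capture mixes; a minimal sketch of a binder-facing caller, assuming the usual IPCThreadState identity lookup:

    #include <binder/IPCThreadState.h>
    #include <mediautils/ServiceUtilities.h>
    #include <utils/Errors.h>

    using namespace android;

    // Deny the request unless the caller holds CAPTURE_MEDIA_OUTPUT.
    static status_t guardPrivilegedCapture() {
        const uid_t uid = IPCThreadState::self()->getCallingUid();
        const pid_t pid = IPCThreadState::self()->getCallingPid();
        return captureMediaOutputAllowed(pid, uid) ? NO_ERROR : PERMISSION_DENIED;
    }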
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 94370ee..9377ff3 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -74,6 +74,7 @@
bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
void finishRecording(const String16& opPackageName, uid_t uid);
bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
+bool captureMediaOutputAllowed(pid_t pid, uid_t uid);
bool captureHotwordAllowed(pid_t pid, uid_t uid);
bool settingsAllowed();
bool modifyAudioRoutingAllowed();
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index b8f88cf..43260c2 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -3297,7 +3297,8 @@
// output threads.
// If output is 0 here, sessionId is neither SESSION_OUTPUT_STAGE nor SESSION_OUTPUT_MIX
// because of code checking output when entering the function.
- // Note: io is never 0 when creating an effect on an input
+ // Note: io is never AUDIO_IO_HANDLE_NONE when creating an effect on an input by APM.
+ // An AudioEffect created from the Java API will have io as AUDIO_IO_HANDLE_NONE.
if (io == AUDIO_IO_HANDLE_NONE) {
// look for the thread where the specified audio session is present
io = findIoHandleBySessionId_l(sessionId, mPlaybackThreads);
@@ -3307,6 +3308,25 @@
if (io == AUDIO_IO_HANDLE_NONE) {
io = findIoHandleBySessionId_l(sessionId, mMmapThreads);
}
+
+ // If you wish to create a Record preprocessing AudioEffect in Java,
+ // you MUST create an AudioRecord first and keep it alive so it is picked up above.
+ // Otherwise it will fail when created on a Playback thread by legacy
+ // handling below. Ditto with Mmap, the associated Mmap track must be created
+ // before creating the AudioEffect or the io handle must be specified.
+ //
+ // Detect if the effect is created after an AudioRecord is destroyed.
+ if (getOrphanEffectChain_l(sessionId).get() != nullptr) {
+ ALOGE("%s: effect %s with no specified io handle is denied because the AudioRecord"
+ " for session %d no longer exists",
+ __func__, desc.name, sessionId);
+ lStatus = PERMISSION_DENIED;
+ goto Exit;
+ }
+
+ // Legacy handling of creating an effect on an expired or made-up
+ // session id. We think that it is a Playback effect.
+ //
// If no output thread contains the requested session ID, default to
// first output. The effect chain will be moved to the correct output
// thread when a track with the same session ID is created
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index ec5dfb1..8ac3366 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -615,9 +615,9 @@
virtual binder::Status stop();
virtual binder::Status getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones);
- virtual binder::Status setMicrophoneDirection(
+ virtual binder::Status setPreferredMicrophoneDirection(
int /*audio_microphone_direction_t*/ direction);
- virtual binder::Status setMicrophoneFieldDimension(float zoom);
+ virtual binder::Status setPreferredMicrophoneFieldDimension(float zoom);
private:
const sp<RecordThread::RecordTrack> mRecordTrack;
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index ab4af33..ec1f86c 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -71,8 +71,8 @@
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
static bool checkServerLatencySupported(
audio_format_t format, audio_input_flags_t flags) {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3ecb37d..984d9fe 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -7245,7 +7245,7 @@
} else {
// FIXME could do a partial drop of framesOut
if (activeTrack->mFramesToDrop > 0) {
- activeTrack->mFramesToDrop -= framesOut;
+ activeTrack->mFramesToDrop -= (ssize_t)framesOut;
if (activeTrack->mFramesToDrop <= 0) {
activeTrack->clearSyncStartEvent();
}
@@ -7718,18 +7718,19 @@
return status;
}
-status_t AudioFlinger::RecordThread::setMicrophoneDirection(audio_microphone_direction_t direction)
+status_t AudioFlinger::RecordThread::setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction)
{
- ALOGV("setMicrophoneDirection(%d)", direction);
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
AutoMutex _l(mLock);
- return mInput->stream->setMicrophoneDirection(direction);
+ return mInput->stream->setPreferredMicrophoneDirection(direction);
}
-status_t AudioFlinger::RecordThread::setMicrophoneFieldDimension(float zoom)
+status_t AudioFlinger::RecordThread::setPreferredMicrophoneFieldDimension(float zoom)
{
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
AutoMutex _l(mLock);
- return mInput->stream->setMicrophoneFieldDimension(zoom);
+ return mInput->stream->setPreferredMicrophoneFieldDimension(zoom);
}
void AudioFlinger::RecordThread::updateMetadata_l()
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 47e580b..e5abce7 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1607,8 +1607,8 @@
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
void updateMetadata_l() override;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 5a43696..fbf8fef 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1838,16 +1838,16 @@
mRecordTrack->getActiveMicrophones(activeMicrophones));
}
-binder::Status AudioFlinger::RecordHandle::setMicrophoneDirection(
+binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
int /*audio_microphone_direction_t*/ direction) {
ALOGV("%s()", __func__);
- return binder::Status::fromStatusT(mRecordTrack->setMicrophoneDirection(
+ return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
static_cast<audio_microphone_direction_t>(direction)));
}
-binder::Status AudioFlinger::RecordHandle::setMicrophoneFieldDimension(float zoom) {
+binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
ALOGV("%s()", __func__);
- return binder::Status::fromStatusT(mRecordTrack->setMicrophoneFieldDimension(zoom));
+ return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
}
// ----------------------------------------------------------------------------
@@ -2144,22 +2144,22 @@
}
}
-status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneDirection(
+status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneDirection(
audio_microphone_direction_t direction) {
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
- return recordThread->setMicrophoneDirection(direction);
+ return recordThread->setPreferredMicrophoneDirection(direction);
} else {
return BAD_VALUE;
}
}
-status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneFieldDimension(float zoom) {
+status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
- return recordThread->setMicrophoneFieldDimension(zoom);
+ return recordThread->setPreferredMicrophoneFieldDimension(zoom);
} else {
return BAD_VALUE;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 635de6f..5b4e2eb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -334,6 +334,13 @@
void AudioInputDescriptor::updateClientRecordingConfiguration(
int event, const sp<RecordClientDescriptor>& client)
{
+ // do not send callback if starting and no device is selected yet to avoid
+ // double callbacks from startInput() before and after the device is selected
+ if (event == RECORD_CONFIG_EVENT_START
+ && mPatchHandle == AUDIO_PATCH_HANDLE_NONE) {
+ return;
+ }
+
const audio_config_base_t sessionConfig = client->config();
const record_client_info_t recordClientInfo{client->uid(), client->session(),
client->source(), client->portId(),
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index f7289ca..f02db6a9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -180,7 +180,12 @@
// Loopback render mixes are created from a public API and thus restricted
// to non sensitive audio that has not opted out.
if (is_mix_loopback_render(mix->mRouteFlags)) {
- if ((attributes.flags & AUDIO_FLAG_NO_CAPTURE) == AUDIO_FLAG_NO_CAPTURE) {
+ auto hasFlag = [](auto flags, auto flag) { return (flags & flag) == flag; };
+ if (hasFlag(attributes.flags, AUDIO_FLAG_NO_SYSTEM_CAPTURE)) {
+ return MixMatchStatus::NO_MATCH;
+ }
+ if (!mix->mAllowPrivilegedPlaybackCapture &&
+ hasFlag(attributes.flags, AUDIO_FLAG_NO_MEDIA_PROJECTION)) {
return MixMatchStatus::NO_MATCH;
}
if (!(attributes.usage == AUDIO_USAGE_UNKNOWN ||
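The split into AUDIO_FLAG_NO_SYSTEM_CAPTURE and AUDIO_FLAG_NO_MEDIA_PROJECTION means an app can opt its output out of projection-based loopback mixes while privileged mixes (mAllowPrivilegedPlaybackCapture) still match. A hedged sketch of the client-side attributes such a mix would then skip:

    #include <system/audio.h>

    // Playback tagged this way is excluded from MediaProjection loopback mixes.
    static audio_attributes_t makeNonCapturableAttributes() {
        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
        attr.usage = AUDIO_USAGE_MEDIA;
        attr.flags = static_cast<audio_flags_mask_t>(attr.flags | AUDIO_FLAG_NO_MEDIA_PROJECTION);
        return attr;
    }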
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index e8e9fa6..762a4b1 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2025,7 +2025,7 @@
mSoundTriggerSessions.indexOfKey(session) > 0;
*portId = AudioPort::getNextUniqueId();
- clientDesc = new RecordClientDescriptor(*portId, uid, session, *attr, *config,
+ clientDesc = new RecordClientDescriptor(*portId, uid, session, attributes, *config,
requestedDeviceId, attributes.source, flags,
isSoundTrigger);
inputDesc = mInputs.valueFor(*input);
@@ -4828,6 +4828,7 @@
ALOGW("closeOutput() unknown output %d", output);
return;
}
+ const bool closingOutputWasActive = closingOutput->isActive();
mPolicyMixes.closeOutput(closingOutput);
// look for duplicated outputs connected to the output being removed.
@@ -4867,6 +4868,9 @@
mpClientInterface->onAudioPatchListUpdate();
}
+ if (closingOutputWasActive) {
+ closingOutput->stop();
+ }
closingOutput->close();
removeOutput(output);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index a672521..17c6450 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -192,7 +192,7 @@
}
audio_attributes_t attr = *originalAttr;
if (!mPackageManager.allowPlaybackCapture(uid)) {
- attr.flags |= AUDIO_FLAG_NO_CAPTURE;
+ attr.flags |= AUDIO_FLAG_NO_MEDIA_PROJECTION;
}
audio_output_flags_t originalFlags = flags;
AutoCallerClear acc;
@@ -322,7 +322,7 @@
return;
}
sp<AudioPlaybackClient> client = mAudioPlaybackClients.valueAt(index);
- mAudioRecordClients.removeItem(portId);
+ mAudioPlaybackClients.removeItem(portId);
// called from internal thread: no need to clear caller identity
mAudioPolicyManager->releaseOutput(portId);
@@ -376,15 +376,17 @@
return PERMISSION_DENIED;
}
+ bool canCaptureOutput = captureAudioOutputAllowed(pid, uid);
if ((attr->source == AUDIO_SOURCE_VOICE_UPLINK ||
attr->source == AUDIO_SOURCE_VOICE_DOWNLINK ||
attr->source == AUDIO_SOURCE_VOICE_CALL ||
attr->source == AUDIO_SOURCE_ECHO_REFERENCE) &&
- !captureAudioOutputAllowed(pid, uid)) {
+ !canCaptureOutput) {
return PERMISSION_DENIED;
}
- if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed(pid, uid)) {
+ bool canCaptureHotword = captureHotwordAllowed(pid, uid);
+ if ((attr->source == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
return BAD_VALUE;
}
@@ -415,7 +417,7 @@
case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
// FIXME: use the same permission as for remote submix for now.
case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
- if (!captureAudioOutputAllowed(pid, uid)) {
+ if (!canCaptureOutput) {
ALOGE("getInputForAttr() permission denied: capture not allowed");
status = PERMISSION_DENIED;
}
@@ -442,7 +444,8 @@
}
sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
- *selectedDeviceId, opPackageName);
+ *selectedDeviceId, opPackageName,
+ canCaptureOutput, canCaptureHotword);
mAudioRecordClients.add(*portId, client);
}
@@ -1080,6 +1083,14 @@
return PERMISSION_DENIED;
}
+ bool needCaptureMediaOutput = std::any_of(mixes.begin(), mixes.end(), [](auto& mix) {
+ return mix.mAllowPrivilegedPlaybackCapture; });
+ const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+ const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ if (needCaptureMediaOutput && !captureMediaOutputAllowed(callingPid, callingUid)) {
+ return PERMISSION_DENIED;
+ }
+
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
@@ -1124,9 +1135,10 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
+ // The audio source should be attributed to the calling uid
+ const uid_t callingUid = IPCThreadState::self()->getCallingUid();
AutoCallerClear acc;
- return mAudioPolicyManager->startAudioSource(source, attributes, portId,
- IPCThreadState::self()->getCallingUid());
+ return mAudioPolicyManager->startAudioSource(source, attributes, portId, callingUid);
}
status_t AudioPolicyService::stopAudioSource(audio_port_handle_t portId)
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 8cbf3af..e858e8d 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -414,32 +414,35 @@
{
// Go over all active clients and allow capture (does not force silence) in the
// following cases:
-// The client is the assistant
+// Another client in the same UID has already been allowed to capture
+// OR The client is the assistant
// AND an accessibility service is on TOP
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR uses VOICE_RECOGNITION AND is on TOP OR latest started
// OR uses HOTWORD
-// AND there is no privacy sensitive active capture
+// AND there is no active privacy sensitive capture or call
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
// OR The client is an accessibility service
// AND is on TOP OR latest started
// AND the source is VOICE_RECOGNITION or HOTWORD
-// OR the source is one of: AUDIO_SOURCE_VOICE_DOWNLINK, AUDIO_SOURCE_VOICE_UPLINK,
-// AUDIO_SOURCE_VOICE_CALL
+// OR the client source is virtual (remote submix, call audio TX or RX...)
// OR Any other client
// AND The assistant is not on TOP
-// AND is on TOP OR latest started
-// AND there is no privacy sensitive active capture
+// AND there is no active privacy sensitive capture or call
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
//TODO: manage pre-processing effects according to use case priority
sp<AudioRecordClient> topActive;
sp<AudioRecordClient> latestActive;
sp<AudioRecordClient> latestSensitiveActive;
+
nsecs_t topStartNs = 0;
nsecs_t latestStartNs = 0;
nsecs_t latestSensitiveStartNs = 0;
bool isA11yOnTop = mUidPolicy->isA11yOnTop();
bool isAssistantOnTop = false;
bool isSensitiveActive = false;
+ bool isInCall = mPhoneState == AUDIO_MODE_IN_CALL;
// if Sensor Privacy is enabled then all recordings should be silenced.
if (mSensorPrivacyPolicy->isSensorPrivacyEnabled()) {
@@ -449,15 +452,18 @@
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
- if (!current->active) continue;
- if (isPrivacySensitiveSource(current->attributes.source)) {
- if (current->startTimeNs > latestSensitiveStartNs) {
- latestSensitiveActive = current;
- latestSensitiveStartNs = current->startTimeNs;
- }
- isSensitiveActive = true;
+ if (!current->active) {
+ continue;
}
- if (mUidPolicy->getUidState(current->uid) == ActivityManager::PROCESS_STATE_TOP) {
+
+ app_state_t appState = apmStatFromAmState(mUidPolicy->getUidState(current->uid));
+ // clients which app is in IDLE state are not eligible for top active or
+ // latest active
+ if (appState == APP_STATE_IDLE) {
+ continue;
+ }
+
+ if (appState == APP_STATE_TOP) {
if (current->startTimeNs > topStartNs) {
topActive = current;
topStartNs = current->startTimeNs;
@@ -470,72 +476,105 @@
latestActive = current;
latestStartNs = current->startTimeNs;
}
+ if (isPrivacySensitiveSource(current->attributes.source)) {
+ if (current->startTimeNs > latestSensitiveStartNs) {
+ latestSensitiveActive = current;
+ latestSensitiveStartNs = current->startTimeNs;
+ }
+ isSensitiveActive = true;
+ }
}
- if (topActive == nullptr && latestActive == nullptr) {
- return;
+ // if no active client with UI on Top, consider latest active as top
+ if (topActive == nullptr) {
+ topActive = latestActive;
}
- if (topActive != nullptr) {
- latestActive = nullptr;
- }
+ std::vector<uid_t> enabledUids;
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
- if (!current->active) continue;
+ if (!current->active) {
+ continue;
+ }
+
+ // keep capture allowed if another client with the same UID has already
+ // been allowed to capture
+ if (std::find(enabledUids.begin(), enabledUids.end(), current->uid)
+ != enabledUids.end()) {
+ continue;
+ }
audio_source_t source = current->attributes.source;
- bool isOnTop = current == topActive;
- bool isLatest = current == latestActive;
- bool isLatestSensitive = current == latestSensitiveActive;
- bool forceIdle = true;
+ bool isTopOrLatestActive = topActive == nullptr ? false : current->uid == topActive->uid;
+ bool isLatestSensitive = latestSensitiveActive == nullptr ?
+ false : current->uid == latestSensitiveActive->uid;
+
+ // By default allow capture if:
+ // The assistant is not on TOP
+ // AND there is no active privacy sensitive capture or call
+ // OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+ bool allowCapture = !isAssistantOnTop
+ && !(isSensitiveActive && !(isLatestSensitive || current->canCaptureOutput))
+ && !(isInCall && !current->canCaptureOutput);
if (isVirtualSource(source)) {
- forceIdle = false;
+ // Allow capture for virtual (remote submix, call audio TX or RX...) sources
+ allowCapture = true;
} else if (mUidPolicy->isAssistantUid(current->uid)) {
+ // For assistant allow capture if:
+ // An accessibility service is on TOP
+ // AND the source is VOICE_RECOGNITION or HOTWORD
+ // OR is on TOP OR latest started AND uses VOICE_RECOGNITION
+ // OR uses HOTWORD
+ // AND there is no active privacy sensitive capture or call
+ // OR client has CAPTURE_AUDIO_OUTPUT privileged permission
if (isA11yOnTop) {
if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
- forceIdle = false;
+ allowCapture = true;
}
} else {
- if ((((isOnTop || isLatest) && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
- source == AUDIO_SOURCE_HOTWORD) && !isSensitiveActive) {
- forceIdle = false;
+ if (((isTopOrLatestActive && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
+ source == AUDIO_SOURCE_HOTWORD) &&
+ (!(isSensitiveActive || isInCall) || current->canCaptureOutput)) {
+ allowCapture = true;
}
}
} else if (mUidPolicy->isA11yUid(current->uid)) {
- if ((isOnTop || isLatest) &&
- (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
- forceIdle = false;
- }
- } else {
- if (!isAssistantOnTop && (isOnTop || isLatest) &&
- (!isSensitiveActive || isLatestSensitive)) {
- forceIdle = false;
+ // For accessibility service allow capture if:
+ // Is on TOP OR latest started
+ // AND the source is VOICE_RECOGNITION or HOTWORD
+ if (isTopOrLatestActive &&
+ (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
+ allowCapture = true;
}
}
setAppState_l(current->uid,
- forceIdle ? APP_STATE_IDLE :
- apmStatFromAmState(mUidPolicy->getUidState(current->uid)));
+ allowCapture ? apmStatFromAmState(mUidPolicy->getUidState(current->uid)) :
+ APP_STATE_IDLE);
+ if (allowCapture) {
+ enabledUids.push_back(current->uid);
+ }
}
}
void AudioPolicyService::silenceAllRecordings_l() {
for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
- setAppState_l(current->uid, APP_STATE_IDLE);
+ if (!isVirtualSource(current->attributes.source)) {
+ setAppState_l(current->uid, APP_STATE_IDLE);
+ }
}
}
/* static */
app_state_t AudioPolicyService::apmStatFromAmState(int amState) {
- switch (amState) {
- case ActivityManager::PROCESS_STATE_UNKNOWN:
+
+ if (amState == ActivityManager::PROCESS_STATE_UNKNOWN) {
return APP_STATE_IDLE;
- case ActivityManager::PROCESS_STATE_TOP:
- return APP_STATE_TOP;
- default:
- break;
+ } else if (amState <= ActivityManager::PROCESS_STATE_TOP) {
+ // include persistent services
+ return APP_STATE_TOP;
}
return APP_STATE_FOREGROUND;
}
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index a2e75cd..160f70f 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -753,13 +753,17 @@
AudioRecordClient(const audio_attributes_t attributes,
const audio_io_handle_t io, uid_t uid, pid_t pid,
const audio_session_t session, const audio_port_handle_t deviceId,
- const String16& opPackageName) :
+ const String16& opPackageName,
+ bool canCaptureOutput, bool canCaptureHotword) :
AudioClient(attributes, io, uid, pid, session, deviceId),
- opPackageName(opPackageName), startTimeNs(0) {}
+ opPackageName(opPackageName), startTimeNs(0),
+ canCaptureOutput(canCaptureOutput), canCaptureHotword(canCaptureHotword) {}
~AudioRecordClient() override = default;
const String16 opPackageName; // client package name
nsecs_t startTimeNs;
+ const bool canCaptureOutput;
+ const bool canCaptureHotword;
};
// --- AudioPlaybackClient ---
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 51d0682..8113c3f 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -972,8 +972,9 @@
userid_t clientUserId = multiuser_get_user_id(clientUid);
// Only allow clients who are being used by the current foreground device user, unless calling
- // from our own process.
- if (callingPid != getpid() && (mAllowedUsers.find(clientUserId) == mAllowedUsers.end())) {
+ // from our own process OR the caller is using the cameraserver's HIDL interface.
+ if (!hardware::IPCThreadState::self()->isServingCall() && callingPid != getpid() &&
+ (mAllowedUsers.find(clientUserId) == mAllowedUsers.end())) {
ALOGE("CameraService::connect X (PID %d) rejected (cannot connect from "
"device user %d, currently allowed device users: %s)", callingPid, clientUserId,
toString(mAllowedUsers).string());
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 0571741..12ff130 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -321,7 +321,7 @@
// so. As documented in hardware/camera3.h:configure_streams().
if (mState == STATE_IN_RECONFIG &&
mOldUsage == mUsage &&
- mOldMaxBuffers == camera3_stream::max_buffers) {
+ mOldMaxBuffers == camera3_stream::max_buffers && !mDataSpaceOverridden) {
mState = STATE_CONFIGURED;
return OK;
}
diff --git a/services/camera/libcameraservice/hidl/Convert.cpp b/services/camera/libcameraservice/hidl/Convert.cpp
index a87812b..c2ed23a 100644
--- a/services/camera/libcameraservice/hidl/Convert.cpp
+++ b/services/camera/libcameraservice/hidl/Convert.cpp
@@ -97,6 +97,21 @@
return outputConfiguration;
}
+hardware::camera2::params::SessionConfiguration convertFromHidl(
+ const HSessionConfiguration &hSessionConfiguration) {
+ hardware::camera2::params::SessionConfiguration sessionConfig(
+ hSessionConfiguration.inputWidth, hSessionConfiguration.inputHeight,
+ hSessionConfiguration.inputFormat,
+ static_cast<int>(hSessionConfiguration.operationMode));
+
+ for (const auto& hConfig : hSessionConfiguration.outputStreams) {
+ hardware::camera2::params::OutputConfiguration config = convertFromHidl(hConfig);
+ sessionConfig.addOutputConfiguration(config);
+ }
+
+ return sessionConfig;
+}
+
// The camera metadata here is cloned. Since we're reading metadata over
// hwbinder we would need to clone it in order to avoid aligment issues.
bool convertFromHidl(const HCameraMetadata &src, CameraMetadata *dst) {
diff --git a/services/camera/libcameraservice/hidl/Convert.h b/services/camera/libcameraservice/hidl/Convert.h
index 82937a3..79683f6 100644
--- a/services/camera/libcameraservice/hidl/Convert.h
+++ b/services/camera/libcameraservice/hidl/Convert.h
@@ -53,6 +53,7 @@
using HOutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
using HPhysicalCameraSettings = frameworks::cameraservice::device::V2_0::PhysicalCameraSettings;
using HPhysicalCaptureResultInfo = frameworks::cameraservice::device::V2_0::PhysicalCaptureResultInfo;
+using HSessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
using HSubmitInfo = frameworks::cameraservice::device::V2_0::SubmitInfo;
using HStatus = frameworks::cameraservice::common::V2_0::Status;
using HStreamConfigurationMode = frameworks::cameraservice::device::V2_0::StreamConfigurationMode;
@@ -70,6 +71,9 @@
hardware::camera2::params::OutputConfiguration convertFromHidl(
const HOutputConfiguration &hOutputConfiguration);
+hardware::camera2::params::SessionConfiguration convertFromHidl(
+ const HSessionConfiguration &hSessionConfiguration);
+
HCameraDeviceStatus convertToHidlCameraDeviceStatus(int32_t status);
void convertToHidl(const std::vector<hardware::CameraStatus> &src,
diff --git a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
index d22ba5a..675ad24 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
@@ -41,6 +41,7 @@
using hardware::Void;
using HSubmitInfo = device::V2_0::SubmitInfo;
using hardware::camera2::params::OutputConfiguration;
+using hardware::camera2::params::SessionConfiguration;
static constexpr int32_t CAMERA_REQUEST_METADATA_QUEUE_SIZE = 1 << 20 /* 1 MB */;
static constexpr int32_t CAMERA_RESULT_METADATA_QUEUE_SIZE = 1 << 20 /* 1 MB */;
@@ -255,6 +256,18 @@
return B2HStatus(ret);
}
+Return<void> HidlCameraDeviceUser::isSessionConfigurationSupported(
+ const HSessionConfiguration& hSessionConfiguration,
+ isSessionConfigurationSupported_cb _hidl_cb) {
+ bool supported = false;
+ SessionConfiguration sessionConfiguration = convertFromHidl(hSessionConfiguration);
+ binder::Status ret = mDeviceRemote->isSessionConfigurationSupported(
+ sessionConfiguration, &supported);
+ HStatus status = B2HStatus(ret);
+ _hidl_cb(status, supported);
+ return Void();
+}
+
} // implementation
} // V2_0
} // device
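A hedged sketch of a HIDL client driving the new method; the generated header path and the Status::NO_ERROR enumerator are assumptions based on the usual hidl-gen layout:

    #include <android/frameworks/cameraservice/device/2.0/ICameraDeviceUser.h>

    using ::android::sp;
    using ::android::frameworks::cameraservice::common::V2_0::Status;
    using ::android::frameworks::cameraservice::device::V2_0::ICameraDeviceUser;
    using ::android::frameworks::cameraservice::device::V2_0::SessionConfiguration;

    // Ask the service whether the proposed session configuration can be configured.
    static bool isConfigSupported(const sp<ICameraDeviceUser> &device,
                                  const SessionConfiguration &config) {
        bool supported = false;
        auto ret = device->isSessionConfigurationSupported(
                config,
                [&](Status status, bool result) {
                    supported = (status == Status::NO_ERROR) && result;
                });
        return ret.isOk() && supported;
    }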
diff --git a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h
index be8f1d6..c3a80fe 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h
+++ b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h
@@ -53,6 +53,7 @@
using HCameraDeviceUser = device::V2_0::ICameraDeviceUser;
using HCameraMetadata = cameraservice::service::V2_0::CameraMetadata;
using HCaptureRequest = device::V2_0::CaptureRequest;
+using HSessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
using HOutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
using HPhysicalCameraSettings = frameworks::cameraservice::device::V2_0::PhysicalCameraSettings;
using HStatus = frameworks::cameraservice::common::V2_0::Status;
@@ -97,6 +98,10 @@
virtual Return<HStatus> updateOutputConfiguration(
int32_t streamId, const HOutputConfiguration& outputConfiguration) override;
+ virtual Return<void> isSessionConfigurationSupported(
+ const HSessionConfiguration& sessionConfiguration,
+ isSessionConfigurationSupported_cb _hidl_cb) override;
+
bool initStatus() { return mInitSuccess; }
std::shared_ptr<CaptureResultMetadataQueue> getCaptureResultMetadataQueue() {