Merge changes Ic983efe6,I6eeb4aa6
* changes:
Fix test that checks if metadata is already set
Fix multiple-track mkv files
diff --git a/camera/ndk/NdkCameraDevice.cpp b/camera/ndk/NdkCameraDevice.cpp
index 09b85d5..691996b 100644
--- a/camera/ndk/NdkCameraDevice.cpp
+++ b/camera/ndk/NdkCameraDevice.cpp
@@ -287,3 +287,16 @@
}
return device->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
+
+EXPORT
+camera_status_t ACameraDevice_isSessionConfigurationSupported(
+ const ACameraDevice* device,
+ const ACaptureSessionOutputContainer* sessionOutputContainer) {
+ ATRACE_CALL();
+ if (device == nullptr || sessionOutputContainer == nullptr) {
+ ALOGE("%s: Error: invalid input: device %p, sessionOutputContainer %p",
+ __FUNCTION__, device, sessionOutputContainer);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ return device->isSessionConfigurationSupported(sessionOutputContainer);
+}
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 5e4fcd0..c9db01e 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -227,6 +227,55 @@
return ACAMERA_OK;
}
+camera_status_t CameraDevice::isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+ Mutex::Autolock _l(mDeviceLock);
+ camera_status_t ret = checkCameraClosedOrErrorLocked();
+ if (ret != ACAMERA_OK) {
+ return ret;
+ }
+
+ SessionConfiguration sessionConfiguration(0 /*inputWidth*/, 0 /*inputHeight*/,
+ -1 /*inputFormat*/, CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE);
+ for (const auto& output : sessionOutputContainer->mOutputs) {
+ sp<IGraphicBufferProducer> iGBP(nullptr);
+ ret = getIGBPfromAnw(output.mWindow, iGBP);
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera device %s failed to extract graphic producer from native window",
+ getId());
+ return ret;
+ }
+
+ String16 physicalId16(output.mPhysicalCameraId.c_str());
+ OutputConfiguration outConfig(iGBP, output.mRotation, physicalId16,
+ OutputConfiguration::INVALID_SET_ID, true);
+
+ for (auto& anw : output.mSharedWindows) {
+ ret = getIGBPfromAnw(anw, iGBP);
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera device %s failed to extract graphic producer from native window",
+ getId());
+ return ret;
+ }
+ outConfig.addGraphicProducer(iGBP);
+ }
+
+ sessionConfiguration.addOutputConfiguration(outConfig);
+ }
+
+ bool supported = false;
+ binder::Status remoteRet = mRemote->isSessionConfigurationSupported(
+ sessionConfiguration, &supported);
+ if (remoteRet.serviceSpecificErrorCode() ==
+ hardware::ICameraService::ERROR_INVALID_OPERATION) {
+ return ACAMERA_ERROR_UNSUPPORTED_OPERATION;
+ } else if (!remoteRet.isOk()) {
+ return ACAMERA_ERROR_UNKNOWN;
+ } else {
+ return supported ? ACAMERA_OK : ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
+ }
+}
+
camera_status_t CameraDevice::updateOutputConfigurationLocked(ACaptureSessionOutput *output) {
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 103efd5..56741ce 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -35,6 +35,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <camera/CaptureResult.h>
#include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/SessionConfiguration.h>
#include <camera/camera2/CaptureRequest.h>
#include <camera/NdkCameraManager.h>
@@ -77,6 +78,9 @@
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session);
+ camera_status_t isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const;
+
// Callbacks from camera service
class ServiceCallback : public hardware::camera2::BnCameraDeviceCallbacks {
public:
@@ -369,6 +373,11 @@
return mDevice->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
+ camera_status_t isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+ return mDevice->isSessionConfigurationSupported(sessionOutputContainer);
+ }
+
/***********************
* Device internal APIs *
***********************/
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index cedf83a..bc544e3 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -845,6 +845,43 @@
const ACameraIdList* physicalIdList,
/*out*/ACaptureRequest** request) __INTRODUCED_IN(29);
+/**
+ * Check whether a particular {@link ACaptureSessionOutputContainer} is supported by
+ * the camera device.
+ *
+ * <p>This method performs a runtime check of a given {@link
+ * ACaptureSessionOutputContainer}. The result confirms whether or not the
+ * passed CaptureSession outputs can be successfully used to create a camera
+ * capture session using {@link ACameraDevice_createCaptureSession}.</p>
+ *
+ * <p>This method can be called at any point before, during, or after an active
+ * capture session. It must not impact normal camera behavior in any way and
+ * must complete significantly faster than creating a capture session.</p>
+ *
+ * <p>Although this method is faster than creating a new capture session, it is not intended
+ * to be used for exploring the entire space of supported stream combinations.</p>
+ *
+ * @param device the camera device of interest
+ * @param sessionOutputContainer the {@link ACaptureSessionOutputContainer} of
+ * interest.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the given {@link ACaptureSessionOutputContainer}
+ * is supported by the camera device.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device or sessionOutputContainer
+ * is NULL.</li>
+ * <li>{@link ACAMERA_ERROR_STREAM_CONFIGURE_FAIL} if the given
+ * {@link ACaptureSessionOutputContainer}
+ * is not supported by
+ * the camera
+ * device.</li>
+ * <li>{@link ACAMERA_ERROR_UNSUPPORTED_OPERATION} if the query operation is not
+ * supported by the camera device.</li></ul>
+ */
+camera_status_t ACameraDevice_isSessionConfigurationSupported(
+ const ACameraDevice* device,
+ const ACaptureSessionOutputContainer* sessionOutputContainer) __INTRODUCED_IN(29);
+
#endif /* __ANDROID_API__ >= 29 */
__END_DECLS
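
Illustration only (not part of this change): a minimal sketch of how a client might probe a configuration with the new entry point before creating a session; the device and output container are assumed to already be set up by the caller.

    #include <camera/NdkCameraDevice.h>

    // Sketch: check whether the desired output combination is supported
    // before calling ACameraDevice_createCaptureSession.
    // `device` is an opened ACameraDevice; `outputs` is an
    // ACaptureSessionOutputContainer already populated with the outputs.
    static bool canConfigure(ACameraDevice* device,
                             ACaptureSessionOutputContainer* outputs) {
        camera_status_t status =
                ACameraDevice_isSessionConfigurationSupported(device, outputs);
        if (status == ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
            // The device cannot answer the query; fall back to attempting
            // ACameraDevice_createCaptureSession directly.
            return true;
        }
        return status == ACAMERA_OK;
    }
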
diff --git a/camera/ndk/include/camera/NdkCameraError.h b/camera/ndk/include/camera/NdkCameraError.h
index 6b58155..fc618ee 100644
--- a/camera/ndk/include/camera/NdkCameraError.h
+++ b/camera/ndk/include/camera/NdkCameraError.h
@@ -106,7 +106,8 @@
/**
* Camera device does not support the stream configuration provided by application in
- * {@link ACameraDevice_createCaptureSession}.
+ * {@link ACameraDevice_createCaptureSession} or {@link
+ * ACameraDevice_isSessionConfigurationSupported}.
*/
ACAMERA_ERROR_STREAM_CONFIGURE_FAIL = ACAMERA_ERROR_BASE - 9,
@@ -130,6 +131,11 @@
* The application does not have permission to open camera.
*/
ACAMERA_ERROR_PERMISSION_DENIED = ACAMERA_ERROR_BASE - 13,
+
+ /**
+ * The operation is not supported by the camera device.
+ */
+ ACAMERA_ERROR_UNSUPPORTED_OPERATION = ACAMERA_ERROR_BASE - 14,
} camera_status_t;
#endif /* __ANDROID_API__ >= 24 */
diff --git a/camera/ndk/libcamera2ndk.map.txt b/camera/ndk/libcamera2ndk.map.txt
index 946a98e..b6f1553 100644
--- a/camera/ndk/libcamera2ndk.map.txt
+++ b/camera/ndk/libcamera2ndk.map.txt
@@ -14,6 +14,7 @@
ACameraDevice_createCaptureRequest_withPhysicalIds; # introduced=29
ACameraDevice_createCaptureSession;
ACameraDevice_createCaptureSessionWithSessionParameters; # introduced=28
+ ACameraDevice_isSessionConfigurationSupported; # introduced=29
ACameraDevice_getId;
ACameraManager_create;
ACameraManager_delete;
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index a38a31e..d7d774b 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -38,6 +38,7 @@
using HCameraMetadata = frameworks::cameraservice::device::V2_0::CameraMetadata;
using OutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
+using SessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
using hardware::Void;
// Static member definitions
@@ -216,6 +217,47 @@
return ACAMERA_OK;
}
+camera_status_t CameraDevice::isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+ Mutex::Autolock _l(mDeviceLock);
+ camera_status_t ret = checkCameraClosedOrErrorLocked();
+ if (ret != ACAMERA_OK) {
+ return ret;
+ }
+
+ SessionConfiguration sessionConfig;
+ sessionConfig.inputWidth = 0;
+ sessionConfig.inputHeight = 0;
+ sessionConfig.inputFormat = -1;
+ sessionConfig.operationMode = StreamConfigurationMode::NORMAL_MODE;
+ sessionConfig.outputStreams.resize(sessionOutputContainer->mOutputs.size());
+ size_t index = 0;
+ for (const auto& output : sessionOutputContainer->mOutputs) {
+ sessionConfig.outputStreams[index].rotation = utils::convertToHidl(output.mRotation);
+ sessionConfig.outputStreams[index].windowGroupId = -1;
+ sessionConfig.outputStreams[index].windowHandles.resize(output.mSharedWindows.size() + 1);
+ sessionConfig.outputStreams[index].windowHandles[0] = output.mWindow;
+ sessionConfig.outputStreams[index].physicalCameraId = output.mPhysicalCameraId;
+ index++;
+ }
+
+ bool configSupported = false;
+ Status status = Status::NO_ERROR;
+ auto remoteRet = mRemote->isSessionConfigurationSupported(sessionConfig,
+ [&status, &configSupported](auto s, auto supported) {
+ status = s;
+ configSupported = supported;
+ });
+
+ if (status == Status::INVALID_OPERATION) {
+ return ACAMERA_ERROR_UNSUPPORTED_OPERATION;
+ } else if (!remoteRet.isOk()) {
+ return ACAMERA_ERROR_UNKNOWN;
+ } else {
+ return configSupported ? ACAMERA_OK : ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
+ }
+}
+
void CameraDevice::addRequestSettingsMetadata(ACaptureRequest *aCaptureRequest,
sp<CaptureRequest> &req) {
CameraMetadata metadataCopy = aCaptureRequest->settings->getInternalData();
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 28092fd..47e6f56 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -101,6 +101,9 @@
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session);
+ camera_status_t isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const;
+
// Callbacks from camera service
class ServiceCallback : public ICameraDeviceCallback {
public:
@@ -397,6 +400,11 @@
return mDevice->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
+ camera_status_t isSessionConfigurationSupported(
+ const ACaptureSessionOutputContainer* sessionOutputContainer) const {
+ return mDevice->isSessionConfigurationSupported(sessionOutputContainer);
+ }
+
/***********************
* Device internal APIs *
***********************/
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 2398922..c51f93b 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -121,6 +121,12 @@
cameraIdList.numCameras = idPointerList.size();
cameraIdList.cameraIds = idPointerList.data();
+ ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
+ if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
+ ALOGE("ACameraDevice_isSessionConfigurationSupported failed, ret=%d", ret);
+ return ret;
+ }
+
ret = ACameraDevice_createCaptureSession(mDevice, mOutputs, &mSessionCb, &mSession);
if (ret != AMEDIA_OK) {
ALOGE("ACameraDevice_createCaptureSession failed, ret=%d", ret);
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 647540a..5ff1c59 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -5319,7 +5319,9 @@
}
int32_t MPEG4Source::parseHEVCLayerId(const uint8_t *data, size_t size) {
- CHECK(data != nullptr && size >= (mNALLengthSize + 2));
+ if (data == nullptr || size < mNALLengthSize + 2) {
+ return -1;
+ }
// HEVC NAL-header (16-bit)
// 1 6 6 3
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index d99493d..b63ae6b 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -1280,7 +1280,7 @@
//ALOGI("comment #%d: '%s'", i + 1, mVc.user_comments[i]);
}
- AMediaFormat_getInt32(mFileMeta, "haptic", &mHapticChannelCount);
+ AMediaFormat_getInt32(mFileMeta, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, &mHapticChannelCount);
}
void MyOggExtractor::setChannelMask(int channelCount) {
@@ -1297,6 +1297,8 @@
const audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(
audioChannelCount) | hapticChannelMask;
AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK, channelMask);
+ AMediaFormat_setInt32(
+ mMeta, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, mHapticChannelCount);
}
} else {
AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK,
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 5851533..f07be46 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -410,8 +410,8 @@
}
// Call these directly because we are already holding the lock.
- mAudioRecord->setMicrophoneDirection(mSelectedMicDirection);
- mAudioRecord->setMicrophoneFieldDimension(mSelectedMicFieldDimension);
+ mAudioRecord->setPreferredMicrophoneDirection(mSelectedMicDirection);
+ mAudioRecord->setPreferredMicrophoneFieldDimension(mSelectedMicFieldDimension);
if (status != NO_ERROR) {
mActive = false;
@@ -1381,7 +1381,7 @@
return mAudioRecord->getActiveMicrophones(activeMicrophones).transactionError();
}
-status_t AudioRecord::setMicrophoneDirection(audio_microphone_direction_t direction)
+status_t AudioRecord::setPreferredMicrophoneDirection(audio_microphone_direction_t direction)
{
AutoMutex lock(mLock);
if (mSelectedMicDirection == direction) {
@@ -1394,11 +1394,11 @@
// the internal AudioRecord hasn't been created yet, so just stash the attribute.
return OK;
} else {
- return mAudioRecord->setMicrophoneDirection(direction).transactionError();
+ return mAudioRecord->setPreferredMicrophoneDirection(direction).transactionError();
}
}
-status_t AudioRecord::setMicrophoneFieldDimension(float zoom) {
+status_t AudioRecord::setPreferredMicrophoneFieldDimension(float zoom) {
AutoMutex lock(mLock);
if (mSelectedMicFieldDimension == zoom) {
// NOP
@@ -1410,7 +1410,7 @@
// the internal AudioRecord hasn't been created yet, so just stash the attribute.
return OK;
} else {
- return mAudioRecord->setMicrophoneFieldDimension(zoom).transactionError();
+ return mAudioRecord->setPreferredMicrophoneFieldDimension(zoom).transactionError();
}
}
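
Illustration only (not part of this change): a sketch of the renamed client-side calls; the AudioRecord instance is assumed to be already constructed and configured elsewhere.

    #include <media/AudioRecord.h>
    #include <system/audio.h>

    using namespace android;

    // Sketch: apply the renamed preferred-microphone hints to an existing record.
    // The old setMicrophoneDirection/setMicrophoneFieldDimension names are gone;
    // callers use the setPreferred* variants after this change.
    static void applyMicHints(const sp<AudioRecord>& record) {
        if (record->setPreferredMicrophoneDirection(MIC_DIRECTION_FRONT) != OK) {
            return;  // the hint is best-effort; ignore failures
        }
        // Field dimension ("zoom") ranges from -1.0 (widest) to 1.0 (narrowest).
        record->setPreferredMicrophoneFieldDimension(1.0f);
    }
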
diff --git a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
index cf9c7f4..ecf58b6 100644
--- a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
@@ -39,9 +39,9 @@
/* Set the microphone direction (for processing).
*/
- void setMicrophoneDirection(int /*audio_microphone_direction_t*/ direction);
+ void setPreferredMicrophoneDirection(int /*audio_microphone_direction_t*/ direction);
/* Set the microphone zoom (for processing).
*/
- void setMicrophoneFieldDimension(float zoom);
+ void setPreferredMicrophoneFieldDimension(float zoom);
}
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index b4ddb69..9c81bb7 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -542,11 +542,11 @@
/* Set the Microphone direction (for processing purposes).
*/
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
/* Set the Microphone zoom factor (for processing purposes).
*/
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
/* Get the unique port ID assigned to this AudioRecord instance by audio policy manager.
* The ID is unique across all audioserver clients and can change during the life cycle
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index e396cf3..6c8e6a4 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -855,25 +855,26 @@
#endif
#if MAJOR_VERSION < 5
-status_t StreamInHalHidl::setMicrophoneDirection(audio_microphone_direction_t direction __unused) {
+status_t StreamInHalHidl::setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction __unused) {
if (mStream == 0) return NO_INIT;
return INVALID_OPERATION;
}
-status_t StreamInHalHidl::setMicrophoneFieldDimension(float zoom __unused) {
+status_t StreamInHalHidl::setPreferredMicrophoneFieldDimension(float zoom __unused) {
if (mStream == 0) return NO_INIT;
return INVALID_OPERATION;
}
#else
-status_t StreamInHalHidl::setMicrophoneDirection(audio_microphone_direction_t direction) {
+status_t StreamInHalHidl::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
if (!mStream) return NO_INIT;
- return processReturn("setMicrophoneDirection",
- mStream->setMicrophoneDirection(static_cast<MicrophoneDirection>(direction)));
+ return processReturn("setPreferredMicrophoneDirection",
+ mStream->setMicrophoneDirection(static_cast<MicrophoneDirection>(direction)));
}
-status_t StreamInHalHidl::setMicrophoneFieldDimension(float zoom) {
+status_t StreamInHalHidl::setPreferredMicrophoneFieldDimension(float zoom) {
if (!mStream) return NO_INIT;
- return processReturn("setMicrophoneFieldDimension",
+ return processReturn("setPreferredMicrophoneFieldDimension",
mStream->setMicrophoneFieldDimension(zoom));
}
#endif
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 9ac1067..f587889 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -221,10 +221,11 @@
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
// Set microphone direction (for processing)
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) override;
+ virtual status_t setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction) override;
// Set microphone zoom (for processing)
- virtual status_t setMicrophoneFieldDimension(float zoom) override;
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom) override;
// Called when the metadata of the stream's sink has been changed.
status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index fcb809b..7d5ce05 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -369,20 +369,21 @@
#endif
#if MAJOR_VERSION < 5
-status_t StreamInHalLocal::setMicrophoneDirection(audio_microphone_direction_t direction __unused) {
+status_t StreamInHalLocal::setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction __unused) {
return INVALID_OPERATION;
}
-status_t StreamInHalLocal::setMicrophoneFieldDimension(float zoom __unused) {
+status_t StreamInHalLocal::setPreferredMicrophoneFieldDimension(float zoom __unused) {
return INVALID_OPERATION;
}
#else
-status_t StreamInHalLocal::setMicrophoneDirection(audio_microphone_direction_t direction) {
+status_t StreamInHalLocal::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
if (mStream->set_microphone_direction == NULL) return INVALID_OPERATION;
return mStream->set_microphone_direction(mStream, direction);
}
-status_t StreamInHalLocal::setMicrophoneFieldDimension(float zoom) {
+status_t StreamInHalLocal::setPreferredMicrophoneFieldDimension(float zoom) {
if (mStream->set_microphone_field_dimension == NULL) return INVALID_OPERATION;
return mStream->set_microphone_field_dimension(mStream, zoom);
@@ -391,3 +392,5 @@
} // namespace CPP_VERSION
} // namespace android
+
+
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index 3d6c50e..34f2bd8 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -205,10 +205,10 @@
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
// Sets microphone direction (for processing)
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
// Sets microphone zoom (for processing)
- virtual status_t setMicrophoneFieldDimension(float zoom);
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
// Called when the metadata of the stream's sink has been changed.
status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index ed8282f..6c3b21c 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -180,10 +180,10 @@
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
// Set direction for capture processing
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t) = 0;
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t) = 0;
// Set zoom factor for capture stream
- virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
struct SinkMetadata {
std::vector<record_track_metadata_t> tracks;
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index fd21545..5079634 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -60,6 +60,7 @@
# run multichannel effects at different configs, saving only the stereo channel
# pair.
+error_count=0
for flags in "${flags_arr[@]}"
do
for fs in ${fs_arr[*]}
@@ -69,6 +70,13 @@
adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw \
-o:$testdir/sinesweep_$((chMask))_$((fs)).raw -chMask:$chMask -fs:$fs $flags
+ shell_ret=$?
+ if [ $shell_ret -ne 0 ]; then
+ echo "error: $shell_ret"
+ ((++error_count))
+ fi
+
+
# two channel files should be identical to higher channel
# computation (first 2 channels).
# Do not compare cases where -bE is in flags (due to mono computation)
@@ -82,8 +90,17 @@
$testdir/sinesweep_$((chMask))_$((fs)).raw -thr:90.308998
fi
+ # both cmp and snr return EXIT_FAILURE on mismatch.
+ shell_ret=$?
+ if [ $shell_ret -ne 0 ]; then
+ echo "error: $shell_ret"
+ ((++error_count))
+ fi
+
done
done
done
adb shell rm -r $testdir
+echo "$error_count errors"
+exit $error_count
diff --git a/media/libeffects/lvm/tests/snr.cpp b/media/libeffects/lvm/tests/snr.cpp
index 88110c0..885994c 100644
--- a/media/libeffects/lvm/tests/snr.cpp
+++ b/media/libeffects/lvm/tests/snr.cpp
@@ -84,6 +84,7 @@
printf("\nError: missing input/reference files\n");
return -1;
}
+ int ret = EXIT_SUCCESS;
auto sn = pcm_format == 0
? getSignalNoise<short>(finp, fref)
: getSignalNoise<float>(finp, fref);
@@ -92,6 +93,7 @@
// compare the measured snr value with threshold
if (snr < thr) {
printf("%.6f less than threshold %.6f\n", snr, thr);
+ ret = EXIT_FAILURE;
} else {
printf("%.6f\n", snr);
}
@@ -99,5 +101,5 @@
fclose(finp);
fclose(fref);
- return 0;
+ return ret;
}
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index f283569..a354ce1 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -66,8 +66,8 @@
ENABLE_AUDIO_DEVICE_CALLBACK,
GET_ACTIVE_MICROPHONES,
GET_PORT_ID,
- SET_MICROPHONE_DIRECTION,
- SET_MICROPHONE_FIELD_DIMENSION
+ SET_PREFERRED_MICROPHONE_DIRECTION,
+ SET_PREFERRED_MICROPHONE_FIELD_DIMENSION
};
class BpMediaRecorder: public BpInterface<IMediaRecorder>
@@ -409,21 +409,21 @@
return status;
}
- status_t setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
Parcel data, reply;
data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
data.writeInt32(direction);
- status_t status = remote()->transact(SET_MICROPHONE_DIRECTION, data, &reply);
+ status_t status = remote()->transact(SET_PREFERRED_MICROPHONE_DIRECTION, data, &reply);
return status == NO_ERROR ? (status_t)reply.readInt32() : status;
}
- status_t setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+ status_t setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
Parcel data, reply;
data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
data.writeFloat(zoom);
- status_t status = remote()->transact(SET_MICROPHONE_FIELD_DIMENSION, data, &reply);
+ status_t status = remote()->transact(SET_PREFERRED_MICROPHONE_FIELD_DIMENSION, data, &reply);
return status == NO_ERROR ? (status_t)reply.readInt32() : status;
}
@@ -709,20 +709,20 @@
}
return NO_ERROR;
}
- case SET_MICROPHONE_DIRECTION: {
- ALOGV("SET_MICROPHONE_DIRECTION");
+ case SET_PREFERRED_MICROPHONE_DIRECTION: {
+ ALOGV("SET_PREFERRED_MICROPHONE_DIRECTION");
CHECK_INTERFACE(IMediaRecorder, data, reply);
int direction = data.readInt32();
- status_t status =
- setMicrophoneDirection(static_cast<audio_microphone_direction_t>(direction));
+ status_t status = setPreferredMicrophoneDirection(
+ static_cast<audio_microphone_direction_t>(direction));
reply->writeInt32(status);
return NO_ERROR;
}
- case SET_MICROPHONE_FIELD_DIMENSION: {
+ case SET_PREFERRED_MICROPHONE_FIELD_DIMENSION: {
ALOGV("SET_MICROPHONE_FIELD_DIMENSION");
CHECK_INTERFACE(IMediaRecorder, data, reply);
float zoom = data.readFloat();
- status_t status = setMicrophoneFieldDimension(zoom);
+ status_t status = setPreferredMicrophoneFieldDimension(zoom);
reply->writeInt32(status);
return NO_ERROR;
}
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
index ea0547c..c150407 100644
--- a/media/libmedia/NdkWrapper.cpp
+++ b/media/libmedia/NdkWrapper.cpp
@@ -65,6 +65,7 @@
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL,
AMEDIAFORMAT_KEY_GRID_COLUMNS,
AMEDIAFORMAT_KEY_GRID_ROWS,
+ AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT,
AMEDIAFORMAT_KEY_HEIGHT,
AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD,
AMEDIAFORMAT_KEY_IS_ADTS,
diff --git a/media/libmedia/include/media/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
index 0b09420..f9c557c 100644
--- a/media/libmedia/include/media/IMediaRecorder.h
+++ b/media/libmedia/include/media/IMediaRecorder.h
@@ -73,8 +73,8 @@
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) = 0;
- virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) = 0;
};
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
index 88282ac..a2dff31 100644
--- a/media/libmedia/include/media/MediaRecorderBase.h
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -72,8 +72,8 @@
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) = 0;
- virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction) = 0;
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) const = 0;
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index 8580437..2dd4b7f 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -264,8 +264,8 @@
status_t getRoutedDeviceId(audio_port_handle_t *deviceId);
status_t enableAudioDeviceCallback(bool enabled);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const;
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 6c59a29..4570af9 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -842,14 +842,14 @@
return mMediaRecorder->getActiveMicrophones(activeMicrophones);
}
-status_t MediaRecorder::setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
- return mMediaRecorder->setMicrophoneDirection(direction);
+status_t MediaRecorder::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
+ return mMediaRecorder->setPreferredMicrophoneDirection(direction);
}
-status_t MediaRecorder::setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
- return mMediaRecorder->setMicrophoneFieldDimension(zoom);
+status_t MediaRecorder::setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
+ return mMediaRecorder->setPreferredMicrophoneFieldDimension(zoom);
}
status_t MediaRecorder::getPortId(audio_port_handle_t *portId) const
diff --git a/media/libmedia/xsd/api/current.txt b/media/libmedia/xsd/api/current.txt
index 0924dd9..05e8a49 100644
--- a/media/libmedia/xsd/api/current.txt
+++ b/media/libmedia/xsd/api/current.txt
@@ -45,10 +45,17 @@
ctor public CamcorderProfiles();
method public int getCameraId();
method public java.util.List<media.profiles.EncoderProfile> getEncoderProfile();
+ method public java.util.List<media.profiles.CamcorderProfiles.ImageDecoding> getImageDecoding();
method public java.util.List<media.profiles.CamcorderProfiles.ImageEncoding> getImageEncoding();
method public void setCameraId(int);
}
+ public static class CamcorderProfiles.ImageDecoding {
+ ctor public CamcorderProfiles.ImageDecoding();
+ method public int getMemCap();
+ method public void setMemCap(int);
+ }
+
public static class CamcorderProfiles.ImageEncoding {
ctor public CamcorderProfiles.ImageEncoding();
method public int getQuality();
diff --git a/media/libmedia/xsd/media_profiles.xsd b/media/libmedia/xsd/media_profiles.xsd
index a9687b0..a02252a 100644
--- a/media/libmedia/xsd/media_profiles.xsd
+++ b/media/libmedia/xsd/media_profiles.xsd
@@ -42,6 +42,11 @@
<xs:attribute name="quality" type="xs:int"/>
</xs:complexType>
</xs:element>
+ <xs:element name="ImageDecoding" minOccurs="0" maxOccurs="unbounded">
+ <xs:complexType>
+ <xs:attribute name="memCap" type="xs:int"/>
+ </xs:complexType>
+ </xs:element>
</xs:sequence>
<xs:attribute name="cameraId" type="xs:int"/>
</xs:complexType>
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index d6628d9..9f4265b 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -538,18 +538,19 @@
return NO_INIT;
}
-status_t MediaRecorderClient::setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
+status_t MediaRecorderClient::setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
if (mRecorder != NULL) {
- return mRecorder->setMicrophoneDirection(direction);
+ return mRecorder->setPreferredMicrophoneDirection(direction);
}
return NO_INIT;
}
-status_t MediaRecorderClient::setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+status_t MediaRecorderClient::setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
if (mRecorder != NULL) {
- return mRecorder->setMicrophoneFieldDimension(zoom);
+ return mRecorder->setPreferredMicrophoneFieldDimension(zoom);
}
return NO_INIT;
}
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 8da718f..e698819 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -109,8 +109,8 @@
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones);
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- virtual status_t setMicrophoneFieldDimension(float zoom);
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) override;
private:
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 77777b8..63681fa 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -164,9 +164,12 @@
mAnalyticsItem->setInt32(kRecorderVideoIframeInterval, mIFramesIntervalSec);
// TBD mAudioSourceNode = 0;
// TBD mUse64BitFileOffset = false;
- mAnalyticsItem->setInt32(kRecorderMovieTimescale, mMovieTimeScale);
- mAnalyticsItem->setInt32(kRecorderAudioTimescale, mAudioTimeScale);
- mAnalyticsItem->setInt32(kRecorderVideoTimescale, mVideoTimeScale);
+ if (mMovieTimeScale != -1)
+ mAnalyticsItem->setInt32(kRecorderMovieTimescale, mMovieTimeScale);
+ if (mAudioTimeScale != -1)
+ mAnalyticsItem->setInt32(kRecorderAudioTimescale, mAudioTimeScale);
+ if (mVideoTimeScale != -1)
+ mAnalyticsItem->setInt32(kRecorderVideoTimescale, mVideoTimeScale);
// TBD mCameraId = 0;
// TBD mStartTimeOffsetMs = -1;
mAnalyticsItem->setInt32(kRecorderVideoProfile, mVideoEncoderProfile);
@@ -2210,7 +2213,7 @@
}
status_t StagefrightRecorder::getMetrics(Parcel *reply) {
- ALOGD("StagefrightRecorder::getMetrics");
+ ALOGV("StagefrightRecorder::getMetrics");
if (reply == NULL) {
ALOGE("Null pointer argument");
@@ -2274,20 +2277,20 @@
return NO_INIT;
}
-status_t StagefrightRecorder::setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
+status_t StagefrightRecorder::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
mSelectedMicDirection = direction;
if (mAudioSourceNode != 0) {
- return mAudioSourceNode->setMicrophoneDirection(direction);
+ return mAudioSourceNode->setPreferredMicrophoneDirection(direction);
}
return NO_INIT;
}
-status_t StagefrightRecorder::setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+status_t StagefrightRecorder::setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
mSelectedMicFieldDimension = zoom;
if (mAudioSourceNode != 0) {
- return mAudioSourceNode->setMicrophoneFieldDimension(zoom);
+ return mAudioSourceNode->setPreferredMicrophoneFieldDimension(zoom);
}
return NO_INIT;
}
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 236b19e..8bf083a 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -77,8 +77,8 @@
virtual void setAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- virtual status_t setMicrophoneFieldDimension(float zoom);
+ virtual status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ virtual status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const override;
private:
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 5f86bd3..5194e03 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -510,18 +510,18 @@
return NO_INIT;
}
-status_t AudioSource::setMicrophoneDirection(audio_microphone_direction_t direction) {
- ALOGV("setMicrophoneDirection(%d)", direction);
+status_t AudioSource::setPreferredMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
if (mRecord != 0) {
- return mRecord->setMicrophoneDirection(direction);
+ return mRecord->setPreferredMicrophoneDirection(direction);
}
return NO_INIT;
}
-status_t AudioSource::setMicrophoneFieldDimension(float zoom) {
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+status_t AudioSource::setPreferredMicrophoneFieldDimension(float zoom) {
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
if (mRecord != 0) {
- return mRecord->setMicrophoneFieldDimension(zoom);
+ return mRecord->setPreferredMicrophoneFieldDimension(zoom);
}
return NO_INIT;
}
diff --git a/media/libstagefright/MetaDataUtils.cpp b/media/libstagefright/MetaDataUtils.cpp
index dbc287e..3f0bc7d 100644
--- a/media/libstagefright/MetaDataUtils.cpp
+++ b/media/libstagefright/MetaDataUtils.cpp
@@ -309,7 +309,6 @@
void parseVorbisComment(
AMediaFormat *fileMeta, const char *comment, size_t commentLength) {
// Haptic tag is only kept here as it will only be used in extractor to generate channel mask.
- const char* const haptic = "haptic";
struct {
const char *const mTag;
const char *mKey;
@@ -330,7 +329,7 @@
{ "LYRICIST", AMEDIAFORMAT_KEY_LYRICIST },
{ "METADATA_BLOCK_PICTURE", AMEDIAFORMAT_KEY_ALBUMART },
{ "ANDROID_LOOP", AMEDIAFORMAT_KEY_LOOP },
- { "ANDROID_HAPTIC", haptic },
+ { "ANDROID_HAPTIC", AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT },
};
for (size_t j = 0; j < sizeof(kMap) / sizeof(kMap[0]); ++j) {
@@ -346,12 +345,12 @@
if (!strcasecmp(&comment[tagLen + 1], "true")) {
AMediaFormat_setInt32(fileMeta, AMEDIAFORMAT_KEY_LOOP, 1);
}
- } else if (kMap[j].mKey == haptic) {
+ } else if (kMap[j].mKey == AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT) {
char *end;
errno = 0;
const int hapticChannelCount = strtol(&comment[tagLen + 1], &end, 10);
if (errno == 0) {
- AMediaFormat_setInt32(fileMeta, haptic, hapticChannelCount);
+ AMediaFormat_setInt32(fileMeta, kMap[j].mKey, hapticChannelCount);
} else {
ALOGE("Error(%d) when parsing haptic channel count", errno);
}
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 2aa9ed8..c7b2719 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -967,6 +967,11 @@
if (meta->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
msg->setInt32("pcm-encoding", pcmEncoding);
}
+
+ int32_t hapticChannelCount;
+ if (meta->findInt32(kKeyHapticChannelCount, &hapticChannelCount)) {
+ msg->setInt32("haptic-channel-count", hapticChannelCount);
+ }
}
int32_t maxInputSize;
@@ -1708,6 +1713,11 @@
if (msg->findInt32("pcm-encoding", &pcmEncoding)) {
meta->setInt32(kKeyPcmEncoding, pcmEncoding);
}
+
+ int32_t hapticChannelCount;
+ if (msg->findInt32("haptic-channel-count", &hapticChannelCount)) {
+ meta->setInt32(kKeyHapticChannelCount, hapticChannelCount);
+ }
}
int32_t maxInputSize;
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index d153598..c62c2cd 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -2125,7 +2125,10 @@
size_t offset = 0;
while (offset < buffer->size()) {
const uint8_t *adtsHeader = buffer->data() + offset;
- CHECK_LT(offset + 5, buffer->size());
+ if (buffer->size() <= offset+5) {
+ ALOGV("buffer does not contain a complete header");
+ return ERROR_MALFORMED;
+ }
// non-const pointer for decryption if needed
uint8_t *adtsFrame = buffer->data() + offset;
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index 18e5f10..af04dad 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -70,8 +70,8 @@
status_t removeAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const;
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 0a63140..8dc2dd5 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -236,6 +236,8 @@
kKeyOpaqueCSD0 = 'csd0',
kKeyOpaqueCSD1 = 'csd1',
kKeyOpaqueCSD2 = 'csd2',
+
+ kKeyHapticChannelCount = 'hapC',
};
enum {
diff --git a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
index e32f676..7d446ab 100644
--- a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
+++ b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
@@ -149,6 +149,11 @@
}
// ADTS header is included in the size
+ if (size < adtsHdrSize) {
+ ALOGV("processAAC: size (%zu) < adtsHdrSize (%zu)", size, adtsHdrSize);
+ android_errorWriteLog(0x534e4554, "128433933");
+ return;
+ }
size_t offset = adtsHdrSize;
size_t remainingBytes = size - adtsHdrSize;
diff --git a/media/libstagefright/xmlparser/api/current.txt b/media/libstagefright/xmlparser/api/current.txt
index f5245c1..5443f2c 100644
--- a/media/libstagefright/xmlparser/api/current.txt
+++ b/media/libstagefright/xmlparser/api/current.txt
@@ -1,6 +1,12 @@
// Signature format: 2.0
package media.codecs {
+ public class Alias {
+ ctor public Alias();
+ method public String getName();
+ method public void setName(String);
+ }
+
public class Decoders {
ctor public Decoders();
method public java.util.List<media.codecs.MediaCodec> getMediaCodec();
@@ -23,6 +29,23 @@
method public void setValue(String);
}
+ public class Include {
+ ctor public Include();
+ method public String getHref();
+ method public void setHref(String);
+ }
+
+ public class Included {
+ ctor public Included();
+ method public media.codecs.Decoders getDecoders_optional();
+ method public media.codecs.Encoders getEncoders_optional();
+ method public java.util.List<media.codecs.Include> getInclude_optional();
+ method public media.codecs.Settings getSettings_optional();
+ method public void setDecoders_optional(media.codecs.Decoders);
+ method public void setEncoders_optional(media.codecs.Encoders);
+ method public void setSettings_optional(media.codecs.Settings);
+ }
+
public class Limit {
ctor public Limit();
method public String getIn();
@@ -47,12 +70,13 @@
public class MediaCodec {
ctor public MediaCodec();
- method public java.util.List<media.codecs.Feature> getFeature();
- method public java.util.List<media.codecs.Limit> getLimit();
+ method public java.util.List<media.codecs.Alias> getAlias_optional();
+ method public java.util.List<media.codecs.Feature> getFeature_optional();
+ method public java.util.List<media.codecs.Limit> getLimit_optional();
method public String getName();
- method public java.util.List<media.codecs.Quirk> getQuirk();
- method public java.util.List<media.codecs.Type> getType();
+ method public java.util.List<media.codecs.Quirk> getQuirk_optional();
method public String getType();
+ method public java.util.List<media.codecs.Type> getType_optional();
method public String getUpdate();
method public void setName(String);
method public void setType(String);
@@ -61,9 +85,13 @@
public class MediaCodecs {
ctor public MediaCodecs();
- method public java.util.List<media.codecs.Decoders> getDecoders();
- method public java.util.List<media.codecs.Encoders> getEncoders();
- method public java.util.List<media.codecs.Settings> getSettings();
+ method public media.codecs.Decoders getDecoders_optional();
+ method public media.codecs.Encoders getEncoders_optional();
+ method public java.util.List<media.codecs.Include> getInclude_optional();
+ method public media.codecs.Settings getSettings_optional();
+ method public void setDecoders_optional(media.codecs.Decoders);
+ method public void setEncoders_optional(media.codecs.Encoders);
+ method public void setSettings_optional(media.codecs.Settings);
}
public class Quirk {
@@ -89,6 +117,7 @@
public class Type {
ctor public Type();
+ method public java.util.List<media.codecs.Alias> getAlias();
method public java.util.List<media.codecs.Feature> getFeature();
method public java.util.List<media.codecs.Limit> getLimit();
method public String getName();
@@ -99,7 +128,8 @@
public class XmlParser {
ctor public XmlParser();
- method public static media.codecs.MediaCodecs read(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ method public static media.codecs.Included readIncluded(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
+ method public static media.codecs.MediaCodecs readMediaCodecs(java.io.InputStream) throws javax.xml.datatype.DatatypeConfigurationException, java.io.IOException, org.xmlpull.v1.XmlPullParserException;
method public static String readText(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
method public static void skip(org.xmlpull.v1.XmlPullParser) throws java.io.IOException, org.xmlpull.v1.XmlPullParserException;
}
diff --git a/media/libstagefright/xmlparser/media_codecs.xsd b/media/libstagefright/xmlparser/media_codecs.xsd
index 4faba87..77193a2 100644
--- a/media/libstagefright/xmlparser/media_codecs.xsd
+++ b/media/libstagefright/xmlparser/media_codecs.xsd
@@ -20,11 +20,22 @@
xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="MediaCodecs">
<xs:complexType>
- <xs:sequence>
- <xs:element name="Decoders" type="Decoders" maxOccurs="unbounded"/>
- <xs:element name="Encoders" type="Encoders" maxOccurs="unbounded"/>
- <xs:element name="Settings" type="Settings" maxOccurs="unbounded"/>
- </xs:sequence>
+ <xs:choice minOccurs="0" maxOccurs="unbounded">
+ <xs:element name="Include" type="Include" maxOccurs="unbounded"/>
+ <xs:element name="Settings" type="Settings"/>
+ <xs:element name="Decoders" type="Decoders"/>
+ <xs:element name="Encoders" type="Encoders"/>
+ </xs:choice>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="Included">
+ <xs:complexType>
+ <xs:choice minOccurs="0" maxOccurs="unbounded">
+ <xs:element name="Include" type="Include" maxOccurs="unbounded"/>
+ <xs:element name="Settings" type="Settings"/>
+ <xs:element name="Decoders" type="Decoders"/>
+ <xs:element name="Encoders" type="Encoders"/>
+ </xs:choice>
</xs:complexType>
</xs:element>
<xs:complexType name="Decoders">
@@ -43,12 +54,13 @@
</xs:sequence>
</xs:complexType>
<xs:complexType name="MediaCodec">
- <xs:sequence>
- <xs:element name="Quirk" type="Quirk" maxOccurs="unbounded"/>
- <xs:element name="Type" type="Type" maxOccurs="unbounded"/>
- <xs:element name="Limit" type="Limit" maxOccurs="unbounded"/>
- <xs:element name="Feature" type="Feature" maxOccurs="unbounded"/>
- </xs:sequence>
+ <xs:choice minOccurs="0" maxOccurs="unbounded">
+ <xs:element name="Quirk" type="Quirk" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Type" type="Type" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Alias" type="Alias" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
+ </xs:choice>
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="type" type="xs:string"/>
<xs:attribute name="update" type="xs:string"/>
@@ -58,12 +70,16 @@
</xs:complexType>
<xs:complexType name="Type">
<xs:sequence>
- <xs:element name="Limit" type="Limit" maxOccurs="unbounded"/>
- <xs:element name="Feature" type="Feature" maxOccurs="unbounded"/>
+ <xs:element name="Alias" type="Alias" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Limit" type="Limit" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="Feature" type="Feature" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="update" type="xs:string"/>
</xs:complexType>
+ <xs:complexType name="Alias">
+ <xs:attribute name="name" type="xs:string"/>
+ </xs:complexType>
<xs:complexType name="Limit">
<xs:attribute name="name" type="xs:string"/>
<xs:attribute name="default" type="xs:string"/>
@@ -86,4 +102,7 @@
<xs:attribute name="value" type="xs:string"/>
<xs:attribute name="update" type="xs:string"/>
</xs:complexType>
+ <xs:complexType name="Include">
+ <xs:attribute name="href" type="xs:string"/>
+ </xs:complexType>
</xs:schema>
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index ed88cf3..51138c8 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -324,6 +324,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_GENRE = "genre";
EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLUMNS = "grid-cols";
EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
+EXPORT const char* AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT = "haptic-channel-count";
EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
EXPORT const char* AMEDIAFORMAT_KEY_HDR10_PLUS_INFO = "hdr10-plus-info";
EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 259481d..fd43f36 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -209,6 +209,7 @@
extern const char* AMEDIAFORMAT_KEY_EXIF_SIZE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_FRAME_COUNT __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_GENRE __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_ICC_PROFILE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_IS_SYNC_FRAME __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_LOCATION __INTRODUCED_IN(29);
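
Illustration only (not part of this change): a sketch of how app-side code might read the new key; `format` is assumed to come from AMediaExtractor_getTrackFormat().

    #include <media/NdkMediaFormat.h>

    // Sketch: read the haptic channel count published by the extractor, if any.
    static int32_t hapticChannels(AMediaFormat* format) {
        int32_t count = 0;
        if (!AMediaFormat_getInt32(format,
                                   AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT,
                                   &count)) {
            return 0;  // key absent: the track carries no haptic channels
        }
        return count;
    }
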
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 4725e9e..f666ad0 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -89,6 +89,7 @@
AMEDIAFORMAT_KEY_GENRE; # var introduced=29
AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
+ AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT; # var introduced=29
AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
AMEDIAFORMAT_KEY_HEIGHT; # var introduced=21
AMEDIAFORMAT_KEY_ICC_PROFILE; # var introduced=29
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index ec5dfb1..8ac3366 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -615,9 +615,9 @@
virtual binder::Status stop();
virtual binder::Status getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones);
- virtual binder::Status setMicrophoneDirection(
+ virtual binder::Status setPreferredMicrophoneDirection(
int /*audio_microphone_direction_t*/ direction);
- virtual binder::Status setMicrophoneFieldDimension(float zoom);
+ virtual binder::Status setPreferredMicrophoneFieldDimension(float zoom);
private:
const sp<RecordThread::RecordTrack> mRecordTrack;
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index ab4af33..ec1f86c 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -71,8 +71,8 @@
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
static bool checkServerLatencySupported(
audio_format_t format, audio_input_flags_t flags) {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3ecb37d..e94fb49 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -7718,18 +7718,19 @@
return status;
}
-status_t AudioFlinger::RecordThread::setMicrophoneDirection(audio_microphone_direction_t direction)
+status_t AudioFlinger::RecordThread::setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction)
{
- ALOGV("setMicrophoneDirection(%d)", direction);
+ ALOGV("setPreferredMicrophoneDirection(%d)", direction);
AutoMutex _l(mLock);
- return mInput->stream->setMicrophoneDirection(direction);
+ return mInput->stream->setPreferredMicrophoneDirection(direction);
}
-status_t AudioFlinger::RecordThread::setMicrophoneFieldDimension(float zoom)
+status_t AudioFlinger::RecordThread::setPreferredMicrophoneFieldDimension(float zoom)
{
- ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+ ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
AutoMutex _l(mLock);
- return mInput->stream->setMicrophoneFieldDimension(zoom);
+ return mInput->stream->setPreferredMicrophoneFieldDimension(zoom);
}
void AudioFlinger::RecordThread::updateMetadata_l()
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 47e580b..e5abce7 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1607,8 +1607,8 @@
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
- status_t setMicrophoneDirection(audio_microphone_direction_t direction);
- status_t setMicrophoneFieldDimension(float zoom);
+ status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setPreferredMicrophoneFieldDimension(float zoom);
void updateMetadata_l() override;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 5a43696..fbf8fef 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1838,16 +1838,16 @@
mRecordTrack->getActiveMicrophones(activeMicrophones));
}
-binder::Status AudioFlinger::RecordHandle::setMicrophoneDirection(
+binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
int /*audio_microphone_direction_t*/ direction) {
ALOGV("%s()", __func__);
- return binder::Status::fromStatusT(mRecordTrack->setMicrophoneDirection(
+ return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
static_cast<audio_microphone_direction_t>(direction)));
}
-binder::Status AudioFlinger::RecordHandle::setMicrophoneFieldDimension(float zoom) {
+binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
ALOGV("%s()", __func__);
- return binder::Status::fromStatusT(mRecordTrack->setMicrophoneFieldDimension(zoom));
+ return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
}
// ----------------------------------------------------------------------------
@@ -2144,22 +2144,22 @@
}
}
-status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneDirection(
+status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneDirection(
audio_microphone_direction_t direction) {
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
- return recordThread->setMicrophoneDirection(direction);
+ return recordThread->setPreferredMicrophoneDirection(direction);
} else {
return BAD_VALUE;
}
}
-status_t AudioFlinger::RecordThread::RecordTrack::setMicrophoneFieldDimension(float zoom) {
+status_t AudioFlinger::RecordThread::RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
- return recordThread->setMicrophoneFieldDimension(zoom);
+ return recordThread->setPreferredMicrophoneFieldDimension(zoom);
} else {
return BAD_VALUE;
}
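Note on the rename above: the preferred-microphone setters are plumbed through the whole record path (binder-side RecordHandle -> RecordTrack -> RecordThread -> HAL input stream). Below is a minimal sketch of that delegation chain using simplified stand-in types, not the real AudioFlinger classes:

    // Simplified stand-ins illustrating the delegation chain touched by the
    // rename (RecordTrack -> RecordThread -> HAL input stream). Sketch only.
    #include <cstdio>

    using status_t = int;
    static constexpr status_t NO_ERROR = 0;
    static constexpr status_t BAD_VALUE = -22;  // stand-in for android's BAD_VALUE

    enum audio_microphone_direction_t {
        MIC_DIRECTION_UNSPECIFIED = 0,
        MIC_DIRECTION_FRONT = 1,
        MIC_DIRECTION_BACK = 2,
    };

    struct HalInputStreamSketch {
        status_t setPreferredMicrophoneDirection(audio_microphone_direction_t d) {
            std::printf("HAL: direction=%d\n", d);
            return NO_ERROR;
        }
        status_t setPreferredMicrophoneFieldDimension(float zoom) {
            std::printf("HAL: zoom=%.2f\n", zoom);
            return NO_ERROR;
        }
    };

    struct RecordThreadSketch {
        HalInputStreamSketch* stream;
        status_t setPreferredMicrophoneDirection(audio_microphone_direction_t d) {
            return stream->setPreferredMicrophoneDirection(d);  // thread lock omitted in sketch
        }
        status_t setPreferredMicrophoneFieldDimension(float zoom) {
            return stream->setPreferredMicrophoneFieldDimension(zoom);
        }
    };

    struct RecordTrackSketch {
        RecordThreadSketch* thread;
        status_t setPreferredMicrophoneDirection(audio_microphone_direction_t d) {
            // mirrors the promote-or-BAD_VALUE pattern in RecordTrack
            return thread != nullptr ? thread->setPreferredMicrophoneDirection(d) : BAD_VALUE;
        }
    };

    int main() {
        HalInputStreamSketch hal;
        RecordThreadSketch thread{&hal};
        RecordTrackSketch track{&thread};
        return track.setPreferredMicrophoneDirection(MIC_DIRECTION_FRONT);
    }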
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index a672521..ea6ca39 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -376,15 +376,17 @@
return PERMISSION_DENIED;
}
+ bool canCaptureOutput = captureAudioOutputAllowed(pid, uid);
if ((attr->source == AUDIO_SOURCE_VOICE_UPLINK ||
attr->source == AUDIO_SOURCE_VOICE_DOWNLINK ||
attr->source == AUDIO_SOURCE_VOICE_CALL ||
attr->source == AUDIO_SOURCE_ECHO_REFERENCE) &&
- !captureAudioOutputAllowed(pid, uid)) {
+ !canCaptureOutput) {
return PERMISSION_DENIED;
}
- if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed(pid, uid)) {
+ bool canCaptureHotword = captureHotwordAllowed(pid, uid);
+ if ((attr->source == AUDIO_SOURCE_HOTWORD) && !canCaptureHotword) {
return BAD_VALUE;
}
@@ -415,7 +417,7 @@
case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
// FIXME: use the same permission as for remote submix for now.
case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
- if (!captureAudioOutputAllowed(pid, uid)) {
+ if (!canCaptureOutput) {
ALOGE("getInputForAttr() permission denied: capture not allowed");
status = PERMISSION_DENIED;
}
@@ -442,7 +444,8 @@
}
sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
- *selectedDeviceId, opPackageName);
+ *selectedDeviceId, opPackageName,
+ canCaptureOutput, canCaptureHotword);
mAudioRecordClients.add(*portId, client);
}
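The two capture permissions are now evaluated once in getInputForAttr() and the results cached on the client record, so later policy passes can consult them without re-querying per client. A hypothetical sketch of that "check once, cache on the client" pattern (the helper and struct names below are stand-ins, not AudioPolicy code):

    #include <sys/types.h>

    static bool checkPermissionOnce(const char* /*permission*/, pid_t /*pid*/, uid_t /*uid*/) {
        // The real code queries the permission controller; always false in this sketch.
        return false;
    }

    struct RecordClientSketch {
        const bool canCaptureOutput;
        const bool canCaptureHotword;
        RecordClientSketch(pid_t pid, uid_t uid)
            : canCaptureOutput(
                  checkPermissionOnce("android.permission.CAPTURE_AUDIO_OUTPUT", pid, uid)),
              canCaptureHotword(
                  checkPermissionOnce("android.permission.CAPTURE_AUDIO_HOTWORD", pid, uid)) {}
    };

    int main() {
        RecordClientSketch client(/*pid=*/1000, /*uid=*/10001);
        return (client.canCaptureOutput || client.canCaptureHotword) ? 0 : 1;
    }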
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 8cbf3af..e858e8d 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -414,32 +414,35 @@
{
// Go over all active clients and allow capture (does not force silence) in the
// following cases:
-// The client is the assistant
+// Another client in the same UID has already been allowed to capture
+// OR The client is the assistant
// AND an accessibility service is on TOP
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR uses VOICE_RECOGNITION AND is on TOP OR latest started
// OR uses HOTWORD
-// AND there is no privacy sensitive active capture
+// AND there is no active privacy sensitive capture or call
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
// OR The client is an accessibility service
// AND is on TOP OR latest started
// AND the source is VOICE_RECOGNITION or HOTWORD
-// OR the source is one of: AUDIO_SOURCE_VOICE_DOWNLINK, AUDIO_SOURCE_VOICE_UPLINK,
-// AUDIO_SOURCE_VOICE_CALL
+// OR the client source is virtual (remote submix, call audio TX or RX...)
// OR Any other client
// AND The assistant is not on TOP
-// AND is on TOP OR latest started
-// AND there is no privacy sensitive active capture
+// AND there is no active privacy sensitive capture or call
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
//TODO: manage pre-processing effects according to use case priority
sp<AudioRecordClient> topActive;
sp<AudioRecordClient> latestActive;
sp<AudioRecordClient> latestSensitiveActive;
+
nsecs_t topStartNs = 0;
nsecs_t latestStartNs = 0;
nsecs_t latestSensitiveStartNs = 0;
bool isA11yOnTop = mUidPolicy->isA11yOnTop();
bool isAssistantOnTop = false;
bool isSensitiveActive = false;
+ bool isInCall = mPhoneState == AUDIO_MODE_IN_CALL;
// if Sensor Privacy is enabled then all recordings should be silenced.
if (mSensorPrivacyPolicy->isSensorPrivacyEnabled()) {
@@ -449,15 +452,18 @@
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
- if (!current->active) continue;
- if (isPrivacySensitiveSource(current->attributes.source)) {
- if (current->startTimeNs > latestSensitiveStartNs) {
- latestSensitiveActive = current;
- latestSensitiveStartNs = current->startTimeNs;
- }
- isSensitiveActive = true;
+ if (!current->active) {
+ continue;
}
- if (mUidPolicy->getUidState(current->uid) == ActivityManager::PROCESS_STATE_TOP) {
+
+ app_state_t appState = apmStatFromAmState(mUidPolicy->getUidState(current->uid));
+    // clients whose app is in IDLE state are not eligible for top active or
+    // latest active
+ if (appState == APP_STATE_IDLE) {
+ continue;
+ }
+
+ if (appState == APP_STATE_TOP) {
if (current->startTimeNs > topStartNs) {
topActive = current;
topStartNs = current->startTimeNs;
@@ -470,72 +476,105 @@
latestActive = current;
latestStartNs = current->startTimeNs;
}
+ if (isPrivacySensitiveSource(current->attributes.source)) {
+ if (current->startTimeNs > latestSensitiveStartNs) {
+ latestSensitiveActive = current;
+ latestSensitiveStartNs = current->startTimeNs;
+ }
+ isSensitiveActive = true;
+ }
}
- if (topActive == nullptr && latestActive == nullptr) {
- return;
+ // if no active client with UI on Top, consider latest active as top
+ if (topActive == nullptr) {
+ topActive = latestActive;
}
- if (topActive != nullptr) {
- latestActive = nullptr;
- }
+ std::vector<uid_t> enabledUids;
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
- if (!current->active) continue;
+ if (!current->active) {
+ continue;
+ }
+
+ // keep capture allowed if another client with the same UID has already
+ // been allowed to capture
+ if (std::find(enabledUids.begin(), enabledUids.end(), current->uid)
+ != enabledUids.end()) {
+ continue;
+ }
audio_source_t source = current->attributes.source;
- bool isOnTop = current == topActive;
- bool isLatest = current == latestActive;
- bool isLatestSensitive = current == latestSensitiveActive;
- bool forceIdle = true;
+ bool isTopOrLatestActive = topActive == nullptr ? false : current->uid == topActive->uid;
+ bool isLatestSensitive = latestSensitiveActive == nullptr ?
+ false : current->uid == latestSensitiveActive->uid;
+
+ // By default allow capture if:
+ // The assistant is not on TOP
+ // AND there is no active privacy sensitive capture or call
+ // OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+ bool allowCapture = !isAssistantOnTop
+ && !(isSensitiveActive && !(isLatestSensitive || current->canCaptureOutput))
+ && !(isInCall && !current->canCaptureOutput);
if (isVirtualSource(source)) {
- forceIdle = false;
+ // Allow capture for virtual (remote submix, call audio TX or RX...) sources
+ allowCapture = true;
} else if (mUidPolicy->isAssistantUid(current->uid)) {
+ // For assistant allow capture if:
+ // An accessibility service is on TOP
+ // AND the source is VOICE_RECOGNITION or HOTWORD
+ // OR is on TOP OR latest started AND uses VOICE_RECOGNITION
+ // OR uses HOTWORD
+ // AND there is no active privacy sensitive capture or call
+ // OR client has CAPTURE_AUDIO_OUTPUT privileged permission
if (isA11yOnTop) {
if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
- forceIdle = false;
+ allowCapture = true;
}
} else {
- if ((((isOnTop || isLatest) && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
- source == AUDIO_SOURCE_HOTWORD) && !isSensitiveActive) {
- forceIdle = false;
+ if (((isTopOrLatestActive && source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
+ source == AUDIO_SOURCE_HOTWORD) &&
+ (!(isSensitiveActive || isInCall) || current->canCaptureOutput)) {
+ allowCapture = true;
}
}
} else if (mUidPolicy->isA11yUid(current->uid)) {
- if ((isOnTop || isLatest) &&
- (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
- forceIdle = false;
- }
- } else {
- if (!isAssistantOnTop && (isOnTop || isLatest) &&
- (!isSensitiveActive || isLatestSensitive)) {
- forceIdle = false;
+ // For accessibility service allow capture if:
+ // Is on TOP OR latest started
+ // AND the source is VOICE_RECOGNITION or HOTWORD
+ if (isTopOrLatestActive &&
+ (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
+ allowCapture = true;
}
}
setAppState_l(current->uid,
- forceIdle ? APP_STATE_IDLE :
- apmStatFromAmState(mUidPolicy->getUidState(current->uid)));
+ allowCapture ? apmStatFromAmState(mUidPolicy->getUidState(current->uid)) :
+ APP_STATE_IDLE);
+ if (allowCapture) {
+ enabledUids.push_back(current->uid);
+ }
}
}
void AudioPolicyService::silenceAllRecordings_l() {
for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
- setAppState_l(current->uid, APP_STATE_IDLE);
+ if (!isVirtualSource(current->attributes.source)) {
+ setAppState_l(current->uid, APP_STATE_IDLE);
+ }
}
}
/* static */
app_state_t AudioPolicyService::apmStatFromAmState(int amState) {
- switch (amState) {
- case ActivityManager::PROCESS_STATE_UNKNOWN:
+
+ if (amState == ActivityManager::PROCESS_STATE_UNKNOWN) {
return APP_STATE_IDLE;
- case ActivityManager::PROCESS_STATE_TOP:
- return APP_STATE_TOP;
- default:
- break;
+ } else if (amState <= ActivityManager::PROCESS_STATE_TOP) {
+ // include persistent services
+ return APP_STATE_TOP;
}
return APP_STATE_FOREGROUND;
}
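Two behavioral points in the hunk above are worth calling out: once one client in a given UID is allowed to capture, the other active clients of that UID keep the same decision (the enabledUids vector), and any process state at or above TOP now maps to APP_STATE_TOP. A standalone sketch of the per-UID short-circuit, with an illustrative client struct only:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct ClientSketch {
        uint32_t uid;
        bool active;
    };

    static std::vector<uint32_t> collectAllowedUids(const std::vector<ClientSketch>& clients) {
        std::vector<uint32_t> enabledUids;
        for (const auto& client : clients) {
            if (!client.active) {
                continue;
            }
            // Keep capture allowed if another client with the same UID was
            // already allowed in this pass.
            if (std::find(enabledUids.begin(), enabledUids.end(), client.uid)
                    != enabledUids.end()) {
                continue;
            }
            bool allowCapture = true;  // the real policy decision is elided here
            if (allowCapture) {
                enabledUids.push_back(client.uid);
            }
        }
        return enabledUids;
    }

    int main() {
        std::vector<ClientSketch> clients = {{10001, true}, {10001, true}, {10002, false}};
        return collectAllowedUids(clients).size() == 1 ? 0 : 1;
    }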
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index a2e75cd..160f70f 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -753,13 +753,17 @@
AudioRecordClient(const audio_attributes_t attributes,
const audio_io_handle_t io, uid_t uid, pid_t pid,
const audio_session_t session, const audio_port_handle_t deviceId,
- const String16& opPackageName) :
+ const String16& opPackageName,
+ bool canCaptureOutput, bool canCaptureHotword) :
AudioClient(attributes, io, uid, pid, session, deviceId),
- opPackageName(opPackageName), startTimeNs(0) {}
+ opPackageName(opPackageName), startTimeNs(0),
+ canCaptureOutput(canCaptureOutput), canCaptureHotword(canCaptureHotword) {}
~AudioRecordClient() override = default;
const String16 opPackageName; // client package name
nsecs_t startTimeNs;
+ const bool canCaptureOutput;
+ const bool canCaptureHotword;
};
// --- AudioPlaybackClient ---
diff --git a/services/camera/libcameraservice/hidl/Convert.cpp b/services/camera/libcameraservice/hidl/Convert.cpp
index a87812b..c2ed23a 100644
--- a/services/camera/libcameraservice/hidl/Convert.cpp
+++ b/services/camera/libcameraservice/hidl/Convert.cpp
@@ -97,6 +97,21 @@
return outputConfiguration;
}
+hardware::camera2::params::SessionConfiguration convertFromHidl(
+ const HSessionConfiguration &hSessionConfiguration) {
+ hardware::camera2::params::SessionConfiguration sessionConfig(
+ hSessionConfiguration.inputWidth, hSessionConfiguration.inputHeight,
+ hSessionConfiguration.inputFormat,
+ static_cast<int>(hSessionConfiguration.operationMode));
+
+ for (const auto& hConfig : hSessionConfiguration.outputStreams) {
+ hardware::camera2::params::OutputConfiguration config = convertFromHidl(hConfig);
+ sessionConfig.addOutputConfiguration(config);
+ }
+
+ return sessionConfig;
+}
+
// The camera metadata here is cloned. Since we're reading metadata over
// hwbinder we would need to clone it in order to avoid alignment issues.
bool convertFromHidl(const HCameraMetadata &src, CameraMetadata *dst) {
diff --git a/services/camera/libcameraservice/hidl/Convert.h b/services/camera/libcameraservice/hidl/Convert.h
index 82937a3..79683f6 100644
--- a/services/camera/libcameraservice/hidl/Convert.h
+++ b/services/camera/libcameraservice/hidl/Convert.h
@@ -53,6 +53,7 @@
using HOutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
using HPhysicalCameraSettings = frameworks::cameraservice::device::V2_0::PhysicalCameraSettings;
using HPhysicalCaptureResultInfo = frameworks::cameraservice::device::V2_0::PhysicalCaptureResultInfo;
+using HSessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
using HSubmitInfo = frameworks::cameraservice::device::V2_0::SubmitInfo;
using HStatus = frameworks::cameraservice::common::V2_0::Status;
using HStreamConfigurationMode = frameworks::cameraservice::device::V2_0::StreamConfigurationMode;
@@ -70,6 +71,9 @@
hardware::camera2::params::OutputConfiguration convertFromHidl(
const HOutputConfiguration &hOutputConfiguration);
+hardware::camera2::params::SessionConfiguration convertFromHidl(
+ const HSessionConfiguration &hSessionConfiguration);
+
HCameraDeviceStatus convertToHidlCameraDeviceStatus(int32_t status);
void convertToHidl(const std::vector<hardware::CameraStatus> &src,
diff --git a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
index d22ba5a..675ad24 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.cpp
@@ -41,6 +41,7 @@
using hardware::Void;
using HSubmitInfo = device::V2_0::SubmitInfo;
using hardware::camera2::params::OutputConfiguration;
+using hardware::camera2::params::SessionConfiguration;
static constexpr int32_t CAMERA_REQUEST_METADATA_QUEUE_SIZE = 1 << 20 /* 1 MB */;
static constexpr int32_t CAMERA_RESULT_METADATA_QUEUE_SIZE = 1 << 20 /* 1 MB */;
@@ -255,6 +256,18 @@
return B2HStatus(ret);
}
+Return<void> HidlCameraDeviceUser::isSessionConfigurationSupported(
+ const HSessionConfiguration& hSessionConfiguration,
+ isSessionConfigurationSupported_cb _hidl_cb) {
+ bool supported = false;
+ SessionConfiguration sessionConfiguration = convertFromHidl(hSessionConfiguration);
+ binder::Status ret = mDeviceRemote->isSessionConfigurationSupported(
+ sessionConfiguration, &supported);
+ HStatus status = B2HStatus(ret);
+ _hidl_cb(status, supported);
+ return Void();
+}
+
} // implementation
} // V2_0
} // device
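Usage note for the new HIDL entry point: the answer is delivered through the generated callback. A hedged client-side sketch of how a holder of the V2_0 ICameraDeviceUser proxy might call it; obtaining `deviceUser` and populating `config` are assumptions outside this change:

    #include <android/frameworks/cameraservice/device/2.0/ICameraDeviceUser.h>

    using ::android::sp;
    using ::android::frameworks::cameraservice::common::V2_0::Status;
    using ::android::frameworks::cameraservice::device::V2_0::ICameraDeviceUser;
    using ::android::frameworks::cameraservice::device::V2_0::SessionConfiguration;

    static bool isConfigSupported(const sp<ICameraDeviceUser>& deviceUser,
                                  const SessionConfiguration& config) {
        bool result = false;
        auto ret = deviceUser->isSessionConfigurationSupported(
                config,
                [&result](Status status, bool supported) {
                    result = (status == Status::NO_ERROR) && supported;
                });
        // ret.isOk() covers transport failures; `result` carries the service answer.
        return ret.isOk() && result;
    }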
diff --git a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h
index be8f1d6..c3a80fe 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h
+++ b/services/camera/libcameraservice/hidl/HidlCameraDeviceUser.h
@@ -53,6 +53,7 @@
using HCameraDeviceUser = device::V2_0::ICameraDeviceUser;
using HCameraMetadata = cameraservice::service::V2_0::CameraMetadata;
using HCaptureRequest = device::V2_0::CaptureRequest;
+using HSessionConfiguration = frameworks::cameraservice::device::V2_0::SessionConfiguration;
using HOutputConfiguration = frameworks::cameraservice::device::V2_0::OutputConfiguration;
using HPhysicalCameraSettings = frameworks::cameraservice::device::V2_0::PhysicalCameraSettings;
using HStatus = frameworks::cameraservice::common::V2_0::Status;
@@ -97,6 +98,10 @@
virtual Return<HStatus> updateOutputConfiguration(
int32_t streamId, const HOutputConfiguration& outputConfiguration) override;
+ virtual Return<void> isSessionConfigurationSupported(
+ const HSessionConfiguration& sessionConfiguration,
+ isSessionConfigurationSupported_cb _hidl_cb) override;
+
bool initStatus() { return mInitSuccess; }
std::shared_ptr<CaptureResultMetadataQueue> getCaptureResultMetadataQueue() {