Merge "Add conversion for new AudioDescriptor standards"
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index d50566d..da4484a 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -97,6 +97,10 @@
return mMirrorMode;
}
+bool OutputConfiguration::useReadoutTimestamp() const {
+ return mUseReadoutTimestamp;
+}
+
OutputConfiguration::OutputConfiguration() :
mRotation(INVALID_ROTATION),
mSurfaceSetID(INVALID_SET_ID),
@@ -110,7 +114,8 @@
mColorSpace(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED),
mStreamUseCase(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT),
mTimestampBase(TIMESTAMP_BASE_DEFAULT),
- mMirrorMode(MIRROR_MODE_AUTO) {
+ mMirrorMode(MIRROR_MODE_AUTO),
+ mUseReadoutTimestamp(false) {
}
OutputConfiguration::OutputConfiguration(const android::Parcel& parcel) :
@@ -220,6 +225,12 @@
return err;
}
+ int useReadoutTimestamp = 0;
+ if ((err = parcel->readInt32(&useReadoutTimestamp)) != OK) {
+ ALOGE("%s: Failed to read useReadoutTimestamp flag from parcel", __FUNCTION__);
+ return err;
+ }
+
mRotation = rotation;
mSurfaceSetID = setID;
mSurfaceType = surfaceType;
@@ -231,6 +242,7 @@
mStreamUseCase = streamUseCase;
mTimestampBase = timestampBase;
mMirrorMode = mirrorMode;
+ mUseReadoutTimestamp = useReadoutTimestamp != 0;
for (auto& surface : surfaceShims) {
ALOGV("%s: OutputConfiguration: %p, name %s", __FUNCTION__,
surface.graphicBufferProducer.get(),
@@ -244,10 +256,10 @@
ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d,"
" physicalCameraId = %s, isMultiResolution = %d, streamUseCase = %" PRId64
- ", timestampBase = %d, mirrorMode = %d",
+ ", timestampBase = %d, mirrorMode = %d, useReadoutTimestamp = %d",
__FUNCTION__, mRotation, mSurfaceSetID, mSurfaceType,
String8(mPhysicalCameraId).string(), mIsMultiResolution, mStreamUseCase, timestampBase,
- mMirrorMode);
+ mMirrorMode, mUseReadoutTimestamp);
return err;
}
@@ -267,6 +279,7 @@
mStreamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
mTimestampBase = TIMESTAMP_BASE_DEFAULT;
mMirrorMode = MIRROR_MODE_AUTO;
+ mUseReadoutTimestamp = false;
}
OutputConfiguration::OutputConfiguration(
@@ -280,7 +293,7 @@
mColorSpace(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED),
mStreamUseCase(ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT),
mTimestampBase(TIMESTAMP_BASE_DEFAULT),
- mMirrorMode(MIRROR_MODE_AUTO) { }
+ mMirrorMode(MIRROR_MODE_AUTO), mUseReadoutTimestamp(false) { }
status_t OutputConfiguration::writeToParcel(android::Parcel* parcel) const {
@@ -342,6 +355,9 @@
err = parcel->writeInt32(mMirrorMode);
if (err != OK) return err;
+ err = parcel->writeInt32(mUseReadoutTimestamp ? 1 : 0);
+ if (err != OK) return err;
+
return OK;
}
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index a713b40..16fddb5 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -44,8 +44,7 @@
TIMESTAMP_BASE_MONOTONIC = 2,
TIMESTAMP_BASE_REALTIME = 3,
TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED = 4,
- TIMESTAMP_BASE_READOUT_SENSOR = 5,
- TIMESTAMP_BASE_MAX = TIMESTAMP_BASE_READOUT_SENSOR,
+ TIMESTAMP_BASE_MAX = TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED,
};
enum MirrorModeType {
MIRROR_MODE_AUTO = 0,
@@ -69,6 +68,7 @@
int64_t getStreamUseCase() const;
int getTimestampBase() const;
int getMirrorMode() const;
+ bool useReadoutTimestamp() const;
// set of sensor pixel mode resolutions allowed {MAX_RESOLUTION, DEFAULT_MODE};
const std::vector<int32_t>& getSensorPixelModesUsed() const;
@@ -115,7 +115,8 @@
mColorSpace == other.mColorSpace &&
mStreamUseCase == other.mStreamUseCase &&
mTimestampBase == other.mTimestampBase &&
- mMirrorMode == other.mMirrorMode);
+ mMirrorMode == other.mMirrorMode &&
+ mUseReadoutTimestamp == other.mUseReadoutTimestamp);
}
bool operator != (const OutputConfiguration& other) const {
return !(*this == other);
@@ -167,6 +168,9 @@
if (mMirrorMode != other.mMirrorMode) {
return mMirrorMode < other.mMirrorMode;
}
+ if (mUseReadoutTimestamp != other.mUseReadoutTimestamp) {
+ return mUseReadoutTimestamp < other.mUseReadoutTimestamp;
+ }
return gbpsLessThan(other);
}
@@ -196,6 +200,7 @@
int64_t mStreamUseCase;
int mTimestampBase;
int mMirrorMode;
+ bool mUseReadoutTimestamp;
};
} // namespace params
} // namespace camera2
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 2869669..152b786 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -75,6 +75,7 @@
ACAMERA_AUTOMOTIVE,
ACAMERA_AUTOMOTIVE_LENS,
ACAMERA_EXTENSION,
+ ACAMERA_JPEGR,
ACAMERA_SECTION_COUNT,
ACAMERA_VENDOR = 0x8000
@@ -121,6 +122,7 @@
ACAMERA_AUTOMOTIVE_START = ACAMERA_AUTOMOTIVE << 16,
ACAMERA_AUTOMOTIVE_LENS_START = ACAMERA_AUTOMOTIVE_LENS << 16,
ACAMERA_EXTENSION_START = ACAMERA_EXTENSION << 16,
+ ACAMERA_JPEGR_START = ACAMERA_JPEGR << 16,
ACAMERA_VENDOR_START = ACAMERA_VENDOR << 16
} acamera_metadata_section_start_t;
@@ -7513,6 +7515,145 @@
ACAMERA_AUTOMOTIVE_LENS_START,
ACAMERA_AUTOMOTIVE_LENS_END,
+ /**
+ * <p>The available Jpeg/R stream
+ * configurations that this camera device supports
+ * (i.e. format, width, height, output/input stream).</p>
+ *
+ * <p>Type: int32[n*4] (acamera_metadata_enum_android_jpegr_available_jpeg_r_stream_configurations_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>The configurations are listed as <code>(format, width, height, input?)</code> tuples.</p>
+ * <p>If the camera device supports Jpeg/R, it will support the same stream combinations with
+ * Jpeg/R as it does with P010. The stream combinations with Jpeg/R (or P010) supported
+ * by the device are determined by the device's hardware level and capabilities.</p>
+ * <p>All the static, control, and dynamic metadata tags related to JPEG apply to Jpeg/R formats.
+ * Configuring JPEG and Jpeg/R streams at the same time is not supported.</p>
+ * <p>All the configuration tuples <code>(format, width, height, input?)</code> will contain
+ * AIMAGE_FORMAT_JPEGR format as OUTPUT only.</p>
+ */
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS = // int32[n*4] (acamera_metadata_enum_android_jpegr_available_jpeg_r_stream_configurations_t)
+ ACAMERA_JPEGR_START,
+ /**
+ * <p>This lists the minimum frame duration for each
+ * format/size combination for Jpeg/R output formats.</p>
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>This should correspond to the frame duration when only that
+ * stream is active, with all processing (typically in android.*.mode)
+ * set to either OFF or FAST.</p>
+ * <p>When multiple streams are used in a request, the minimum frame
+ * duration will be max(individual stream min durations).</p>
+ * <p>See ACAMERA_SENSOR_FRAME_DURATION and
+ * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS for more details about
+ * calculating the max frame rate.</p>
+ *
+ * @see ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS
+ * @see ACAMERA_SENSOR_FRAME_DURATION
+ */
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_MIN_FRAME_DURATIONS = // int64[4*n]
+ ACAMERA_JPEGR_START + 1,
+ /**
+ * <p>This lists the maximum stall duration for each
+ * output format/size combination for Jpeg/R streams.</p>
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>A stall duration is how much extra time would get added
+ * to the normal minimum frame duration for a repeating request
+ * that has streams with non-zero stall.</p>
+ * <p>This functions similarly to
+ * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS for Jpeg/R
+ * streams.</p>
+ * <p>All Jpeg/R output stream formats may have a nonzero stall
+ * duration.</p>
+ *
+ * @see ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS
+ */
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_STALL_DURATIONS = // int64[4*n]
+ ACAMERA_JPEGR_START + 2,
+ /**
+ * <p>The available Jpeg/R stream
+ * configurations that this camera device supports
+ * (i.e. format, width, height, output/input stream).</p>
+ *
+ * <p>Type: int32[n*4] (acamera_metadata_enum_android_jpegr_available_jpeg_r_stream_configurations_maximum_resolution_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>Refer to ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS for details.</p>
+ * <p>All the configuration tuples <code>(format, width, height, input?)</code> will contain
+ * AIMAGE_FORMAT_JPEGR format as OUTPUT only.</p>
+ *
+ * @see ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS
+ */
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION =
+ // int32[n*4] (acamera_metadata_enum_android_jpegr_available_jpeg_r_stream_configurations_maximum_resolution_t)
+ ACAMERA_JPEGR_START + 3,
+ /**
+ * <p>This lists the minimum frame duration for each
+ * format/size combination for Jpeg/R output formats for CaptureRequests where
+ * ACAMERA_SENSOR_PIXEL_MODE is set to
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>.</p>
+ *
+ * @see ACAMERA_SENSOR_PIXEL_MODE
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>Refer to ACAMERA_JPEGR_AVAILABLE_JPEG_R_MIN_FRAME_DURATIONS for details.</p>
+ *
+ * @see ACAMERA_JPEGR_AVAILABLE_JPEG_R_MIN_FRAME_DURATIONS
+ */
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION =
+ // int64[4*n]
+ ACAMERA_JPEGR_START + 4,
+ /**
+ * <p>This lists the maximum stall duration for each
+ * output format/size combination for Jpeg/R streams for CaptureRequests where
+ * ACAMERA_SENSOR_PIXEL_MODE is set to
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraMetadata.html#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION">CameraMetadata#SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION</a>.</p>
+ *
+ * @see ACAMERA_SENSOR_PIXEL_MODE
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>Refer to ACAMERA_JPEGR_AVAILABLE_JPEG_R_STALL_DURATIONS for details.</p>
+ *
+ * @see ACAMERA_JPEGR_AVAILABLE_JPEG_R_STALL_DURATIONS
+ */
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_STALL_DURATIONS_MAXIMUM_RESOLUTION =
+ // int64[4*n]
+ ACAMERA_JPEGR_START + 5,
+ ACAMERA_JPEGR_END,
+
} acamera_metadata_tag_t;
/**
@@ -10991,6 +11132,25 @@
+// ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS
+typedef enum acamera_metadata_enum_acamera_jpegr_available_jpeg_r_stream_configurations {
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS_OUTPUT = 0,
+
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS_INPUT = 1,
+
+} acamera_metadata_enum_android_jpegr_available_jpeg_r_stream_configurations_t;
+
+// ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION
+typedef enum acamera_metadata_enum_acamera_jpegr_available_jpeg_r_stream_configurations_maximum_resolution {
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION_OUTPUT
+ = 0,
+
+ ACAMERA_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION_INPUT
+ = 1,
+
+} acamera_metadata_enum_android_jpegr_available_jpeg_r_stream_configurations_maximum_resolution_t;
+
+
__END_DECLS
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index 1667d5b..1e1a49d 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -35,6 +35,7 @@
"CryptoHalAidl.cpp",
"DrmUtils.cpp",
"DrmHalListener.cpp",
+ "DrmStatus.cpp",
],
local_include_dirs: [
diff --git a/drm/libmediadrm/DrmStatus.cpp b/drm/libmediadrm/DrmStatus.cpp
new file mode 100644
index 0000000..0258801
--- /dev/null
+++ b/drm/libmediadrm/DrmStatus.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <mediadrm/DrmStatus.h>
+#include <json/json.h>
+
+namespace android {
+
+DrmStatus::DrmStatus(status_t err, const char *msg) : mStatus(err) {
+ Json::Value errorDetails;
+ Json::Reader reader;
+ if (!reader.parse(msg, errorDetails)) {
+ mErrMsg = msg;
+ return;
+ }
+
+ std::string errMsg;
+ auto val = errorDetails["cdmError"];
+ if (!val.isNull()) {
+ mCdmErr = val.asInt();
+ }
+ val = errorDetails["oemError"];
+ if (!val.isNull()) {
+ mOemErr = val.asInt();
+ }
+ val = errorDetails["context"];
+ if (!val.isNull()) {
+ mCtx = val.asInt();
+ }
+ val = errorDetails["errorMessage"];
+ if (!val.isNull()) {
+ mErrMsg = val.asString();
+ } else {
+ mErrMsg = msg;
+ }
+}
+
+} // namespace android
diff --git a/drm/libmediadrm/DrmUtils.cpp b/drm/libmediadrm/DrmUtils.cpp
index c144fce..cb103f7 100644
--- a/drm/libmediadrm/DrmUtils.cpp
+++ b/drm/libmediadrm/DrmUtils.cpp
@@ -363,18 +363,23 @@
}
} // namespace
-std::string GetExceptionMessage(status_t err, const char* msg,
+std::string GetExceptionMessage(const DrmStatus &err, const char* defaultMsg,
const Vector<::V1_4::LogMessage>& logs) {
std::string ruler("==============================");
std::string header("Beginning of DRM Plugin Log");
std::string footer("End of DRM Plugin Log");
+ std::string msg(err.getErrorMessage());
String8 msg8;
- if (msg) {
- msg8 += msg;
+ if (!msg.empty()) {
+ msg8 += msg.c_str();
+ msg8 += ": ";
+ } else if (defaultMsg) {
+ msg8 += defaultMsg;
msg8 += ": ";
}
- auto errStr = StrCryptoError(err);
- msg8 += errStr.c_str();
+ msg8 += StrCryptoError(err).c_str();
+ msg8 += String8::format("\ncdm err: %d, oem err: %d, ctx: %d",
+ err.getCdmErr(), err.getOemErr(), err.getContext());
msg8 += String8::format("\n%s %s %s", ruler.c_str(), header.c_str(), ruler.c_str());
for (auto log : logs) {
@@ -543,31 +548,7 @@
break;
}
- Json::Value errorDetails;
- Json::Reader reader;
- if (!reader.parse(statusAidl.getMessage(), errorDetails)) {
- return status;
- }
-
- int32_t cdmErr{}, oemErr{}, ctx{};
- std::string errMsg;
- auto val = errorDetails["cdmError"];
- if (!val.isNull()) {
- cdmErr = val.asInt();
- }
- val = errorDetails["oemError"];
- if (!val.isNull()) {
- oemErr = val.asInt();
- }
- val = errorDetails["context"];
- if (!val.isNull()) {
- ctx = val.asInt();
- }
- val = errorDetails["errorMessage"];
- if (!val.isNull()) {
- errMsg = val.asString();
- }
- return DrmStatus(status, cdmErr, oemErr, ctx, errMsg);
+ return DrmStatus(status, statusAidl.getMessage());
}
LogBuffer gLogBuf;
diff --git a/drm/libmediadrm/include/mediadrm/DrmStatus.h b/drm/libmediadrm/include/mediadrm/DrmStatus.h
index 1155af6..15826ca 100644
--- a/drm/libmediadrm/include/mediadrm/DrmStatus.h
+++ b/drm/libmediadrm/include/mediadrm/DrmStatus.h
@@ -32,6 +32,7 @@
int32_t ctx = 0, std::string errMsg = "")
: mStatus(status), mCdmErr(cdmErr), mOemErr(oemErr),
mCtx(ctx), mErrMsg(errMsg) {}
+ DrmStatus(status_t err, const char *msg);
operator status_t() const { return mStatus; }
int32_t getCdmErr() const { return mCdmErr; }
int32_t getOemErr() const { return mOemErr; }
@@ -41,7 +42,7 @@
bool operator!=(status_t other) const { return mStatus != other; }
private:
- status_t mStatus;
+ status_t mStatus{};
int32_t mCdmErr{}, mOemErr{}, mCtx{};
std::string mErrMsg;
};
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index 847fc41..0044bac 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -283,14 +283,16 @@
return toStatusT(err);
}
-std::string GetExceptionMessage(status_t err, const char *msg,
+std::string GetExceptionMessage(const DrmStatus & err, const char *defaultMsg,
const Vector<::V1_4::LogMessage> &logs);
template<typename T>
-std::string GetExceptionMessage(status_t err, const char *msg, const sp<T> &iface) {
+std::string GetExceptionMessage(const DrmStatus &err, const char *defaultMsg, const sp<T> &iface) {
Vector<::V1_4::LogMessage> logs;
- iface->getLogMessages(logs);
- return GetExceptionMessage(err, msg, logs);
+ if (iface != NULL) {
+ iface->getLogMessages(logs);
+ }
+ return GetExceptionMessage(err, defaultMsg, logs);
}
} // namespace DrmUtils
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 4bf8dce..b54d35d 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -226,6 +226,9 @@
if (buffer->meta()->findInt32("tunnel-first-frame", &tmp) && tmp) {
tunnelFirstFrame = true;
}
+ if (buffer->meta()->findInt32("decode-only", &tmp) && tmp) {
+ flags |= C2FrameData::FLAG_DROP_FRAME;
+ }
ALOGV("[%s] queueInputBuffer: buffer->size() = %zu", mName, buffer->size());
std::list<std::unique_ptr<C2Work>> items;
std::unique_ptr<C2Work> work(new C2Work);
@@ -1995,6 +1998,12 @@
drop = true;
}
+ // Workaround: if C2FrameData::FLAG_DROP_FRAME is not implemented in
+ // HAL, the flag is then removed in the corresponding output buffer.
+ if (work->input.flags & C2FrameData::FLAG_DROP_FRAME) {
+ flags |= BUFFER_FLAG_DECODE_ONLY;
+ }
+
if (notifyClient && !buffer && !flags) {
if (mTunneled && drop && outputFormat) {
ALOGV("[%s] onWorkDone: Keep tunneled, drop frame with format change (%lld)",
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
index 7193ff3..a3ce58c 100644
--- a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
@@ -40,7 +40,7 @@
ratio.reduce();
mNumerator = ratio.getNumerator();
mDenominator = ratio.getDenominator();
- mIntegerPhase = mDenominator;
+ mIntegerPhase = mDenominator; // so we start with a write needed
}
// static factory method
diff --git a/media/libaaudio/src/flowgraph/resampler/README.md b/media/libaaudio/src/flowgraph/resampler/README.md
index ea319c7..356f06c 100644
--- a/media/libaaudio/src/flowgraph/resampler/README.md
+++ b/media/libaaudio/src/flowgraph/resampler/README.md
@@ -40,7 +40,7 @@
For example, suppose you are converting from 44100 Hz to 48000 Hz and using an input buffer with 960 frames. If you calculate the number of output frames you get:
- 960 * 48000 * 44100 = 1044.897959...
+ 960.0 * 48000 / 44100 = 1044.897959...
You cannot generate a fractional number of frames. So the resampler will sometimes generate 1044 frames and sometimes 1045 frames. On average it will generate 1044.897959 frames. The resampler stores the fraction internally and keeps track of when to consume or generate a frame.
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
index 42d0ca2..a14ee47 100644
--- a/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
@@ -24,9 +24,10 @@
: MultiChannelResampler(builder)
, mSingleFrame2(builder.getChannelCount()) {
assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
- mNumRows = kMaxCoefficients / getNumTaps(); // no guard row needed
- mPhaseScaler = (double) mNumRows / mDenominator;
- double phaseIncrement = 1.0 / mNumRows;
+ mNumRows = kMaxCoefficients / getNumTaps(); // includes guard row
+ const int32_t numRowsNoGuard = mNumRows - 1;
+ mPhaseScaler = (double) numRowsNoGuard / mDenominator;
+ const double phaseIncrement = 1.0 / numRowsNoGuard;
generateCoefficients(builder.getInputRate(),
builder.getOutputRate(),
mNumRows,
@@ -40,39 +41,31 @@
std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
// Determine indices into coefficients table.
- double tablePhase = getIntegerPhase() * mPhaseScaler;
- int index1 = static_cast<int>(floor(tablePhase));
- if (index1 >= mNumRows) { // no guard row needed because we wrap the indices
- tablePhase -= mNumRows;
- index1 -= mNumRows;
- }
-
- int index2 = index1 + 1;
- if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
- index2 -= mNumRows;
- }
-
- float *coefficients1 = &mCoefficients[static_cast<size_t>(index1)
- * static_cast<size_t>(getNumTaps())];
- float *coefficients2 = &mCoefficients[static_cast<size_t>(index2)
- * static_cast<size_t>(getNumTaps())];
+ const double tablePhase = getIntegerPhase() * mPhaseScaler;
+ const int indexLow = static_cast<int>(floor(tablePhase));
+ const int indexHigh = indexLow + 1; // OK because using a guard row.
+ assert (indexHigh < mNumRows);
+ float *coefficientsLow = &mCoefficients[static_cast<size_t>(indexLow)
+ * static_cast<size_t>(getNumTaps())];
+ float *coefficientsHigh = &mCoefficients[static_cast<size_t>(indexHigh)
+ * static_cast<size_t>(getNumTaps())];
float *xFrame = &mX[static_cast<size_t>(mCursor) * static_cast<size_t>(getChannelCount())];
- for (int i = 0; i < mNumTaps; i++) {
- float coefficient1 = *coefficients1++;
- float coefficient2 = *coefficients2++;
+ for (int tap = 0; tap < mNumTaps; tap++) {
+ const float coefficientLow = *coefficientsLow++;
+ const float coefficientHigh = *coefficientsHigh++;
for (int channel = 0; channel < getChannelCount(); channel++) {
- float sample = *xFrame++;
- mSingleFrame[channel] += sample * coefficient1;
- mSingleFrame2[channel] += sample * coefficient2;
+ const float sample = *xFrame++;
+ mSingleFrame[channel] += sample * coefficientLow;
+ mSingleFrame2[channel] += sample * coefficientHigh;
}
}
// Interpolate and copy to output.
- float fraction = tablePhase - index1;
+ const float fraction = tablePhase - indexLow;
for (int channel = 0; channel < getChannelCount(); channel++) {
- float low = mSingleFrame[channel];
- float high = mSingleFrame2[channel];
+ const float low = mSingleFrame[channel];
+ const float high = mSingleFrame2[channel];
frame[channel] = low + (fraction * (high - low));
}
}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
index 432137e..d459abf 100644
--- a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
@@ -57,9 +57,6 @@
float *coefficients1 = &mCoefficients[static_cast<size_t>(index1)
* static_cast<size_t>(getNumTaps())];
int index2 = (index1 + 1);
- if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
- index2 = 0;
- }
float *coefficients2 = &mCoefficients[static_cast<size_t>(index2)
* static_cast<size_t>(getNumTaps())];
float *xFrame = &mX[static_cast<size_t>(mCursor) * static_cast<size_t>(getChannelCount())];
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 438be0a..24041bc 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -228,3 +228,12 @@
"liblog",
],
}
+
+cc_test {
+ name: "test_resampler",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_resampler.cpp"],
+ shared_libs: [
+ "libaaudio_internal",
+ ],
+}
diff --git a/media/libaaudio/tests/test_resampler.cpp b/media/libaaudio/tests/test_resampler.cpp
new file mode 100644
index 0000000..1e4f59c
--- /dev/null
+++ b/media/libaaudio/tests/test_resampler.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Test FlowGraph
+ *
+ * This file also tests a few different conversion techniques because
+ * sometimes they have caused compiler bugs.
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+
+#include "flowgraph/resampler/MultiChannelResampler.h"
+
+using namespace RESAMPLER_OUTER_NAMESPACE::resampler;
+
+// Measure zero crossings.
+static int32_t countZeroCrossingsWithHysteresis(float *input, int32_t numSamples) {
+ const float kHysteresisLevel = 0.25f;
+ int zeroCrossingCount = 0;
+ int state = 0; // can be -1, 0, +1
+ for (int i = 0; i < numSamples; i++) {
+ if (input[i] >= kHysteresisLevel) {
+ if (state < 0) {
+ zeroCrossingCount++;
+ }
+ state = 1;
+ } else if (input[i] <= -kHysteresisLevel) {
+ if (state > 0) {
+ zeroCrossingCount++;
+ }
+ state = -1;
+ }
+ }
+ return zeroCrossingCount;
+}
+
+static constexpr int kChannelCount = 1;
+
+/**
+ * Convert a sine wave and then look for glitches.
+ * Glitches have a high value in the second derivative.
+ */
+static void checkResampler(int32_t sourceRate, int32_t sinkRate,
+ MultiChannelResampler::Quality quality) {
+ const int kNumOutputSamples = 10000;
+ const double framesPerCycle = 81.379; // target output period
+
+ int numInputSamples = kNumOutputSamples * sourceRate / sinkRate;
+
+ std::unique_ptr<float[]> inputBuffer = std::make_unique<float[]>(numInputSamples);
+ std::unique_ptr<float[]> outputBuffer = std::make_unique<float[]>(kNumOutputSamples);
+
+ // Generate a sine wave for input.
+ const double kPhaseIncrement = 2.0 * sinkRate / (framesPerCycle * sourceRate);
+ double phase = 0.0;
+ for (int i = 0; i < numInputSamples; i++) {
+ inputBuffer[i] = sin(phase * M_PI);
+ phase += kPhaseIncrement;
+ while (phase > 1.0) {
+ phase -= 2.0;
+ }
+ }
+ int sourceZeroCrossingCount = countZeroCrossingsWithHysteresis(
+ inputBuffer.get(), numInputSamples);
+
+ // Use a MultiChannelResampler to convert from the sourceRate to the sinkRate.
+ std::unique_ptr<MultiChannelResampler> mcResampler;
+ mcResampler.reset(MultiChannelResampler::make(kChannelCount,
+ sourceRate,
+ sinkRate,
+ quality));
+ int inputFramesLeft = numInputSamples;
+ int numRead = 0;
+ float *input = inputBuffer.get(); // for iteration
+ float *output = outputBuffer.get();
+ while (inputFramesLeft > 0) {
+ if (mcResampler->isWriteNeeded()) {
+ mcResampler->writeNextFrame(input);
+ input++;
+ inputFramesLeft--;
+ } else {
+ mcResampler->readNextFrame(output);
+ output++;
+ numRead++;
+ }
+ }
+
+ ASSERT_LE(numRead, kNumOutputSamples);
+ // Some frames are lost priming the FIR filter.
+ const int kMaxAlgorithmicFrameLoss = 16;
+ EXPECT_GT(numRead, kNumOutputSamples - kMaxAlgorithmicFrameLoss);
+
+ int sinkZeroCrossingCount = countZeroCrossingsWithHysteresis(outputBuffer.get(), numRead);
+ // Some cycles may get chopped off at the end.
+ const int kMaxZeroCrossingDelta = 3;
+ EXPECT_LE(abs(sourceZeroCrossingCount - sinkZeroCrossingCount), kMaxZeroCrossingDelta);
+
+ // Detect glitches by looking for spikes in the second derivative.
+ output = outputBuffer.get();
+ float previousValue = output[0];
+ float previousSlope = output[1] - output[0];
+ for (int i = 0; i < numRead; i++) {
+ float slope = output[i] - previousValue;
+ float slopeDelta = fabs(slope - previousSlope);
+ // Skip a few samples because there are often some steep slope changes at the beginning.
+ if (i > 10) {
+ EXPECT_LT(slopeDelta, 0.1);
+ }
+ previousValue = output[i];
+ previousSlope = slope;
+ }
+
+#if 0
+ // Save to disk for inspection.
+ FILE *fp = fopen( "/sdcard/Download/src_float_out.raw" , "wb" );
+ fwrite(outputBuffer.get(), sizeof(float), numRead, fp );
+ fclose(fp);
+#endif
+}
+
+
+TEST(test_resampler, resampler_scan_all) {
+ // TODO Add 64000, 88200, 96000 when they work. Failing now.
+ const int rates[] = {8000, 11025, 22050, 32000, 44100, 48000};
+ const MultiChannelResampler::Quality qualities[] =
+ {
+ MultiChannelResampler::Quality::Fastest,
+ MultiChannelResampler::Quality::Low,
+ MultiChannelResampler::Quality::Medium,
+ MultiChannelResampler::Quality::High,
+ MultiChannelResampler::Quality::Best
+ };
+ for (int srcRate : rates) {
+ for (int destRate : rates) {
+ for (auto quality : qualities) {
+ if (srcRate != destRate) {
+ checkResampler(srcRate, destRate, quality);
+ }
+ }
+ }
+ }
+}
+
+TEST(test_resampler, resampler_8000_11025_best) {
+ checkResampler(8000, 11025, MultiChannelResampler::Quality::Best);
+}
+TEST(test_resampler, resampler_8000_48000_best) {
+ checkResampler(8000, 48000, MultiChannelResampler::Quality::Best);
+}
+
+TEST(test_resampler, resampler_8000_44100_best) {
+ checkResampler(8000, 44100, MultiChannelResampler::Quality::Best);
+}
+
+TEST(test_resampler, resampler_11025_24000_best) {
+ checkResampler(11025, 24000, MultiChannelResampler::Quality::Best);
+}
+
+TEST(test_resampler, resampler_11025_48000_fastest) {
+ checkResampler(11025, 48000, MultiChannelResampler::Quality::Fastest);
+}
+TEST(test_resampler, resampler_11025_48000_low) {
+ checkResampler(11025, 48000, MultiChannelResampler::Quality::Low);
+}
+TEST(test_resampler, resampler_11025_48000_medium) {
+ checkResampler(11025, 48000, MultiChannelResampler::Quality::Medium);
+}
+TEST(test_resampler, resampler_11025_48000_high) {
+ checkResampler(11025, 48000, MultiChannelResampler::Quality::High);
+}
+
+TEST(test_resampler, resampler_11025_48000_best) {
+ checkResampler(11025, 48000, MultiChannelResampler::Quality::Best);
+}
+
+TEST(test_resampler, resampler_11025_44100_best) {
+ checkResampler(11025, 44100, MultiChannelResampler::Quality::Best);
+}
+
+// TODO This fails because the output is very low.
+//TEST(test_resampler, resampler_11025_88200_best) {
+// checkResampler(11025, 88200, MultiChannelResampler::Quality::Best);
+//}
+
+TEST(test_resampler, resampler_16000_48000_best) {
+ checkResampler(16000, 48000, MultiChannelResampler::Quality::Best);
+}
+
+TEST(test_resampler, resampler_44100_48000_low) {
+ checkResampler(44100, 48000, MultiChannelResampler::Quality::Low);
+}
+TEST(test_resampler, resampler_44100_48000_best) {
+ checkResampler(44100, 48000, MultiChannelResampler::Quality::Best);
+}
+
+// Look for glitches when downsampling.
+TEST(test_resampler, resampler_48000_11025_best) {
+ checkResampler(48000, 11025, MultiChannelResampler::Quality::Best);
+}
+TEST(test_resampler, resampler_48000_44100_best) {
+ checkResampler(48000, 44100, MultiChannelResampler::Quality::Best);
+}
+TEST(test_resampler, resampler_44100_11025_best) {
+ checkResampler(44100, 11025, MultiChannelResampler::Quality::Best);
+}
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 9fb0290..5ea4926 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -417,7 +417,7 @@
}
ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
- const media::AudioPortConfig& aidl) {
+ const media::AudioPortConfigFw& aidl) {
audio_port_config legacy{};
legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
@@ -457,9 +457,9 @@
return legacy;
}
-ConversionResult<media::AudioPortConfig> legacy2aidl_audio_port_config_AudioPortConfig(
+ConversionResult<media::AudioPortConfigFw> legacy2aidl_audio_port_config_AudioPortConfig(
const audio_port_config& legacy) {
- media::AudioPortConfig aidl;
+ media::AudioPortConfigFw aidl;
aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
@@ -492,7 +492,7 @@
}
ConversionResult<struct audio_patch> aidl2legacy_AudioPatch_audio_patch(
- const media::AudioPatch& aidl) {
+ const media::AudioPatchFw& aidl) {
struct audio_patch legacy;
legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_patch_handle_t(aidl.id));
legacy.num_sinks = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.sinks.size()));
@@ -514,9 +514,9 @@
return legacy;
}
-ConversionResult<media::AudioPatch> legacy2aidl_audio_patch_AudioPatch(
+ConversionResult<media::AudioPatchFw> legacy2aidl_audio_patch_AudioPatch(
const struct audio_patch& legacy) {
- media::AudioPatch aidl;
+ media::AudioPatchFw aidl;
aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_patch_handle_t_int32_t(legacy.id));
if (legacy.num_sinks > AUDIO_PATCH_PORTS_MAX) {
@@ -930,7 +930,7 @@
}
ConversionResult<audio_port_v7>
-aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl) {
+aidl2legacy_AudioPort_audio_port_v7(const media::AudioPortFw& aidl) {
audio_port_v7 legacy;
legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
@@ -975,9 +975,9 @@
return legacy;
}
-ConversionResult<media::AudioPort>
+ConversionResult<media::AudioPortFw>
legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy) {
- media::AudioPort aidl;
+ media::AudioPortFw aidl;
aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 4a1ba57..4679731 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -287,11 +287,11 @@
"aidl/android/media/AudioHalVersion.aidl",
"aidl/android/media/AudioIoConfigEvent.aidl",
"aidl/android/media/AudioIoDescriptor.aidl",
- "aidl/android/media/AudioPatch.aidl",
+ "aidl/android/media/AudioPatchFw.aidl",
"aidl/android/media/AudioPlaybackRate.aidl",
- "aidl/android/media/AudioPort.aidl",
+ "aidl/android/media/AudioPortFw.aidl",
"aidl/android/media/AudioPortSys.aidl",
- "aidl/android/media/AudioPortConfig.aidl",
+ "aidl/android/media/AudioPortConfigFw.aidl",
"aidl/android/media/AudioPortConfigSys.aidl",
"aidl/android/media/AudioPortDeviceExtSys.aidl",
"aidl/android/media/AudioPortExtSys.aidl",
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 1f107a6..5a1dc64 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -1086,6 +1086,8 @@
responseAidl.secondaryOutputs, aidl2legacy_int32_t_audio_io_handle_t));
*isSpatialized = responseAidl.isSpatialized;
*isBitPerfect = responseAidl.isBitPerfect;
+ *attr = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioAttributesInternal_audio_attributes_t(responseAidl.attr));
return OK;
}
@@ -1549,7 +1551,7 @@
legacy2aidl_audio_port_type_t_AudioPortType(type));
Int numPortsAidl;
numPortsAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_ports));
- std::vector<media::AudioPort> portsAidl;
+ std::vector<media::AudioPortFw> portsAidl;
int32_t generationAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1568,7 +1570,7 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::AudioPort portAidl;
+ media::AudioPortFw portAidl;
RETURN_STATUS_IF_ERROR(
statusTFromBinderStatus(aps->getAudioPort(port->id, &portAidl)));
*port = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPort_audio_port_v7(portAidl));
@@ -1584,7 +1586,7 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::AudioPatch patchAidl = VALUE_OR_RETURN_STATUS(
+ media::AudioPatchFw patchAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_patch_AudioPatch(*patch));
int32_t handleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_handle_t_int32_t(*handle));
RETURN_STATUS_IF_ERROR(
@@ -1615,7 +1617,7 @@
Int numPatchesAidl;
numPatchesAidl.value = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_patches));
- std::vector<media::AudioPatch> patchesAidl;
+ std::vector<media::AudioPatchFw> patchesAidl;
int32_t generationAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
@@ -1635,7 +1637,7 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::AudioPortConfig configAidl = VALUE_OR_RETURN_STATUS(
+ media::AudioPortConfigFw configAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_config_AudioPortConfig(*config));
return statusTFromBinderStatus(aps->setAudioPortConfig(configAidl));
}
@@ -1856,7 +1858,7 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- media::AudioPortConfig sourceAidl = VALUE_OR_RETURN_STATUS(
+ media::AudioPortConfigFw sourceAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_config_AudioPortConfig(*source));
media::AudioAttributesInternal attributesAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attributes));
@@ -2457,6 +2459,32 @@
return af->getSupportedLatencyModes(output, modes);
}
+status_t AudioSystem::setBluetoothVariableLatencyEnabled(bool enabled) {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->setBluetoothVariableLatencyEnabled(enabled);
+}
+
+status_t AudioSystem::isBluetoothVariableLatencyEnabled(
+ bool *enabled) {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->isBluetoothVariableLatencyEnabled(enabled);
+}
+
+status_t AudioSystem::supportsBluetoothVariableLatency(
+ bool *support) {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->supportsBluetoothVariableLatency(support);
+}
+
class CaptureStateListenerImpl : public media::BnCaptureStateListener,
public IBinder::DeathRecipient {
public:
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 141c84f..d895f80 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -493,12 +493,6 @@
return statusTFromBinderStatus(mDelegate->closeInput(inputAidl));
}
-status_t AudioFlingerClientAdapter::invalidateStream(audio_stream_type_t stream) {
- AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
- return statusTFromBinderStatus(mDelegate->invalidateStream(streamAidl));
-}
-
status_t AudioFlingerClientAdapter::setVoiceVolume(float volume) {
return statusTFromBinderStatus(mDelegate->setVoiceVolume(volume));
}
@@ -679,8 +673,9 @@
}
status_t AudioFlingerClientAdapter::getAudioPort(struct audio_port_v7* port) {
- media::AudioPort portAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_v7_AudioPort(*port));
- media::AudioPort aidlRet;
+ media::AudioPortFw portAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_v7_AudioPort(*port));
+ media::AudioPortFw aidlRet;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
mDelegate->getAudioPort(portAidl, &aidlRet)));
*port = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPort_audio_port_v7(aidlRet));
@@ -689,7 +684,8 @@
status_t AudioFlingerClientAdapter::createAudioPatch(const struct audio_patch* patch,
audio_patch_handle_t* handle) {
- media::AudioPatch patchAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_AudioPatch(*patch));
+ media::AudioPatchFw patchAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_patch_AudioPatch(*patch));
int32_t aidlRet = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_handle_t_int32_t(
AUDIO_PATCH_HANDLE_NONE));
if (handle != nullptr) {
@@ -710,7 +706,7 @@
status_t AudioFlingerClientAdapter::listAudioPatches(unsigned int* num_patches,
struct audio_patch* patches) {
- std::vector<media::AudioPatch> aidlRet;
+ std::vector<media::AudioPatchFw> aidlRet;
int32_t maxPatches = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_patches));
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
mDelegate->listAudioPatches(maxPatches, &aidlRet)));
@@ -720,7 +716,7 @@
}
status_t AudioFlingerClientAdapter::setAudioPortConfig(const struct audio_port_config* config) {
- media::AudioPortConfig configAidl = VALUE_OR_RETURN_STATUS(
+ media::AudioPortConfigFw configAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_config_AudioPortConfig(*config));
return statusTFromBinderStatus(mDelegate->setAudioPortConfig(configAidl));
}
@@ -818,7 +814,7 @@
status_t AudioFlingerClientAdapter::setDeviceConnectedState(
const struct audio_port_v7 *port, bool connected) {
- media::AudioPort aidlPort = VALUE_OR_RETURN_STATUS(
+ media::AudioPortFw aidlPort = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_v7_AudioPort(*port));
return statusTFromBinderStatus(mDelegate->setDeviceConnectedState(aidlPort, connected));
}
@@ -850,12 +846,46 @@
return NO_ERROR;
}
+status_t AudioFlingerClientAdapter::setBluetoothVariableLatencyEnabled(bool enabled) {
+ return statusTFromBinderStatus(mDelegate->setBluetoothVariableLatencyEnabled(enabled));
+}
+
+status_t AudioFlingerClientAdapter::isBluetoothVariableLatencyEnabled(bool* enabled) {
+ if (enabled == nullptr) {
+ return BAD_VALUE;
+ }
+
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ mDelegate->isBluetoothVariableLatencyEnabled(enabled)));
+
+ return NO_ERROR;
+}
+
+status_t AudioFlingerClientAdapter::supportsBluetoothVariableLatency(bool* support) {
+ if (support == nullptr) {
+ return BAD_VALUE;
+ }
+
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ mDelegate->supportsBluetoothVariableLatency(support)));
+
+ return NO_ERROR;
+}
+
status_t AudioFlingerClientAdapter::getSoundDoseInterface(
const sp<media::ISoundDoseCallback> &callback,
sp<media::ISoundDose>* soundDose) {
return statusTFromBinderStatus(mDelegate->getSoundDoseInterface(callback, soundDose));
}
+status_t AudioFlingerClientAdapter::invalidateTracks(
+ const std::vector<audio_port_handle_t>& portIds) {
+ std::vector<int32_t> portIdsAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<int32_t>>(
+ portIds, legacy2aidl_audio_port_handle_t_int32_t));
+ return statusTFromBinderStatus(mDelegate->invalidateTracks(portIdsAidl));
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
// AudioFlingerServerAdapter
AudioFlingerServerAdapter::AudioFlingerServerAdapter(
@@ -1088,12 +1118,6 @@
return Status::fromStatusT(mDelegate->closeInput(inputLegacy));
}
-Status AudioFlingerServerAdapter::invalidateStream(AudioStreamType stream) {
- audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
- aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
- return Status::fromStatusT(mDelegate->invalidateStream(streamLegacy));
-}
-
Status AudioFlingerServerAdapter::setVoiceVolume(float volume) {
return Status::fromStatusT(mDelegate->setVoiceVolume(volume));
}
@@ -1230,15 +1254,15 @@
return Status::fromStatusT(mDelegate->setLowRamDevice(isLowRamDevice, totalMemory));
}
-Status AudioFlingerServerAdapter::getAudioPort(const media::AudioPort& port,
- media::AudioPort* _aidl_return) {
+Status AudioFlingerServerAdapter::getAudioPort(const media::AudioPortFw& port,
+ media::AudioPortFw* _aidl_return) {
audio_port_v7 portLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPort_audio_port_v7(port));
RETURN_BINDER_IF_ERROR(mDelegate->getAudioPort(&portLegacy));
*_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_port_v7_AudioPort(portLegacy));
return Status::ok();
}
-Status AudioFlingerServerAdapter::createAudioPatch(const media::AudioPatch& patch,
+Status AudioFlingerServerAdapter::createAudioPatch(const media::AudioPatchFw& patch,
int32_t* _aidl_return) {
audio_patch patchLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPatch_audio_patch(patch));
audio_patch_handle_t handleLegacy = VALUE_OR_RETURN_BINDER(
@@ -1255,7 +1279,7 @@
}
Status AudioFlingerServerAdapter::listAudioPatches(int32_t maxCount,
- std::vector<media::AudioPatch>* _aidl_return) {
+ std::vector<media::AudioPatchFw>* _aidl_return) {
unsigned int count = VALUE_OR_RETURN_BINDER(convertIntegral<unsigned int>(maxCount));
count = std::min(count, static_cast<unsigned int>(MAX_ITEMS_PER_LIST));
std::unique_ptr<audio_patch[]> patchesLegacy(new audio_patch[count]);
@@ -1267,7 +1291,7 @@
return Status::ok();
}
-Status AudioFlingerServerAdapter::setAudioPortConfig(const media::AudioPortConfig& config) {
+Status AudioFlingerServerAdapter::setAudioPortConfig(const media::AudioPortConfigFw& config) {
audio_port_config configLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_AudioPortConfig_audio_port_config(config));
return Status::fromStatusT(mDelegate->setAudioPortConfig(&configLegacy));
@@ -1347,7 +1371,7 @@
}
Status AudioFlingerServerAdapter::setDeviceConnectedState(
- const media::AudioPort& port, bool connected) {
+ const media::AudioPortFw& port, bool connected) {
audio_port_v7 portLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPort_audio_port_v7(port));
return Status::fromStatusT(mDelegate->setDeviceConnectedState(&portLegacy, connected));
}
@@ -1376,6 +1400,18 @@
return Status::ok();
}
+Status AudioFlingerServerAdapter::setBluetoothVariableLatencyEnabled(bool enabled) {
+ return Status::fromStatusT(mDelegate->setBluetoothVariableLatencyEnabled(enabled));
+}
+
+Status AudioFlingerServerAdapter::isBluetoothVariableLatencyEnabled(bool *enabled) {
+ return Status::fromStatusT(mDelegate->isBluetoothVariableLatencyEnabled(enabled));
+}
+
+Status AudioFlingerServerAdapter::supportsBluetoothVariableLatency(bool *support) {
+ return Status::fromStatusT(mDelegate->supportsBluetoothVariableLatency(support));
+}
+
Status AudioFlingerServerAdapter::getSoundDoseInterface(
const sp<media::ISoundDoseCallback>& callback,
sp<media::ISoundDose>* soundDose)
@@ -1383,4 +1419,12 @@
return Status::fromStatusT(mDelegate->getSoundDoseInterface(callback, soundDose));
}
+Status AudioFlingerServerAdapter::invalidateTracks(const std::vector<int32_t>& portIds) {
+ std::vector<audio_port_handle_t> portIdsLegacy = VALUE_OR_RETURN_BINDER(
+ convertContainer<std::vector<audio_port_handle_t>>(
+ portIds, aidl2legacy_int32_t_audio_port_handle_t));
+ RETURN_BINDER_IF_ERROR(mDelegate->invalidateTracks(portIdsLegacy));
+ return Status::ok();
+}
+
} // namespace android
diff --git a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
index b01f902..5dd898c 100644
--- a/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioIoDescriptor.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioPatch;
+import android.media.AudioPatchFw;
import android.media.audio.common.AudioChannelLayout;
import android.media.audio.common.AudioFormatDescription;
@@ -26,7 +26,7 @@
parcelable AudioIoDescriptor {
/** Interpreted as audio_io_handle_t. */
int ioHandle;
- AudioPatch patch;
+ AudioPatchFw patch;
boolean isInput;
int samplingRate;
AudioFormatDescription format;
diff --git a/media/libaudioclient/aidl/android/media/AudioPatch.aidl b/media/libaudioclient/aidl/android/media/AudioPatchFw.aidl
similarity index 74%
rename from media/libaudioclient/aidl/android/media/AudioPatch.aidl
rename to media/libaudioclient/aidl/android/media/AudioPatchFw.aidl
index 8519faf..9ec3fa9 100644
--- a/media/libaudioclient/aidl/android/media/AudioPatch.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPatchFw.aidl
@@ -16,17 +16,19 @@
package android.media;
-import android.media.AudioPortConfig;
+import android.media.AudioPortConfigFw;
/**
* {@hide}
+ * The Fw suffix is used to break a namespace collision with an SDK API.
+ * It contains the framework version of AudioPatch.
*/
-parcelable AudioPatch {
+parcelable AudioPatchFw {
/**
* Patch unique ID.
* Interpreted as audio_patch_handle_t.
*/
int id;
- AudioPortConfig[] sources;
- AudioPortConfig[] sinks;
+ AudioPortConfigFw[] sources;
+ AudioPortConfigFw[] sinks;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl b/media/libaudioclient/aidl/android/media/AudioPortConfigFw.aidl
similarity index 89%
rename from media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortConfigFw.aidl
index 3a4ca31..e7565d7 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortConfig.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortConfigFw.aidl
@@ -21,8 +21,9 @@
/**
* {@hide}
+ * Suffixed with Fw to avoid name conflict with SDK class.
*/
-parcelable AudioPortConfig {
+parcelable AudioPortConfigFw {
AudioPortConfig hal;
AudioPortConfigSys sys;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPort.aidl b/media/libaudioclient/aidl/android/media/AudioPortFw.aidl
similarity index 88%
rename from media/libaudioclient/aidl/android/media/AudioPort.aidl
rename to media/libaudioclient/aidl/android/media/AudioPortFw.aidl
index ff177c0..5580e35 100644
--- a/media/libaudioclient/aidl/android/media/AudioPort.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortFw.aidl
@@ -21,8 +21,9 @@
/**
* {@hide}
+ * The Fw suffix is used to break a namespace collision with an SDK API.
*/
-parcelable AudioPort {
+parcelable AudioPortFw {
AudioPort hal;
AudioPortSys sys;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
index f3b5c19..756c469 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
@@ -17,7 +17,7 @@
package android.media;
import android.media.AudioGainSys;
-import android.media.AudioPortConfig;
+import android.media.AudioPortConfigFw;
import android.media.AudioPortExtSys;
import android.media.AudioPortRole;
import android.media.AudioPortType;
@@ -36,7 +36,7 @@
/** System-only parameters for each AudioGain from 'port.gains'. */
AudioGainSys[] gains;
/** Current audio port configuration. */
- AudioPortConfig activeConfig;
+ AudioPortConfigFw activeConfig;
/** System-only extra parameters for 'port.ext'. */
AudioPortExtSys ext;
}
diff --git a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
index 385f787..9d44bb0 100644
--- a/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
+++ b/media/libaudioclient/aidl/android/media/GetOutputForAttrResponse.aidl
@@ -18,7 +18,7 @@
import android.media.audio.common.AudioConfigBase;
import android.media.audio.common.AudioStreamType;
-
+import android.media.AudioAttributesInternal;
/**
* {@hide}
*/
@@ -37,4 +37,6 @@
/** The suggested audio config if fails to get an output. **/
AudioConfigBase configBase;
boolean isBitPerfect;
+ /** The corrected audio attributes. **/
+ AudioAttributesInternal attr;
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index 4383b9e..7deb384 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -16,9 +16,9 @@
package android.media;
-import android.media.AudioPatch;
-import android.media.AudioPort;
-import android.media.AudioPortConfig;
+import android.media.AudioPatchFw;
+import android.media.AudioPortFw;
+import android.media.AudioPortConfigFw;
import android.media.AudioUniqueIdUse;
import android.media.AudioVibratorInfo;
import android.media.CreateEffectRequest;
@@ -134,8 +134,6 @@
OpenInputResponse openInput(in OpenInputRequest request);
void closeInput(int /* audio_io_handle_t */ input);
- void invalidateStream(AudioStreamType stream);
-
void setVoiceVolume(float volume);
RenderPosition getRenderPosition(int /* audio_io_handle_t */ output);
@@ -184,18 +182,18 @@
void setLowRamDevice(boolean isLowRamDevice, long totalMemory);
/* Get attributes for a given audio port */
- AudioPort getAudioPort(in AudioPort port);
+ AudioPortFw getAudioPort(in AudioPortFw port);
/* Create an audio patch between several source and sink ports */
- int /* audio_patch_handle_t */ createAudioPatch(in AudioPatch patch);
+ int /* audio_patch_handle_t */ createAudioPatch(in AudioPatchFw patch);
/* Release an audio patch */
void releaseAudioPatch(int /* audio_patch_handle_t */ handle);
/* List existing audio patches */
- AudioPatch[] listAudioPatches(int maxCount);
+ AudioPatchFw[] listAudioPatches(int maxCount);
/* Set audio port configuration */
- void setAudioPortConfig(in AudioPortConfig config);
+ void setAudioPortConfig(in AudioPortConfigFw config);
/* Get the HW synchronization source used for an audio session */
int /* audio_hw_sync_t */ getAudioHwSyncForSession(int /* audio_session_t */ sessionId);
@@ -229,7 +227,7 @@
int getAAudioHardwareBurstMinUsec();
- void setDeviceConnectedState(in AudioPort devicePort, boolean connected);
+ void setDeviceConnectedState(in AudioPortFw devicePort, boolean connected);
/**
* Requests a given latency mode (See LatencyMode.aidl) on an output stream.
@@ -249,11 +247,36 @@
LatencyMode[] getSupportedLatencyModes(int output);
/**
+ * Requests if the implementation supports controlling the latency modes
+ * over the Bluetooth A2DP or LE Audio links. If it does,
+ * setRequestedLatencyMode() and getSupportedLatencyModes() APIs can also be used
+ * for streams routed to Bluetooth and not just for the spatializer output.
+ */
+ boolean supportsBluetoothVariableLatency();
+
+ /**
+ * Enables or disables the variable Bluetooth latency control mechanism in the
+ * audio framework and the audio HAL. This does not apply to the latency mode control
+ * on the spatializer output as this is a built-in feature.
+ */
+ void setBluetoothVariableLatencyEnabled(boolean enabled);
+
+ /**
+ * Indicates if the variable Bluetooth latency control mechanism is enabled or disabled.
+ */
+ boolean isBluetoothVariableLatencyEnabled();
+
+ /**
* Registers the sound dose callback and returns the interface for executing
* sound dose methods on the audio server.
*/
ISoundDose getSoundDoseInterface(in ISoundDoseCallback callback);
+ /**
+ * Invalidate all tracks with given port ids.
+ */
+ void invalidateTracks(in int[] /* audio_port_handle_t[] */ portIds);
+
// When adding a new method, please review and update
// IAudioFlinger.h AudioFlingerServerAdapter::Delegate::TransactionCode
// AudioFlinger.cpp AudioFlinger::onTransactWrapper()
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index e0db8f9..ec5097a 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -24,12 +24,12 @@
import android.media.AudioMix;
import android.media.AudioMixerAttributesInternal;
import android.media.AudioOffloadMode;
-import android.media.AudioPatch;
+import android.media.AudioPatchFw;
import android.media.AudioPolicyDeviceState;
import android.media.AudioPolicyForcedConfig;
import android.media.AudioPolicyForceUse;
-import android.media.AudioPort;
-import android.media.AudioPortConfig;
+import android.media.AudioPortFw;
+import android.media.AudioPortConfigFw;
import android.media.AudioPortRole;
import android.media.AudioPortType;
import android.media.AudioProductStrategy;
@@ -213,16 +213,16 @@
int listAudioPorts(AudioPortRole role,
AudioPortType type,
inout Int count,
- out AudioPort[] ports);
+ out AudioPortFw[] ports);
/** Get attributes for the audio port with the given id (AudioPort.hal.id field). */
- AudioPort getAudioPort(int /* audio_port_handle_t */ portId);
+ AudioPortFw getAudioPort(int /* audio_port_handle_t */ portId);
/**
* Create an audio patch between several source and sink ports.
* The handle argument is used when updating an existing patch.
*/
- int /* audio_patch_handle_t */ createAudioPatch(in AudioPatch patch, int handle);
+ int /* audio_patch_handle_t */ createAudioPatch(in AudioPatchFw patch, int handle);
/** Release an audio patch. */
void releaseAudioPatch(int /* audio_patch_handle_t */ handle);
@@ -235,10 +235,10 @@
* Passing '0' on input and inspecting the value on output is a common way of determining the
* number of elements without actually retrieving them.
*/
- int listAudioPatches(inout Int count, out AudioPatch[] patches);
+ int listAudioPatches(inout Int count, out AudioPatchFw[] patches);
/** Set audio port configuration. */
- void setAudioPortConfig(in AudioPortConfig config);
+ void setAudioPortConfig(in AudioPortConfigFw config);
void registerClient(IAudioPolicyServiceClient client);
@@ -262,7 +262,7 @@
void removeUserIdDeviceAffinities(int userId);
- int /* audio_port_handle_t */ startAudioSource(in AudioPortConfig source,
+ int /* audio_port_handle_t */ startAudioSource(in AudioPortConfigFw source,
in AudioAttributesInternal attributes);
void stopAudioSource(int /* audio_port_handle_t */ portId);
diff --git a/media/libaudioclient/aidl/android/media/ISoundDose.aidl b/media/libaudioclient/aidl/android/media/ISoundDose.aidl
index f31f091..7310160 100644
--- a/media/libaudioclient/aidl/android/media/ISoundDose.aidl
+++ b/media/libaudioclient/aidl/android/media/ISoundDose.aidl
@@ -22,9 +22,9 @@
* Interface used to push the sound dose related information from the
* AudioService#SoundDoseHelper to the audio server
*/
-oneway interface ISoundDose {
+interface ISoundDose {
/** Set a new RS2 value used for momentary exposure warnings. */
- void setOutputRs2(float rs2Value);
+ oneway void setOutputRs2(float rs2Value);
/**
* Resets the native CSD values. This can happen after a crash in the
@@ -33,5 +33,15 @@
* dosage values and MELs together with their timestamps that lead to this
* CSD.
*/
- void resetCsd(float currentCsd, in SoundDoseRecord[] records);
+ oneway void resetCsd(float currentCsd, in SoundDoseRecord[] records);
+
+ /* -------------------------- Test API methods -------------------------- */
+ /** Get the currently used RS2 value. */
+ float getOutputRs2();
+ /** Get the current CSD from audioserver. */
+ float getCsd();
+ /** Enables/Disables MEL computations from framework. */
+ oneway void forceUseFrameworkMel(boolean useFrameworkMel);
+ /** Enables/Disables the computation of CSD on all devices. */
+ oneway void forceComputeCsdOnAllDevices(boolean computeCsdOnAllDevices);
}
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
index 90e7ea6..ddda8bb 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
@@ -16,7 +16,7 @@
package android.media;
-import android.media.AudioPort;
+import android.media.AudioPortFw;
import android.media.audio.common.AudioConfig;
import android.media.audio.common.AudioConfigBase;
@@ -29,7 +29,7 @@
AudioConfig halConfig;
AudioConfigBase mixerConfig;
/** Type must be DEVICE. */
- AudioPort device;
+ AudioPortFw device;
/** Bitmask, indexed by AudioOutputFlag. */
int flags;
}
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index 5536bcb..47fe0f6 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -584,7 +584,12 @@
float balance = mFdp.ConsumeFloatingPoint<float>();
af->getMasterBalance(&balance);
- af->invalidateStream(static_cast<audio_stream_type_t>(mFdp.ConsumeIntegral<uint32_t>()));
+
+ std::vector<audio_port_handle_t> tracks;
+ for (int i = 0; i < mFdp.ConsumeIntegralInRange<int32_t>(0, MAX_ARRAY_LENGTH); ++i) {
+ tracks.push_back(static_cast<audio_port_handle_t>(mFdp.ConsumeIntegral<int32_t>()));
+ }
+ af->invalidateTracks(tracks);
}
status_t AudioFlingerFuzzer::invokeAudioInputDevice() {
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index afcb61a..ff3598f 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -29,8 +29,8 @@
#include <android/media/AudioIoConfigEvent.h>
#include <android/media/AudioIoDescriptor.h>
#include <android/media/AudioPlaybackRate.h>
-#include <android/media/AudioPort.h>
-#include <android/media/AudioPortConfig.h>
+#include <android/media/AudioPortFw.h>
+#include <android/media/AudioPortConfigFw.h>
#include <android/media/AudioPortDeviceExtSys.h>
#include <android/media/AudioTimestampInternal.h>
#include <android/media/AudioUniqueIdUse.h>
@@ -92,13 +92,13 @@
const audio_port_config_session_ext& legacy);
ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
- const media::AudioPortConfig& aidl);
-ConversionResult<media::AudioPortConfig> legacy2aidl_audio_port_config_AudioPortConfig(
+ const media::AudioPortConfigFw& aidl);
+ConversionResult<media::AudioPortConfigFw> legacy2aidl_audio_port_config_AudioPortConfig(
const audio_port_config& legacy);
ConversionResult<struct audio_patch> aidl2legacy_AudioPatch_audio_patch(
- const media::AudioPatch& aidl);
-ConversionResult<media::AudioPatch> legacy2aidl_audio_patch_AudioPatch(
+ const media::AudioPatchFw& aidl);
+ConversionResult<media::AudioPatchFw> legacy2aidl_audio_patch_AudioPatch(
const struct audio_patch& legacy);
ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
@@ -170,8 +170,8 @@
legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy);
ConversionResult<audio_port_v7>
-aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl);
-ConversionResult<media::AudioPort>
+aidl2legacy_AudioPort_audio_port_v7(const media::AudioPortFw& aidl);
+ConversionResult<media::AudioPortFw>
legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy);
ConversionResult<audio_unique_id_use_t>
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index 862a0f9..2567542 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -94,6 +94,7 @@
using AttributesVector = std::vector<audio_attributes_t>;
using StreamTypeVector = std::vector<audio_stream_type_t>;
+using PortHandleVector = std::vector<audio_port_handle_t>;
using TrackSecondaryOutputsMap = std::map<audio_port_handle_t, std::vector<audio_io_handle_t>>;
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index e04b82e..5ed8219 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -629,6 +629,12 @@
static status_t getSupportedLatencyModes(audio_io_handle_t output,
std::vector<audio_latency_mode_t>* modes);
+ static status_t setBluetoothVariableLatencyEnabled(bool enabled);
+
+ static status_t isBluetoothVariableLatencyEnabled(bool *enabled);
+
+ static status_t supportsBluetoothVariableLatency(bool *support);
+
static status_t getSupportedMixerAttributes(audio_port_handle_t portId,
std::vector<audio_mixer_attributes_t> *mixerAttrs);
static status_t setPreferredMixerAttributes(const audio_attributes_t *attr,
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 6c89862..fdf3113 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -268,8 +268,6 @@
virtual status_t closeInput(audio_io_handle_t input) = 0;
- virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
-
virtual status_t setVoiceVolume(float volume) = 0;
virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
@@ -374,6 +372,14 @@
virtual status_t getSoundDoseInterface(const sp<media::ISoundDoseCallback>& callback,
sp<media::ISoundDose>* soundDose) = 0;
+
+ virtual status_t invalidateTracks(const std::vector<audio_port_handle_t>& portIds) = 0;
+
+ virtual status_t setBluetoothVariableLatencyEnabled(bool enabled) = 0;
+
+ virtual status_t isBluetoothVariableLatencyEnabled(bool* enabled) = 0;
+
+ virtual status_t supportsBluetoothVariableLatency(bool* support) = 0;
};
/**
@@ -428,7 +434,6 @@
status_t openInput(const media::OpenInputRequest& request,
media::OpenInputResponse* response) override;
status_t closeInput(audio_io_handle_t input) override;
- status_t invalidateStream(audio_stream_type_t stream) override;
status_t setVoiceVolume(float volume) override;
status_t getRenderPosition(uint32_t* halFrames, uint32_t* dspFrames,
audio_io_handle_t output) const override;
@@ -480,8 +485,12 @@
audio_latency_mode_t mode) override;
status_t getSupportedLatencyModes(
audio_io_handle_t output, std::vector<audio_latency_mode_t>* modes) override;
+ status_t setBluetoothVariableLatencyEnabled(bool enabled) override;
+ status_t isBluetoothVariableLatencyEnabled(bool* enabled) override;
+ status_t supportsBluetoothVariableLatency(bool* support) override;
status_t getSoundDoseInterface(const sp<media::ISoundDoseCallback>& callback,
sp<media::ISoundDose>* soundDose) override;
+ status_t invalidateTracks(const std::vector<audio_port_handle_t>& portIds) override;
private:
const sp<media::IAudioFlingerService> mDelegate;
@@ -535,7 +544,6 @@
RESTORE_OUTPUT = media::BnAudioFlingerService::TRANSACTION_restoreOutput,
OPEN_INPUT = media::BnAudioFlingerService::TRANSACTION_openInput,
CLOSE_INPUT = media::BnAudioFlingerService::TRANSACTION_closeInput,
- INVALIDATE_STREAM = media::BnAudioFlingerService::TRANSACTION_invalidateStream,
SET_VOICE_VOLUME = media::BnAudioFlingerService::TRANSACTION_setVoiceVolume,
GET_RENDER_POSITION = media::BnAudioFlingerService::TRANSACTION_getRenderPosition,
GET_INPUT_FRAMES_LOST = media::BnAudioFlingerService::TRANSACTION_getInputFramesLost,
@@ -573,7 +581,14 @@
SET_DEVICE_CONNECTED_STATE = media::BnAudioFlingerService::TRANSACTION_setDeviceConnectedState,
SET_REQUESTED_LATENCY_MODE = media::BnAudioFlingerService::TRANSACTION_setRequestedLatencyMode,
GET_SUPPORTED_LATENCY_MODES = media::BnAudioFlingerService::TRANSACTION_getSupportedLatencyModes,
+ SET_BLUETOOTH_VARIABLE_LATENCY_ENABLED =
+ media::BnAudioFlingerService::TRANSACTION_setBluetoothVariableLatencyEnabled,
+ IS_BLUETOOTH_VARIABLE_LATENCY_ENABLED =
+ media::BnAudioFlingerService::TRANSACTION_isBluetoothVariableLatencyEnabled,
+ SUPPORTS_BLUETOOTH_VARIABLE_LATENCY =
+ media::BnAudioFlingerService::TRANSACTION_supportsBluetoothVariableLatency,
GET_SOUND_DOSE_INTERFACE = media::BnAudioFlingerService::TRANSACTION_getSoundDoseInterface,
+ INVALIDATE_TRACKS = media::BnAudioFlingerService::TRANSACTION_invalidateTracks,
};
protected:
@@ -653,7 +668,6 @@
Status openInput(const media::OpenInputRequest& request,
media::OpenInputResponse* _aidl_return) override;
Status closeInput(int32_t input) override;
- Status invalidateStream(media::audio::common::AudioStreamType stream) override;
Status setVoiceVolume(float volume) override;
Status getRenderPosition(int32_t output, media::RenderPosition* _aidl_return) override;
Status getInputFramesLost(int32_t ioHandle, int32_t* _aidl_return) override;
@@ -674,12 +688,12 @@
Status getPrimaryOutputSamplingRate(int32_t* _aidl_return) override;
Status getPrimaryOutputFrameCount(int64_t* _aidl_return) override;
Status setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) override;
- Status getAudioPort(const media::AudioPort& port, media::AudioPort* _aidl_return) override;
- Status createAudioPatch(const media::AudioPatch& patch, int32_t* _aidl_return) override;
+ Status getAudioPort(const media::AudioPortFw& port, media::AudioPortFw* _aidl_return) override;
+ Status createAudioPatch(const media::AudioPatchFw& patch, int32_t* _aidl_return) override;
Status releaseAudioPatch(int32_t handle) override;
Status listAudioPatches(int32_t maxCount,
- std::vector<media::AudioPatch>* _aidl_return) override;
- Status setAudioPortConfig(const media::AudioPortConfig& config) override;
+ std::vector<media::AudioPatchFw>* _aidl_return) override;
+ Status setAudioPortConfig(const media::AudioPortConfigFw& config) override;
Status getAudioHwSyncForSession(int32_t sessionId, int32_t* _aidl_return) override;
Status systemReady() override;
Status audioPolicyReady() override;
@@ -694,12 +708,16 @@
std::vector<media::audio::common::AudioMMapPolicyInfo> *_aidl_return) override;
Status getAAudioMixerBurstCount(int32_t* _aidl_return) override;
Status getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) override;
- Status setDeviceConnectedState(const media::AudioPort& port, bool connected) override;
+ Status setDeviceConnectedState(const media::AudioPortFw& port, bool connected) override;
Status setRequestedLatencyMode(int output, media::LatencyMode mode) override;
Status getSupportedLatencyModes(int output,
std::vector<media::LatencyMode>* _aidl_return) override;
+ Status setBluetoothVariableLatencyEnabled(bool enabled) override;
+ Status isBluetoothVariableLatencyEnabled(bool* enabled) override;
+ Status supportsBluetoothVariableLatency(bool* support) override;
Status getSoundDoseInterface(const sp<media::ISoundDoseCallback>& callback,
sp<media::ISoundDose>* _aidl_return) override;
+ Status invalidateTracks(const std::vector<int32_t>& portIds) override;
private:
const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
};
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index 4513323..6e05abc 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -222,7 +222,7 @@
mExtraAudioDescriptors == other->getExtraAudioDescriptors();
}
-status_t AudioPort::writeToParcelable(media::AudioPort* parcelable) const {
+status_t AudioPort::writeToParcelable(media::AudioPortFw* parcelable) const {
parcelable->hal.name = mName;
parcelable->sys.type = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_type_t_AudioPortType(mType));
@@ -249,7 +249,7 @@
return OK;
}
-status_t AudioPort::readFromParcelable(const media::AudioPort& parcelable) {
+status_t AudioPort::readFromParcelable(const media::AudioPortFw& parcelable) {
mName = parcelable.hal.name;
mType = VALUE_OR_RETURN_STATUS(
aidl2legacy_AudioPortType_audio_port_type_t(parcelable.sys.type));
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 91efb96..c499513 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -181,7 +181,7 @@
return false;
}
-status_t DeviceDescriptorBase::writeToParcelable(media::AudioPort* parcelable) const {
+status_t DeviceDescriptorBase::writeToParcelable(media::AudioPortFw* parcelable) const {
AudioPort::writeToParcelable(parcelable);
AudioPortConfig::writeToParcelable(&parcelable->sys.activeConfig.hal, useInputChannelMask());
parcelable->hal.id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
@@ -203,7 +203,7 @@
return OK;
}
-status_t DeviceDescriptorBase::readFromParcelable(const media::AudioPort& parcelable) {
+status_t DeviceDescriptorBase::readFromParcelable(const media::AudioPortFw& parcelable) {
if (parcelable.sys.type != media::AudioPortType::DEVICE) {
return BAD_VALUE;
}
@@ -252,7 +252,7 @@
}
ConversionResult<sp<DeviceDescriptorBase>>
-aidl2legacy_DeviceDescriptorBase(const media::AudioPort& aidl) {
+aidl2legacy_DeviceDescriptorBase(const media::AudioPortFw& aidl) {
sp<DeviceDescriptorBase> result = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
status_t status = result->readFromParcelable(aidl);
if (status != OK) {
@@ -261,9 +261,9 @@
return result;
}
-ConversionResult<media::AudioPort>
+ConversionResult<media::AudioPortFw>
legacy2aidl_DeviceDescriptorBase(const sp<DeviceDescriptorBase>& legacy) {
- media::AudioPort aidl;
+ media::AudioPortFw aidl;
status_t status = legacy->writeToParcelable(&aidl);
if (status != OK) {
return base::unexpected(status);
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index bce131c..2a14504 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -67,6 +67,9 @@
for (const auto &channel : channelMasks) {
if (audio_channel_mask_out_to_in(channel) != AUDIO_CHANNEL_INVALID) {
inMaskSet.insert(audio_channel_mask_out_to_in(channel));
+ } else if (audio_channel_mask_get_representation(channel)
+ == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ inMaskSet.insert(channel);
}
}
return inMaskSet;
@@ -77,6 +80,9 @@
for (const auto &channel : channelMasks) {
if (audio_channel_mask_in_to_out(channel) != AUDIO_CHANNEL_INVALID) {
outMaskSet.insert(audio_channel_mask_in_to_out(channel));
+ } else if (audio_channel_mask_get_representation(channel)
+ == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ outMaskSet.insert(channel);
}
}
return outMaskSet;
diff --git a/media/libaudiofoundation/include/media/AudioPort.h b/media/libaudiofoundation/include/media/AudioPort.h
index b1235f5..77e58ed 100644
--- a/media/libaudiofoundation/include/media/AudioPort.h
+++ b/media/libaudiofoundation/include/media/AudioPort.h
@@ -19,8 +19,8 @@
#include <string>
#include <type_traits>
-#include <android/media/AudioPort.h>
-#include <android/media/AudioPortConfig.h>
+#include <android/media/AudioPortFw.h>
+#include <android/media/AudioPortConfigFw.h>
#include <android/media/audio/common/ExtraAudioDescriptor.h>
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
@@ -118,8 +118,8 @@
bool equals(const sp<AudioPort>& other) const;
- status_t writeToParcelable(media::AudioPort* parcelable) const;
- status_t readFromParcelable(const media::AudioPort& parcelable);
+ status_t writeToParcelable(media::AudioPortFw* parcelable) const;
+ status_t readFromParcelable(const media::AudioPortFw& parcelable);
AudioGains mGains; // gain controllers
// Maximum number of input or output streams that can be simultaneously
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
index dc2899a..501831d 100644
--- a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -18,7 +18,7 @@
#include <vector>
-#include <android/media/AudioPort.h>
+#include <android/media/AudioPortFw.h>
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
#include <media/AudioContainers.h>
@@ -79,8 +79,8 @@
bool equals(const sp<DeviceDescriptorBase>& other) const;
- status_t writeToParcelable(media::AudioPort* parcelable) const;
- status_t readFromParcelable(const media::AudioPort& parcelable);
+ status_t writeToParcelable(media::AudioPortFw* parcelable) const;
+ status_t readFromParcelable(const media::AudioPortFw& parcelable);
protected:
AudioDeviceTypeAddr mDeviceTypeAddr;
@@ -116,8 +116,8 @@
// Conversion routines, according to AidlConversion.h conventions.
ConversionResult<sp<DeviceDescriptorBase>>
-aidl2legacy_DeviceDescriptorBase(const media::AudioPort& aidl);
-ConversionResult<media::AudioPort>
+aidl2legacy_DeviceDescriptorBase(const media::AudioPortFw& aidl);
+ConversionResult<media::AudioPortFw>
legacy2aidl_DeviceDescriptorBase(const sp<DeviceDescriptorBase>& legacy);
} // namespace android
diff --git a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
index 50d8dc8..e315858 100644
--- a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
+++ b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
@@ -117,7 +117,7 @@
audioPort->setGains(getAudioGainsForTest());
audioPort->setAudioProfiles(getAudioProfileVectorForTest());
- media::AudioPort parcelable;
+ media::AudioPortFw parcelable;
ASSERT_EQ(NO_ERROR, audioPort->writeToParcelable(&parcelable));
sp<AudioPort> audioPortFromParcel = new AudioPort(
"", AUDIO_PORT_TYPE_NONE, AUDIO_PORT_ROLE_NONE);
@@ -152,7 +152,7 @@
ASSERT_EQ(desc->setEncapsulationMetadataTypes(
AUDIO_ENCAPSULATION_METADATA_TYPE_ALL_POSITION_BITS), NO_ERROR);
- media::AudioPort parcelable;
+ media::AudioPortFw parcelable;
ASSERT_EQ(NO_ERROR, desc->writeToParcelable(&parcelable));
sp<DeviceDescriptorBase> descFromParcel = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
ASSERT_EQ(NO_ERROR, descFromParcel->readFromParcelable(parcelable));
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index f6519b6..3e33609 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -132,6 +132,11 @@
return INVALID_OPERATION;
}
+ int32_t supportsBluetoothVariableLatency(bool* supports __unused) override {
+ // TODO: Implement the HAL query when moving to AIDL HAL.
+ return INVALID_OPERATION;
+ }
+
status_t setConnectedState(const struct audio_port_v7 *port, bool connected) override;
error::Result<audio_hw_sync_t> getHwAvSync() override;
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index d27ad4c..3858607 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -128,6 +128,7 @@
std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
virtual int32_t getAAudioMixerBurstCount() = 0;
virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
+ virtual int32_t supportsBluetoothVariableLatency(bool* supports) = 0;
// Update the connection status of an external device.
virtual status_t setConnectedState(const struct audio_port_v7 *port, bool connected) = 0;
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index c2b82d1..6a39108 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -116,6 +116,9 @@
track->mKeepContractedChannels = false;
}
+ track->mInputFrameSize = audio_bytes_per_frame(
+ track->channelCount + track->mHapticChannelCount, track->mFormat);
+
// channel masks have changed, does this track need a downmixer?
// update to try using our desired format (if we aren't already using it)
const status_t status = track->prepareForDownmix();
@@ -309,9 +312,8 @@
ALOGV("AudioMixer::%s(%p) teeBuffer=%p", __func__, this, teeBuffer);
unprepareForTee();
if (teeBuffer != nullptr) {
- const size_t frameSize = audio_bytes_per_frame(channelCount + mHapticChannelCount, mFormat);
mTeeBufferProvider.reset(new TeeBufferProvider(
- frameSize, frameSize, kCopyBufferFrameCount,
+ mInputFrameSize, mInputFrameSize, kCopyBufferFrameCount,
(uint8_t*)teeBuffer, mTeeBufferFrameCount));
reconfigureBufferProviders();
}
@@ -590,6 +592,8 @@
t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
t->mAdjustOutChannelCount = t->channelCount;
t->mKeepContractedChannels = false;
+ t->mInputFrameSize = audio_bytes_per_frame(
+ t->channelCount + t->mHapticChannelCount, t->mFormat);
// Check the downmixing (or upmixing) requirements.
status_t status = t->prepareForDownmix();
if (status != OK) {
@@ -641,6 +645,10 @@
}
break;
}
+ if (t->teeBuffer != nullptr && t->volumeRL == 0) {
+ // Need to mute tee
+ memset(t->teeBuffer, 0, t->mTeeBufferFrameCount * t->mInputFrameSize);
+ }
}
}
}
diff --git a/media/libaudioprocessing/AudioMixerBase.cpp b/media/libaudioprocessing/AudioMixerBase.cpp
index fd06991..427bd55 100644
--- a/media/libaudioprocessing/AudioMixerBase.cpp
+++ b/media/libaudioprocessing/AudioMixerBase.cpp
@@ -152,6 +152,7 @@
AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
t->mTeeBufferFrameCount = 0;
+ t->mInputFrameSize = audio_bytes_per_frame(t->channelCount, t->mFormat);
status_t status = postCreateTrack(t.get());
if (status != OK) return status;
mTracks[name] = t;
@@ -178,6 +179,7 @@
track->channelCount = trackChannelCount;
track->mMixerChannelMask = mixerChannelMask;
track->mMixerChannelCount = mixerChannelCount;
+ track->mInputFrameSize = audio_bytes_per_frame(track->channelCount, track->mFormat);
// Resampler channels may have changed.
track->recreateResampler(mSampleRate);
diff --git a/media/libaudioprocessing/include/media/AudioMixerBase.h b/media/libaudioprocessing/include/media/AudioMixerBase.h
index caccb6a..4bd85d8 100644
--- a/media/libaudioprocessing/include/media/AudioMixerBase.h
+++ b/media/libaudioprocessing/include/media/AudioMixerBase.h
@@ -297,6 +297,8 @@
int32_t mTeeBufferFrameCount;
+ uint32_t mInputFrameSize; // The track input frame size, used for tee buffer
+
protected:
// hooks
diff --git a/media/libaudiousecasevalidation/Android.bp b/media/libaudiousecasevalidation/Android.bp
new file mode 100644
index 0000000..3ee7e32
--- /dev/null
+++ b/media/libaudiousecasevalidation/Android.bp
@@ -0,0 +1,49 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_library {
+ name: "libaudiousecasevalidation",
+ host_supported: true,
+ srcs: [
+ "UsecaseLookup.cpp",
+ "UsecaseValidator.cpp",
+ ],
+ header_libs: [
+ "liberror_headers",
+ ],
+ shared_libs: [
+ "framework-permission-aidl-cpp",
+ "libaudioutils",
+ "libbase",
+ "liblog",
+ ],
+ export_include_dirs: [
+ "include",
+ ],
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
+}
+
+cc_test_host {
+ name: "libaudiousecasevalidation-test",
+ srcs: [
+ "tests/UsecaseValidator-test.cpp",
+ ],
+ header_libs: [
+ "liberror_headers",
+ ],
+ shared_libs: [
+ "framework-permission-aidl-cpp",
+ "libaudiousecasevalidation",
+ "libutils",
+ ],
+}
diff --git a/media/libaudiousecasevalidation/UsecaseLookup.cpp b/media/libaudiousecasevalidation/UsecaseLookup.cpp
new file mode 100644
index 0000000..01e667f
--- /dev/null
+++ b/media/libaudiousecasevalidation/UsecaseLookup.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "UsecaseLookup"
+// #define LOG_NDEBUG 0
+
+#include "media/UsecaseLookup.h"
+
+#include <utils/Log.h>
+
+namespace android {
+namespace media {
+
+/**
+ * Add streamId and outputFlags to stream list.
+ */
+void UsecaseLookup::addStream(STREAMID streamId, bool outputFlagGame) {
+ ALOGV("%s streamId: %d outputFlagGame: %d", __func__, streamId, outputFlagGame);
+
+ mutex_lock lock(m_mutex);
+ m_streams[streamId] = outputFlagGame;
+}
+
+/**
+ * Remove streamId from stream list.
+ */
+void UsecaseLookup::removeStream(STREAMID streamId) {
+ ALOGV("%s streamId: %d ", __func__, streamId);
+
+ mutex_lock lock(m_mutex);
+ m_streams.erase(streamId);
+
+ // Defensive cleanup: drop any tracks still mapped to the removed stream.
+ for (auto it = m_tracks.begin(); it != m_tracks.end();) {
+ if (it->second == streamId) {
+ it = m_tracks.erase(it);
+ } else {
+ it++;
+ }
+ }
+}
+
+/**
+ * Add streamId and portId to track list.
+ */
+void UsecaseLookup::addTrack(STREAMID streamId, PORTID portId) {
+ ALOGV("%s streamId: %d portId: %d", __func__, streamId, portId);
+
+ mutex_lock lock(m_mutex);
+
+ if (m_tracks.find(portId) == m_tracks.end()) {
+ m_tracks[portId] = streamId;
+ }
+}
+
+/**
+ * Remove streamId and portId from track list.
+ */
+void UsecaseLookup::removeTrack(STREAMID streamId, PORTID portId) {
+ ALOGV("%s streamId: %d portId: %d", __func__, streamId, portId);
+
+ mutex_lock lock(m_mutex);
+ auto it = m_tracks.find(portId);
+
+ if (it != m_tracks.end() && it->second == streamId) {
+ m_tracks.erase(portId);
+ }
+}
+
+/**
+ * Check if stream list contains streamId with Game outputFlag.
+ */
+bool UsecaseLookup::isGameStream(STREAMID streamId) {
+ ALOGV("%s streamId: %d ", __func__, streamId);
+ mutex_lock lock(m_mutex);
+ auto it = m_streams.find(streamId);
+
+ return (it != m_streams.end()) ? it->second : false;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libaudiousecasevalidation/UsecaseValidator.cpp b/media/libaudiousecasevalidation/UsecaseValidator.cpp
new file mode 100644
index 0000000..0e5a824
--- /dev/null
+++ b/media/libaudiousecasevalidation/UsecaseValidator.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "UsecaseValidator"
+// #define LOG_NDEBUG 0
+
+#include <inttypes.h>
+
+#include <utils/Log.h>
+
+#include "media/UsecaseValidator.h"
+#include "media/UsecaseLookup.h"
+
+namespace android {
+namespace media {
+namespace {
+
+class UsecaseValidatorImpl : public UsecaseValidator {
+ public:
+ UsecaseValidatorImpl() {}
+
+ /**
+ * Register a new mixer/stream.
+ * Called when the stream is opened at the HAL and communicates
+ * immutable stream attributes like flags, sampling rate, format.
+ */
+ status_t registerStream(audio_io_handle_t streamId,
+ const audio_config_base_t& audioConfig __attribute__((unused)),
+ const audio_output_flags_t outputFlags) override {
+ ALOGV("%s output: %d flags: %#x", __func__, streamId, outputFlags);
+
+ // Check if FAST or MMAP output flag has been set.
+ bool outputFlagGame = outputFlags & (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ);
+ m_lookup.addStream(streamId, outputFlagGame);
+ return OK;
+ };
+
+ /**
+ * Unregister a stream/mixer.
+ * Called when the stream is closed.
+ */
+ status_t unregisterStream(audio_io_handle_t streamId) override {
+ ALOGV("%s output: %d", __func__, streamId);
+
+ m_lookup.removeStream(streamId);
+ return OK;
+ };
+
+ /**
+ * Indicates that some playback activity started on the stream.
+ * Called each time an audio track starts or resumes.
+ */
+ error::Result<audio_attributes_t> startClient(audio_io_handle_t streamId,
+ audio_port_handle_t portId, const content::AttributionSourceState& attributionSource,
+ const audio_attributes_t& attributes,
+ const AttributesChangedCallback *callback __attribute__((unused))) override {
+ ALOGV("%s output: %d portId: %d usage: %d pid: %d package: %s",
+ __func__, streamId, portId, attributes.usage, attributionSource.pid,
+ attributionSource.packageName.value_or("").c_str());
+
+ m_lookup.addTrack(streamId, portId);
+
+ return verifyAudioAttributes(streamId, attributionSource, attributes);
+ };
+
+ /**
+ * Indicates that some playback activity stopped on the stream.
+ * Called each time an audio track stops or pauses.
+ */
+ status_t stopClient(audio_io_handle_t streamId, audio_port_handle_t portId) override {
+ ALOGV("%s output: %d portId: %d", __func__, streamId, portId);
+
+ m_lookup.removeTrack(streamId, portId);
+ return OK;
+ };
+
+ /**
+ * Called to verify and update audio attributes for a track that is connected
+ * to the specified stream.
+ */
+ error::Result<audio_attributes_t> verifyAudioAttributes(audio_io_handle_t streamId,
+ const content::AttributionSourceState& attributionSource,
+ const audio_attributes_t& attributes) override {
+ ALOGV("%s output: %d usage: %d pid: %d package: %s",
+ __func__, streamId, attributes.usage, attributionSource.pid,
+ attributionSource.packageName.value_or("").c_str());
+
+ audio_attributes_t attrRet = attributes;
+
+ // Check if attribute usage media or unknown has been set.
+ bool isUsageValid = this->isUsageValid(attributes);
+
+ if (isUsageValid && m_lookup.isGameStream(streamId)) {
+ ALOGI("%s update usage: %d to AUDIO_USAGE_GAME for output: %d pid: %d package: %s",
+ __func__, attributes.usage, streamId, attributionSource.pid,
+ attributionSource.packageName.value_or("").c_str());
+ // Set attribute usage Game.
+ attrRet.usage = AUDIO_USAGE_GAME;
+ }
+
+ return {attrRet};
+ };
+
+ protected:
+ /**
+ * Check if attribute usage valid.
+ */
+ bool isUsageValid(const audio_attributes_t& attr) {
+ ALOGV("isUsageValid attr.usage: %d", attr.usage);
+ switch (attr.usage) {
+ case AUDIO_USAGE_MEDIA:
+ case AUDIO_USAGE_UNKNOWN:
+ return true;
+ default:
+ break;
+ }
+ return false;
+ }
+
+ protected:
+ UsecaseLookup m_lookup;
+};
+
+} // namespace
+
+std::unique_ptr<UsecaseValidator> createUsecaseValidator() {
+ return std::make_unique<UsecaseValidatorImpl>();
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libaudiousecasevalidation/include/media/UsecaseLookup.h b/media/libaudiousecasevalidation/include/media/UsecaseLookup.h
new file mode 100644
index 0000000..a35d88d
--- /dev/null
+++ b/media/libaudiousecasevalidation/include/media/UsecaseLookup.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MEDIA_LIBAUDIOUSECASEVALIDATION_INCLUDE_MEDIA_USECASELOOKUP_H_
+#define MEDIA_LIBAUDIOUSECASEVALIDATION_INCLUDE_MEDIA_USECASELOOKUP_H_
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+
+namespace android {
+namespace media {
+
+typedef int STREAMID;
+typedef int PORTID;
+
+// List of streamId and outputFlag state.
+typedef std::map<STREAMID, bool> STREAMLIST;
+// List of portId and streamId.
+typedef std::map<PORTID, STREAMID> TRACKLIST;
+typedef std::lock_guard<std::mutex> mutex_lock;
+
+class UsecaseLookup {
+ public:
+ UsecaseLookup() { }
+ virtual ~UsecaseLookup() { }
+
+ // Required for testing.
+ void clear() {
+ m_streams.clear();
+ m_tracks.clear();
+ }
+
+ /**
+ * Add streamId and its outputFlagGame state to the stream list.
+ */
+ void addStream(STREAMID streamId, bool outputFlagGame = false);
+
+ /**
+ * Remove streamId from stream list.
+ */
+ void removeStream(STREAMID streamId);
+
+ /**
+ * Add streamId and portId to track list.
+ */
+ void addTrack(STREAMID streamId, PORTID portId);
+
+ /**
+ * Remove streamId and portId from track list.
+ */
+ void removeTrack(STREAMID streamId, PORTID portId);
+
+ /**
+ * Check if stream list contains streamId with Game output flag.
+ */
+ bool isGameStream(STREAMID streamId);
+
+ protected:
+ STREAMLIST m_streams;
+ TRACKLIST m_tracks;
+ std::mutex m_mutex;
+};
+
+} // namespace media
+} // namespace android
+
+#endif // MEDIA_LIBAUDIOUSECASEVALIDATION_INCLUDE_MEDIA_USECASELOOKUP_H_
diff --git a/media/libaudiousecasevalidation/include/media/UsecaseValidator.h b/media/libaudiousecasevalidation/include/media/UsecaseValidator.h
new file mode 100644
index 0000000..2e1d7f4
--- /dev/null
+++ b/media/libaudiousecasevalidation/include/media/UsecaseValidator.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MEDIA_LIBAUDIOUSECASEVALIDATION_INCLUDE_MEDIA_USECASEVALIDATOR_H_
+#define MEDIA_LIBAUDIOUSECASEVALIDATION_INCLUDE_MEDIA_USECASEVALIDATOR_H_
+
+#pragma once
+
+#include <error/Result.h>
+#include <system/audio.h>
+#include <android/content/AttributionSourceState.h>
+
+#include <limits>
+#include <memory>
+
+namespace android {
+namespace media {
+
+/**
+ * Main entry-point for this library.
+ */
+class UsecaseValidator {
+ public:
+ virtual ~UsecaseValidator() = default;
+
+ /**
+ * A callback called by the module when the audio attributes for
+ * an active portId changes.
+ */
+ class AttributesChangedCallback {
+ public:
+ virtual ~AttributesChangedCallback() = default;
+ virtual void onAttributesChanged(audio_port_handle_t portId,
+ const audio_attributes_t& attributes) = 0;
+ };
+
+ /**
+ * Register a new mixer/stream.
+ * Called when the stream is opened at the HAL and communicates
+ * immutable stream attributes like flags, sampling rate, format.
+ */
+ virtual status_t registerStream(audio_io_handle_t streamId,
+ const audio_config_base_t& audioConfig,
+ const audio_output_flags_t outputFlags) = 0;
+
+ /**
+ * Unregister a stream/mixer.
+ * Called when the stream is closed.
+ */
+ virtual status_t unregisterStream(audio_io_handle_t streamId) = 0;
+
+ /**
+ * Indicates that some playback activity started on the stream.
+ * Called each time an audio track starts or resumes.
+ */
+ virtual error::Result<audio_attributes_t> startClient(audio_io_handle_t streamId,
+ audio_port_handle_t portId,
+ const content::AttributionSourceState& attributionSource,
+ const audio_attributes_t& attributes,
+ const AttributesChangedCallback *callback) = 0;
+
+ /**
+ * Indicates that some playback activity stopped on the stream.
+ * Called each time an audio track stops or pauses.
+ */
+ virtual status_t stopClient(audio_io_handle_t streamId, audio_port_handle_t portId) = 0;
+
+ /**
+ * Called to verify and update audio attributes for a track that is connected
+ * to the specified stream.
+ */
+ virtual error::Result<audio_attributes_t> verifyAudioAttributes(audio_io_handle_t streamId,
+ const content::AttributionSourceState& attributionSource,
+ const audio_attributes_t& attributes) = 0;
+};
+
+/**
+ * Creates an instance featuring a default implementation of the UsecaseValidator interface.
+ */
+std::unique_ptr<UsecaseValidator> createUsecaseValidator();
+
+} // namespace media
+} // namespace android
+
+#endif // MEDIA_LIBAUDIOUSECASEVALIDATION_INCLUDE_MEDIA_USECASEVALIDATOR_H_
diff --git a/media/libaudiousecasevalidation/tests/UsecaseValidator-test.cpp b/media/libaudiousecasevalidation/tests/UsecaseValidator-test.cpp
new file mode 100644
index 0000000..d92c8ba
--- /dev/null
+++ b/media/libaudiousecasevalidation/tests/UsecaseValidator-test.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "tests/UsecaseValidator-test.h"
+
+#include <gtest/gtest.h>
+
+namespace android {
+namespace media {
+
+/**
+ * Helper test functions.
+ */
+
+/**
+ * Register a mock stream.
+ */
+audio_io_handle_t UsecaseValidatorTest::testRegisterStream(bool outputFlagGame) {
+ // Monotonic counter shared by every test in this binary, so each
+ // registered stream receives a unique non-zero handle.
+ static int streamId = 0;
+ status_t result;
+ static audio_config_base_t audioConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ // Game streams use the low-latency flag set; media streams use everything else.
+ audio_output_flags_t outputFlags = outputFlagGame ? GAME_OUTPUT_FLAGS : MEDIA_OUTPUT_FLAGS;
+
+ result = m_validator->registerStream(++streamId, audioConfig, outputFlags);
+
+ // Returns the new stream id on success, 0 on failure.
+ return result == OK ? streamId : 0;
+}
+
+/**
+ * Create a mock portId.
+ */
+audio_port_handle_t UsecaseValidatorTest::testCreatePortId(audio_io_handle_t streamId) {
+ static int portId = 0;
+
+ // Encode the stream id in the upper bits so port ids are unique both
+ // within one stream and across streams.
+ return (streamId << 8) | (++portId);
+}
+
+/**
+ * Add a mock portId to a stream and verify.
+ */
+error::Result<audio_attributes_t> UsecaseValidatorTest::testStartClient(audio_io_handle_t streamId,
+ audio_port_handle_t portId,
+ audio_usage_t usage) {
+ // Default-constructed (empty) attribution source; only the usage field
+ // of the attributes is significant for these tests.
+ content::AttributionSourceState attributionSource;
+ audio_attributes_t attributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ attributes.usage = usage;
+
+ // NULL callback: attribute-change notifications are not exercised here.
+ return m_validator->startClient(streamId, portId, attributionSource, attributes, NULL);
+}
+
+/**
+ * Verify a mock stream.
+ */
+error::Result<audio_attributes_t> UsecaseValidatorTest::testVerifyAudioAttributes(
+ audio_io_handle_t streamId,
+ audio_usage_t usage) {
+ // Empty attribution source; build attributes carrying only the usage
+ // under test and ask the validator to verify/adjust them.
+ content::AttributionSourceState attributionSource;
+ audio_attributes_t attributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ attributes.usage = usage;
+
+ return m_validator->verifyAudioAttributes(streamId, attributionSource, attributes);
+}
+
+/**
+ * Test functions.
+ */
+
+/**
+ * Test adding and removing streams.
+ */
+TEST_F(UsecaseLookupTest, testAddAndRemoveStream) {
+ // Stream 1 is a media stream, stream 2 a game stream; stream 3 never exists.
+ addStream(1, false);
+ addStream(2, true);
+
+ EXPECT_NE(m_streams.find(1), m_streams.end());
+ EXPECT_NE(m_streams.find(2), m_streams.end());
+ EXPECT_EQ(m_streams.find(3), m_streams.end());
+
+ EXPECT_FALSE(isGameStream(1));
+ EXPECT_TRUE(isGameStream(2));
+ EXPECT_FALSE(isGameStream(3));
+
+ removeStream(2);
+
+ // A removed stream must no longer be reported as a game stream.
+ EXPECT_FALSE(isGameStream(2));
+}
+
+/**
+ * Verify attributes usage for stream.
+ */
+TEST_F(UsecaseValidatorTest, testAttributesUsage) {
+ audio_io_handle_t gameStreamId, mediaStreamId;
+
+ // Register game and media stream.
+ gameStreamId = testRegisterStream(true);
+ mediaStreamId = testRegisterStream(false);
+ EXPECT_NE(gameStreamId, 0);
+ EXPECT_NE(mediaStreamId, 0);
+ EXPECT_NE(gameStreamId, mediaStreamId);
+
+ // Verify attributes on game stream: with no active clients the usage
+ // is expected to pass through unchanged.
+ auto attr = testVerifyAudioAttributes(gameStreamId, AUDIO_USAGE_GAME);
+ EXPECT_EQ(attr.value().usage, AUDIO_USAGE_GAME);
+
+ // Verify attributes on media stream.
+ attr = testVerifyAudioAttributes(mediaStreamId, AUDIO_USAGE_MEDIA);
+ EXPECT_EQ(attr.value().usage, AUDIO_USAGE_MEDIA);
+
+ EXPECT_EQ(m_validator->unregisterStream(gameStreamId), 0);
+ EXPECT_EQ(m_validator->unregisterStream(mediaStreamId), 0);
+}
+
+/**
+ * Test hanging client.
+ */
+TEST_F(UsecaseValidatorTest, testHangingClient) {
+ audio_io_handle_t gameStreamId, mediaStreamId;
+ audio_port_handle_t gamePortId, mediaPortId;
+
+ // Register game and media stream.
+ gameStreamId = testRegisterStream(true);
+ EXPECT_NE(gameStreamId, 0);
+ mediaStreamId = testRegisterStream(false);
+ EXPECT_NE(mediaStreamId, 0);
+
+ // Assign portId.
+ gamePortId = testCreatePortId(gameStreamId);
+ EXPECT_NE(gamePortId, 0);
+ mediaPortId = testCreatePortId(mediaStreamId);
+ EXPECT_NE(mediaPortId, 0);
+
+ // Start client on game stream.
+ testStartClient(gameStreamId, gamePortId, AUDIO_USAGE_GAME);
+
+ // Start client on media stream.
+ testStartClient(mediaStreamId, mediaPortId, AUDIO_USAGE_MEDIA);
+
+ // Deliberately unregister both streams while their clients are still
+ // started (no stopClient): unregistration must still succeed and clean
+ // up the "hanging" clients.
+ EXPECT_EQ(m_validator->unregisterStream(gameStreamId), 0);
+ EXPECT_EQ(m_validator->unregisterStream(mediaStreamId), 0);
+}
+
+/**
+ * Verify attributes usage does not change.
+ */
+TEST_F(UsecaseValidatorTest, testAttributesUsageUnchanged) {
+ audio_io_handle_t gameStreamId, mediaStreamId;
+ audio_port_handle_t gamePortId, mediaPortId, unknownPortId, voiceCommPortId;
+
+ // Register game and media stream.
+ gameStreamId = testRegisterStream(true);
+ EXPECT_NE(gameStreamId, 0);
+ mediaStreamId = testRegisterStream(false);
+ EXPECT_NE(mediaStreamId, 0);
+
+ // Assign portId.
+ gamePortId = testCreatePortId(gameStreamId);
+ EXPECT_NE(gamePortId, 0);
+ mediaPortId = testCreatePortId(mediaStreamId);
+ EXPECT_NE(mediaPortId, 0);
+ unknownPortId = testCreatePortId(mediaStreamId);
+ EXPECT_NE(unknownPortId, 0);
+ voiceCommPortId = testCreatePortId(gameStreamId);
+ EXPECT_NE(voiceCommPortId, 0);
+
+ // Usages below are expected to be passed through unchanged:
+ // GAME and VOICE_COMMUNICATION on a game stream, MEDIA and UNKNOWN on
+ // a media stream.
+ auto attr = testStartClient(gameStreamId, gamePortId, AUDIO_USAGE_GAME);
+ EXPECT_EQ(attr.value().usage, AUDIO_USAGE_GAME);
+
+ attr = testStartClient(gameStreamId, voiceCommPortId, AUDIO_USAGE_VOICE_COMMUNICATION);
+ EXPECT_EQ(attr.value().usage, AUDIO_USAGE_VOICE_COMMUNICATION);
+
+ // Verify attributes on media stream.
+ attr = testStartClient(mediaStreamId, mediaPortId, AUDIO_USAGE_MEDIA);
+ EXPECT_EQ(attr.value().usage, AUDIO_USAGE_MEDIA);
+
+ attr = testStartClient(mediaStreamId, unknownPortId, AUDIO_USAGE_UNKNOWN);
+ EXPECT_EQ(attr.value().usage, AUDIO_USAGE_UNKNOWN);
+
+ // Stop client on game and media stream.
+ EXPECT_EQ(m_validator->stopClient(gameStreamId, gamePortId), 0);
+ EXPECT_EQ(m_validator->stopClient(mediaStreamId, mediaPortId), 0);
+
+ // Unregister game and media stream.
+ EXPECT_EQ(m_validator->unregisterStream(gameStreamId), 0);
+ EXPECT_EQ(m_validator->unregisterStream(mediaStreamId), 0);
+}
+
+/**
+ * Verify attributes usage changes.
+ */
+TEST_F(UsecaseValidatorTest, testAttributesUsageChanged) {
+ audio_io_handle_t gameStreamId;
+ audio_port_handle_t mediaPortId, unknownPortId;
+
+ // Register a game stream only; both ports are attached to it.
+ gameStreamId = testRegisterStream(true);
+ EXPECT_NE(gameStreamId, 0);
+
+ // Assign portId.
+ mediaPortId = testCreatePortId(gameStreamId);
+ EXPECT_NE(mediaPortId, 0);
+ unknownPortId = testCreatePortId(gameStreamId);
+ EXPECT_NE(unknownPortId, 0);
+
+ // On a game stream, MEDIA and UNKNOWN usages are expected to be
+ // remapped to GAME by the validator.
+ auto attr = testStartClient(gameStreamId, mediaPortId, AUDIO_USAGE_MEDIA);
+ EXPECT_EQ(attr.value().usage, AUDIO_USAGE_GAME);
+
+ attr = testStartClient(gameStreamId, unknownPortId, AUDIO_USAGE_UNKNOWN);
+ EXPECT_EQ(attr.value().usage, AUDIO_USAGE_GAME);
+
+ // Unregister game stream.
+ EXPECT_EQ(m_validator->unregisterStream(gameStreamId), 0);
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libaudiousecasevalidation/tests/UsecaseValidator-test.h b/media/libaudiousecasevalidation/tests/UsecaseValidator-test.h
new file mode 100644
index 0000000..3159ab4
--- /dev/null
+++ b/media/libaudiousecasevalidation/tests/UsecaseValidator-test.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MEDIA_LIBAUDIOUSECASEVALIDATION_TESTS_USECASEVALIDATOR_TEST_H_
+#define MEDIA_LIBAUDIOUSECASEVALIDATION_TESTS_USECASEVALIDATOR_TEST_H_
+
+#include <gtest/gtest.h>
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+
+#include "media/UsecaseLookup.h"
+#include "media/UsecaseValidator.h"
+
+namespace android {
+namespace media {
+
+#define MEDIA_OUTPUT_FLAGS (audio_output_flags_t)(0xFFFFF &\
+ ~(AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ))
+
+#define GAME_OUTPUT_FLAGS (audio_output_flags_t)\
+ (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ)
+
+// Callback stub that simply counts attribute-change notifications;
+// tests inspect m_iCallCnt to verify delivery.
+class TestCallback : public UsecaseValidator::AttributesChangedCallback {
+ public:
+ TestCallback() {
+ m_iCallCnt = 0;
+ }
+ virtual ~TestCallback() { }
+ virtual void onAttributesChanged(audio_port_handle_t /*portId*/,
+ const audio_attributes_t& /*attributes*/) {
+ ++m_iCallCnt;
+ }
+
+ public:
+ // Number of times onAttributesChanged() has fired.
+ int m_iCallCnt;
+};
+
+// Inherits from UsecaseLookup (alongside ::testing::Test) so the tests can
+// reach protected internals such as m_streams and isGameStream() directly.
+class UsecaseLookupTest : public UsecaseLookup, public ::testing::Test {
+ public:
+ UsecaseLookupTest() { }
+ virtual ~UsecaseLookupTest() = default;
+};
+
+// Fixture owning a fresh UsecaseValidator per test, plus helpers that wrap
+// the validator's register/start/verify entry points with mock ids.
+class UsecaseValidatorTest : public ::testing::Test {
+ public:
+ UsecaseValidatorTest() {
+ m_validator = createUsecaseValidator();
+ }
+
+ virtual ~UsecaseValidatorTest() = default;
+
+ protected:
+ audio_io_handle_t testRegisterStream(bool outputFlagGame);
+ audio_port_handle_t testCreatePortId(audio_io_handle_t streamId);
+ error::Result<audio_attributes_t> testStartClient(audio_io_handle_t streamId,
+ audio_port_handle_t portId,
+ audio_usage_t usage);
+ error::Result<audio_attributes_t> testVerifyAudioAttributes(audio_io_handle_t streamId,
+ audio_usage_t usage);
+
+ // Validator under test, recreated for every test case.
+ std::unique_ptr<UsecaseValidator> m_validator;
+};
+
+} // namespace media
+} // namespace android
+
+#endif // MEDIA_LIBAUDIOUSECASEVALIDATION_TESTS_USECASEVALIDATOR_TEST_H_
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index abe622d..742626c 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -47,3 +47,29 @@
"libhardware_headers",
],
}
+
+cc_library_shared {
+ name: "libdownmixaidl",
+ srcs: [
+ "aidl/EffectDownmix.cpp",
+ "aidl/DownmixContext.cpp",
+ ":effectCommonFile",
+ ],
+ defaults: [
+ "aidlaudioservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ ],
+ header_libs: [
+ "libaudioeffects",
+ "libhardware_headers"
+ ],
+ shared_libs: [
+ "libaudioutils",
+ "libcutils",
+ "liblog",
+ ],
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
+ ],
+}
diff --git a/media/libeffects/downmix/aidl/DownmixContext.cpp b/media/libeffects/downmix/aidl/DownmixContext.cpp
new file mode 100644
index 0000000..6869689
--- /dev/null
+++ b/media/libeffects/downmix/aidl/DownmixContext.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_DownmixContext"
+
+#include <android-base/logging.h>
+
+#include "DownmixContext.h"
+
+using aidl::android::hardware::audio::effect::IEffect;
+using ::android::hardware::audio::common::getChannelCount;
+
+namespace aidl::android::hardware::audio::effect {
+
+DownmixContext::DownmixContext(int statusDepth, const Parameter::Common& common)
+ : EffectContext(statusDepth, common) {
+ LOG(DEBUG) << __func__;
+ mState = DOWNMIX_STATE_UNINITIALIZED;
+ // init_params() advances mState to INITIALIZED when the input channel
+ // mask in `common` is accepted; otherwise the context stays UNINITIALIZED.
+ init_params(common);
+}
+
+DownmixContext::~DownmixContext() {
+ LOG(DEBUG) << __func__;
+ // Mark unusable; buffers are released by the EffectContext base class.
+ mState = DOWNMIX_STATE_UNINITIALIZED;
+}
+
+RetCode DownmixContext::enable() {
+ LOG(DEBUG) << __func__;
+ // Only a configured (INITIALIZED) context may become ACTIVE.
+ if (mState != DOWNMIX_STATE_INITIALIZED) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = DOWNMIX_STATE_ACTIVE;
+ return RetCode::SUCCESS;
+}
+
+RetCode DownmixContext::disable() {
+ LOG(DEBUG) << __func__;
+ // Only an ACTIVE context can be stopped; returns it to INITIALIZED.
+ if (mState != DOWNMIX_STATE_ACTIVE) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = DOWNMIX_STATE_INITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+void DownmixContext::reset() {
+ LOG(DEBUG) << __func__;
+ // Stop processing (ignores the error if already stopped) and clear the
+ // data buffers held by the base EffectContext.
+ disable();
+ resetBuffer();
+}
+
+// Downmix `samples` float samples from `in` to stereo in `out`.
+// STRIP mode copies the first two channels of each frame; FOLD mode
+// delegates to audio_utils ChannelMix. Returns {STATUS_OK, samples, samples}
+// on success, or an error status with zero counts.
+IEffect::Status DownmixContext::lvmProcess(float* in, float* out, int samples) {
+ LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << samples;
+ IEffect::Status status = {EX_ILLEGAL_ARGUMENT, 0, 0};
+
+ // Input and output frame sizes must match and be non-zero; otherwise the
+ // frame count below would be wrong (or divide by zero).
+ if (in == nullptr || out == nullptr || getInputFrameSize() != getOutputFrameSize() ||
+ getInputFrameSize() == 0) {
+ return status;
+ }
+
+ status = {EX_ILLEGAL_STATE, 0, 0};
+ if (mState == DOWNMIX_STATE_UNINITIALIZED) {
+ LOG(ERROR) << __func__ << "Trying to use an uninitialized downmixer";
+ return status;
+ } else if (mState == DOWNMIX_STATE_INITIALIZED) {
+ LOG(ERROR) << __func__ << "Trying to use a non-configured downmixer";
+ return status;
+ }
+
+ LOG(DEBUG) << __func__ << " start processing";
+ // Always overwrite the output (no accumulate); the accumulate branch is
+ // kept for parity with the legacy HAL implementation.
+ bool accumulate = false;
+ int frames = samples * sizeof(float) / getInputFrameSize();
+ if (mType == Downmix::Type::STRIP) {
+ // STRIP: keep only the first two channels of every input frame.
+ int inputChannelCount = getChannelCount(mChMask);
+ while (frames) {
+ if (accumulate) {
+ out[0] = std::clamp(out[0] + in[0], -1.f, 1.f);
+ out[1] = std::clamp(out[1] + in[1], -1.f, 1.f);
+ } else {
+ out[0] = in[0];
+ out[1] = in[1];
+ }
+ in += inputChannelCount;
+ out += 2;
+ frames--;
+ }
+ } else {
+ // FOLD: mix all input channels down to stereo via ChannelMix.
+ int chMask = mChMask.get<AudioChannelLayout::layoutMask>();
+ if (!mChannelMix.process(in, out, frames, accumulate, (audio_channel_mask_t)chMask)) {
+ LOG(ERROR) << "Multichannel configuration " << mChMask.toString()
+ << " is not supported";
+ return status;
+ }
+ }
+ LOG(DEBUG) << __func__ << " done processing";
+ return {STATUS_OK, samples, samples};
+}
+
+// Validate the input channel mask from the common parameters and, when it is
+// acceptable, adopt it and move the context to the INITIALIZED state.
+// On rejection the context stays UNINITIALIZED and an error is logged.
+void DownmixContext::init_params(const Parameter::Common& common) {
+ // when configuring the effect, do not allow a blank or unsupported channel mask
+ AudioChannelLayout channelMask = common.input.base.channelMask;
+ // BUGFIX: the condition was inverted — the error path fired for VALID
+ // masks while invalid ones were silently accepted.
+ if (!isChannelMaskValid(channelMask)) {
+ LOG(ERROR) << "Downmix_Configure error: input channel mask " << channelMask.toString()
+ << " not supported";
+ } else {
+ mType = Downmix::Type::FOLD;
+ mChMask = channelMask;
+ mState = DOWNMIX_STATE_INITIALIZED;
+ }
+}
+
+// Return true when `channelMask` is a layout-style mask containing only
+// channels the downmixer supports (up to FCC_26: 22.2 plus front-wide L/R).
+bool DownmixContext::isChannelMaskValid(AudioChannelLayout channelMask) {
+ // BUGFIX: was `==`, which rejected layout masks and then performed an
+ // invalid get<layoutMask>() union access on every non-layout mask.
+ if (channelMask.getTag() != AudioChannelLayout::layoutMask) return false;
+ int chMask = channelMask.get<AudioChannelLayout::layoutMask>();
+ // check against unsupported channels (up to FCC_26)
+ constexpr uint32_t MAXIMUM_CHANNEL_MASK = AudioChannelLayout::LAYOUT_22POINT2 |
+ AudioChannelLayout::CHANNEL_FRONT_WIDE_LEFT |
+ AudioChannelLayout::CHANNEL_FRONT_WIDE_RIGHT;
+ if (chMask & ~MAXIMUM_CHANNEL_MASK) {
+ LOG(ERROR) << "Unsupported channels in " << (chMask & ~MAXIMUM_CHANNEL_MASK);
+ return false;
+ }
+ return true;
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/downmix/aidl/DownmixContext.h b/media/libeffects/downmix/aidl/DownmixContext.h
new file mode 100644
index 0000000..8a244ac
--- /dev/null
+++ b/media/libeffects/downmix/aidl/DownmixContext.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "effect-impl/EffectContext.h"
+
+#include <audio_utils/ChannelMix.h>
+
+namespace aidl::android::hardware::audio::effect {
+
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioDeviceDescription;
+
+// Lifecycle of a DownmixContext.
+enum DownmixState {
+ DOWNMIX_STATE_UNINITIALIZED, // constructed but channel mask not accepted
+ DOWNMIX_STATE_INITIALIZED, // configured, not processing
+ DOWNMIX_STATE_ACTIVE, // enabled and processing audio
+};
+
+// Per-instance state and processing engine for the AIDL downmix effect.
+// Owns the downmix type (FOLD/STRIP), the input channel mask, and the
+// ChannelMix helper used for multichannel fold-down.
+class DownmixContext final : public EffectContext {
+ public:
+ DownmixContext(int statusDepth, const Parameter::Common& common);
+ ~DownmixContext();
+ RetCode enable();
+ RetCode disable();
+ void reset();
+
+ RetCode setDmType(Downmix::Type type) {
+ mType = type;
+ return RetCode::SUCCESS;
+ }
+ Downmix::Type getDmType() const { return mType; }
+
+ RetCode setVolumeStereo(const Parameter::VolumeStereo& volumeStereo) override {
+ // FIXME change volume
+ mVolumeStereo = volumeStereo;
+ return RetCode::SUCCESS;
+ }
+ Parameter::VolumeStereo getVolumeStereo() override { return mVolumeStereo; }
+
+ RetCode setOutputDevice(const AudioDeviceDescription& device) override {
+ // FIXME change type if playing on headset vs speaker
+ mOutputDevice = device;
+ return RetCode::SUCCESS;
+ }
+ AudioDeviceDescription getOutputDevice() { return mOutputDevice; }
+
+ // Downmix `samples` floats from `in` to stereo `out`; see the .cpp for
+ // the status contract.
+ IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+ private:
+ DownmixState mState;
+ Downmix::Type mType;
+ AudioChannelLayout mChMask; // input channel mask accepted at init
+ ::android::audio_utils::channels::ChannelMix mChannelMix;
+
+ // Common Params
+ AudioDeviceDescription mOutputDevice;
+ Parameter::VolumeStereo mVolumeStereo;
+
+ void init_params(const Parameter::Common& common);
+ bool isChannelMaskValid(AudioChannelLayout channelMask);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/downmix/aidl/EffectDownmix.cpp b/media/libeffects/downmix/aidl/EffectDownmix.cpp
new file mode 100644
index 0000000..17d0736
--- /dev/null
+++ b/media/libeffects/downmix/aidl/EffectDownmix.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_DownmixImpl"
+
+#include <android-base/logging.h>
+
+#include "EffectDownmix.h"
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::DownmixImpl;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kDownmixImplUUID;
+using aidl::android::hardware::audio::effect::kDownmixTypeUUID;
+using aidl::android::media::audio::common::AudioUuid;
+
+// Factory entry point resolved by the effect loader: instantiate a
+// DownmixImpl for the downmix implementation UUID only.
+extern "C" binder_exception_t createEffect(const AudioUuid* in_impl_uuid,
+ std::shared_ptr<IEffect>* instanceSpp) {
+ if (!in_impl_uuid || *in_impl_uuid != kDownmixImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (instanceSpp) {
+ *instanceSpp = ndk::SharedRefBase::make<DownmixImpl>();
+ LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+ return EX_NONE;
+ } else {
+ LOG(ERROR) << __func__ << " invalid input parameter!";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+}
+
+// Factory query entry point: report the downmix descriptor for its
+// implementation UUID without creating an instance.
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || *in_impl_uuid != kDownmixImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ *_aidl_return = DownmixImpl::kDescriptor;
+ return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+// Static effect identity advertised to the framework: an insert effect
+// placed first in the chain, keyed by the downmix type/impl UUIDs.
+const std::string DownmixImpl::kEffectName = "Multichannel Downmix To Stereo";
+const Descriptor DownmixImpl::kDescriptor = {
+ .common = {
+ .id = {.type = kDownmixTypeUUID, .uuid = kDownmixImplUUID, .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT, .insert = Flags::Insert::FIRST},
+ .name = DownmixImpl::kEffectName,
+ .implementor = "The Android Open Source Project"}};
+
+// Return the static descriptor; fails only on a null output pointer.
+ndk::ScopedAStatus DownmixImpl::getDescriptor(Descriptor* _aidl_return) {
+ RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+ LOG(DEBUG) << __func__ << kDescriptor.toString();
+ *_aidl_return = kDescriptor;
+ return ndk::ScopedAStatus::ok();
+}
+
+// Dispatch a common (non-effect-specific) parameter to the context.
+// Each setter failure is mapped to EX_ILLEGAL_ARGUMENT with a short tag.
+ndk::ScopedAStatus DownmixImpl::setParameterCommon(const Parameter& param) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto tag = param.getTag();
+ switch (tag) {
+ case Parameter::common:
+ RETURN_IF(mContext->setCommon(param.get<Parameter::common>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setCommFailed");
+ break;
+ case Parameter::deviceDescription:
+ RETURN_IF(mContext->setOutputDevice(param.get<Parameter::deviceDescription>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setDeviceFailed");
+ break;
+ case Parameter::mode:
+ RETURN_IF(mContext->setAudioMode(param.get<Parameter::mode>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setModeFailed");
+ break;
+ case Parameter::source:
+ RETURN_IF(mContext->setAudioSource(param.get<Parameter::source>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setSourceFailed");
+ break;
+ case Parameter::volumeStereo:
+ RETURN_IF(mContext->setVolumeStereo(param.get<Parameter::volumeStereo>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setVolumeStereoFailed");
+ break;
+ default: {
+ // Remaining common tags (e.g. specific) are not handled here.
+ LOG(ERROR) << __func__ << " unsupportedParameterTag " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commonParamNotSupported");
+ }
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+// Map framework commands onto context state transitions.
+// Note: enable()/disable() return codes are intentionally not propagated.
+ndk::ScopedAStatus DownmixImpl::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ break;
+ case CommandId::STOP:
+ mContext->disable();
+ break;
+ case CommandId::RESET:
+ mContext->reset();
+ break;
+ default:
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+// Apply a downmix-specific parameter; only Downmix::type is supported.
+ndk::ScopedAStatus DownmixImpl::setParameterSpecific(const Parameter::Specific& specific) {
+ RETURN_IF(Parameter::Specific::downmix != specific.getTag(), EX_ILLEGAL_ARGUMENT,
+ "EffectNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto& dmParam = specific.get<Parameter::Specific::downmix>();
+ auto tag = dmParam.getTag();
+
+ switch (tag) {
+ case Downmix::type: {
+ RETURN_IF(mContext->setDmType(dmParam.get<Downmix::type>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setTypeFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "DownmixTagNotSupported");
+ }
+ }
+}
+
+// Retrieve a downmix-specific parameter identified by `id`; unwraps the
+// nested Downmix::Id and forwards common tags to getParameterDownmix().
+ndk::ScopedAStatus DownmixImpl::getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+ auto tag = id.getTag();
+ RETURN_IF(Parameter::Id::downmixTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
+ auto dmId = id.get<Parameter::Id::downmixTag>();
+ auto dmIdTag = dmId.getTag();
+ switch (dmIdTag) {
+ case Downmix::Id::commonTag:
+ return getParameterDownmix(dmId.get<Downmix::Id::commonTag>(), specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(dmIdTag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "DownmixTagNotSupported");
+ }
+}
+
+// Fill `specific` with the requested downmix parameter read from the
+// context; only Downmix::type is currently supported.
+ndk::ScopedAStatus DownmixImpl::getParameterDownmix(const Downmix::Tag& tag,
+ Parameter::Specific* specific) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ Downmix dmParam;
+ switch (tag) {
+ case Downmix::type: {
+ dmParam.set<Downmix::type>(mContext->getDmType());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "DownmixTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::downmix>(dmParam);
+ return ndk::ScopedAStatus::ok();
+}
+
+// Lazily create the processing context; a second call returns the
+// existing context instead of re-creating it.
+std::shared_ptr<EffectContext> DownmixImpl::createContext(const Parameter::Common& common) {
+ if (mContext) {
+ LOG(DEBUG) << __func__ << " context already exist";
+ return mContext;
+ }
+
+ mContext = std::make_shared<DownmixContext>(1 /* statusFmqDepth */, common);
+ return mContext;
+}
+
+// Drop this effect's reference to its context. shared_ptr::reset() is a
+// no-op on an empty pointer, so no null check is needed.
+RetCode DownmixImpl::releaseContext() {
+ mContext.reset();
+ return RetCode::SUCCESS;
+}
+
+// Processing method running in EffectWorker thread.
+// Delegates to the context; returns EX_NULL_POINTER if no context exists.
+IEffect::Status DownmixImpl::effectProcessImpl(float* in, float* out, int sampleToProcess) {
+ if (!mContext) {
+ LOG(ERROR) << __func__ << " nullContext";
+ return {EX_NULL_POINTER, 0, 0};
+ }
+ return mContext->lvmProcess(in, out, sampleToProcess);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/downmix/aidl/EffectDownmix.h b/media/libeffects/downmix/aidl/EffectDownmix.h
new file mode 100644
index 0000000..d590133
--- /dev/null
+++ b/media/libeffects/downmix/aidl/EffectDownmix.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+#include <audio_effects/effect_downmix.h>
+
+#include "DownmixContext.h"
+#include "effect-impl/EffectImpl.h"
+#include "effect-impl/EffectUUID.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+// AIDL effect wrapper for the multichannel-to-stereo downmix: routes
+// framework parameter/command traffic to a lazily-created DownmixContext.
+class DownmixImpl final : public EffectImpl {
+ public:
+ static const std::string kEffectName;
+ static const Descriptor kDescriptor;
+ DownmixImpl() { LOG(DEBUG) << __func__; }
+ ~DownmixImpl() {
+ cleanUp();
+ LOG(DEBUG) << __func__;
+ }
+
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+ ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+ ndk::ScopedAStatus setParameterCommon(const Parameter& param) override;
+ ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+ ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) override;
+ IEffect::Status effectProcessImpl(float* in, float* out, int process) override;
+ std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ RetCode releaseContext() override;
+
+ std::shared_ptr<EffectContext> getContext() override { return mContext; }
+ std::string getEffectName() override { return kEffectName; }
+
+ private:
+ // Created on first createContext(); released by releaseContext()/cleanUp().
+ std::shared_ptr<DownmixContext> mContext;
+ ndk::ScopedAStatus getParameterDownmix(const Downmix::Tag& tag, Parameter::Specific* specific);
+};
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/loudness/Android.bp b/media/libeffects/loudness/Android.bp
index bcd6947..fc0217b 100644
--- a/media/libeffects/loudness/Android.bp
+++ b/media/libeffects/loudness/Android.bp
@@ -44,3 +44,32 @@
header_libs: ["libaudioeffects"],
}
+
+cc_library_shared {
+ name: "libloudnessenhanceraidl",
+ srcs: [
+ "aidl/EffectLoudnessEnhancer.cpp",
+ "aidl/LoudnessEnhancerContext.cpp",
+ "dsp/core/dynamic_range_compression.cpp",
+ ":effectCommonFile",
+ ],
+ defaults: [
+ "aidlaudioservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ ],
+ header_libs: [
+ "libaudioeffects",
+ "libhardware_headers",
+ ],
+ cflags: [
+ "-Wthread-safety",
+ ],
+ shared_libs: [
+ "libcutils",
+ "liblog",
+ ],
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
+ ],
+}
diff --git a/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.cpp b/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.cpp
new file mode 100644
index 0000000..9d8bc80
--- /dev/null
+++ b/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_LoudnessEnhancerImpl"
+
+#include <android-base/logging.h>
+
+#include "EffectLoudnessEnhancer.h"
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kLoudnessEnhancerImplUUID;
+using aidl::android::hardware::audio::effect::LoudnessEnhancerImpl;
+using aidl::android::hardware::audio::effect::State;
+using aidl::android::media::audio::common::AudioUuid;
+
+extern "C" binder_exception_t createEffect(const AudioUuid* in_impl_uuid,
+                                           std::shared_ptr<IEffect>* instanceSpp) {
+    // Factory entry point resolved by the effect HAL loader: instantiates the
+    // LoudnessEnhancerImpl when the requested implementation UUID matches.
+    if (!in_impl_uuid || *in_impl_uuid != kLoudnessEnhancerImplUUID) {
+        LOG(ERROR) << __func__ << " uuid not supported";  // fixed missing separator after __func__
+        return EX_ILLEGAL_ARGUMENT;
+    }
+    if (instanceSpp) {
+        *instanceSpp = ndk::SharedRefBase::make<LoudnessEnhancerImpl>();
+        LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+        return EX_NONE;
+    } else {
+        LOG(ERROR) << __func__ << " invalid input parameter!";
+        return EX_ILLEGAL_ARGUMENT;
+    }
+}
+
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+    // Returns the static descriptor for this implementation UUID.
+    if (!in_impl_uuid || *in_impl_uuid != kLoudnessEnhancerImplUUID) {
+        LOG(ERROR) << __func__ << " uuid not supported";  // fixed missing separator after __func__
+        return EX_ILLEGAL_ARGUMENT;
+    }
+    if (!_aidl_return) {
+        // Robustness: createEffect() null-checks its out-parameter; do the same here rather than
+        // dereferencing a null pointer from an extern "C" entry point.
+        LOG(ERROR) << __func__ << " invalid output parameter!";
+        return EX_ILLEGAL_ARGUMENT;
+    }
+    *_aidl_return = LoudnessEnhancerImpl::kDescriptor;
+    return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+const std::string LoudnessEnhancerImpl::kEffectName = "Loudness Enhancer";
+const Descriptor LoudnessEnhancerImpl::kDescriptor = {  // static identity/capability info returned by queryEffect()/getDescriptor()
+        .common = {.id = {.type = kLoudnessEnhancerTypeUUID,
+                          .uuid = kLoudnessEnhancerImplUUID,
+                          .proxy = std::nullopt},
+                   .flags = {.type = Flags::Type::INSERT, .insert = Flags::Insert::FIRST},
+                   .name = LoudnessEnhancerImpl::kEffectName,
+                   .implementor = "The Android Open Source Project"}};
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::getDescriptor(Descriptor* _aidl_return) {  // copy the static kDescriptor to the caller
+    RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+    LOG(DEBUG) << __func__ << kDescriptor.toString();
+    *_aidl_return = kDescriptor;
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::commandImpl(CommandId command) {  // map START/STOP/RESET lifecycle commands onto the context
+    RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+    switch (command) {
+        case CommandId::START:
+            mContext->enable();
+            break;
+        case CommandId::STOP:
+            mContext->disable();
+            break;
+        case CommandId::RESET:
+            mContext->disable();  // RESET stops processing before clearing buffers
+            mContext->resetBuffer();
+            break;
+        default:
+            LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+            return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+                                                                    "commandIdNotSupported");
+    }
+    return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::setParameterSpecific(const Parameter::Specific& specific) {  // only LoudnessEnhancer::gainMb is settable
+    RETURN_IF(Parameter::Specific::loudnessEnhancer != specific.getTag(), EX_ILLEGAL_ARGUMENT,
+              "EffectNotSupported");
+    RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+    auto& leParam = specific.get<Parameter::Specific::loudnessEnhancer>();
+    auto tag = leParam.getTag();
+
+    switch (tag) {
+        case LoudnessEnhancer::gainMb: {  // target gain in millibels, forwarded to the context
+            RETURN_IF(mContext->setLeGain(leParam.get<LoudnessEnhancer::gainMb>()) !=
+                              RetCode::SUCCESS,
+                      EX_ILLEGAL_ARGUMENT, "setGainMbFailed");
+            return ndk::ScopedAStatus::ok();
+        }
+        default: {
+            LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+            return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+                    EX_ILLEGAL_ARGUMENT, "LoudnessEnhancerTagNotSupported");
+        }
+    }
+}
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::getParameterSpecific(const Parameter::Id& id,
+                                                              Parameter::Specific* specific) {  // dispatch on the LoudnessEnhancer::Id tag
+    RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+    auto tag = id.getTag();
+    RETURN_IF(Parameter::Id::loudnessEnhancerTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
+    auto leId = id.get<Parameter::Id::loudnessEnhancerTag>();
+    auto leIdTag = leId.getTag();
+    switch (leIdTag) {
+        case LoudnessEnhancer::Id::commonTag:
+            return getParameterLoudnessEnhancer(leId.get<LoudnessEnhancer::Id::commonTag>(),
+                                                specific);
+        default:
+            LOG(ERROR) << __func__ << " unsupported tag: " << toString(leIdTag);
+            return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+                    EX_ILLEGAL_ARGUMENT, "LoudnessEnhancerTagNotSupported");
+    }
+}
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::getParameterLoudnessEnhancer(
+        const LoudnessEnhancer::Tag& tag, Parameter::Specific* specific) {  // read one LoudnessEnhancer field from the context
+    RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+    LoudnessEnhancer leParam;
+    switch (tag) {
+        case LoudnessEnhancer::gainMb: {  // current target gain in millibels
+            leParam.set<LoudnessEnhancer::gainMb>(mContext->getLeGain());
+            break;
+        }
+        default: {
+            LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+            return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+                    EX_ILLEGAL_ARGUMENT, "LoudnessEnhancerTagNotSupported");
+        }
+    }
+
+    specific->set<Parameter::Specific::loudnessEnhancer>(leParam);
+    return ndk::ScopedAStatus::ok();
+}
+
+std::shared_ptr<EffectContext> LoudnessEnhancerImpl::createContext(
+        const Parameter::Common& common) {  // lazily create the processing context; reuse if it already exists
+    if (mContext) {
+        LOG(DEBUG) << __func__ << " context already exist";
+        return mContext;
+    }
+
+    mContext = std::make_shared<LoudnessEnhancerContext>(1 /* statusFmqDepth */, common);
+    return mContext;
+}
+
+RetCode LoudnessEnhancerImpl::releaseContext() {
+    // Tear down the processing context so a subsequent createContext() builds a fresh one.
+    if (mContext) {
+        mContext->disable();
+        mContext->resetBuffer();
+        // Bug fix: the context was previously left alive after "release", so createContext()
+        // would keep returning a stale context configured with the old Parameter::Common.
+        mContext.reset();
+    }
+    return RetCode::SUCCESS;
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status LoudnessEnhancerImpl::effectProcessImpl(float* in, float* out, int samples) {  // delegate to the context's DSP routine
+    IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+    RETURN_VALUE_IF(!mContext, status, "nullContext");
+    return mContext->lvmProcess(in, out, samples);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.h b/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.h
new file mode 100644
index 0000000..6402fd2
--- /dev/null
+++ b/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+
+#include "effect-impl/EffectImpl.h"
+#include "effect-impl/EffectUUID.h"
+#include "LoudnessEnhancerContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class LoudnessEnhancerImpl final : public EffectImpl {  // AIDL effect wrapper around LoudnessEnhancerContext
+  public:
+    static const std::string kEffectName;
+    static const Descriptor kDescriptor;
+    LoudnessEnhancerImpl() { LOG(DEBUG) << __func__; }
+    ~LoudnessEnhancerImpl() {
+        cleanUp();  // base-class teardown (presumably stops the worker and frees the context) -- defined in EffectImpl
+        LOG(DEBUG) << __func__;
+    }
+
+    ndk::ScopedAStatus commandImpl(CommandId command) override;
+    ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+    ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+    ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+                                            Parameter::Specific* specific) override;
+    IEffect::Status effectProcessImpl(float* in, float* out, int process) override;
+    std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+    RetCode releaseContext() override;
+
+    std::shared_ptr<EffectContext> getContext() override { return mContext; }
+    std::string getEffectName() override { return kEffectName; }
+
+  private:
+    std::shared_ptr<LoudnessEnhancerContext> mContext;  // created lazily in createContext()
+    ndk::ScopedAStatus getParameterLoudnessEnhancer(const LoudnessEnhancer::Tag& tag,
+                                                    Parameter::Specific* specific);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/loudness/aidl/LoudnessEnhancerContext.cpp b/media/libeffects/loudness/aidl/LoudnessEnhancerContext.cpp
new file mode 100644
index 0000000..033b222
--- /dev/null
+++ b/media/libeffects/loudness/aidl/LoudnessEnhancerContext.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LoudnessEnhancerContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+LoudnessEnhancerContext::LoudnessEnhancerContext(int statusDepth, const Parameter::Common& common)
+    : EffectContext(statusDepth, common) {  // capture sample rate and build the compressor
+    LOG(DEBUG) << __func__;
+    mState = LOUDNESS_ENHANCER_STATE_UNINITIALIZED;
+    mSampleRate = common.input.base.sampleRate;
+    init_params();  // sets default gain, creates the compressor, moves state to INITIALIZED
+}
+
+LoudnessEnhancerContext::~LoudnessEnhancerContext() {  // mCompressor is released by its unique_ptr
+    LOG(DEBUG) << __func__;
+    mState = LOUDNESS_ENHANCER_STATE_UNINITIALIZED;
+}
+
+RetCode LoudnessEnhancerContext::enable() {  // INITIALIZED -> ACTIVE; fails from any other state
+    if (mState != LOUDNESS_ENHANCER_STATE_INITIALIZED) {
+        return RetCode::ERROR_EFFECT_LIB_ERROR;
+    }
+    mState = LOUDNESS_ENHANCER_STATE_ACTIVE;
+    return RetCode::SUCCESS;
+}
+
+RetCode LoudnessEnhancerContext::disable() {  // ACTIVE -> INITIALIZED; fails from any other state
+    if (mState != LOUDNESS_ENHANCER_STATE_ACTIVE) {
+        return RetCode::ERROR_EFFECT_LIB_ERROR;
+    }
+    mState = LOUDNESS_ENHANCER_STATE_INITIALIZED;
+    return RetCode::SUCCESS;
+}
+
+void LoudnessEnhancerContext::reset() {  // re-initialize the compressor with the current target gain
+    float targetAmp = pow(10, mGain / 2000.0f);  // mB to linear amplification
+    {
+        std::lock_guard lg(mMutex);
+        if (mCompressor != nullptr) {
+            // Get samplingRate from input
+            mCompressor->Initialize(targetAmp, mSampleRate);
+        }
+    }
+}
+
+RetCode LoudnessEnhancerContext::setLeGain(int gainMb) {  // gain in millibels; no range check here -- presumably validated by the framework, TODO confirm
+    mGain = gainMb;  // NOTE(review): written outside mMutex while lvmProcess() reads it under the lock -- confirm benign
+    reset();  // apply parameter update
+    return RetCode::SUCCESS;
+}
+
+IEffect::Status LoudnessEnhancerContext::lvmProcess(float* in, float* out, int samples) {  // apply makeup gain (+ compressor) in place, then copy to out
+    LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << samples;
+
+    IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+    RETURN_VALUE_IF(!in, status, "nullInput");
+    RETURN_VALUE_IF(!out, status, "nullOutput");
+    status = {EX_ILLEGAL_STATE, 0, 0};
+    RETURN_VALUE_IF(getInputFrameSize() != getOutputFrameSize(), status, "FrameSizeMismatch");
+    auto frameSize = getInputFrameSize();
+    RETURN_VALUE_IF(0 == frameSize, status, "zeroFrameSize");
+
+    LOG(DEBUG) << __func__ << " start processing";
+    {
+        std::lock_guard lg(mMutex);
+        // PcmType is always expected to be Float 32 bit.
+        constexpr float scale = 1 << 15;  // power of 2 is lossless conversion to int16_t range
+        constexpr float inverseScale = 1.f / scale;
+        const float inputAmp = pow(10, mGain / 2000.0f) * scale;  // mB -> linear, pre-scaled to the compressor's int16 domain
+        float leftSample, rightSample;
+        if (mCompressor != nullptr) {
+            for (int inIdx = 0; inIdx < samples; inIdx += 2) {  // assumes interleaved stereo -- TODO confirm channel count is enforced upstream
+                // makeup gain is applied on the input of the compressor
+                leftSample = inputAmp * in[inIdx];
+                rightSample = inputAmp * in[inIdx + 1];
+                mCompressor->Compress(&leftSample, &rightSample);
+                in[inIdx] = leftSample * inverseScale;  // NOTE: the input buffer is modified in place
+                in[inIdx + 1] = rightSample * inverseScale;
+            }
+        } else {
+            for (int inIdx = 0; inIdx < samples; inIdx += 2) {  // no compressor: plain gain only
+                leftSample = inputAmp * in[inIdx];
+                rightSample = inputAmp * in[inIdx + 1];
+                in[inIdx] = leftSample * inverseScale;
+                in[inIdx + 1] = rightSample * inverseScale;
+            }
+        }
+        bool accumulate = false;  // accumulate mode currently unused: output is always overwritten
+        if (in != out) {
+            for (int i = 0; i < samples; i++) {
+                if (accumulate) {
+                    out[i] += in[i];
+                } else {
+                    out[i] = in[i];
+                }
+            }
+        }
+    }
+    return {STATUS_OK, samples, samples};
+}
+
+void LoudnessEnhancerContext::init_params() {
+    // Establish the default target gain and (re)create the compressor; leaves the context in
+    // the INITIALIZED state.
+    mGain = LOUDNESS_ENHANCER_DEFAULT_TARGET_GAIN_MB;
+    float targetAmp = pow(10, mGain / 2000.0f);  // mB to linear amplification
+    LOG(DEBUG) << __func__ << " Target gain = " << mGain << "mB <=> factor = " << targetAmp;  // fixed missing separator after __func__
+
+    {
+        std::lock_guard lg(mMutex);
+        mCompressor = std::make_unique<le_fx::AdaptiveDynamicRangeCompression>();
+        mCompressor->Initialize(targetAmp, mSampleRate);
+    }
+    mState = LOUDNESS_ENHANCER_STATE_INITIALIZED;
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/loudness/aidl/LoudnessEnhancerContext.h b/media/libeffects/loudness/aidl/LoudnessEnhancerContext.h
new file mode 100644
index 0000000..b478b27
--- /dev/null
+++ b/media/libeffects/loudness/aidl/LoudnessEnhancerContext.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <audio_effects/effect_loudnessenhancer.h>
+
+#include "dsp/core/dynamic_range_compression.h"
+#include "effect-impl/EffectContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+enum LoudnessEnhancerState {  // context lifecycle: UNINITIALIZED -> INITIALIZED <-> ACTIVE
+    LOUDNESS_ENHANCER_STATE_UNINITIALIZED,
+    LOUDNESS_ENHANCER_STATE_INITIALIZED,  // configured, not processing
+    LOUDNESS_ENHANCER_STATE_ACTIVE,       // processing audio
+};
+
+class LoudnessEnhancerContext final : public EffectContext {  // per-instance DSP state for the loudness enhancer
+  public:
+    LoudnessEnhancerContext(int statusDepth, const Parameter::Common& common);
+    ~LoudnessEnhancerContext();
+
+    RetCode enable();   // INITIALIZED -> ACTIVE
+    RetCode disable();  // ACTIVE -> INITIALIZED
+    void reset();       // re-init compressor with the current target gain
+
+    RetCode setLeGain(int gainMb);  // target gain in millibels
+    int getLeGain() const { return mGain; }
+
+    IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+  private:
+    std::mutex mMutex;
+    LoudnessEnhancerState mState;  // lifecycle state, see LoudnessEnhancerState
+    int mSampleRate;               // from common.input.base.sampleRate at construction
+    int mGain;                     // target gain in mB
+    // In this implementation, there is no coupling between the compression on the left and right
+    // channels
+    std::unique_ptr<le_fx::AdaptiveDynamicRangeCompression> mCompressor GUARDED_BY(mMutex);
+
+    void init_params();  // default gain + compressor creation; state -> INITIALIZED
+};
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
index fa26e60..1bcde74 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
@@ -20,9 +20,13 @@
#include "BundleContext.h"
#include "BundleTypes.h"
+#include "math.h"
namespace aidl::android::hardware::audio::effect {
+using aidl::android::media::audio::common::AudioDeviceDescription;
+using aidl::android::media::audio::common::AudioDeviceType;
+
RetCode BundleContext::init() {
std::lock_guard lg(mMutex);
// init with pre-defined preset NORMAL
@@ -67,38 +71,276 @@
}
RetCode BundleContext::enable() {
+    if (mEnabled) return RetCode::ERROR_ILLEGAL_PARAMETER;  // double-enable is rejected
+    // Bass boost or Virtualizer can be temporarily disabled if playing over device speaker due to
+    // their nature.
+    bool tempDisabled = false;
+    switch (mType) {
+        case lvm::BundleEffectType::EQUALIZER:
+            LOG(DEBUG) << __func__ << " enable bundle EQ";
+            if (mSamplesToExitCountEq <= 0) mNumberEffectsEnabled++;  // only count if not still draining
+            mSamplesToExitCountEq = (mSamplesPerSecond * 0.1);  // 100 ms drain window on later disable
+            mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::EQUALIZER));  // cancel pending drain
+            break;
+        case lvm::BundleEffectType::BASS_BOOST:
+            LOG(DEBUG) << __func__ << " enable bundle BB";
+            if (mSamplesToExitCountBb <= 0) mNumberEffectsEnabled++;
+            mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::BASS_BOOST));
+            mSamplesToExitCountBb = (mSamplesPerSecond * 0.1);
+            tempDisabled = mBassTempDisabled;  // device may have temporarily muted bass boost
+            break;
+        case lvm::BundleEffectType::VIRTUALIZER:
+            LOG(DEBUG) << __func__ << " enable bundle VR";
+            if (mSamplesToExitCountVirt <= 0) mNumberEffectsEnabled++;
+            mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::VIRTUALIZER));
+            mSamplesToExitCountVirt = (mSamplesPerSecond * 0.1);
+            tempDisabled = mVirtualizerTempDisabled;
+            break;
+        default:
+            // Add handling for other effects
+            break;
+    }
+    mEnabled = true;
+    return (tempDisabled ? RetCode::SUCCESS : enableOperatingMode());  // skip LVM mode change while temp-disabled
+}
+
+RetCode BundleContext::enableOperatingMode() {
LVM_ControlParams_t params;
{
std::lock_guard lg(mMutex);
RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, ¶ms),
RetCode::ERROR_EFFECT_LIB_ERROR, "failGetControlParams");
- if (mType == lvm::BundleEffectType::EQUALIZER) {
- LOG(DEBUG) << __func__ << " enable bundle EQ";
- params.EQNB_OperatingMode = LVM_EQNB_ON;
+ switch (mType) {
+ case lvm::BundleEffectType::EQUALIZER:
+ LOG(DEBUG) << __func__ << " enable bundle EQ";
+ params.EQNB_OperatingMode = LVM_EQNB_ON;
+ break;
+ case lvm::BundleEffectType::BASS_BOOST:
+ LOG(DEBUG) << __func__ << " enable bundle BB";
+ params.BE_OperatingMode = LVM_BE_ON;
+ break;
+ case lvm::BundleEffectType::VIRTUALIZER:
+ LOG(DEBUG) << __func__ << " enable bundle VR";
+ params.VirtualizerOperatingMode = LVM_MODE_ON;
+ break;
+ default:
+ // Add handling for other effects
+ break;
}
RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, ¶ms),
RetCode::ERROR_EFFECT_LIB_ERROR, "failSetControlParams");
}
- mEnabled = true;
- // LvmEffect_limitLevel(pContext);
- return RetCode::SUCCESS;
+ return limitLevel();
}
RetCode BundleContext::disable() {
+    if (!mEnabled) return RetCode::ERROR_ILLEGAL_PARAMETER;  // double-disable is rejected
+    switch (mType) {
+        case lvm::BundleEffectType::EQUALIZER:
+            LOG(DEBUG) << __func__ << " disable bundle EQ";
+            mEffectInDrain |= 1 << int(lvm::BundleEffectType::EQUALIZER);  // mark for drain in lvmProcess()
+            break;
+        case lvm::BundleEffectType::BASS_BOOST:
+            LOG(DEBUG) << __func__ << " disable bundle BB";
+            mEffectInDrain |= 1 << int(lvm::BundleEffectType::BASS_BOOST);
+            break;
+        case lvm::BundleEffectType::VIRTUALIZER:
+            LOG(DEBUG) << __func__ << " disable bundle VR";
+            mEffectInDrain |= 1 << int(lvm::BundleEffectType::VIRTUALIZER);
+            break;
+        default:
+            // Add handling for other effects
+            break;
+    }
+    mEnabled = false;
+    return disableOperatingMode();
+}
+
+RetCode BundleContext::disableOperatingMode() {
LVM_ControlParams_t params;
{
std::lock_guard lg(mMutex);
RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, ¶ms),
RetCode::ERROR_EFFECT_LIB_ERROR, "failGetControlParams");
- if (mType == lvm::BundleEffectType::EQUALIZER) {
- LOG(DEBUG) << __func__ << " disable bundle EQ";
- params.EQNB_OperatingMode = LVM_EQNB_OFF;
+ switch (mType) {
+ case lvm::BundleEffectType::EQUALIZER:
+ LOG(DEBUG) << __func__ << " disable bundle EQ";
+ params.EQNB_OperatingMode = LVM_EQNB_OFF;
+ break;
+ case lvm::BundleEffectType::BASS_BOOST:
+ LOG(DEBUG) << __func__ << " disable bundle BB";
+ params.BE_OperatingMode = LVM_BE_OFF;
+ break;
+ case lvm::BundleEffectType::VIRTUALIZER:
+ LOG(DEBUG) << __func__ << " disable bundle VR";
+ params.VirtualizerOperatingMode = LVM_MODE_OFF;
+ break;
+ default:
+ // Add handling for other effects
+ break;
}
RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, ¶ms),
RetCode::ERROR_EFFECT_LIB_ERROR, "failSetControlParams");
}
mEnabled = false;
- // LvmEffect_limitLevel(pContext);
+ return limitLevel();
+}
+
+RetCode BundleContext::limitLevel() {
+    // Estimate the worst-case output energy added by the currently active effects (EQ band
+    // gains, bass boost, virtualizer) and pre-attenuate the volume stage so the bundle does
+    // not clip. Ports the legacy LvmEffect_limitLevel() behavior.
+    // (Also repairs mojibake: "&params" had been corrupted to "\u00b6ms" in this hunk.)
+    int gainCorrection = 0;
+    // Count the energy contribution per band for EQ and BassBoost only if they are active.
+    float energyContribution = 0;
+    float energyCross = 0;
+    float energyBassBoost = 0;
+    float crossCorrection = 0;
+    LVM_ControlParams_t params;
+    {
+        std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+                        RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+        bool eqEnabled = params.EQNB_OperatingMode == LVM_EQNB_ON;
+        bool bbEnabled = params.BE_OperatingMode == LVM_BE_ON;
+        bool viEnabled = params.VirtualizerOperatingMode == LVM_MODE_ON;
+
+        if (eqEnabled) {
+            // Per-band energy, weighted by each band's energy coefficient.
+            for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+                float bandFactor = mBandGaindB[i] / 15.0;
+                float bandCoefficient = lvm::kBandEnergyCoefficient[i];
+                float bandEnergy = bandFactor * bandCoefficient * bandCoefficient;
+                if (bandEnergy > 0) energyContribution += bandEnergy;
+            }
+
+            // cross EQ coefficients
+            float bandFactorSum = 0;
+            for (int i = 0; i < lvm::MAX_NUM_BANDS - 1; i++) {
+                float bandFactor1 = mBandGaindB[i] / 15.0;
+                float bandFactor2 = mBandGaindB[i + 1] / 15.0;
+
+                if (bandFactor1 > 0 && bandFactor2 > 0) {
+                    float crossEnergy =
+                            bandFactor1 * bandFactor2 * lvm::kBandEnergyCrossCoefficient[i];
+                    bandFactorSum += bandFactor1 * bandFactor2;
+
+                    if (crossEnergy > 0) energyCross += crossEnergy;
+                }
+            }
+            bandFactorSum -= 1.0;
+            if (bandFactorSum > 0) crossCorrection = bandFactorSum * 0.7;
+        }
+        // BassBoost contribution
+        if (bbEnabled) {
+            float boostFactor = mBassStrengthSaved / 1000.0;
+            float boostCoefficient = lvm::kBassBoostEnergyCoefficient;
+
+            energyContribution += boostFactor * boostCoefficient * boostCoefficient;
+
+            if (eqEnabled) {
+                // Interaction between bass boost and the EQ's low bands.
+                for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+                    float bandFactor = mBandGaindB[i] / 15.0;
+                    float bandCrossCoefficient = lvm::kBassBoostEnergyCrossCoefficient[i];
+                    float bandEnergy = boostFactor * bandFactor * bandCrossCoefficient;
+                    if (bandEnergy > 0) energyBassBoost += bandEnergy;
+                }
+            }
+        }
+        // Virtualizer contribution
+        if (viEnabled) {
+            energyContribution += lvm::kVirtualizerContribution * lvm::kVirtualizerContribution;
+        }
+
+        double totalEnergyEstimation =
+                sqrt(energyContribution + energyCross + energyBassBoost) - crossCorrection;
+        LOG(INFO) << " TOTAL energy estimation: " << totalEnergyEstimation << " dB";
+
+        // roundoff
+        int maxLevelRound = (int)(totalEnergyEstimation + 0.99);
+        if (maxLevelRound + mLevelSaved > 0) {
+            gainCorrection = maxLevelRound + mLevelSaved;
+        }
+
+        params.VC_EffectLevel = mLevelSaved - gainCorrection;
+        if (params.VC_EffectLevel < -96) {
+            params.VC_EffectLevel = -96;  // clamp to the LVM volume-control floor
+        }
+        LOG(INFO) << "\tVol: " << mLevelSaved << ", GainCorrection: " << gainCorrection
+                  << ", Actual vol: " << params.VC_EffectLevel;
+
+        /* Activate the initial settings */
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+                        RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+
+        if (mFirstVolume) {
+            RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetVolumeNoSmoothing(mInstance, &params),
+                            RetCode::ERROR_EFFECT_LIB_ERROR, " setVolumeNoSmoothingFailed");
+            LOG(INFO) << "\tLVM_VOLUME: Disabling Smoothing for first volume change to remove "
+                         "spikes/clicks";
+            mFirstVolume = false;
+        }
+    }
+
+    return RetCode::SUCCESS;
+}
+
+bool BundleContext::isDeviceSupportedBassBoost(  // NOTE(review): returns true for speaker-type outputs -- the legacy bundle wrapper temporarily DISABLES bass boost on exactly these devices, so the name's polarity looks inverted; confirm intended semantics
+        const aidl::android::media::audio::common::AudioDeviceDescription& device) {
+    return (device == AudioDeviceDescription{AudioDeviceType::OUT_SPEAKER, ""} ||
+            device == AudioDeviceDescription{AudioDeviceType::OUT_CARKIT,
+                                             AudioDeviceDescription::CONNECTION_BT_SCO} ||
+            device == AudioDeviceDescription{AudioDeviceType::OUT_SPEAKER,
+                                             AudioDeviceDescription::CONNECTION_BT_A2DP});
+}
+
+bool BundleContext::isDeviceSupportedVirtualizer(  // true for headphone/headset-type outputs, where virtualization is effective
+        const aidl::android::media::audio::common::AudioDeviceDescription& device) {
+    return (device == AudioDeviceDescription{AudioDeviceType::OUT_HEADSET,
+                                             AudioDeviceDescription::CONNECTION_ANALOG} ||
+            device == AudioDeviceDescription{AudioDeviceType::OUT_HEADPHONE,
+                                             AudioDeviceDescription::CONNECTION_ANALOG} ||
+            device == AudioDeviceDescription{AudioDeviceType::OUT_HEADPHONE,
+                                             AudioDeviceDescription::CONNECTION_BT_A2DP} ||
+            device == AudioDeviceDescription{AudioDeviceType::OUT_HEADSET,
+                                             AudioDeviceDescription::CONNECTION_USB});
+}
+
+RetCode BundleContext::setOutputDevice(
+        const aidl::android::media::audio::common::AudioDeviceDescription& device) {
+    // Track the output device and temporarily mute bass boost / virtualizer on devices where
+    // they are ineffective or harmful, mirroring the legacy EFFECT_CMD_SET_DEVICE handling.
+    // The effect keeps reporting its logical state; that only changes via start/stop commands.
+    mOutputDevice = device;
+    switch (mType) {
+        case lvm::BundleEffectType::BASS_BOOST:
+            // NOTE(review): isDeviceSupportedBassBoost() currently returns true exactly for the
+            // speaker-type devices on which the legacy wrapper disables bass boost, so a true
+            // result means "must be temporarily disabled" here -- TODO confirm/rename helper.
+            if (isDeviceSupportedBassBoost(device)) {
+                if (mEnabled) {
+                    disableOperatingMode();
+                }
+                mBassTempDisabled = true;
+            } else {
+                // Re-enable only when the effect is logically enabled and had been temporarily
+                // disabled. Was `if (!mEnabled)`, which switched the LVM operating mode ON
+                // while the effect was supposed to be off.
+                if (mEnabled && mBassTempDisabled) {
+                    enableOperatingMode();
+                }
+                mBassTempDisabled = false;
+            }
+            break;
+        case lvm::BundleEffectType::VIRTUALIZER:
+            // Virtualization only works on headphone-type outputs: disable it when the device
+            // is NOT supported. The original condition was inverted and muted the virtualizer
+            // exactly on the devices that support it.
+            if (!isDeviceSupportedVirtualizer(device)) {
+                if (mEnabled) {
+                    disableOperatingMode();
+                }
+                mVirtualizerTempDisabled = true;
+            } else {
+                if (mEnabled && mVirtualizerTempDisabled) {
+                    enableOperatingMode();
+                }
+                mVirtualizerTempDisabled = false;
+            }
+            break;
+        default:
+            break;
+    }
+    return RetCode::SUCCESS;
+}
@@ -252,6 +494,55 @@
return RetCode::SUCCESS;
}
+RetCode BundleContext::setBassBoostStrength(int strength) {
+    // Map a per-mille strength [0..1000] onto the LVM bass-enhancer level [0..15], then
+    // re-run the output level limiter. (Repairs mojibake: "&params" had been corrupted to
+    // "\u00b6ms" in this hunk.)
+    if (strength < BassBoost::MIN_PER_MILLE_STRENGTH ||
+        strength > BassBoost::MAX_PER_MILLE_STRENGTH) {
+        LOG(ERROR) << __func__ << " invalid strength: " << strength;
+        return RetCode::ERROR_ILLEGAL_PARAMETER;
+    }
+
+    // Update Control Parameter
+    LVM_ControlParams_t params;
+    {
+        std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+                        RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+        params.BE_EffectLevel = (LVM_INT16)((15 * strength) / 1000);  // 0..15 effect level
+        params.BE_CentreFreq = LVM_BE_CENTRE_90Hz;
+
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+                        RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+    }
+    mBassStrengthSaved = strength;
+    LOG(INFO) << __func__ << " success with strength " << strength;
+    return limitLevel();
+}
+
+RetCode BundleContext::setVirtualizerStrength(int strength) {
+    // Map a per-mille strength [0..1000] onto the LVM concert-sound level [0..32767], then
+    // re-run the output level limiter. (Repairs mojibake: "&params" had been corrupted to
+    // "\u00b6ms" in this hunk.)
+    if (strength < Virtualizer::MIN_PER_MILLE_STRENGTH ||
+        strength > Virtualizer::MAX_PER_MILLE_STRENGTH) {
+        // Consistency: log the rejected value like setBassBoostStrength() does.
+        LOG(ERROR) << __func__ << " invalid strength: " << strength;
+        return RetCode::ERROR_ILLEGAL_PARAMETER;
+    }
+
+    // Update Control Parameter
+    LVM_ControlParams_t params;
+    {
+        std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+                        RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+        params.CS_EffectLevel = ((strength * 32767) / 1000);
+
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+                        RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+    }
+
+    mVirtStrengthSaved = strength;
+    LOG(INFO) << __func__ << " success with strength " << strength;
+    return limitLevel();
+}
+
void BundleContext::initControlParameter(LVM_ControlParams_t& params) const {
/* General parameters */
params.OperatingMode = LVM_MODE_ON;
@@ -339,21 +630,126 @@
IEffect::Status BundleContext::lvmProcess(float* in, float* out, int samples) {
IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!in, status, "nullInput");
+ RETURN_VALUE_IF(!out, status, "nullOutput");
+ status = {EX_ILLEGAL_STATE, 0, 0};
+ int64_t inputFrameCount = getCommon().input.frameCount;
+ int64_t outputFrameCount = getCommon().output.frameCount;
+ RETURN_VALUE_IF(inputFrameCount != outputFrameCount, status, "FrameCountMismatch");
+ int isDataAvailable = true;
auto frameSize = getInputFrameSize();
- RETURN_VALUE_IF(0== frameSize, status, "nullContext");
+ RETURN_VALUE_IF(0 == frameSize, status, "zeroFrameSize");
LOG(DEBUG) << __func__ << " start processing";
- LVM_UINT16 frames = samples * sizeof(float) / frameSize;
- LVM_ReturnStatus_en lvmStatus;
- {
- std::lock_guard lg(mMutex);
- lvmStatus = LVM_Process(mInstance, in, out, frames, 0);
+ if ((mEffectProcessCalled & 1 << int(mType)) != 0) {
+ const int undrainedEffects = mEffectInDrain & ~mEffectProcessCalled;
+ if ((undrainedEffects & 1 << int(lvm::BundleEffectType::EQUALIZER)) != 0) {
+ LOG(DEBUG) << "Draining EQUALIZER";
+ mSamplesToExitCountEq = 0;
+ --mNumberEffectsEnabled;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::EQUALIZER));
+ }
+ if ((undrainedEffects & 1 << int(lvm::BundleEffectType::BASS_BOOST)) != 0) {
+ LOG(DEBUG) << "Draining BASS_BOOST";
+ mSamplesToExitCountBb = 0;
+ --mNumberEffectsEnabled;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::BASS_BOOST));
+ }
+ if ((undrainedEffects & 1 << int(lvm::BundleEffectType::VIRTUALIZER)) != 0) {
+ LOG(DEBUG) << "Draining VIRTUALIZER";
+ mSamplesToExitCountVirt = 0;
+ --mNumberEffectsEnabled;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::VIRTUALIZER));
+ }
}
-
- if (lvmStatus != LVM_SUCCESS) {
- LOG(ERROR) << __func__ << lvmStatus;
- return {EX_UNSUPPORTED_OPERATION, 0, 0};
+ mEffectProcessCalled |= 1 << int(mType);
+ if (!mEnabled) {
+ switch (mType) {
+ case lvm::BundleEffectType::EQUALIZER:
+ if (mSamplesToExitCountEq > 0) {
+ mSamplesToExitCountEq -= samples;
+ }
+ if (mSamplesToExitCountEq <= 0) {
+ isDataAvailable = false;
+ if ((mEffectInDrain & 1 << int(lvm::BundleEffectType::EQUALIZER)) != 0) {
+ mNumberEffectsEnabled--;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::EQUALIZER));
+ }
+ LOG(DEBUG) << "Effect_process() this is the last frame for EQUALIZER";
+ }
+ break;
+ case lvm::BundleEffectType::BASS_BOOST:
+ if (mSamplesToExitCountBb > 0) {
+ mSamplesToExitCountBb -= samples;
+ }
+ if (mSamplesToExitCountBb <= 0) {
+ isDataAvailable = false;
+ if ((mEffectInDrain & 1 << int(lvm::BundleEffectType::BASS_BOOST)) != 0) {
+ mNumberEffectsEnabled--;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::BASS_BOOST));
+ }
+ LOG(DEBUG) << "Effect_process() this is the last frame for BASS_BOOST";
+ }
+ break;
+ case lvm::BundleEffectType::VIRTUALIZER:
+ if (mSamplesToExitCountVirt > 0) {
+ mSamplesToExitCountVirt -= samples;
+ }
+ if (mSamplesToExitCountVirt <= 0) {
+ isDataAvailable = false;
+ if ((mEffectInDrain & 1 << int(lvm::BundleEffectType::VIRTUALIZER)) != 0) {
+ mNumberEffectsEnabled--;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::VIRTUALIZER));
+ }
+ LOG(DEBUG) << "Effect_process() this is the last frame for VIRTUALIZER";
+ }
+ break;
+ default:
+ // Add handling for other effects
+ break;
+ }
+ }
+ if (isDataAvailable) {
+ mNumberEffectsCalled++;
+ }
+ bool accumulate = false;
+ if (mNumberEffectsCalled >= mNumberEffectsEnabled) {
+ // We expect the # effects called to be equal to # effects enabled in sequence (including
+ // draining effects). Warn if this is not the case due to inconsistent calls.
+ ALOGW_IF(mNumberEffectsCalled > mNumberEffectsEnabled,
+ "%s Number of effects called %d is greater than number of effects enabled %d",
+ __func__, mNumberEffectsCalled, mNumberEffectsEnabled);
+ mEffectProcessCalled = 0; // reset our consistency check.
+ if (!isDataAvailable) {
+ LOG(DEBUG) << "Effect_process() processing last frame";
+ }
+ mNumberEffectsCalled = 0;
+ LVM_UINT16 frames = samples * sizeof(float) / frameSize;
+ float* outTmp = (accumulate ? getWorkBuffer() : out);
+ /* Process the samples */
+ LVM_ReturnStatus_en lvmStatus;
+ {
+ std::lock_guard lg(mMutex);
+ lvmStatus = LVM_Process(mInstance, in, outTmp, frames, 0);
+ if (lvmStatus != LVM_SUCCESS) {
+ LOG(ERROR) << __func__ << lvmStatus;
+ return {EX_UNSUPPORTED_OPERATION, 0, 0};
+ }
+ if (accumulate) {
+ for (int i = 0; i < samples; i++) {
+ out[i] += outTmp[i];
+ }
+ }
+ }
+ } else {
+ for (int i = 0; i < samples; i++) {
+ if (accumulate) {
+ out[i] += in[i];
+ } else {
+ out[i] = in[i];
+ }
+ }
}
LOG(DEBUG) << __func__ << " done processing";
return {STATUS_OK, samples, samples};
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.h b/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
index 7b38e66..116ae6f 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
@@ -43,9 +43,11 @@
lvm::BundleEffectType getBundleType() const { return mType; }
RetCode enable();
+ RetCode enableOperatingMode();
RetCode disable();
+ RetCode disableOperatingMode();
- void setSampleRate (const int sampleRate) { mSampleRate = sampleRate; }
+ void setSampleRate(const int sampleRate) { mSampleRate = sampleRate; }
int getSampleRate() const { return mSampleRate; }
void setChannelMask(const aidl::android::media::audio::common::AudioChannelLayout& chMask) {
@@ -54,17 +56,31 @@
aidl::android::media::audio::common::AudioChannelLayout getChannelMask() const {
return mChMask;
}
+ bool isDeviceSupportedBassBoost(
+ const aidl::android::media::audio::common::AudioDeviceDescription& device);
+ bool isDeviceSupportedVirtualizer(
+ const aidl::android::media::audio::common::AudioDeviceDescription& device);
+ RetCode setOutputDevice(
+ const aidl::android::media::audio::common::AudioDeviceDescription& device) override;
RetCode setEqualizerPreset(const std::size_t presetIdx);
int getEqualizerPreset() const { return mCurPresetIdx; }
RetCode setEqualizerBandLevels(const std::vector<Equalizer::BandLevel>& bandLevels);
std::vector<Equalizer::BandLevel> getEqualizerBandLevels() const;
+ RetCode setBassBoostStrength(int strength);
+ int getBassBoostStrength() const { return mBassStrengthSaved; }
+
+ RetCode setVirtualizerStrength(int strength);
+ int getVirtualizerStrength() const { return mVirtStrengthSaved; }
+
RetCode setVolumeStereo(const Parameter::VolumeStereo& volumeStereo) override;
Parameter::VolumeStereo getVolumeStereo() override { return mVolumeStereo; }
IEffect::Status lvmProcess(float* in, float* out, int samples);
+ IEffect::Status processEffect(float* in, float* out, int sampleToProcess);
+
private:
std::mutex mMutex;
const lvm::BundleEffectType mType;
@@ -106,6 +122,7 @@
void initControlParameter(LVM_ControlParams_t& params) const;
void initHeadroomParameter(LVM_HeadroomParams_t& params) const;
+ RetCode limitLevel();
int16_t VolToDb(uint32_t vol) const;
LVM_INT16 LVC_ToDB_s32Tos16(LVM_INT32 Lin_fix) const;
RetCode updateControlParameter(const std::vector<Equalizer::BandLevel>& bandLevels);
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h b/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
index 97f08a0..95a0dab 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
@@ -70,6 +70,8 @@
static const Equalizer::Capability kEqCap = {.bandFrequencies = kEqBandFrequency,
.presets = kEqPresets};
+static const std::string kEqualizerEffectName = "EqualizerBundle";
+
static const Descriptor kEqualizerDesc = {
.common = {.id = {.type = kEqualizerTypeUUID,
.uuid = kEqualizerBundleImplUUID,
@@ -77,13 +79,49 @@
.flags = {.type = Flags::Type::INSERT,
.insert = Flags::Insert::FIRST,
.volume = Flags::Volume::CTRL},
- .name = "EqualizerBundle",
+ .name = kEqualizerEffectName,
.implementor = "NXP Software Ltd."},
.capability = Capability::make<Capability::equalizer>(kEqCap)};
+static const bool mStrengthSupported = true;
+
+static const BassBoost::Capability kBassBoostCap = {.strengthSupported = mStrengthSupported};
+
+static const std::string kBassBoostEffectName = "Dynamic Bass Boost";
+
+static const Descriptor kBassBoostDesc = {
+ .common = {.id = {.type = kBassBoostTypeUUID,
+ .uuid = kBassBoostBundleImplUUID,
+ .proxy = kBassBoostProxyUUID},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::FIRST,
+ .volume = Flags::Volume::CTRL,
+ .deviceIndication = true},
+ .cpuLoad = BASS_BOOST_CUP_LOAD_ARM9E,
+ .memoryUsage = BUNDLE_MEM_USAGE,
+ .name = kBassBoostEffectName,
+ .implementor = "NXP Software Ltd."},
+ .capability = Capability::make<Capability::bassBoost>(kBassBoostCap)};
+
+static const Virtualizer::Capability kVirtualizerCap = {.strengthSupported = mStrengthSupported};
+
+static const std::string kVirtualizerEffectName = "Virtualizer";
+
+static const Descriptor kVirtualizerDesc = {
+ .common = {.id = {.type = kVirtualizerTypeUUID,
+ .uuid = kVirtualizerBundleImplUUID,
+ .proxy = kVirtualizerProxyUUID},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::FIRST,
+ .volume = Flags::Volume::CTRL,
+ .deviceIndication = true},
+ .cpuLoad = VIRTUALIZER_CUP_LOAD_ARM9E,
+ .memoryUsage = BUNDLE_MEM_USAGE,
+ .name = kVirtualizerEffectName,
+ .implementor = "NXP Software Ltd."},
+ .capability = Capability::make<Capability::virtualizer>(kVirtualizerCap)};
+
// TODO: add descriptors for other bundle effect types here.
-static const Descriptor kVirtualizerDesc;
-static const Descriptor kBassBoostDesc;
static const Descriptor kVolumeDesc;
/* The following tables have been computed using the actual levels measured by the output of
diff --git a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
index f99bdd5..b435ab4 100644
--- a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
@@ -31,14 +31,21 @@
using aidl::android::hardware::audio::effect::Descriptor;
using aidl::android::hardware::audio::effect::EffectBundleAidl;
-using aidl::android::hardware::audio::effect::kEqualizerBundleImplUUID;
using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kBassBoostBundleImplUUID;
+using aidl::android::hardware::audio::effect::kEqualizerBundleImplUUID;
+using aidl::android::hardware::audio::effect::kVirtualizerBundleImplUUID;
using aidl::android::hardware::audio::effect::State;
using aidl::android::media::audio::common::AudioUuid;
+bool isUuidSupported(const AudioUuid* uuid) {
+ return (*uuid == kEqualizerBundleImplUUID || *uuid == kBassBoostBundleImplUUID ||
+ *uuid == kVirtualizerBundleImplUUID);
+}
+
extern "C" binder_exception_t createEffect(const AudioUuid* uuid,
std::shared_ptr<IEffect>* instanceSpp) {
- if (uuid == nullptr || *uuid != kEqualizerBundleImplUUID) {
+ if (uuid == nullptr || !isUuidSupported(uuid)) {
LOG(ERROR) << __func__ << "uuid not supported";
return EX_ILLEGAL_ARGUMENT;
}
@@ -53,11 +60,17 @@
}
extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
- if (!in_impl_uuid || *in_impl_uuid != kEqualizerBundleImplUUID) {
+ if (!in_impl_uuid || !isUuidSupported(in_impl_uuid)) {
LOG(ERROR) << __func__ << "uuid not supported";
return EX_ILLEGAL_ARGUMENT;
}
- *_aidl_return = aidl::android::hardware::audio::effect::lvm::kEqualizerDesc;
+ if (*in_impl_uuid == kEqualizerBundleImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kEqualizerDesc;
+ } else if (*in_impl_uuid == kBassBoostBundleImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kBassBoostDesc;
+ } else if (*in_impl_uuid == kVirtualizerBundleImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kVirtualizerDesc;
+ }
return EX_NONE;
}
@@ -68,6 +81,15 @@
if (uuid == kEqualizerBundleImplUUID) {
mType = lvm::BundleEffectType::EQUALIZER;
mDescriptor = &lvm::kEqualizerDesc;
+ mEffectName = &lvm::kEqualizerEffectName;
+ } else if (uuid == kBassBoostBundleImplUUID) {
+ mType = lvm::BundleEffectType::BASS_BOOST;
+ mDescriptor = &lvm::kBassBoostDesc;
+ mEffectName = &lvm::kBassBoostEffectName;
+ } else if (uuid == kVirtualizerBundleImplUUID) {
+ mType = lvm::BundleEffectType::VIRTUALIZER;
+ mDescriptor = &lvm::kVirtualizerDesc;
+ mEffectName = &lvm::kVirtualizerEffectName;
} else {
// TODO: add other bundle effect types here.
LOG(ERROR) << __func__ << uuid.toString() << " not supported yet!";
@@ -124,52 +146,104 @@
ndk::ScopedAStatus EffectBundleAidl::setParameterSpecific(const Parameter::Specific& specific) {
LOG(DEBUG) << __func__ << " specific " << specific.toString();
- auto tag = specific.getTag();
- RETURN_IF(tag != Parameter::Specific::equalizer, EX_ILLEGAL_ARGUMENT,
- "specificParamNotSupported");
RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ auto tag = specific.getTag();
+ switch (tag) {
+ case Parameter::Specific::equalizer:
+ return setParameterEqualizer(specific);
+ case Parameter::Specific::bassBoost:
+ return setParameterBassBoost(specific);
+ case Parameter::Specific::virtualizer:
+ return setParameterVirtualizer(specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "specificParamNotSupported");
+ }
+}
+
+ndk::ScopedAStatus EffectBundleAidl::setParameterEqualizer(const Parameter::Specific& specific) {
auto& eq = specific.get<Parameter::Specific::equalizer>();
auto eqTag = eq.getTag();
switch (eqTag) {
case Equalizer::preset:
RETURN_IF(mContext->setEqualizerPreset(eq.get<Equalizer::preset>()) != RetCode::SUCCESS,
EX_ILLEGAL_ARGUMENT, "setBandLevelsFailed");
- break;
+ return ndk::ScopedAStatus::ok();
case Equalizer::bandLevels:
RETURN_IF(mContext->setEqualizerBandLevels(eq.get<Equalizer::bandLevels>()) !=
RetCode::SUCCESS,
EX_ILLEGAL_ARGUMENT, "setBandLevelsFailed");
- break;
+ return ndk::ScopedAStatus::ok();
default:
LOG(ERROR) << __func__ << " unsupported parameter " << specific.toString();
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
"eqTagNotSupported");
}
- return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectBundleAidl::setParameterBassBoost(const Parameter::Specific& specific) {
+ auto& bb = specific.get<Parameter::Specific::bassBoost>();
+ auto bbTag = bb.getTag();
+ switch (bbTag) {
+ case BassBoost::strengthPm: {
+ RETURN_IF(mContext->setBassBoostStrength(bb.get<BassBoost::strengthPm>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setStrengthFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default:
+ LOG(ERROR) << __func__ << " unsupported parameter " << specific.toString();
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "bbTagNotSupported");
+ }
+}
+
+ndk::ScopedAStatus EffectBundleAidl::setParameterVirtualizer(const Parameter::Specific& specific) {
+ auto& vr = specific.get<Parameter::Specific::virtualizer>();
+ auto vrTag = vr.getTag();
+ switch (vrTag) {
+ case Virtualizer::strengthPm: {
+ RETURN_IF(mContext->setVirtualizerStrength(vr.get<Virtualizer::strengthPm>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setStrengthFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default:
+ LOG(ERROR) << __func__ << " unsupported parameter " << specific.toString();
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "vrTagNotSupported");
+ }
}
ndk::ScopedAStatus EffectBundleAidl::getParameterSpecific(const Parameter::Id& id,
Parameter::Specific* specific) {
RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
auto tag = id.getTag();
- RETURN_IF(Parameter::Id::equalizerTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
- auto eqId = id.get<Parameter::Id::equalizerTag>();
- auto eqIdTag = eqId.getTag();
- switch (eqIdTag) {
- case Equalizer::Id::commonTag:
- return getParameterEqualizer(eqId.get<Equalizer::Id::commonTag>(), specific);
+
+ switch (tag) {
+ case Parameter::Id::equalizerTag:
+ return getParameterEqualizer(id.get<Parameter::Id::equalizerTag>(), specific);
+ case Parameter::Id::bassBoostTag:
+ return getParameterBassBoost(id.get<Parameter::Id::bassBoostTag>(), specific);
+ case Parameter::Id::virtualizerTag:
+ return getParameterVirtualizer(id.get<Parameter::Id::virtualizerTag>(), specific);
default:
- LOG(ERROR) << __func__ << " tag " << toString(eqIdTag) << " not supported";
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
- "EqualizerTagNotSupported");
+ "wrongIdTag");
}
}
-ndk::ScopedAStatus EffectBundleAidl::getParameterEqualizer(const Equalizer::Tag& tag,
+ndk::ScopedAStatus EffectBundleAidl::getParameterEqualizer(const Equalizer::Id& id,
Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != Equalizer::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "EqualizerTagNotSupported");
RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
Equalizer eqParam;
+
+ auto tag = id.get<Equalizer::Id::commonTag>();
switch (tag) {
case Equalizer::bandLevels: {
eqParam.set<Equalizer::bandLevels>(mContext->getEqualizerBandLevels());
@@ -190,6 +264,55 @@
return ndk::ScopedAStatus::ok();
}
+ndk::ScopedAStatus EffectBundleAidl::getParameterBassBoost(const BassBoost::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != BassBoost::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "BassBoostTagNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ BassBoost bbParam;
+
+ auto tag = id.get<BassBoost::Id::commonTag>();
+ switch (tag) {
+ case BassBoost::strengthPm: {
+ bbParam.set<BassBoost::strengthPm>(mContext->getBassBoostStrength());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " not handled tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "BassBoostTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::bassBoost>(bbParam);
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectBundleAidl::getParameterVirtualizer(const Virtualizer::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != Virtualizer::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "VirtualizerTagNotSupported");
+
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ Virtualizer vrParam;
+
+ auto tag = id.get<Virtualizer::Id::commonTag>();
+ switch (tag) {
+ case Virtualizer::strengthPm: {
+ vrParam.set<Virtualizer::strengthPm>(mContext->getVirtualizerStrength());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " not handled tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "VirtualizerTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::virtualizer>(vrParam);
+ return ndk::ScopedAStatus::ok();
+}
+
std::shared_ptr<EffectContext> EffectBundleAidl::createContext(const Parameter::Common& common) {
if (mContext) {
LOG(DEBUG) << __func__ << " context already exist";
@@ -237,6 +360,8 @@
// Processing method running in EffectWorker thread.
IEffect::Status EffectBundleAidl::effectProcessImpl(float* in, float* out, int sampleToProcess) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!mContext, status, "nullContext");
return mContext->lvmProcess(in, out, sampleToProcess);
}
diff --git a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h
index 9e2f656..ebf100a 100644
--- a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h
+++ b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h
@@ -50,16 +50,26 @@
ndk::ScopedAStatus commandImpl(CommandId command) override;
- std::string getEffectName() override { return "EqualizerBundle"; }
+ std::string getEffectName() override { return *mEffectName; }
private:
std::shared_ptr<BundleContext> mContext;
const Descriptor* mDescriptor;
+ const std::string* mEffectName;
lvm::BundleEffectType mType = lvm::BundleEffectType::EQUALIZER;
IEffect::Status status(binder_status_t status, size_t consumed, size_t produced);
- ndk::ScopedAStatus getParameterEqualizer(const Equalizer::Tag& tag,
+
+ ndk::ScopedAStatus setParameterBassBoost(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterBassBoost(const BassBoost::Id& id,
Parameter::Specific* specific);
+
+ ndk::ScopedAStatus setParameterEqualizer(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterEqualizer(const Equalizer::Id& id,
+ Parameter::Specific* specific);
+ ndk::ScopedAStatus setParameterVirtualizer(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterVirtualizer(const Virtualizer::Id& id,
+ Parameter::Specific* specific);
};
} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
index 6788b56..5e29b3f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -191,8 +191,8 @@
uint8_t key[kBlockSize],
uint8_t iv[kBlockSize],
CryptoPlugin::Mode mode,
- size_t *clearbytes,
- size_t *encryptedbytes)
+ uint32_t *clearbytes,
+ uint32_t *encryptedbytes)
{
// size needed to store all the crypto data
size_t cryptosize;
@@ -236,7 +236,7 @@
if (!meta.findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
return NULL;
}
- size_t numSubSamples = cryptedsize / sizeof(size_t);
+ size_t numSubSamples = cryptedsize / sizeof(uint32_t);
if (numSubSamples <= 0) {
ALOGE("getSampleCryptoInfo INVALID numSubSamples: %zu", numSubSamples);
@@ -285,8 +285,8 @@
(uint8_t*) key,
(uint8_t*) iv,
(CryptoPlugin::Mode)mode,
- (size_t*) cleardata,
- (size_t*) crypteddata);
+ (uint32_t*) cleardata,
+ (uint32_t*) crypteddata);
}
} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h
index 4360656..232638c 100644
--- a/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h
+++ b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h
@@ -106,8 +106,8 @@
uint8_t key[kBlockSize],
uint8_t iv[kBlockSize],
CryptoPlugin::Mode mode,
- size_t *clearbytes,
- size_t *encryptedbytes);
+ uint32_t *clearbytes,
+ uint32_t *encryptedbytes);
static CryptoInfo *getSampleCryptoInfo(MetaDataBase &meta);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d6028d9..4a5524d 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -6315,6 +6315,11 @@
flags |= OMX_BUFFERFLAG_EOS;
}
+ int32_t isDecodeOnly = 0;
+ if (buffer->meta()->findInt32("decode-only", &isDecodeOnly) && isDecodeOnly != 0) {
+ flags |= OMX_BUFFERFLAG_DECODEONLY;
+ mCodec->mDecodeOnlyTimesUs.emplace(timeUs);
+ }
size_t size = buffer->size();
size_t offset = buffer->offset();
if (buffer->base() != info->mCodecData->base()) {
@@ -6344,6 +6349,10 @@
ALOGV("[%s] calling emptyBuffer %u w/ EOS",
mCodec->mComponentName.c_str(), bufferID);
} else {
+ if (flags & OMX_BUFFERFLAG_DECODEONLY) {
+ ALOGV("[%s] calling emptyBuffer %u w/ decode only flag",
+ mCodec->mComponentName.c_str(), bufferID);
+ }
#if TRACK_BUFFER_TIMING
ALOGI("[%s] calling emptyBuffer %u w/ time %lld us",
mCodec->mComponentName.c_str(), bufferID, (long long)timeUs);
@@ -6634,6 +6643,39 @@
info->mData.clear();
+ // Workaround: if OMX_BUFFERFLAG_DECODEONLY is not implemented in
+ // HAL, the flag is then removed in the corresponding output buffer.
+
+ // for all buffers that were marked as DECODE_ONLY, remove their timestamp
+ // if it is smaller than the timestamp of the buffer that was
+ // just received
+ while (!mCodec->mDecodeOnlyTimesUs.empty() &&
+ *mCodec->mDecodeOnlyTimesUs.begin() < timeUs) {
+ mCodec->mDecodeOnlyTimesUs.erase(mCodec->mDecodeOnlyTimesUs.begin());
+ }
+ // if OMX_BUFFERFLAG_DECODEONLY is not implemented in HAL, we need to restore the
+ // OMX_BUFFERFLAG_DECODEONLY flag to the frames we had saved in the set, the set
+ // contains the timestamps of buffers that were marked as DECODE_ONLY by the app
+ if (!mCodec->mDecodeOnlyTimesUs.empty() &&
+ *mCodec->mDecodeOnlyTimesUs.begin() == timeUs) {
+ mCodec->mDecodeOnlyTimesUs.erase(timeUs);
+ // If the app queued the last valid buffer as DECODE_ONLY and queued an additional
+ // empty buffer as EOS, it's possible that HAL sets the last valid frame as EOS
+ // instead and drops the empty buffer. In such a case, we should not add back
+ // the OMX_BUFFERFLAG_DECODEONLY flag to it, as doing so will make it so that the
+ // app does not receive the EOS buffer, which breaks the contract of EOS buffers
+ if (flags & OMX_BUFFERFLAG_EOS) {
+ // Set buffer size to 0, as described by
+ // https://developer.android.com/reference/android/media/MediaCodec.BufferInfo?hl=en#size
+ // a buffer of size 0 should only be used to carry the EOS flag and should
+ // be discarded by the app as it has no data
+ buffer->setRange(0, 0);
+ } else {
+ // re-add the OMX_BUFFERFLAG_DECODEONLY flag to the buffer in case it is
+ // not the end of stream buffer
+ flags |= OMX_BUFFERFLAG_DECODEONLY;
+ }
+ }
mCodec->mBufferChannel->drainThisBuffer(info->mBufferID, flags);
info->mStatus = BufferInfo::OWNED_BY_DOWNSTREAM;
@@ -6854,6 +6896,7 @@
mCodec->mConverter[0].clear();
mCodec->mConverter[1].clear();
mCodec->mComponentName.clear();
+ mCodec->mDecodeOnlyTimesUs.clear();
}
bool ACodec::UninitializedState::onMessageReceived(const sp<AMessage> &msg) {
@@ -8839,6 +8882,7 @@
ALOGV("[%s] Now Flushing", mCodec->mComponentName.c_str());
mFlushComplete[kPortIndexInput] = mFlushComplete[kPortIndexOutput] = false;
+ mCodec->mDecodeOnlyTimesUs.clear();
// If we haven't transitioned after 3 seconds, we're probably stuck.
sp<AMessage> msg = new AMessage(ACodec::kWhatCheckIfStuck, mCodec);
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 88b15ae..c5a59ff 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -114,6 +114,10 @@
if (it->mClientBuffer->meta()->findInt32("csd", &csd)) {
it->mCodecBuffer->meta()->setInt32("csd", csd);
}
+ int32_t decodeOnly;
+ if (it->mClientBuffer->meta()->findInt32("decode-only", &decodeOnly)) {
+ it->mCodecBuffer->meta()->setInt32("decode-only", decodeOnly);
+ }
}
ALOGV("queueInputBuffer #%d", it->mBufferId);
sp<AMessage> msg = mInputBufferFilled->dup();
@@ -263,6 +267,10 @@
if (it->mClientBuffer->meta()->findInt32("csd", &csd)) {
it->mCodecBuffer->meta()->setInt32("csd", csd);
}
+ int32_t decodeOnly;
+ if (it->mClientBuffer->meta()->findInt32("decode-only", &decodeOnly)) {
+ it->mCodecBuffer->meta()->setInt32("decode-only", decodeOnly);
+ }
ALOGV("queueSecureInputBuffer #%d", it->mBufferId);
sp<AMessage> msg = mInputBufferFilled->dup();
@@ -634,6 +642,9 @@
if (omxFlags & OMX_BUFFERFLAG_EOS) {
flags |= MediaCodec::BUFFER_FLAG_EOS;
}
+ if (omxFlags & OMX_BUFFERFLAG_DECODEONLY) {
+ flags |= MediaCodec::BUFFER_FLAG_DECODE_ONLY;
+ }
it->mClientBuffer->meta()->setInt32("flags", flags);
mCallback->onOutputBufferAvailable(
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index f3ef39e..a1ada4f 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1512,6 +1512,21 @@
}
}
+bool MediaCodec::discardDecodeOnlyOutputBuffer(size_t index) {
+ Mutex::Autolock al(mBufferLock);
+ BufferInfo *info = &mPortBuffers[kPortIndexOutput][index];
+ sp<MediaCodecBuffer> buffer = info->mData;
+ int32_t flags;
+ CHECK(buffer->meta()->findInt32("flags", &flags));
+ if (flags & BUFFER_FLAG_DECODE_ONLY) {
+ info->mOwnedByClient = false;
+ info->mData.clear();
+ mBufferChannel->discardBuffer(buffer);
+ return true;
+ }
+ return false;
+}
+
// static
status_t MediaCodec::PostAndAwaitResponse(
const sp<AMessage> &msg, sp<AMessage> *response) {
@@ -3201,7 +3216,8 @@
return true;
}
-bool MediaCodec::handleDequeueOutputBuffer(const sp<AReplyToken> &replyID, bool newRequest) {
+MediaCodec::DequeueOutputResult MediaCodec::handleDequeueOutputBuffer(
+ const sp<AReplyToken> &replyID, bool newRequest) {
if (!isExecuting() || (mFlags & kFlagIsAsync)
|| (newRequest && (mFlags & kFlagDequeueOutputPending))) {
PostReplyWithError(replyID, INVALID_OPERATION);
@@ -3214,7 +3230,7 @@
sp<AMessage> response = new AMessage;
BufferInfo *info = peekNextPortBuffer(kPortIndexOutput);
if (!info) {
- return false;
+ return DequeueOutputResult::kNoBuffer;
}
// In synchronous mode, output format change should be handled
@@ -3225,10 +3241,13 @@
if (mFlags & kFlagOutputFormatChanged) {
PostReplyWithError(replyID, INFO_FORMAT_CHANGED);
mFlags &= ~kFlagOutputFormatChanged;
- return true;
+ return DequeueOutputResult::kRepliedWithError;
}
ssize_t index = dequeuePortBuffer(kPortIndexOutput);
+ if (discardDecodeOnlyOutputBuffer(index)) {
+ return DequeueOutputResult::kDiscardedBuffer;
+ }
response->setSize("index", index);
response->setSize("offset", buffer->offset());
@@ -3247,9 +3266,10 @@
statsBufferReceived(timeUs, buffer);
response->postReply(replyID);
+ return DequeueOutputResult::kSuccess;
}
- return true;
+ return DequeueOutputResult::kRepliedWithError;
}
void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
@@ -3844,11 +3864,26 @@
handleOutputFormatChangeIfNeeded(buffer);
onOutputBufferAvailable();
} else if (mFlags & kFlagDequeueOutputPending) {
- CHECK(handleDequeueOutputBuffer(mDequeueOutputReplyID));
-
- ++mDequeueOutputTimeoutGeneration;
- mFlags &= ~kFlagDequeueOutputPending;
- mDequeueOutputReplyID = 0;
+ DequeueOutputResult dequeueResult =
+ handleDequeueOutputBuffer(mDequeueOutputReplyID);
+ switch (dequeueResult) {
+ case DequeueOutputResult::kNoBuffer:
+ TRESPASS();
+ break;
+ case DequeueOutputResult::kDiscardedBuffer:
+ break;
+ case DequeueOutputResult::kRepliedWithError:
+ [[fallthrough]];
+ case DequeueOutputResult::kSuccess:
+ {
+ ++mDequeueOutputTimeoutGeneration;
+ mFlags &= ~kFlagDequeueOutputPending;
+ mDequeueOutputReplyID = 0;
+ break;
+ }
+ default:
+ TRESPASS();
+ }
} else {
postActivityNotificationIfPossible();
}
@@ -4547,27 +4582,39 @@
break;
}
- if (handleDequeueOutputBuffer(replyID, true /* new request */)) {
- break;
- }
+ DequeueOutputResult dequeueResult =
+ handleDequeueOutputBuffer(replyID, true /* new request */);
+ switch (dequeueResult) {
+ case DequeueOutputResult::kNoBuffer:
+ [[fallthrough]];
+ case DequeueOutputResult::kDiscardedBuffer:
+ {
+ int64_t timeoutUs;
+ CHECK(msg->findInt64("timeoutUs", &timeoutUs));
- int64_t timeoutUs;
- CHECK(msg->findInt64("timeoutUs", &timeoutUs));
+ if (timeoutUs == 0LL) {
+ PostReplyWithError(replyID, -EAGAIN);
+ break;
+ }
- if (timeoutUs == 0LL) {
- PostReplyWithError(replyID, -EAGAIN);
- break;
- }
+ mFlags |= kFlagDequeueOutputPending;
+ mDequeueOutputReplyID = replyID;
- mFlags |= kFlagDequeueOutputPending;
- mDequeueOutputReplyID = replyID;
-
- if (timeoutUs > 0LL) {
- sp<AMessage> timeoutMsg =
- new AMessage(kWhatDequeueOutputTimedOut, this);
- timeoutMsg->setInt32(
- "generation", ++mDequeueOutputTimeoutGeneration);
- timeoutMsg->post(timeoutUs);
+ if (timeoutUs > 0LL) {
+ sp<AMessage> timeoutMsg =
+ new AMessage(kWhatDequeueOutputTimedOut, this);
+ timeoutMsg->setInt32(
+ "generation", ++mDequeueOutputTimeoutGeneration);
+ timeoutMsg->post(timeoutUs);
+ }
+ break;
+ }
+ case DequeueOutputResult::kRepliedWithError:
+ [[fallthrough]];
+ case DequeueOutputResult::kSuccess:
+ break;
+ default:
+ TRESPASS();
}
break;
}
@@ -5229,6 +5276,7 @@
buffer->setRange(offset, size);
buffer->meta()->setInt64("timeUs", timeUs);
+
if (flags & BUFFER_FLAG_EOS) {
buffer->meta()->setInt32("eos", true);
}
@@ -5237,7 +5285,12 @@
buffer->meta()->setInt32("csd", true);
}
- if (mTunneled) {
+ bool isBufferDecodeOnly = ((flags & BUFFER_FLAG_DECODE_ONLY) != 0);
+ if (isBufferDecodeOnly) {
+ buffer->meta()->setInt32("decode-only", true);
+ }
+
+ if (mTunneled && !isBufferDecodeOnly) {
TunnelPeekState previousState = mTunnelPeekState;
switch(mTunnelPeekState){
case TunnelPeekState::kEnabledNoBuffer:
@@ -5550,6 +5603,9 @@
void MediaCodec::onOutputBufferAvailable() {
int32_t index;
while ((index = dequeuePortBuffer(kPortIndexOutput)) >= 0) {
+ if (discardDecodeOnlyOutputBuffer(index)) {
+ continue;
+ }
const sp<MediaCodecBuffer> &buffer =
mPortBuffers[kPortIndexOutput][index].mData;
sp<AMessage> msg = mCallback->dup();
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 38a4c1e..08c7917 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -17,6 +17,7 @@
#ifndef A_CODEC_H_
#define A_CODEC_H_
+#include <set>
#include <stdint.h>
#include <list>
#include <vector>
@@ -270,6 +271,7 @@
std::vector<BufferInfo> mBuffers[2];
bool mPortEOS[2];
status_t mInputEOSResult;
+ std::set<int64_t> mDecodeOnlyTimesUs;
std::list<sp<AMessage>> mDeferredQueue;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index edb3786..dbc97db 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -90,6 +90,7 @@
BUFFER_FLAG_EOS = 4,
BUFFER_FLAG_PARTIAL_FRAME = 8,
BUFFER_FLAG_MUXER_DATA = 16,
+ BUFFER_FLAG_DECODE_ONLY = 32,
};
enum CVODegree {
@@ -409,6 +410,13 @@
kBufferRendered,
};
+ enum class DequeueOutputResult {
+ kNoBuffer,
+ kDiscardedBuffer,
+ kRepliedWithError,
+ kSuccess,
+ };
+
struct ResourceManagerServiceProxy;
State mState;
@@ -555,7 +563,9 @@
sp<MediaCodecBuffer> *buffer, sp<AMessage> *format);
bool handleDequeueInputBuffer(const sp<AReplyToken> &replyID, bool newRequest = false);
- bool handleDequeueOutputBuffer(const sp<AReplyToken> &replyID, bool newRequest = false);
+ DequeueOutputResult handleDequeueOutputBuffer(
+ const sp<AReplyToken> &replyID,
+ bool newRequest = false);
void cancelPendingDequeueOperations();
void extractCSD(const sp<AMessage> &format);
@@ -639,6 +649,7 @@
void statsBufferSent(int64_t presentationUs, const sp<MediaCodecBuffer> &buffer);
void statsBufferReceived(int64_t presentationUs, const sp<MediaCodecBuffer> &buffer);
+ bool discardDecodeOnlyOutputBuffer(size_t index);
enum {
// the default shape of our latency histogram buckets
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 78792c5..4e9623b 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -869,6 +869,7 @@
constexpr int32_t BUFFER_FLAG_END_OF_STREAM = 4;
constexpr int32_t BUFFER_FLAG_KEY_FRAME = 1;
constexpr int32_t BUFFER_FLAG_PARTIAL_FRAME = 8;
+constexpr int32_t BUFFER_FLAG_DECODE_ONLY = 32;
constexpr int32_t BUFFER_FLAG_SYNC_FRAME = 1;
constexpr int32_t CONFIGURE_FLAG_ENCODE = 1;
constexpr int32_t CONFIGURE_FLAG_USE_BLOCK_MODEL = 2;
diff --git a/media/module/extractors/midi/MidiExtractor.cpp b/media/module/extractors/midi/MidiExtractor.cpp
index 984c76a..d0efb2f 100644
--- a/media/module/extractors/midi/MidiExtractor.cpp
+++ b/media/module/extractors/midi/MidiExtractor.cpp
@@ -327,29 +327,12 @@
bool SniffMidi(CDataSource *source, float *confidence)
{
- // look for standard prefix / magic number info in the files.
- // "MThd" for midi
- // "XMF_"
- // this will be very fast.
- //
- char hdr_magic[4];
- if (source->readAt(source->handle, 0, hdr_magic, sizeof(hdr_magic)) == sizeof(hdr_magic)) {
- if (memcmp(hdr_magic,"MThd", sizeof(hdr_magic)) == 0) {
- *confidence = 0.85;
- ALOGV("SniffMidi: yes, MThd");
- return true;
- }
- if (memcmp(hdr_magic,"XMF_", sizeof(hdr_magic)) == 0) {
- *confidence = 0.85;
- ALOGV("SniffMidi: yes, XMF_");
- return true;
- }
+ MidiEngine p(source, NULL, NULL);
+ if (p.initCheck() == OK) {
+ *confidence = 0.8;
+ ALOGV("SniffMidi: yes");
+ return true;
}
-
- // alternatives:
- // instantiate MidiEngine, (expensively) parsing the entire file to decide.
-
-
ALOGV("SniffMidi: no");
return false;
diff --git a/media/module/extractors/mp4/MPEG4Extractor.cpp b/media/module/extractors/mp4/MPEG4Extractor.cpp
index 8f8fc1b..3a5a869 100644
--- a/media/module/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/module/extractors/mp4/MPEG4Extractor.cpp
@@ -394,6 +394,15 @@
return MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1;
case FOURCC("mhm1"):
return MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1;
+ case FOURCC("dtsc"):
+ return MEDIA_MIMETYPE_AUDIO_DTS;
+ case FOURCC("dtse"):
+ case FOURCC("dtsh"):
+ return MEDIA_MIMETYPE_AUDIO_DTS_HD;
+ case FOURCC("dtsl"):
+ return MEDIA_MIMETYPE_AUDIO_DTS_HD_MA;
+ case FOURCC("dtsx"):
+ return MEDIA_MIMETYPE_AUDIO_DTS_UHD_P2;
default:
ALOGW("Unknown fourcc: %c%c%c%c",
(fourcc >> 24) & 0xff,
@@ -1804,6 +1813,11 @@
case 0x6D730055: // "ms U" mp3 audio
case FOURCC("mha1"):
case FOURCC("mhm1"):
+ case FOURCC("dtsc"):
+ case FOURCC("dtse"):
+ case FOURCC("dtsh"):
+ case FOURCC("dtsl"):
+ case FOURCC("dtsx"):
{
if (mIsQT && depth >= 1 && mPath[depth - 1] == FOURCC("wave")) {
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index 48d3e6f..41d4e16 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -66,6 +66,7 @@
"av-types-aidl-cpp",
"effect-aidl-cpp",
"libaudioclient_aidl_conversion",
+ "libaudioflinger_timing",
"libaudiofoundation",
"libaudiohal",
"libaudioprocessing",
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index cf7e135..d3453f5 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -194,7 +194,6 @@
BINDER_METHOD_ENTRY(restoreOutput) \
BINDER_METHOD_ENTRY(openInput) \
BINDER_METHOD_ENTRY(closeInput) \
-BINDER_METHOD_ENTRY(invalidateStream) \
BINDER_METHOD_ENTRY(setVoiceVolume) \
BINDER_METHOD_ENTRY(getRenderPosition) \
BINDER_METHOD_ENTRY(getInputFramesLost) \
@@ -232,6 +231,9 @@
BINDER_METHOD_ENTRY(setDeviceConnectedState) \
BINDER_METHOD_ENTRY(setRequestedLatencyMode) \
BINDER_METHOD_ENTRY(getSupportedLatencyModes) \
+BINDER_METHOD_ENTRY(setBluetoothVariableLatencyEnabled) \
+BINDER_METHOD_ENTRY(isBluetoothVariableLatencyEnabled) \
+BINDER_METHOD_ENTRY(supportsBluetoothVariableLatency) \
BINDER_METHOD_ENTRY(getSoundDoseInterface) \
// singleton for Binder Method Statistics for IAudioFlinger
@@ -328,7 +330,8 @@
mPatchCommandThread(sp<PatchCommandThread>::make()),
mDeviceEffectManager(sp<DeviceEffectManager>::make(*this)),
mMelReporter(sp<MelReporter>::make(*this)),
- mSystemReady(false)
+ mSystemReady(false),
+ mBluetoothLatencyModesEnabled(true)
{
// Move the audio session unique ID generator start base as time passes to limit risk of
// generating the same ID again after an audioserver restart.
@@ -1667,6 +1670,44 @@
return thread->getSupportedLatencyModes(modes);
}
+status_t AudioFlinger::setBluetoothVariableLatencyEnabled(bool enabled) {
+ Mutex::Autolock _l(mLock);
+ status_t status = INVALID_OPERATION;
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ // Success if at least one PlaybackThread supports Bluetooth latency modes
+ if (mPlaybackThreads.valueAt(i)->setBluetoothVariableLatencyEnabled(enabled) == NO_ERROR) {
+ status = NO_ERROR;
+ }
+ }
+ if (status == NO_ERROR) {
+ mBluetoothLatencyModesEnabled.store(enabled);
+ }
+ return status;
+}
+
+status_t AudioFlinger::isBluetoothVariableLatencyEnabled(bool *enabled) {
+ if (enabled == nullptr) {
+ return BAD_VALUE;
+ }
+ *enabled = mBluetoothLatencyModesEnabled.load();
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::supportsBluetoothVariableLatency(bool* support) {
+ if (support == nullptr) {
+ return BAD_VALUE;
+ }
+ Mutex::Autolock _l(mLock);
+ *support = false;
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ if (mAudioHwDevs.valueAt(i)->supportsBluetoothVariableLatency()) {
+ *support = true;
+ break;
+ }
+ }
+ return NO_ERROR;
+}
+
status_t AudioFlinger::getSoundDoseInterface(const sp<media::ISoundDoseCallback>& callback,
sp<media::ISoundDose>* soundDose) {
if (soundDose == nullptr) {
@@ -2580,6 +2621,13 @@
flags = static_cast<AudioHwDevice::Flags>(flags | AudioHwDevice::AHWD_IS_INSERT);
}
+
+ if (bool supports = false;
+ dev->supportsBluetoothVariableLatency(&supports) == NO_ERROR && supports) {
+ flags = static_cast<AudioHwDevice::Flags>(flags |
+ AudioHwDevice::AHWD_SUPPORTS_BT_LATENCY_MODES);
+ }
+
audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
AudioHwDevice *audioDevice = new AudioHwDevice(handle, name, dev, flags);
if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
@@ -2953,6 +3001,7 @@
if (thread->isMsdDevice()) {
thread->setDownStreamPatch(&patch);
}
+ thread->setBluetoothVariableLatencyEnabled(mBluetoothLatencyModesEnabled.load());
return thread;
}
}
@@ -3398,17 +3447,23 @@
closeInputFinish(thread);
}
-status_t AudioFlinger::invalidateStream(audio_stream_type_t stream)
-{
+status_t AudioFlinger::invalidateTracks(const std::vector<audio_port_handle_t> &portIds) {
Mutex::Autolock _l(mLock);
- ALOGV("invalidateStream() stream %d", stream);
+ ALOGV("%s", __func__);
+ std::set<audio_port_handle_t> portIdSet(portIds.begin(), portIds.end());
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
- thread->invalidateTracks(stream);
+ thread->invalidateTracks(portIdSet);
+ if (portIdSet.empty()) {
+ return NO_ERROR;
+ }
}
for (size_t i = 0; i < mMmapThreads.size(); i++) {
- mMmapThreads[i]->invalidateTracks(stream);
+ mMmapThreads[i]->invalidateTracks(portIdSet);
+ if (portIdSet.empty()) {
+ return NO_ERROR;
+ }
}
return NO_ERROR;
}
@@ -4607,7 +4662,6 @@
case TransactionCode::RESTORE_OUTPUT:
case TransactionCode::OPEN_INPUT:
case TransactionCode::CLOSE_INPUT:
- case TransactionCode::INVALIDATE_STREAM:
case TransactionCode::SET_VOICE_VOLUME:
case TransactionCode::MOVE_EFFECTS:
case TransactionCode::SET_EFFECT_SUSPENDED:
@@ -4622,6 +4676,7 @@
case TransactionCode::SET_DEVICE_CONNECTED_STATE:
case TransactionCode::SET_REQUESTED_LATENCY_MODE:
case TransactionCode::GET_SUPPORTED_LATENCY_MODES:
+ case TransactionCode::INVALIDATE_TRACKS:
ALOGW("%s: transaction %d received from PID %d",
__func__, code, IPCThreadState::self()->getCallingPid());
// return status only for non void methods
@@ -4650,7 +4705,10 @@
case TransactionCode::SYSTEM_READY:
case TransactionCode::SET_AUDIO_HAL_PIDS:
case TransactionCode::SET_VIBRATOR_INFOS:
- case TransactionCode::UPDATE_SECONDARY_OUTPUTS: {
+ case TransactionCode::UPDATE_SECONDARY_OUTPUTS:
+ case TransactionCode::SET_BLUETOOTH_VARIABLE_LATENCY_ENABLED:
+ case TransactionCode::IS_BLUETOOTH_VARIABLE_LATENCY_ENABLED:
+ case TransactionCode::SUPPORTS_BLUETOOTH_VARIABLE_LATENCY: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 4d3c074..6dd1cda 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -88,6 +88,7 @@
#include <audio_utils/TimestampVerifier.h>
#include <sounddose/SoundDoseManager.h>
+#include <timing/MonotonicFrameCounter.h>
#include "FastCapture.h"
#include "FastMixer.h"
@@ -209,8 +210,6 @@
virtual status_t closeInput(audio_io_handle_t input);
- virtual status_t invalidateStream(audio_stream_type_t stream);
-
virtual status_t setVoiceVolume(float volume);
virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
@@ -306,9 +305,17 @@
virtual status_t getSupportedLatencyModes(audio_io_handle_t output,
std::vector<audio_latency_mode_t>* modes);
+ virtual status_t setBluetoothVariableLatencyEnabled(bool enabled);
+
+ virtual status_t isBluetoothVariableLatencyEnabled(bool* enabled);
+
+ virtual status_t supportsBluetoothVariableLatency(bool* support);
+
virtual status_t getSoundDoseInterface(const sp<media::ISoundDoseCallback>& callback,
sp<media::ISoundDose>* soundDose);
+ status_t invalidateTracks(const std::vector<audio_port_handle_t>& portIds) override;
+
status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
const std::function<status_t()>& delegate) override;
@@ -1052,6 +1059,9 @@
/** Interface for interacting with the AudioService. */
mediautils::atomic_sp<IAudioManager> mAudioManager;
+
+ // Bluetooth Variable latency control logic is enabled or disabled
+ std::atomic_bool mBluetoothLatencyModesEnabled;
};
#undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index 8c5d239..1749f3f 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -40,6 +40,8 @@
// Means that this isn't a terminal module, and software patches
// are used to transport audio data further.
AHWD_IS_INSERT = 0x4,
+ // This Module supports BT Latency mode control
+ AHWD_SUPPORTS_BT_LATENCY_MODES = 0x8,
};
AudioHwDevice(audio_module_handle_t handle,
@@ -64,6 +66,10 @@
return (0 != (mFlags & AHWD_IS_INSERT));
}
+ bool supportsBluetoothVariableLatency() const {
+ return (0 != (mFlags & AHWD_SUPPORTS_BT_LATENCY_MODES));
+ }
+
audio_module_handle_t handle() const { return mHandle; }
const char *moduleName() const { return mModuleName; }
sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
diff --git a/services/audioflinger/MelReporter.cpp b/services/audioflinger/MelReporter.cpp
index 8cc7eab..b2e8027 100644
--- a/services/audioflinger/MelReporter.cpp
+++ b/services/audioflinger/MelReporter.cpp
@@ -27,6 +27,10 @@
namespace android {
bool AudioFlinger::MelReporter::shouldComputeMelForDeviceType(audio_devices_t device) {
+ if (mSoundDoseManager.computeCsdOnAllDevices()) {
+ return true;
+ }
+
switch (device) {
case AUDIO_DEVICE_OUT_WIRED_HEADSET:
case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
diff --git a/services/audioflinger/MelReporter.h b/services/audioflinger/MelReporter.h
index f73b3d6..b1abc59 100644
--- a/services/audioflinger/MelReporter.h
+++ b/services/audioflinger/MelReporter.h
@@ -39,7 +39,7 @@
}
/** Returns true if we should compute MEL for the given device. */
- static bool shouldComputeMelForDeviceType(audio_devices_t device);
+ bool shouldComputeMelForDeviceType(audio_devices_t device);
// For now only support internal MelReporting
[[nodiscard]] bool isHalReportingEnabled() const { return false; }
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index ebbdf56..9560609 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -148,8 +148,12 @@
sp<media::VolumeHandler> getVolumeHandler() { return mVolumeHandler; }
/** Set the computed normalized final volume of the track.
* !masterMute * masterVolume * streamVolume * averageLRVolume */
- void setFinalVolume(float volume);
+ void setFinalVolume(float volumeLeft, float volumeRight);
float getFinalVolume() const { return mFinalVolume; }
+ void getFinalVolume(float* left, float* right) const {
+ *left = mFinalVolumeLeft;
+ *right = mFinalVolumeRight;
+ }
using SourceMetadatas = std::vector<playback_track_metadata_v7_t>;
using MetadataInserter = std::back_insert_iterator<SourceMetadatas>;
@@ -355,6 +359,10 @@
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
float mFinalVolume; // combine master volume, stream type volume and track volume
+ float mFinalVolumeLeft; // combine master volume, stream type volume and track
+ // volume
+ float mFinalVolumeRight; // combine master volume, stream type volume and track
+ // volume
sp<AudioTrackServerProxy> mAudioTrackServerProxy;
bool mResumeToStopping; // track was paused in stopping state.
bool mFlushHwPending; // track requests for thread flush
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 1b234ca..9598ed9 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2089,7 +2089,8 @@
mHwSupportsPause(false), mHwPaused(false), mFlushPending(false),
mLeftVolFloat(-1.0), mRightVolFloat(-1.0),
mDownStreamPatch{},
- mIsTimestampAdvancing(kMinimumTimeBetweenTimestampChecksNs)
+ mIsTimestampAdvancing(kMinimumTimeBetweenTimestampChecksNs),
+ mBluetoothLatencyModesEnabled(true)
{
snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
@@ -3571,6 +3572,28 @@
invalidateTracks_l(streamType);
}
+void AudioFlinger::PlaybackThread::invalidateTracks(std::set<audio_port_handle_t>& portIds) {
+ Mutex::Autolock _l(mLock);
+ invalidateTracks_l(portIds);
+}
+
+bool AudioFlinger::PlaybackThread::invalidateTracks_l(std::set<audio_port_handle_t>& portIds) {
+ bool trackMatch = false;
+ const size_t size = mTracks.size();
+ for (size_t i = 0; i < size; i++) {
+ sp<Track> t = mTracks[i];
+ if (t->isExternalTrack() && portIds.find(t->portId()) != portIds.end()) {
+ t->invalidate();
+ portIds.erase(t->portId());
+ trackMatch = true;
+ }
+ if (portIds.empty()) {
+ break;
+ }
+ }
+ return trackMatch;
+}
+
// getTrackById_l must be called with holding thread lock
AudioFlinger::PlaybackThread::Track* AudioFlinger::PlaybackThread::getTrackById_l(
audio_port_handle_t trackPortId) {
@@ -5513,7 +5536,7 @@
vlf *= volume;
vrf *= volume;
- track->setFinalVolume((vlf + vrf) / 2.f);
+ track->setFinalVolume(vlf, vrf);
++fastTracks;
} else {
// was it previously active?
@@ -5712,7 +5735,7 @@
vaf = v * sendLevel * (1. / MAX_GAIN_INT);
}
- track->setFinalVolume((vrf + vlf) / 2.f);
+ track->setFinalVolume(vlf, vrf);
// Delegate volume control to effect in track effect chain if needed
if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
@@ -6281,11 +6304,20 @@
{
float left, right;
-
// Ensure volumeshaper state always advances even when muted.
const sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
- const auto [shaperVolume, shaperActive] = track->getVolumeHandler()->getVolume(
- proxy->framesReleased());
+
+ const size_t framesReleased = proxy->framesReleased();
+ const int64_t frames = mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+ const int64_t time = mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+
+ ALOGV("%s: Direct/Offload bufferConsumed:%zu timestamp frames:%lld time:%lld",
+ __func__, framesReleased, (long long)frames, (long long)time);
+
+ const int64_t volumeShaperFrames =
+ mMonotonicFrameCounter.updateAndGetMonotonicFrameCount(frames, time);
+ const auto [shaperVolume, shaperActive] =
+ track->getVolumeHandler()->getVolume(volumeShaperFrames);
mVolumeShaperActive = shaperActive;
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
@@ -6320,7 +6352,7 @@
shaperVolume == 0.f});
if (lastTrack) {
- track->setFinalVolume((left + right) / 2.f);
+ track->setFinalVolume(left, right);
if (left != mLeftVolFloat || right != mRightVolFloat) {
mLeftVolFloat = left;
mRightVolFloat = right;
@@ -6767,6 +6799,7 @@
mFlushPending = false;
mTimestampVerifier.discontinuity(discontinuityForStandbyOrFlush());
mTimestamp.clear();
+ mMonotonicFrameCounter.onFlush();
}
int64_t AudioFlinger::DirectOutputThread::computeWaitTimeNs_l() const {
@@ -7208,6 +7241,13 @@
}
}
+void AudioFlinger::OffloadThread::invalidateTracks(std::set<audio_port_handle_t>& portIds) {
+ Mutex::Autolock _l(mLock);
+ if (PlaybackThread::invalidateTracks_l(portIds)) {
+ mFlushPending = true;
+ }
+}
+
// ----------------------------------------------------------------------------
AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
@@ -7562,6 +7602,15 @@
return NO_ERROR;
}
+status_t AudioFlinger::PlaybackThread::setBluetoothVariableLatencyEnabled(bool enabled) {
+ if (mOutput == nullptr || mOutput->audioHwDev == nullptr
+ || !mOutput->audioHwDev->supportsBluetoothVariableLatency()) {
+ return INVALID_OPERATION;
+ }
+ mBluetoothLatencyModesEnabled.store(enabled);
+ return NO_ERROR;
+}
+
void AudioFlinger::SpatializerThread::checkOutputStageEffects()
{
bool hasVirtualizer = false;
@@ -10552,6 +10601,25 @@
}
}
+void AudioFlinger::MmapPlaybackThread::invalidateTracks(std::set<audio_port_handle_t>& portIds)
+{
+ Mutex::Autolock _l(mLock);
+ bool trackMatch = false;
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ if (portIds.find(track->portId()) != portIds.end()) {
+ track->invalidate();
+ trackMatch = true;
+ portIds.erase(track->portId());
+ }
+ if (portIds.empty()) {
+ break;
+ }
+ }
+ if (trackMatch) {
+ broadcast_l();
+ }
+}
+
void AudioFlinger::MmapPlaybackThread::processVolume_l()
{
float volume;
@@ -10794,6 +10862,8 @@
Vector<sp<Track>> *tracksToRemove) {
mixer_state result = MixerThread::prepareTracks_l(tracksToRemove);
// If there is only one active track and it is bit-perfect, enable tee buffer.
+ float volumeLeft = 1.0f;
+ float volumeRight = 1.0f;
if (mActiveTracks.size() == 1 && mActiveTracks[0]->isBitPerfect()) {
const int trackId = mActiveTracks[0]->id();
mAudioMixer->setParameter(
@@ -10801,6 +10871,7 @@
mAudioMixer->setParameter(
trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER_FRAME_COUNT,
(void *)(uintptr_t)mNormalFrameCount);
+ mActiveTracks[0]->getFinalVolume(&volumeLeft, &volumeRight);
mIsBitPerfect = true;
} else {
mIsBitPerfect = false;
@@ -10812,6 +10883,11 @@
trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER, nullptr);
}
}
+ if (mVolumeLeft != volumeLeft || mVolumeRight != volumeRight) {
+ mVolumeLeft = volumeLeft;
+ mVolumeRight = volumeRight;
+ setVolumeForOutput_l(volumeLeft, volumeRight);
+ }
return result;
}
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index f1b82e4..1f0f13a 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1036,7 +1036,11 @@
// called with AudioFlinger lock held
bool invalidateTracks_l(audio_stream_type_t streamType);
+ bool invalidateTracks_l(std::set<audio_port_handle_t>& portIds);
virtual void invalidateTracks(audio_stream_type_t streamType);
+ // Invalidate tracks by a set of port ids. The port id will be removed from
+ // the given set if the corresponding track is found and invalidated.
+ virtual void invalidateTracks(std::set<audio_port_handle_t>& portIds);
virtual size_t frameCount() const { return mNormalFrameCount; }
@@ -1098,6 +1102,8 @@
return INVALID_OPERATION;
}
+ virtual status_t setBluetoothVariableLatencyEnabled(bool enabled);
+
void startMelComputation(const sp<audio_utils::MelProcessor>& processor);
void stopMelComputation();
@@ -1456,6 +1462,9 @@
virtual void flushHw_l() {
mIsTimestampAdvancing.clear();
}
+
+ // Bluetooth Variable latency control logic is enabled or disabled for this thread
+ std::atomic_bool mBluetoothLatencyModesEnabled;
};
class MixerThread : public PlaybackThread {
@@ -1596,6 +1605,8 @@
virtual void onAddNewTrack_l();
const audio_offload_info_t mOffloadInfo;
+
+ audioflinger::MonotonicFrameCounter mMonotonicFrameCounter; // for VolumeShaper
bool mVolumeShaperActive = false;
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
@@ -1653,6 +1664,7 @@
virtual bool waitingAsyncCallback();
virtual bool waitingAsyncCallback_l();
virtual void invalidateTracks(audio_stream_type_t streamType);
+ void invalidateTracks(std::set<audio_port_handle_t>& portIds) override;
virtual bool keepWakeLock() const { return (mKeepWakeLock || (mDrainSequence & 1)); }
@@ -2149,6 +2161,7 @@
virtual audio_stream_type_t streamType() { return AUDIO_STREAM_DEFAULT; }
virtual void invalidateTracks(audio_stream_type_t streamType __unused) {}
+ virtual void invalidateTracks(std::set<audio_port_handle_t>& portIds __unused) {}
// Sets the UID records silence
virtual void setRecordSilenced(audio_port_handle_t portId __unused,
@@ -2228,6 +2241,7 @@
void setMasterMute_l(bool muted) { mMasterMute = muted; }
virtual void invalidateTracks(audio_stream_type_t streamType);
+ void invalidateTracks(std::set<audio_port_handle_t>& portIds) override;
virtual audio_stream_type_t streamType() { return mStreamType; }
virtual void checkSilentMode_l();
@@ -2295,4 +2309,6 @@
private:
bool mIsBitPerfect;
+ float mVolumeLeft = 0.f;
+ float mVolumeRight = 0.f;
};
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 950d555..ac863b5 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1362,25 +1362,7 @@
const sp<VolumeShaper::Configuration>& configuration,
const sp<VolumeShaper::Operation>& operation)
{
- sp<VolumeShaper::Configuration> newConfiguration;
-
- if (isOffloadedOrDirect()) {
- const VolumeShaper::Configuration::OptionFlag optionFlag
- = configuration->getOptionFlags();
- if ((optionFlag & VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME) == 0) {
- ALOGW("%s(%d): %s tracks do not support frame counted VolumeShaper,"
- " using clock time instead",
- __func__, mId,
- isOffloaded() ? "Offload" : "Direct");
- newConfiguration = new VolumeShaper::Configuration(*configuration);
- newConfiguration->setOptionFlags(
- VolumeShaper::Configuration::OptionFlag(optionFlag
- | VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME));
- }
- }
-
- VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(
- (newConfiguration.get() != nullptr ? newConfiguration : configuration), operation);
+ VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(configuration, operation);
if (isOffloadedOrDirect()) {
// Signal thread to fetch new volume.
@@ -1401,8 +1383,11 @@
return mVolumeHandler->getVolumeShaperState(id);
}
-void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volume)
+void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volumeLeft, float volumeRight)
{
+ mFinalVolumeLeft = volumeLeft;
+ mFinalVolumeRight = volumeRight;
+ const float volume = (volumeLeft + volumeRight) * 0.5f;
if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
mFinalVolume = volume;
setMetadataHasChanged();
diff --git a/services/audioflinger/sounddose/SoundDoseManager.cpp b/services/audioflinger/sounddose/SoundDoseManager.cpp
index 46f310c..61f27cb 100644
--- a/services/audioflinger/sounddose/SoundDoseManager.cpp
+++ b/services/audioflinger/sounddose/SoundDoseManager.cpp
@@ -117,6 +117,64 @@
return binder::Status::ok();
}
+binder::Status SoundDoseManager::SoundDose::getOutputRs2(float* value) {
+ ALOGV("%s", __func__);
+ auto soundDoseManager = mSoundDoseManager.promote();
+ if (soundDoseManager != nullptr) {
+ std::lock_guard _l(soundDoseManager->mLock);
+ *value = soundDoseManager->mRs2Value;
+ }
+ return binder::Status::ok();
+}
+
+binder::Status SoundDoseManager::SoundDose::getCsd(float* value) {
+ ALOGV("%s", __func__);
+ auto soundDoseManager = mSoundDoseManager.promote();
+ if (soundDoseManager != nullptr) {
+ *value = soundDoseManager->mMelAggregator->getCsd();
+ }
+ return binder::Status::ok();
+}
+
+binder::Status SoundDoseManager::SoundDose::forceUseFrameworkMel(bool useFrameworkMel) {
+ ALOGV("%s", __func__);
+ auto soundDoseManager = mSoundDoseManager.promote();
+ if (soundDoseManager != nullptr) {
+ soundDoseManager->setUseFrameworkMel(useFrameworkMel);
+ }
+ return binder::Status::ok();
+}
+
+binder::Status SoundDoseManager::SoundDose::forceComputeCsdOnAllDevices(
+ bool computeCsdOnAllDevices) {
+ ALOGV("%s", __func__);
+ auto soundDoseManager = mSoundDoseManager.promote();
+ if (soundDoseManager != nullptr) {
+ soundDoseManager->setComputeCsdOnAllDevices(computeCsdOnAllDevices);
+ }
+ return binder::Status::ok();
+}
+
+void SoundDoseManager::setUseFrameworkMel(bool useFrameworkMel) {
+ std::lock_guard _l(mLock);
+ mUseFrameworkMel = useFrameworkMel;
+}
+
+bool SoundDoseManager::useFrameworkMel() const {
+ std::lock_guard _l(mLock);
+ return mUseFrameworkMel;
+}
+
+void SoundDoseManager::setComputeCsdOnAllDevices(bool computeCsdOnAllDevices) {
+ std::lock_guard _l(mLock);
+ mComputeCsdOnAllDevices = computeCsdOnAllDevices;
+}
+
+bool SoundDoseManager::computeCsdOnAllDevices() const {
+ std::lock_guard _l(mLock);
+ return mComputeCsdOnAllDevices;
+}
+
void SoundDoseManager::resetSoundDose() {
std::lock_guard lock(mLock);
mSoundDose = nullptr;
diff --git a/services/audioflinger/sounddose/SoundDoseManager.h b/services/audioflinger/sounddose/SoundDoseManager.h
index b0aa5d6..eb5fa49 100644
--- a/services/audioflinger/sounddose/SoundDoseManager.h
+++ b/services/audioflinger/sounddose/SoundDoseManager.h
@@ -82,6 +82,9 @@
// used for testing
size_t getCachedMelRecordsSize() const;
+ bool useFrameworkMel() const;
+ bool computeCsdOnAllDevices() const;
+
/** Method for converting from audio_utils::CsdRecord to media::SoundDoseRecord. */
static media::SoundDoseRecord csdRecordToSoundDoseRecord(const audio_utils::CsdRecord& legacy);
@@ -107,6 +110,10 @@
binder::Status setOutputRs2(float value) override;
binder::Status resetCsd(float currentCsd,
const std::vector<media::SoundDoseRecord>& records) override;
+ binder::Status getOutputRs2(float* value);
+ binder::Status getCsd(float* value);
+ binder::Status forceUseFrameworkMel(bool useFrameworkMel);
+ binder::Status forceComputeCsdOnAllDevices(bool computeCsdOnAllDevices);
wp<SoundDoseManager> mSoundDoseManager;
const sp<media::ISoundDoseCallback> mSoundDoseCallback;
@@ -117,7 +124,10 @@
void resetCsd(float currentCsd, const std::vector<media::SoundDoseRecord>& records);
sp<media::ISoundDoseCallback> getSoundDoseCallback() const;
-
+
+ void setUseFrameworkMel(bool useFrameworkMel);
+ void setComputeCsdOnAllDevices(bool computeCsdOnAllDevices);
+
mutable std::mutex mLock;
// no need for lock since MelAggregator is thread-safe
@@ -129,6 +139,9 @@
float mRs2Value GUARDED_BY(mLock);
sp<SoundDose> mSoundDose GUARDED_BY(mLock);
+
+ bool mUseFrameworkMel GUARDED_BY(mLock);
+ bool mComputeCsdOnAllDevices GUARDED_BY(mLock);
};
} // namespace android
diff --git a/services/audioflinger/timing/Android.bp b/services/audioflinger/timing/Android.bp
new file mode 100644
index 0000000..17ce8bd
--- /dev/null
+++ b/services/audioflinger/timing/Android.bp
@@ -0,0 +1,28 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_base_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_services_audioflinger_license"],
+}
+
+cc_library {
+ name: "libaudioflinger_timing",
+
+ host_supported: true,
+
+ srcs: [
+ "MonotonicFrameCounter.cpp",
+ ],
+
+ shared_libs: [
+ "libbase",
+ "liblog",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+}
diff --git a/services/audioflinger/timing/MonotonicFrameCounter.cpp b/services/audioflinger/timing/MonotonicFrameCounter.cpp
new file mode 100644
index 0000000..286f549
--- /dev/null
+++ b/services/audioflinger/timing/MonotonicFrameCounter.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MonotonicFrameCounter"
+
+#include <utils/Log.h>
+#include "MonotonicFrameCounter.h"
+
+namespace android::audioflinger {
+
+int64_t MonotonicFrameCounter::updateAndGetMonotonicFrameCount(
+ int64_t newFrameCount, int64_t newTime) {
+ if (newFrameCount < 0 || newTime < 0) {
+ const auto result = getLastReportedFrameCount();
+ ALOGW("%s: invalid (frame, time) pair newFrameCount:%lld newTime:%lld,"
+ " using %lld as frameCount",
+ __func__, (long long) newFrameCount, (long long)newTime,
+ (long long)result);
+ return result;
+ }
+ if (newFrameCount < mLastReceivedFrameCount) {
+ const auto result = getLastReportedFrameCount();
+ ALOGW("%s: retrograde newFrameCount:%lld < mLastReceivedFrameCount:%lld,"
+ " ignoring, returning %lld as frameCount",
+ __func__, (long long) newFrameCount, (long long)mLastReceivedFrameCount,
+ (long long)result);
+ return result;
+ }
+ // Input looks fine.
+ // For better granularity, we could consider extrapolation on newTime.
+ mLastReceivedFrameCount = newFrameCount;
+ return getLastReportedFrameCount();
+}
+
+int64_t MonotonicFrameCounter::onFlush() {
+ ALOGV("%s: Updating mOffsetFrameCount:%lld with mLastReceivedFrameCount:%lld",
+ __func__, (long long)mOffsetFrameCount, (long long)mLastReceivedFrameCount);
+ mOffsetFrameCount += mLastReceivedFrameCount;
+ mLastReceivedFrameCount = 0;
+ return mOffsetFrameCount;
+}
+
+} // namespace android::audioflinger
diff --git a/services/audioflinger/timing/MonotonicFrameCounter.h b/services/audioflinger/timing/MonotonicFrameCounter.h
new file mode 100644
index 0000000..0ea9510
--- /dev/null
+++ b/services/audioflinger/timing/MonotonicFrameCounter.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+namespace android::audioflinger {
+
+/**
+ * MonotonicFrameCounter
+ *
+ * Advances a monotonic frame count based on input timestamp pairs (frames, time).
+ * It takes into account a possible flush, which will "reset" the frames to 0.
+ *
+ * This class is used to drive VolumeShaper volume automation.
+ *
+ * The timestamps provided in updateAndGetMonotonicFrameCount should
+ * be of sufficient granularity for the purpose at hand. Currently no temporal
+ * extrapolation is done.
+ *
+ * This class is not thread safe.
+ */
+class MonotonicFrameCounter {
+public:
+ /**
+ * Receives a new timestamp pair (frames, time) and returns a monotonic frameCount.
+ *
+ * \param newFrameCount the frameCount currently played.
+ * \param newTime the time corresponding to the frameCount.
+ * \return a monotonic frame count usable for automation timing.
+ */
+ int64_t updateAndGetMonotonicFrameCount(int64_t newFrameCount, int64_t newTime);
+
+ /**
+ * Notifies when a flush occurs, whereupon the received frameCount sequence restarts at 0.
+ *
+ * \return the last reported frameCount.
+ */
+ int64_t onFlush();
+
+ /**
+ * Returns the received (input) frameCount to reported (output) frameCount offset.
+ *
+ * This offset is sufficient to ensure monotonicity after flush is called,
+ * suitability for any other purpose is *not* guaranteed.
+ */
+ int64_t getOffsetFrameCount() const { return mOffsetFrameCount; }
+
+ /**
+ * Returns the last received frameCount.
+ */
+ int64_t getLastReceivedFrameCount() const {
+ return mLastReceivedFrameCount;
+ }
+
+ /**
+ * Returns the last reported frameCount from updateAndGetMonotonicFrameCount().
+ */
+ int64_t getLastReportedFrameCount() const {
+ // This is consistent after onFlush().
+ return mOffsetFrameCount + mLastReceivedFrameCount;
+ }
+
+private:
+ int64_t mOffsetFrameCount = 0;
+ int64_t mLastReceivedFrameCount = 0;
+};
+
+} // namespace android::audioflinger
diff --git a/services/audioflinger/timing/tests/Android.bp b/services/audioflinger/timing/tests/Android.bp
new file mode 100644
index 0000000..29267a6
--- /dev/null
+++ b/services/audioflinger/timing/tests/Android.bp
@@ -0,0 +1,29 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_base_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_services_audioflinger_license"],
+}
+
+cc_test {
+ name: "monotonicframecounter_tests",
+
+ host_supported: true,
+
+ srcs: [
+ "monotonicframecounter_tests.cpp"
+ ],
+
+ static_libs: [
+ "libaudioflinger_timing",
+ "liblog",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+}
\ No newline at end of file
diff --git a/services/audioflinger/timing/tests/monotonicframecounter_tests.cpp b/services/audioflinger/timing/tests/monotonicframecounter_tests.cpp
new file mode 100644
index 0000000..7aaa4fa
--- /dev/null
+++ b/services/audioflinger/timing/tests/monotonicframecounter_tests.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "monotonicframecounter_tests"
+
+#include "../MonotonicFrameCounter.h"
+
+#include <gtest/gtest.h>
+
+using namespace android::audioflinger;
+
+namespace {
+
+TEST(MonotonicFrameCounterTest, SimpleProgression) {
+ MonotonicFrameCounter monotonicFrameCounter;
+
+ const std::vector<std::pair<int64_t, int64_t>> frametimes{
+ {0, 0}, {100, 100}, {200, 200},
+ };
+
+ int64_t maxReceivedFrameCount = 0;
+ for (const auto& p : frametimes) {
+ maxReceivedFrameCount = std::max(maxReceivedFrameCount, p.first);
+ ASSERT_EQ(p.first,
+ monotonicFrameCounter.updateAndGetMonotonicFrameCount(p.first, p.second));
+ }
+ ASSERT_EQ(maxReceivedFrameCount, monotonicFrameCounter.getLastReportedFrameCount());
+}
+
+TEST(MonotonicFrameCounterTest, InvalidData) {
+ MonotonicFrameCounter monotonicFrameCounter;
+
+ const std::vector<std::pair<int64_t, int64_t>> frametimes{
+ {-1, -1}, {100, 100}, {-1, -1}, {90, 90}, {200, 200},
+ };
+
+ int64_t prevFrameCount = 0;
+ int64_t maxReceivedFrameCount = 0;
+ for (const auto& p : frametimes) {
+ maxReceivedFrameCount = std::max(maxReceivedFrameCount, p.first);
+ const int64_t frameCount =
+ monotonicFrameCounter.updateAndGetMonotonicFrameCount(p.first, p.second);
+ // we must be monotonic
+ ASSERT_GE(frameCount, prevFrameCount);
+ prevFrameCount = frameCount;
+ }
+ ASSERT_EQ(maxReceivedFrameCount, monotonicFrameCounter.getLastReportedFrameCount());
+}
+
+TEST(MonotonicFrameCounterTest, Flush) {
+ MonotonicFrameCounter monotonicFrameCounter;
+
+ // Different playback sequences are separated by a flush.
+ const std::vector<std::vector<std::pair<int64_t, int64_t>>> frameset{
+ {{-1, -1}, {100, 10}, {200, 20}, {300, 30},},
+ {{-1, -1}, {100, 10}, {200, 20}, {300, 30},},
+ {{-1, -1}, {100, 100}, {-1, -1}, {90, 90}, {200, 200},},
+ };
+
+ int64_t prevFrameCount = 0;
+ int64_t maxReceivedFrameCount = 0;
+ int64_t sumMaxReceivedFrameCount = 0;
+ for (const auto& v : frameset) {
+ for (const auto& p : v) {
+ maxReceivedFrameCount = std::max(maxReceivedFrameCount, p.first);
+ const int64_t frameCount =
+ monotonicFrameCounter.updateAndGetMonotonicFrameCount(p.first, p.second);
+ // we must be monotonic
+ ASSERT_GE(frameCount, prevFrameCount);
+ prevFrameCount = frameCount;
+ }
+ monotonicFrameCounter.onFlush();
+ sumMaxReceivedFrameCount += maxReceivedFrameCount;
+ maxReceivedFrameCount = 0;
+ }
+
+ // On flush we keep a monotonic reported framecount
+ // even though the received framecount resets to 0.
+ // The requirement of equality here is implementation dependent.
+ ASSERT_EQ(sumMaxReceivedFrameCount, monotonicFrameCounter.getLastReportedFrameCount());
+}
+
+} // namespace
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index a5fa78b..520bad2 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -137,7 +137,7 @@
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
- const AttributionSourceState& attributionSouce,
+ const AttributionSourceState& attributionSource,
audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
@@ -160,7 +160,7 @@
audio_io_handle_t *input,
audio_unique_id_t riid,
audio_session_t session,
- const AttributionSourceState& attributionSouce,
+ const AttributionSourceState& attributionSource,
audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
@@ -492,9 +492,6 @@
virtual status_t setStreamVolume(audio_stream_type_t stream, float volume,
audio_io_handle_t output, int delayMs = 0) = 0;
- // invalidate a stream type, causing a reroute to an unspecified new output
- virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
-
// function enabling to send proprietary informations directly from audio policy manager to
// audio hardware interface.
virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs,
@@ -564,6 +561,8 @@
const TrackSecondaryOutputsMap& trackSecondaryOutputs) = 0;
virtual status_t setDeviceConnectedState(const struct audio_port_v7 *port, bool connected) = 0;
+
+ virtual status_t invalidateTracks(const std::vector<audio_port_handle_t>& portIds) = 0;
};
// These are the signatures of createAudioPolicyManager/destroyAudioPolicyManager
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index c513098..52a000f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -442,6 +442,8 @@
bool isConfigurationMatched(const audio_config_base_t& config, audio_output_flags_t flags);
+ PortHandleVector getClientsForStream(audio_stream_type_t streamType) const;
+
const sp<IOProfile> mProfile; // I/O profile this output derives from
audio_io_handle_t mIoHandle; // output handle
uint32_t mLatency; //
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index c7296e9..a46186b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -947,6 +947,17 @@
&& mFormat == config.format;
}
+PortHandleVector SwAudioOutputDescriptor::getClientsForStream(
+ audio_stream_type_t streamType) const {
+ PortHandleVector clientsForStream;
+ for (const auto& client : getClientIterable()) {
+ if (client->stream() == streamType) {
+ clientsForStream.push_back(client->portId());
+ }
+ }
+ return clientsForStream;
+}
+
void SwAudioOutputCollection::dump(String8 *dst) const
{
dst->appendFormat("\n Outputs (%zu):\n", size());
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 092319c..1d4eb1e 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -68,16 +68,6 @@
// media / notification / system volume.
constexpr float IN_CALL_EARPIECE_HEADROOM_DB = 3.f;
-// Compressed formats for MSD module, ordered from most preferred to least preferred.
-static const std::vector<audio_format_t> msdCompressedFormatsOrder = {{
- AUDIO_FORMAT_IEC60958, AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3,
- AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_16_BIT }};
-// Channel masks for MSD module, 3D > 2D > 1D ordering (most preferred to least preferred).
-static const std::vector<audio_channel_mask_t> msdSurroundChannelMasksOrder = {{
- AUDIO_CHANNEL_OUT_3POINT1POINT2, AUDIO_CHANNEL_OUT_3POINT0POINT2,
- AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2,
- AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }};
-
template <typename T>
bool operator== (const SortedVector<T> &left, const SortedVector<T> &right)
{
@@ -114,7 +104,7 @@
const char* device_address,
const char* device_name,
audio_format_t encodedFormat) {
- media::AudioPort aidlPort;
+ media::AudioPortFw aidlPort;
if (status_t status = deviceToAudioPort(device, device_address, device_name, &aidlPort);
status == OK) {
return setDeviceConnectionState(state, aidlPort.hal, encodedFormat);
@@ -172,7 +162,7 @@
const char* device_address,
const char* device_name,
audio_format_t encodedFormat) {
- media::AudioPort aidlPort;
+ media::AudioPortFw aidlPort;
if (status_t status = deviceToAudioPort(deviceType, device_address, device_name, &aidlPort);
status == OK) {
return setDeviceConnectionStateInt(state, aidlPort.hal, encodedFormat);
@@ -460,7 +450,7 @@
status_t AudioPolicyManager::deviceToAudioPort(audio_devices_t device, const char* device_address,
const char* device_name,
- media::AudioPort* aidlPort) {
+ media::AudioPortFw* aidlPort) {
DeviceDescriptorBase devDescr(device, device_address);
devDescr.setName(device_name);
return devDescr.writeToParcelable(aidlPort);
@@ -830,7 +820,7 @@
if (isStateInCall(oldState)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
// force reevaluating accessibility routing when call stops
- mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
+ invalidateStreams({AUDIO_STREAM_ACCESSIBILITY});
}
/**
@@ -913,7 +903,7 @@
if (isStateInCall(state)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
// force reevaluating accessibility routing when call starts
- mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
+ invalidateStreams({AUDIO_STREAM_ACCESSIBILITY});
}
// Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
@@ -946,8 +936,7 @@
// force client reconnection to reevaluate flag AUDIO_FLAG_AUDIBILITY_ENFORCED
if (usage == AUDIO_POLICY_FORCE_FOR_SYSTEM) {
- mpClientInterface->invalidateStream(AUDIO_STREAM_SYSTEM);
- mpClientInterface->invalidateStream(AUDIO_STREAM_ENFORCED_AUDIBLE);
+ invalidateStreams({AUDIO_STREAM_SYSTEM, AUDIO_STREAM_ENFORCED_AUDIBLE});
}
//FIXME: workaround for truncated touch sounds
@@ -1712,10 +1701,28 @@
const AudioProfileVector &sourceProfiles, const AudioProfileVector &sinkProfiles,
audio_port_config *sourceConfig, audio_port_config *sinkConfig) const
{
+ // Compressed formats for MSD module, ordered from most preferred to least preferred.
+ static const std::vector<audio_format_t> formatsOrder = {{
+ AUDIO_FORMAT_IEC60958, AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3,
+ AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_16_BIT }};
+ static const std::vector<audio_channel_mask_t> channelMasksOrder = [](){
+ // Channel position masks for MSD module, 3D > 2D > 1D ordering (most preferred to least
+ // preferred).
+ std::vector<audio_channel_mask_t> masks = {{
+ AUDIO_CHANNEL_OUT_3POINT1POINT2, AUDIO_CHANNEL_OUT_3POINT0POINT2,
+ AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2,
+ AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }};
+ // insert index masks (higher counts most preferred) as preferred over position masks
+ for (int i = 1; i <= AUDIO_CHANNEL_COUNT_MAX; i++) {
+ masks.insert(
+ masks.begin(), audio_channel_mask_for_index_assignment_from_count(i));
+ }
+ return masks;
+ }();
+
struct audio_config_base bestSinkConfig;
- status_t result = findBestMatchingOutputConfig(sourceProfiles, sinkProfiles,
- msdCompressedFormatsOrder, msdSurroundChannelMasksOrder,
- true /*preferHigherSamplingRates*/, bestSinkConfig);
+ status_t result = findBestMatchingOutputConfig(sourceProfiles, sinkProfiles, formatsOrder,
+ channelMasksOrder, true /*preferHigherSamplingRates*/, bestSinkConfig);
if (result != NO_ERROR) {
ALOGD("%s() no matching config found for sink, hwAvSync: %d",
__func__, hwAvSync);
@@ -1737,7 +1744,10 @@
}
sourceConfig->sample_rate = bestSinkConfig.sample_rate;
// Specify exact channel mask to prevent guessing by bit count in PatchPanel.
- sourceConfig->channel_mask = audio_channel_mask_out_to_in(bestSinkConfig.channel_mask);
+ sourceConfig->channel_mask =
+ audio_channel_mask_get_representation(bestSinkConfig.channel_mask)
+ == AUDIO_CHANNEL_REPRESENTATION_INDEX ?
+ bestSinkConfig.channel_mask : audio_channel_mask_out_to_in(bestSinkConfig.channel_mask);
sourceConfig->format = bestSinkConfig.format;
// Copy input stream directly without any processing (e.g. resampling).
sourceConfig->flags.input = static_cast<audio_input_flags_t>(
@@ -2287,7 +2297,7 @@
// force reevaluating accessibility routing when ringtone or alarm starts
if (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_ALARM))) {
- mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
+ invalidateStreams({AUDIO_STREAM_ACCESSIBILITY});
}
if (waitMs > muteWaitMs) {
@@ -5172,9 +5182,7 @@
// invalidate all tracks in this strategy to force re connection.
// Otherwise select new device on the output mix.
if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
- for (auto stream : mEngine->getStreamTypesForProductStrategy(ps)) {
- mpClientInterface->invalidateStream(stream);
- }
+ invalidateStreams(mEngine->getStreamTypesForProductStrategy(ps));
} else {
DeviceVector newDevices = getNewOutputDevices(outputDesc, false /*fromCache*/);
if (outputDesc->mUsePreferredMixerAttributes && outputDesc->devices() != newDevices) {
@@ -5742,9 +5750,7 @@
}
}
- for (audio_stream_type_t stream : streamsToInvalidate) {
- mpClientInterface->invalidateStream(stream);
- }
+ invalidateStreams(StreamTypeVector(streamsToInvalidate.begin(), streamsToInvalidate.end()));
}
@@ -6758,9 +6764,7 @@
}
// Move tracks associated to this stream (and linked) from previous output to new output
if (!invalidatedOutputs.empty()) {
- for (auto stream : mEngine->getStreamTypesForProductStrategy(psId)) {
- mpClientInterface->invalidateStream(stream);
- }
+ invalidateStreams(mEngine->getStreamTypesForProductStrategy(psId));
for (sp<SwAudioOutputDescriptor> desc : invalidatedOutputs) {
desc->setTracksInvalidatedStatusByStrategy(psId);
}
@@ -6778,7 +6782,7 @@
}
void AudioPolicyManager::checkSecondaryOutputs() {
- std::set<audio_stream_type_t> streamsToInvalidate;
+ PortHandleVector clientsToInvalidate;
TrackSecondaryOutputsMap trackSecondaryOutputs;
for (size_t i = 0; i < mOutputs.size(); i++) {
const sp<SwAudioOutputDescriptor>& outputDescriptor = mOutputs[i];
@@ -6796,8 +6800,11 @@
}
}
- if (status != OK) {
- streamsToInvalidate.insert(client->stream());
+ if (status != OK &&
+ (client->flags() & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == AUDIO_OUTPUT_FLAG_NONE) {
+ // When querying the secondary output fails, only invalidate clients that are
+ // not MMAP, because MMAP streams do not support secondary outputs.
+ clientsToInvalidate.push_back(client->portId());
} else if (!std::equal(
client->getSecondaryOutputs().begin(),
client->getSecondaryOutputs().end(),
@@ -6805,7 +6812,7 @@
if (!audio_is_linear_pcm(client->config().format)) {
// If the format is not PCM, the tracks should be invalidated to get correct
// behavior when the secondary output is changed.
- streamsToInvalidate.insert(client->stream());
+ clientsToInvalidate.push_back(client->portId());
} else {
std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
std::vector<audio_io_handle_t> secondaryOutputIds;
@@ -6822,9 +6829,9 @@
if (!trackSecondaryOutputs.empty()) {
mpClientInterface->updateSecondaryOutputs(trackSecondaryOutputs);
}
- for (audio_stream_type_t stream : streamsToInvalidate) {
- ALOGD("%s Invalidate stream %d due to fail getting output for attr", __func__, stream);
- mpClientInterface->invalidateStream(stream);
+ if (!clientsToInvalidate.empty()) {
+ ALOGD("%s Invalidate clients due to fail getting output for attr", __func__);
+ mpClientInterface->invalidateTracks(clientsToInvalidate);
}
}
@@ -8321,4 +8328,23 @@
}
}
+PortHandleVector AudioPolicyManager::getClientsForStream(
+ audio_stream_type_t streamType) const {
+ PortHandleVector clients;
+ for (size_t i = 0; i < mOutputs.size(); ++i) {
+ PortHandleVector clientsForStream = mOutputs.valueAt(i)->getClientsForStream(streamType);
+ clients.insert(clients.end(), clientsForStream.begin(), clientsForStream.end());
+ }
+ return clients;
+}
+
+void AudioPolicyManager::invalidateStreams(StreamTypeVector streams) const {
+ PortHandleVector clients;
+ for (auto stream : streams) {
+ PortHandleVector clientsForStream = getClientsForStream(stream);
+ clients.insert(clients.end(), clientsForStream.begin(), clientsForStream.end());
+ }
+ mpClientInterface->invalidateTracks(clients);
+}
+
} // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 1ada3cc..885f7c6 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -1035,7 +1035,7 @@
// Called by setDeviceConnectionState()
status_t deviceToAudioPort(audio_devices_t deviceType, const char* device_address,
- const char* device_name, media::AudioPort* aidPort);
+ const char* device_name, media::AudioPortFw* aidPort);
bool isMsdPatch(const audio_patch_handle_t &handle) const;
private:
@@ -1305,6 +1305,7 @@
sp<PreferredMixerAttributesInfo> getPreferredMixerAttributesInfo(
audio_port_handle_t devicePortId, product_strategy_t strategy);
+
sp<SwAudioOutputDescriptor> reopenOutput(
sp<SwAudioOutputDescriptor> outputDesc,
const audio_config_t *config,
@@ -1313,6 +1314,9 @@
void reopenOutputsWithDevices(
const std::map<audio_io_handle_t, DeviceVector>& outputsToReopen);
+
+ PortHandleVector getClientsForStream(audio_stream_type_t streamType) const;
+ void invalidateStreams(StreamTypeVector streams) const;
};
};
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index 4c19d40..10403fa 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -36,6 +36,7 @@
"libaudiohal",
"libaudiopolicy",
"libaudiopolicymanagerdefault",
+ "libaudiousecasevalidation",
"libaudioutils",
"libbinder",
"libcutils",
@@ -85,6 +86,7 @@
export_shared_lib_headers: [
"libactivitymanager_aidl",
+ "libaudiousecasevalidation",
"libheadtracking",
"libheadtracking-binding",
"libsensorprivacy",
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index c766a15..1bb89df 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -69,6 +69,12 @@
*halConfig = VALUE_OR_RETURN_STATUS(
aidl2legacy_AudioConfig_audio_config_t(response.config, false /*isInput*/));
*latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(response.latencyMs));
+
+ audio_config_base_t config = {.sample_rate = halConfig->sample_rate,
+ .channel_mask = halConfig->channel_mask,
+ .format = halConfig->format,
+ };
+ mAudioPolicyService->registerOutput(*output, config, flags);
}
return status;
}
@@ -91,7 +97,7 @@
if (af == 0) {
return PERMISSION_DENIED;
}
-
+ mAudioPolicyService->unregisterOutput(output);
return af->closeOutput(output);
}
@@ -168,16 +174,6 @@
delay_ms);
}
-status_t AudioPolicyService::AudioPolicyClient::invalidateStream(audio_stream_type_t stream)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
-
- return af->invalidateStream(stream);
-}
-
void AudioPolicyService::AudioPolicyClient::setParameters(audio_io_handle_t io_handle,
const String8& keyValuePairs,
int delay_ms)
@@ -322,5 +318,15 @@
return af->setDeviceConnectedState(port, connected);
}
+status_t AudioPolicyService::AudioPolicyClient::invalidateTracks(
+ const std::vector<audio_port_handle_t>& portIds) {
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ return PERMISSION_DENIED;
+ }
+
+ return af->invalidateTracks(portIds);
+}
+
} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 4eb5336..5c32209 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -417,6 +417,9 @@
}
if (result == NO_ERROR) {
+ attr = VALUE_OR_RETURN_BINDER_STATUS(
+ mUsecaseValidator->verifyAudioAttributes(output, attributionSource, attr));
+
sp<AudioPlaybackClient> client =
new AudioPlaybackClient(attr, output, attributionSource, session,
portId, selectedDeviceId, stream, isSpatialized);
@@ -435,6 +438,8 @@
legacy2aidl_audio_io_handle_t_int32_t));
_aidl_return->isSpatialized = isSpatialized;
_aidl_return->isBitPerfect = isBitPerfect;
+ _aidl_return->attr = VALUE_OR_RETURN_BINDER_STATUS(
+ legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
} else {
_aidl_return->configBase.format = VALUE_OR_RETURN_BINDER_STATUS(
legacy2aidl_audio_format_t_AudioFormatDescription(config.format));
@@ -486,6 +491,10 @@
AutoCallerClear acc;
status_t status = mAudioPolicyManager->startOutput(portId);
if (status == NO_ERROR) {
+ //TODO b/257922898: decide if/how we need to handle attributes update when playback starts
+ // or during playback
+ (void)mUsecaseValidator->startClient(client->io, client->portId, client->attributionSource,
+ client->attributes, nullptr /* callback */);
client->active = true;
onUpdateActiveSpatializerTracks_l();
}
@@ -526,6 +535,7 @@
if (status == NO_ERROR) {
client->active = false;
onUpdateActiveSpatializerTracks_l();
+ mUsecaseValidator->stopClient(client->io, client->portId);
}
return status;
}
@@ -1497,7 +1507,7 @@
Status AudioPolicyService::listAudioPorts(media::AudioPortRole roleAidl,
media::AudioPortType typeAidl, Int* count,
- std::vector<media::AudioPort>* portsAidl,
+ std::vector<media::AudioPortFw>* portsAidl,
int32_t* _aidl_return) {
audio_port_role_t role = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioPortRole_audio_port_role_t(roleAidl));
@@ -1529,7 +1539,7 @@
}
Status AudioPolicyService::getAudioPort(int portId,
- media::AudioPort* _aidl_return) {
+ media::AudioPortFw* _aidl_return) {
audio_port_v7 port{ .id = portId };
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager == NULL) {
@@ -1541,7 +1551,8 @@
return Status::ok();
}
-Status AudioPolicyService::createAudioPatch(const media::AudioPatch& patchAidl, int32_t handleAidl,
+Status AudioPolicyService::createAudioPatch(const media::AudioPatchFw& patchAidl,
+ int32_t handleAidl,
int32_t* _aidl_return) {
audio_patch patch = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioPatch_audio_patch(patchAidl));
@@ -1582,7 +1593,7 @@
}
Status AudioPolicyService::listAudioPatches(Int* count,
- std::vector<media::AudioPatch>* patchesAidl,
+ std::vector<media::AudioPatchFw>* patchesAidl,
int32_t* _aidl_return) {
unsigned int num_patches = VALUE_OR_RETURN_BINDER_STATUS(
convertIntegral<unsigned int>(count->value));
@@ -1609,7 +1620,7 @@
return Status::ok();
}
-Status AudioPolicyService::setAudioPortConfig(const media::AudioPortConfig& configAidl)
+Status AudioPolicyService::setAudioPortConfig(const media::AudioPortConfigFw& configAidl)
{
audio_port_config config = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioPortConfig_audio_port_config(configAidl));
@@ -1782,7 +1793,7 @@
return binderStatusFromStatusT(mAudioPolicyManager->removeUserIdDeviceAffinities(userId));
}
-Status AudioPolicyService::startAudioSource(const media::AudioPortConfig& sourceAidl,
+Status AudioPolicyService::startAudioSource(const media::AudioPortConfigFw& sourceAidl,
const media::AudioAttributesInternal& attributesAidl,
int32_t* _aidl_return) {
audio_port_config source = VALUE_OR_RETURN_BINDER_STATUS(
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 48997db..2be5121 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -205,7 +205,8 @@
mPhoneState(AUDIO_MODE_INVALID),
mCaptureStateNotifier(false),
mCreateAudioPolicyManager(createAudioPolicyManager),
- mDestroyAudioPolicyManager(destroyAudioPolicyManager) {
+ mDestroyAudioPolicyManager(destroyAudioPolicyManager),
+ mUsecaseValidator(media::createUsecaseValidator()) {
setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
}
@@ -1537,6 +1538,16 @@
" help print this message\n");
}
+status_t AudioPolicyService::registerOutput(audio_io_handle_t output,
+ const audio_config_base_t& config,
+ const audio_output_flags_t flags) {
+ return mUsecaseValidator->registerStream(output, config, flags);
+}
+
+status_t AudioPolicyService::unregisterOutput(audio_io_handle_t output) {
+ return mUsecaseValidator->unregisterStream(output);
+}
+
// ----------- AudioPolicyService::UidPolicy implementation ----------
void AudioPolicyService::UidPolicy::registerSelf() {
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 58af46d..50f2180 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -36,6 +36,7 @@
#include <media/ToneGenerator.h>
#include <media/AudioEffect.h>
#include <media/AudioPolicy.h>
+#include <media/UsecaseValidator.h>
#include <mediautils/ServiceUtilities.h>
#include "AudioPolicyEffects.h"
#include "CaptureStateNotifier.h"
@@ -172,16 +173,16 @@
const media::AudioAttributesInternal& attributes,
bool* _aidl_return) override;
binder::Status listAudioPorts(media::AudioPortRole role, media::AudioPortType type,
- Int* count, std::vector<media::AudioPort>* ports,
+ Int* count, std::vector<media::AudioPortFw>* ports,
int32_t* _aidl_return) override;
binder::Status getAudioPort(int portId,
- media::AudioPort* _aidl_return) override;
- binder::Status createAudioPatch(const media::AudioPatch& patch, int32_t handle,
+ media::AudioPortFw* _aidl_return) override;
+ binder::Status createAudioPatch(const media::AudioPatchFw& patch, int32_t handle,
int32_t* _aidl_return) override;
binder::Status releaseAudioPatch(int32_t handle) override;
- binder::Status listAudioPatches(Int* count, std::vector<media::AudioPatch>* patches,
+ binder::Status listAudioPatches(Int* count, std::vector<media::AudioPatchFw>* patches,
int32_t* _aidl_return) override;
- binder::Status setAudioPortConfig(const media::AudioPortConfig& config) override;
+ binder::Status setAudioPortConfig(const media::AudioPortConfigFw& config) override;
binder::Status registerClient(const sp<media::IAudioPolicyServiceClient>& client) override;
binder::Status setAudioPortCallbacksEnabled(bool enabled) override;
binder::Status setAudioVolumeGroupCallbacksEnabled(bool enabled) override;
@@ -197,7 +198,7 @@
int32_t userId,
const std::vector<AudioDevice>& devices) override;
binder::Status removeUserIdDeviceAffinities(int32_t userId) override;
- binder::Status startAudioSource(const media::AudioPortConfig& source,
+ binder::Status startAudioSource(const media::AudioPortConfigFw& source,
const media::AudioAttributesInternal& attributes,
int32_t* _aidl_return) override;
binder::Status stopAudioSource(int32_t portId) override;
@@ -431,6 +432,11 @@
*/
static bool isAppOpSource(audio_source_t source);
+ status_t registerOutput(audio_io_handle_t output,
+ const audio_config_base_t& config,
+ const audio_output_flags_t flags);
+ status_t unregisterOutput(audio_io_handle_t output);
+
// If recording we need to make sure the UID is allowed to do that. If the UID is idle
// then it cannot record and gets buffers with zeros - silence. As soon as the UID
// transitions to an active state we will start reporting buffers with data. This approach
@@ -776,9 +782,6 @@
// for each output (destination device) it is attached to.
virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0);
- // invalidate a stream type, causing a reroute to an unspecified new output
- virtual status_t invalidateStream(audio_stream_type_t stream);
-
// function enabling to send proprietary informations directly from audio policy manager to audio hardware interface.
virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0);
// function enabling to receive proprietary informations directly from audio hardware interface to audio policy manager.
@@ -838,6 +841,8 @@
status_t setDeviceConnectedState(
const struct audio_port_v7 *port, bool connected) override;
+ status_t invalidateTracks(const std::vector<audio_port_handle_t>& portIds) override;
+
private:
AudioPolicyService *mAudioPolicyService;
};
@@ -903,7 +908,7 @@
const audio_attributes_t attributes; // source, flags ...
const audio_io_handle_t io; // audio HAL stream IO handle
- const AttributionSourceState& attributionSource; //client attributionsource
+ const AttributionSourceState attributionSource; // client attribution source
const audio_session_t session; // audio session ID
const audio_port_handle_t portId;
const audio_port_handle_t deviceId; // selected input device port ID
@@ -1082,6 +1087,7 @@
void *mLibraryHandle = nullptr;
CreateAudioPolicyManagerInstance mCreateAudioPolicyManager;
DestroyAudioPolicyManagerInstance mDestroyAudioPolicyManager;
+ std::unique_ptr<media::UsecaseValidator> mUsecaseValidator;
};
} // namespace android
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index 8a85fee..0c04e35 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -54,7 +54,6 @@
float /*volume*/,
audio_io_handle_t /*output*/,
int /*delayMs*/) override { return NO_INIT; }
- status_t invalidateStream(audio_stream_type_t /*stream*/) override { return NO_INIT; }
void setParameters(audio_io_handle_t /*ioHandle*/,
const String8& /*keyValuePairs*/,
int /*delayMs*/) override { }
@@ -101,6 +100,9 @@
const struct audio_port_v7 *port __unused, bool connected __unused) override {
return NO_INIT;
}
+ status_t invalidateTracks(const std::vector<audio_port_handle_t>& /*portIds*/) override {
+ return NO_INIT;
+ }
};
} // namespace android
diff --git a/services/audiopolicy/tests/audio_health_tests.cpp b/services/audiopolicy/tests/audio_health_tests.cpp
index 10f8dc0..798332c 100644
--- a/services/audiopolicy/tests/audio_health_tests.cpp
+++ b/services/audiopolicy/tests/audio_health_tests.cpp
@@ -111,7 +111,7 @@
continue;
}
std::string address = "11:22:33:44:55:66";
- media::AudioPort aidlPort;
+ media::AudioPortFw aidlPort;
ASSERT_EQ(OK, manager.deviceToAudioPort(device->type(), address.c_str(), "" /*name*/,
&aidlPort));
ASSERT_EQ(AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 386a67e..7f5c7a5 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -461,7 +461,7 @@
sp<AudioProfile> ac3OutputProfile = new AudioProfile(
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, k48000SamplingRate);
sp<AudioProfile> iec958OutputProfile = new AudioProfile(
- AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_OUT_STEREO, k48000SamplingRate);
+ AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_INDEX_MASK_24, k48000SamplingRate);
mMsdOutputDevice->addAudioProfile(pcmOutputProfile);
mMsdOutputDevice->addAudioProfile(ac3OutputProfile);
mMsdOutputDevice->addAudioProfile(iec958OutputProfile);
@@ -534,7 +534,7 @@
// Add HDMI input device with IEC60958 profile for HDMI in -> MSD patching.
mHdmiInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_HDMI);
sp<AudioProfile> iec958InputProfile = new AudioProfile(
- AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_IN_STEREO, k48000SamplingRate);
+ AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_INDEX_MASK_24, k48000SamplingRate);
mHdmiInputDevice->addAudioProfile(iec958InputProfile);
config.addDevice(mHdmiInputDevice);
sp<InputProfile> hdmiInputProfile = new InputProfile("hdmi input");
@@ -692,8 +692,8 @@
ASSERT_EQ(AUDIO_PORT_ROLE_SINK, patch->mPatch.sinks[0].role);
ASSERT_EQ(AUDIO_FORMAT_IEC60958, patch->mPatch.sources[0].format);
ASSERT_EQ(AUDIO_FORMAT_IEC60958, patch->mPatch.sinks[0].format);
- ASSERT_EQ(AUDIO_CHANNEL_IN_STEREO, patch->mPatch.sources[0].channel_mask);
- ASSERT_EQ(AUDIO_CHANNEL_OUT_STEREO, patch->mPatch.sinks[0].channel_mask);
+ ASSERT_EQ(AUDIO_CHANNEL_INDEX_MASK_24, patch->mPatch.sources[0].channel_mask);
+ ASSERT_EQ(AUDIO_CHANNEL_INDEX_MASK_24, patch->mPatch.sinks[0].channel_mask);
ASSERT_EQ(k48000SamplingRate, patch->mPatch.sources[0].sample_rate);
ASSERT_EQ(k48000SamplingRate, patch->mPatch.sinks[0].sample_rate);
ASSERT_EQ(1, patchCount.deltaFromSnapshot());
@@ -769,7 +769,7 @@
audio_config_base_t msdDirectConfig2 = AUDIO_CONFIG_BASE_INITIALIZER;
msdDirectConfig2.format = AUDIO_FORMAT_IEC60958;
msdDirectConfig2.sample_rate = 48000;
- msdDirectConfig2.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ msdDirectConfig2.channel_mask = AUDIO_CHANNEL_INDEX_MASK_24;
audio_config_base_t msdNonDirectConfig = AUDIO_CONFIG_BASE_INITIALIZER;
msdNonDirectConfig.format = AUDIO_FORMAT_PCM_16_BIT;
@@ -836,7 +836,7 @@
audio_config_t msdDirectConfig2 = AUDIO_CONFIG_INITIALIZER;
msdDirectConfig2.format = AUDIO_FORMAT_IEC60958;
msdDirectConfig2.sample_rate = 48000;
- msdDirectConfig2.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ msdDirectConfig2.channel_mask = AUDIO_CHANNEL_INDEX_MASK_24;
audio_config_t msdNonDirectConfig = AUDIO_CONFIG_INITIALIZER;
msdNonDirectConfig.format = AUDIO_FORMAT_PCM_16_BIT;
@@ -2007,7 +2007,7 @@
}
const std::string name = std::get<1>(GetParam());
const std::string address = std::get<2>(GetParam());
- android::media::AudioPort audioPort;
+ android::media::AudioPortFw audioPort;
ASSERT_EQ(NO_ERROR,
mManager->deviceToAudioPort(type, address.c_str(), name.c_str(), &audioPort));
android::media::audio::common::AudioPort& port = audioPort.hal;
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index d52e540..1e6524f 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -65,6 +65,7 @@
"api2/DepthCompositeStream.cpp",
"api2/HeicEncoderInfoManager.cpp",
"api2/HeicCompositeStream.cpp",
+ "api2/JpegRCompositeStream.cpp",
"device3/BufferUtils.cpp",
"device3/Camera3Device.cpp",
"device3/Camera3OfflineSession.cpp",
@@ -172,6 +173,9 @@
"libbinderthreadstateutils",
"media_permission-aidl-cpp",
"libcameraservice_device_independent",
+ "libjpegrecoverymap",
+ "libjpegencoder",
+ "libjpegdecoder",
],
export_shared_lib_headers: [
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index d80b6ec..720ffd7 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -36,6 +36,7 @@
#include "DepthCompositeStream.h"
#include "HeicCompositeStream.h"
+#include "JpegRCompositeStream.h"
// Convenience methods for constructing binder::Status objects for error returns
@@ -889,6 +890,7 @@
int timestampBase = outputConfiguration.getTimestampBase();
int mirrorMode = outputConfiguration.getMirrorMode();
int32_t colorSpace = outputConfiguration.getColorSpace();
+ bool useReadoutTimestamp = outputConfiguration.useReadoutTimestamp();
res = SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
outputConfiguration.getSurfaceType());
@@ -955,19 +957,25 @@
bool isDepthCompositeStream =
camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0]);
bool isHeicCompisiteStream = camera3::HeicCompositeStream::isHeicCompositeStream(surfaces[0]);
- if (isDepthCompositeStream || isHeicCompisiteStream) {
+ bool isJpegRCompositeStream =
+ camera3::JpegRCompositeStream::isJpegRCompositeStream(surfaces[0]);
+ if (isDepthCompositeStream || isHeicCompisiteStream || isJpegRCompositeStream) {
sp<CompositeStream> compositeStream;
if (isDepthCompositeStream) {
compositeStream = new camera3::DepthCompositeStream(mDevice, getRemoteCallback());
- } else {
+ } else if (isHeicCompisiteStream) {
compositeStream = new camera3::HeicCompositeStream(mDevice, getRemoteCallback());
+ } else {
+ compositeStream = new camera3::JpegRCompositeStream(mDevice, getRemoteCallback());
}
err = compositeStream->createStream(surfaces, deferredConsumer, streamInfo.width,
streamInfo.height, streamInfo.format,
static_cast<camera_stream_rotation_t>(outputConfiguration.getRotation()),
&streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
- outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution);
+ outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution,
+ streamInfo.colorSpace, streamInfo.dynamicRangeProfile, streamInfo.streamUseCase,
+ useReadoutTimestamp);
if (err == OK) {
Mutex::Autolock l(mCompositeLock);
mCompositeStreamMap.add(IInterface::asBinder(surfaces[0]->getIGraphicBufferProducer()),
@@ -980,7 +988,8 @@
&streamId, physicalCameraId, streamInfo.sensorPixelModesUsed, &surfaceIds,
outputConfiguration.getSurfaceSetID(), isShared, isMultiResolution,
/*consumerUsage*/0, streamInfo.dynamicRangeProfile, streamInfo.streamUseCase,
- streamInfo.timestampBase, streamInfo.mirrorMode, streamInfo.colorSpace);
+ streamInfo.timestampBase, streamInfo.mirrorMode, streamInfo.colorSpace,
+ useReadoutTimestamp);
}
if (err != OK) {
@@ -1079,7 +1088,8 @@
outputConfiguration.isMultiResolution(), consumerUsage,
outputConfiguration.getDynamicRangeProfile(),
outputConfiguration.getStreamUseCase(),
- outputConfiguration.getMirrorMode());
+ outputConfiguration.getMirrorMode(),
+ outputConfiguration.useReadoutTimestamp());
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
@@ -1823,7 +1833,8 @@
for (const auto& gbp : mConfiguredOutputs.valueAt(index).getGraphicBufferProducers()) {
sp<Surface> s = new Surface(gbp, false /*controlledByApp*/);
isCompositeStream = camera3::DepthCompositeStream::isDepthCompositeStream(s) ||
- camera3::HeicCompositeStream::isHeicCompositeStream(s);
+ camera3::HeicCompositeStream::isHeicCompositeStream(s) ||
+ camera3::JpegRCompositeStream::isJpegRCompositeStream(s);
if (isCompositeStream) {
auto compositeIdx = mCompositeStreamMap.indexOfKey(IInterface::asBinder(gbp));
if (compositeIdx == NAME_NOT_FOUND) {
diff --git a/services/camera/libcameraservice/api2/CompositeStream.cpp b/services/camera/libcameraservice/api2/CompositeStream.cpp
index 4b840fc..503cf23 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/CompositeStream.cpp
@@ -49,7 +49,8 @@
camera_stream_rotation_t rotation, int * id, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> * surfaceIds,
- int streamSetId, bool isShared, bool isMultiResolution) {
+ int streamSetId, bool isShared, bool isMultiResolution, int32_t colorSpace,
+ int64_t dynamicProfile, int64_t streamUseCase, bool useReadoutTimestamp) {
if (hasDeferredConsumer) {
ALOGE("%s: Deferred consumers not supported in case of composite streams!",
__FUNCTION__);
@@ -75,7 +76,8 @@
}
return createInternalStreams(consumers, hasDeferredConsumer, width, height, format, rotation,
- id, physicalCameraId, sensorPixelModesUsed, surfaceIds, streamSetId, isShared);
+ id, physicalCameraId, sensorPixelModesUsed, surfaceIds, streamSetId, isShared,
+ colorSpace, dynamicProfile, streamUseCase, useReadoutTimestamp);
}
status_t CompositeStream::deleteStream() {
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
index 600bd28..c27faba 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.h
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -46,7 +46,8 @@
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds,
- int streamSetId, bool isShared, bool isMultiResolution);
+ int streamSetId, bool isShared, bool isMultiResolution, int32_t colorSpace,
+ int64_t dynamicProfile, int64_t streamUseCase, bool useReadoutTimestamp);
status_t deleteStream();
@@ -59,7 +60,8 @@
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds,
- int streamSetId, bool isShared) = 0;
+ int streamSetId, bool isShared, int32_t colorSpace,
+ int64_t dynamicProfile, int64_t streamUseCase, bool useReadoutTimestamp) = 0;
// Release all internal streams and corresponding resources.
virtual status_t deleteInternalStreams() = 0;
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index 048d85d..a3547dd 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -581,7 +581,8 @@
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds,
- int /*streamSetId*/, bool /*isShared*/) {
+ int /*streamSetId*/, bool /*isShared*/, int32_t /*colorSpace*/,
+ int64_t /*dynamicProfile*/, int64_t /*streamUseCase*/, bool useReadoutTimestamp) {
if (mSupportedDepthSizes.empty()) {
ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
return INVALID_OPERATION;
@@ -612,7 +613,14 @@
mBlobSurface = new Surface(producer);
ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
- id, physicalCameraId, sensorPixelModesUsed, surfaceIds);
+ id, physicalCameraId, sensorPixelModesUsed, surfaceIds,
+ camera3::CAMERA3_STREAM_SET_ID_INVALID, /*isShared*/false, /*isMultiResolution*/false,
+ /*consumerUsage*/0, ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
+ ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
+ OutputConfiguration::MIRROR_MODE_AUTO,
+ ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ useReadoutTimestamp);
if (ret == OK) {
mBlobStreamId = *id;
mBlobSurfaceId = (*surfaceIds)[0];
@@ -629,7 +637,14 @@
std::vector<int> depthSurfaceId;
ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, sensorPixelModesUsed,
- &depthSurfaceId);
+ &depthSurfaceId, camera3::CAMERA3_STREAM_SET_ID_INVALID, /*isShared*/false,
+ /*isMultiResolution*/false, /*consumerUsage*/0,
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
+ ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
+ OutputConfiguration::MIRROR_MODE_AUTO,
+ ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ useReadoutTimestamp);
if (ret == OK) {
mDepthSurfaceId = depthSurfaceId[0];
} else {
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
index c1c75c1..de0ed67 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.h
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -53,7 +53,8 @@
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds,
- int streamSetId, bool isShared) override;
+ int streamSetId, bool isShared, int32_t colorSpace,
+ int64_t dynamicProfile, int64_t streamUseCase, bool useReadoutTimestamp) override;
status_t deleteInternalStreams() override;
status_t configureStream() override;
status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap, Vector<int32_t>* /*out*/outputStreamIds,
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index cd57299..8b8dbe8 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -120,8 +120,8 @@
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds,
- int /*streamSetId*/, bool /*isShared*/) {
-
+ int /*streamSetId*/, bool /*isShared*/, int32_t /*colorSpace*/,
+ int64_t /*dynamicProfile*/, int64_t /*streamUseCase*/, bool useReadoutTimestamp) {
sp<CameraDeviceBase> device = mDevice.promote();
if (!device.get()) {
ALOGE("%s: Invalid camera device!", __FUNCTION__);
@@ -147,7 +147,14 @@
res = device->createStream(mAppSegmentSurface, mAppSegmentMaxSize, 1, format,
kAppSegmentDataSpace, rotation, &mAppSegmentStreamId, physicalCameraId,
- sensorPixelModesUsed,surfaceIds);
+ sensorPixelModesUsed, surfaceIds, camera3::CAMERA3_STREAM_SET_ID_INVALID,
+ /*isShared*/false, /*isMultiResolution*/false,
+ /*consumerUsage*/0, ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
+ ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
+ OutputConfiguration::MIRROR_MODE_AUTO,
+ ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ useReadoutTimestamp);
if (res == OK) {
mAppSegmentSurfaceId = (*surfaceIds)[0];
} else {
@@ -183,7 +190,14 @@
int srcStreamFmt = mUseGrid ? HAL_PIXEL_FORMAT_YCbCr_420_888 :
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
res = device->createStream(mMainImageSurface, width, height, srcStreamFmt, kHeifDataSpace,
- rotation, id, physicalCameraId, sensorPixelModesUsed, &sourceSurfaceId);
+ rotation, id, physicalCameraId, sensorPixelModesUsed, &sourceSurfaceId,
+ camera3::CAMERA3_STREAM_SET_ID_INVALID, /*isShared*/false, /*isMultiResolution*/false,
+ /*consumerUsage*/0, ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
+ ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
+ OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
+ OutputConfiguration::MIRROR_MODE_AUTO,
+ ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ useReadoutTimestamp);
if (res == OK) {
mMainImageSurfaceId = sourceSurfaceId[0];
mMainImageStreamId = *id;
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
index 1077a1f..3132183 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.h
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -47,8 +47,8 @@
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
const std::unordered_set<int32_t> &sensorPixelModesUsed,
- std::vector<int> *surfaceIds,
- int streamSetId, bool isShared) override;
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared, int32_t colorSpace,
+ int64_t dynamicProfile, int64_t streamUseCase, bool useReadoutTimestamp) override;
status_t deleteInternalStreams() override;
diff --git a/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp b/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
new file mode 100644
index 0000000..fb8979d
--- /dev/null
+++ b/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
@@ -0,0 +1,822 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hardware/gralloc.h"
+#include "system/graphics-base-v1.0.h"
+#include "system/graphics-base-v1.1.h"
+#define LOG_TAG "Camera3-JpegRCompositeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <aidl/android/hardware/camera/device/CameraBlob.h>
+#include <aidl/android/hardware/camera/device/CameraBlobId.h>
+
+#include "common/CameraProviderManager.h"
+#include <gui/Surface.h>
+#include <jpegrecoverymap/recoverymap.h>
+#include <utils/ExifUtils.h>
+#include <utils/Log.h>
+#include "utils/SessionConfigurationUtils.h"
+#include <utils/Trace.h>
+
+#include "JpegRCompositeStream.h"
+
+namespace android {
+namespace camera3 {
+
+using aidl::android::hardware::camera::device::CameraBlob;
+using aidl::android::hardware::camera::device::CameraBlobId;
+
+JpegRCompositeStream::JpegRCompositeStream(sp<CameraDeviceBase> device, // Composite stream fusing an internal P010 (+ optional jpeg) pair into one Jpeg/R blob output.
+        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
+        CompositeStream(device, cb),
+        mBlobStreamId(-1),          // -1 marks "internal stream not created yet"
+        mBlobSurfaceId(-1),
+        mP010StreamId(-1),
+        mP010SurfaceId(-1),
+        mBlobWidth(0),
+        mBlobHeight(0),
+        mP010BufferAcquired(false), // at most one locked buffer per consumer at any time
+        mBlobBufferAcquired(false),
+        mOutputColorSpace(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED),
+        mProducerListener(new ProducerListener()),
+        mMaxJpegBufferSize(-1),
+        mUHRMaxJpegBufferSize(-1),
+        mStaticInfo(device->info()) {
+    auto entry = mStaticInfo.find(ANDROID_JPEG_MAX_SIZE); // HAL-advertised maximum jpeg buffer size
+    if (entry.count > 0) {
+        mMaxJpegBufferSize = entry.data.i32[0];
+    } else {
+        ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
+    }
+
+    mUHRMaxJpegSize = // largest jpeg resolution in ultra-high-resolution sensor mode
+            SessionConfigurationUtils::getMaxJpegResolution(mStaticInfo,
+                    /*ultraHighResolution*/true);
+    mDefaultMaxJpegSize = // largest jpeg resolution in default sensor mode
+            SessionConfigurationUtils::getMaxJpegResolution(mStaticInfo,
+                    /*isUltraHighResolution*/false);
+
+    mUHRMaxJpegBufferSize = // buffer size bound scaled for UHR captures
+            SessionConfigurationUtils::getUHRMaxJpegBufferSize(mUHRMaxJpegSize, mDefaultMaxJpegSize,
+                    mMaxJpegBufferSize);
+}
+
+JpegRCompositeStream::~JpegRCompositeStream() { // Release internal consumers/surfaces and reset stream ids.
+    mBlobConsumer.clear();  // fixed: original ended with ',' (accidental comma operator) — ';' intended
+    mBlobSurface.clear();   // fixed: original ended with ','
+    mBlobStreamId = -1;
+    mBlobSurfaceId = -1;
+    mP010Consumer.clear();
+    mP010Surface.clear();
+    mP010Consumer = nullptr;  // redundant after clear(); kept to preserve original behavior
+    mP010Surface = nullptr;
+}
+
+void JpegRCompositeStream::compilePendingInputLocked() { // Drain consumer queues and result maps into mPendingInputFrames; caller holds mMutex.
+    CpuConsumer::LockedBuffer imgBuffer;
+
+    while (mSupportInternalJpeg && !mInputJpegBuffers.empty() && !mBlobBufferAcquired) { // hold at most one locked jpeg buffer
+        auto it = mInputJpegBuffers.begin();
+        auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
+        if (res == NOT_ENOUGH_DATA) {
+            // Can not lock any more buffers.
+            break;
+        } else if (res != OK) {
+            ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            mPendingInputFrames[*it].error = true; // flag the pending entry so it gets released later
+            mInputJpegBuffers.erase(it);
+            continue;
+        }
+
+        if (*it != imgBuffer.timestamp) {
+            ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
+                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+        }
+
+        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+                (mPendingInputFrames[imgBuffer.timestamp].error)) {
+            mBlobConsumer->unlockBuffer(imgBuffer); // frame already failed; drop the buffer immediately
+        } else {
+            mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
+            mBlobBufferAcquired = true;
+        }
+        mInputJpegBuffers.erase(it);
+    }
+
+    while (!mInputP010Buffers.empty() && !mP010BufferAcquired) { // mirrors the jpeg loop above for the P010 input
+        auto it = mInputP010Buffers.begin();
+        auto res = mP010Consumer->lockNextBuffer(&imgBuffer);
+        if (res == NOT_ENOUGH_DATA) {
+            // Can not lock any more buffers.
+            break;
+        } else if (res != OK) {
+            ALOGE("%s: Error receiving P010 image buffer: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            mPendingInputFrames[*it].error = true;
+            mInputP010Buffers.erase(it);
+            continue;
+        }
+
+        if (*it != imgBuffer.timestamp) {
+            ALOGW("%s: Expecting P010 buffer with time stamp: %" PRId64 " received buffer with "
+                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+        }
+
+        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+                (mPendingInputFrames[imgBuffer.timestamp].error)) {
+            mP010Consumer->unlockBuffer(imgBuffer);
+        } else {
+            mPendingInputFrames[imgBuffer.timestamp].p010Buffer = imgBuffer;
+            mP010BufferAcquired = true;
+        }
+        mInputP010Buffers.erase(it);
+    }
+
+    while (!mCaptureResults.empty()) { // attach (frame number, metadata) tuples to pending frames, keyed by timestamp
+        auto it = mCaptureResults.begin();
+        // Negative timestamp indicates that something went wrong during the capture result
+        // collection process.
+        if (it->first >= 0) {
+            mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
+            mPendingInputFrames[it->first].result = std::get<1>(it->second);
+        }
+        mCaptureResults.erase(it);
+    }
+
+    while (!mFrameNumberMap.empty()) { // frame number -> timestamp mapping from shutter notifications
+        auto it = mFrameNumberMap.begin();
+        mPendingInputFrames[it->second].frameNumber = it->first;
+        mFrameNumberMap.erase(it);
+    }
+
+    auto it = mErrorFrameNumbers.begin();
+    while (it != mErrorFrameNumbers.end()) { // propagate reported buffer errors to the matching pending frames
+        bool frameFound = false;
+        for (auto &inputFrame : mPendingInputFrames) {
+            if (inputFrame.second.frameNumber == *it) {
+                inputFrame.second.error = true;
+                frameFound = true;
+                break;
+            }
+        }
+
+        if (frameFound) {
+            it = mErrorFrameNumbers.erase(it);
+        } else {
+            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
+                    *it);
+            it++; // keep the entry; a matching frame may arrive on a later pass
+        }
+    }
+}
+
+bool JpegRCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) { // Find the oldest frame with all required buffers present; caller holds mMutex.
+    if (currentTs == nullptr) {
+        return false;
+    }
+
+    bool newInputAvailable = false;
+    for (const auto& it : mPendingInputFrames) {
+        if ((!it.second.error) && (it.second.p010Buffer.data != nullptr) &&
+                ((it.second.jpegBuffer.data != nullptr) || !mSupportInternalJpeg) && // jpeg input only required when internal jpeg is supported
+                (it.first < *currentTs)) {
+            *currentTs = it.first; // track the smallest (oldest) ready timestamp
+            newInputAvailable = true;
+        }
+    }
+
+    return newInputAvailable;
+}
+
+int64_t JpegRCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) { // Return frame number of the oldest un-notified failed frame (ts via *currentTs), or -1; caller holds mMutex.
+    int64_t ret = -1;
+    if (currentTs == nullptr) {
+        return ret;
+    }
+
+    for (const auto& it : mPendingInputFrames) {
+        if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
+            *currentTs = it.first; // oldest failing timestamp wins
+            ret = it.second.frameNumber;
+        }
+    }
+
+    return ret;
+}
+
+status_t JpegRCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) { // Encode one ready frame (P010 [+ jpeg]) into a Jpeg/R blob and queue it to the client surface.
+    status_t res;
+    sp<ANativeWindow> outputANW = mOutputSurface;
+    ANativeWindowBuffer *anb;
+    int fenceFd;
+    void *dstBuffer;
+
+    size_t maxJpegRBufferSize = 0;
+    if (mMaxJpegBufferSize > 0) {
+        // If this is an ultra high resolution sensor and the input frames size
+        // is > default res jpeg.
+        if (mUHRMaxJpegSize.width != 0 &&
+                inputFrame.jpegBuffer.width * inputFrame.jpegBuffer.height >
+                mDefaultMaxJpegSize.width * mDefaultMaxJpegSize.height) {
+            maxJpegRBufferSize = mUHRMaxJpegBufferSize;
+        } else {
+            maxJpegRBufferSize = mMaxJpegBufferSize;
+        }
+    } else {
+        maxJpegRBufferSize = inputFrame.p010Buffer.width * inputFrame.p010Buffer.height; // fallback bound when HAL published no max jpeg size
+    }
+
+    uint8_t jpegQuality = 100; // default quality when the capture result carries none
+    auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
+    if (entry.count > 0) {
+        jpegQuality = entry.data.u8[0];
+    }
+
+    uint8_t jpegOrientation = 0; // NOTE(review): i32 metadata narrowed into uint8_t, and value is unused below — confirm intent
+    entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
+    if (entry.count > 0) {
+        jpegOrientation = entry.data.i32[0];
+    }
+
+    if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), maxJpegRBufferSize, 1)) // blob streams are width x 1
+            != OK) {
+        ALOGE("%s: Unable to configure stream buffer dimensions"
+                " %zux%u for stream %d", __FUNCTION__, maxJpegRBufferSize, 1U, mP010StreamId);
+        return res;
+    }
+
+    res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
+    if (res != OK) {
+        ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
+                res);
+        return res;
+    }
+
+    sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
+    GraphicBufferLocker gbLocker(gb);
+    res = gbLocker.lockAsync(&dstBuffer, fenceFd); // waits on the acquire fence before CPU access
+    if (res != OK) {
+        ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+        return res;
+    }
+
+    if ((gb->getWidth() < maxJpegRBufferSize) || (gb->getHeight() != 1)) {
+        ALOGE("%s: Blob buffer size mismatch, expected %zux%u received %dx%d", __FUNCTION__,
+                maxJpegRBufferSize, 1, gb->getWidth(), gb->getHeight());
+        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+        return BAD_VALUE;
+    }
+
+    size_t actualJpegRSize = 0;
+    if (mSupportInternalJpeg) {
+        recoverymap::jpegr_uncompressed_struct p010;
+        recoverymap::jpegr_compressed_struct jpeg;
+        recoverymap::jpegr_compressed_struct jpegR;
+
+        p010.height = inputFrame.p010Buffer.height;
+        p010.width = inputFrame.p010Buffer.width;
+        p010.colorGamut = recoverymap::jpegr_color_gamut::JPEGR_COLORGAMUT_BT2100;
+        size_t yChannelSizeInByte = p010.width * p010.height * 2; // P010: 16 bits per luma sample
+        size_t uvChannelSizeInByte = p010.width * p010.height;    // interleaved CbCr plane at half vertical resolution
+        p010.data = new uint8_t[yChannelSizeInByte + uvChannelSizeInByte];
+        std::unique_ptr<uint8_t[]> p010_data; // owns the staging copy for the encoder call
+        p010_data.reset(reinterpret_cast<uint8_t*>(p010.data));
+        memcpy((uint8_t*)p010.data, inputFrame.p010Buffer.data, yChannelSizeInByte); // assumes tightly packed planes — TODO confirm strides
+        memcpy((uint8_t*)p010.data + yChannelSizeInByte, inputFrame.p010Buffer.dataCb,
+                uvChannelSizeInByte);
+
+        jpeg.data = inputFrame.jpegBuffer.data;
+        jpeg.length = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
+                inputFrame.jpegBuffer.width); // blob buffers carry their byte capacity in 'width'
+        if (jpeg.length == 0) {
+            ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!",
+                    __FUNCTION__);
+            jpeg.length = inputFrame.jpegBuffer.width;
+        }
+
+        if (mOutputColorSpace == ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_DISPLAY_P3) {
+            jpeg.colorGamut = recoverymap::jpegr_color_gamut::JPEGR_COLORGAMUT_P3;
+        } else {
+            jpeg.colorGamut = recoverymap::jpegr_color_gamut::JPEGR_COLORGAMUT_BT709;
+        }
+
+        recoverymap::jpegr_transfer_function transferFunction;
+        switch (mP010DynamicRange) { // HDR10/HDR10+ use PQ; everything else (incl. Dolby 10-bit) maps to HLG
+            case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10:
+            case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS:
+                transferFunction = recoverymap::jpegr_transfer_function::JPEGR_TF_PQ;
+                break;
+            default:
+                transferFunction = recoverymap::jpegr_transfer_function::JPEGR_TF_HLG;
+        }
+
+        jpegR.data = dstBuffer; // encode directly into the dequeued output buffer
+        jpegR.maxLength = maxJpegRBufferSize;
+
+        recoverymap::RecoveryMap recoveryMap;
+        res = recoveryMap.encodeJPEGR(&p010, &jpeg, transferFunction, &jpegR);
+        if (res != OK) {
+            ALOGE("%s: Error trying to encode JPEG/R: %s (%d)", __FUNCTION__, strerror(-res), res);
+            return res; // NOTE(review): dequeued 'anb' is not cancelled on this path — possible buffer leak; confirm
+        }
+
+        actualJpegRSize = jpegR.length;
+        p010_data.release(); // NOTE(review): release() leaks the staging copy unless encodeJPEGR took ownership — confirm API contract
+    } else {
+        const uint8_t* exifBuffer = nullptr; // NOTE(review): exifBuffer/exifBufferSize are never consumed after this block — dead code or missing append step?
+        size_t exifBufferSize = 0;
+        std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+        utils->initializeEmpty();
+        utils->setFromMetadata(inputFrame.result, mStaticInfo, inputFrame.p010Buffer.width,
+                inputFrame.p010Buffer.height);
+        if (utils->generateApp1()) {
+            exifBuffer = utils->getApp1Buffer();
+            exifBufferSize = utils->getApp1Length();
+        } else {
+            ALOGE("%s: Unable to generate App1 buffer", __FUNCTION__);
+        }
+    }
+
+    size_t finalJpegRSize = actualJpegRSize + sizeof(CameraBlob); // assumes CameraBlob layout matches the blob header wire format — TODO confirm
+    if (finalJpegRSize > maxJpegRBufferSize) {
+        ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
+        outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+        return NO_MEMORY;
+    }
+
+    res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts); // propagate the capture timestamp to the client
+    if (res != OK) {
+        ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
+                getStreamId(), strerror(-res), res);
+        return res;
+    }
+
+    ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegRSize);
+    uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
+            (gb->getWidth() - sizeof(CameraBlob)); // blob header lives at the very end of the buffer
+    CameraBlob blobHeader = {
+        .blobId = CameraBlobId::JPEG,
+        .blobSizeBytes = static_cast<int32_t>(actualJpegRSize)
+    };
+    memcpy(header, &blobHeader, sizeof(CameraBlob));
+    outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+
+    return res;
+}
+
+void JpegRCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) { // Unlock held buffers and notify the client once on error; caller holds mMutex.
+    if (inputFrame == nullptr) {
+        return;
+    }
+
+    if (inputFrame->p010Buffer.data != nullptr) {
+        mP010Consumer->unlockBuffer(inputFrame->p010Buffer);
+        inputFrame->p010Buffer.data = nullptr; // null data marks "no buffer held"
+        mP010BufferAcquired = false;           // allow the compile loop to lock the next P010 buffer
+    }
+
+    if (inputFrame->jpegBuffer.data != nullptr) {
+        mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
+        inputFrame->jpegBuffer.data = nullptr;
+        mBlobBufferAcquired = false;
+    }
+
+    if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) { // errorNotified guarantees a single client notification per frame
+        //TODO: Figure out correct requestId
+        notifyError(inputFrame->frameNumber, -1 /*requestId*/);
+        inputFrame->errorNotified = true;
+    }
+}
+
+void JpegRCompositeStream::releaseInputFramesLocked(int64_t currentTs) { // Release and erase every pending frame with timestamp <= currentTs; caller holds mMutex.
+    auto it = mPendingInputFrames.begin();
+    while (it != mPendingInputFrames.end()) {
+        if (it->first <= currentTs) {
+            releaseInputFrameLocked(&it->second);
+            it = mPendingInputFrames.erase(it); // erase returns the next valid iterator
+        } else {
+            it++;
+        }
+    }
+}
+
+bool JpegRCompositeStream::threadLoop() { // Worker loop: wait for a complete input set, encode it, then release consumed frames.
+    int64_t currentTs = INT64_MAX; // sentinel: any real timestamp is smaller
+    bool newInputAvailable = false;
+
+    {
+        Mutex::Autolock l(mMutex);
+
+        if (mErrorState) {
+            // In case we landed in error state, return any pending buffers and
+            // halt all further processing.
+            compilePendingInputLocked();
+            releaseInputFramesLocked(currentTs);
+            return false;
+        }
+
+        while (!newInputAvailable) {
+            compilePendingInputLocked();
+            newInputAvailable = getNextReadyInputLocked(&currentTs); // fixed: '&currentTs' was mojibake-corrupted ('&curren' -> currency sign)
+            if (!newInputAvailable) {
+                auto failingFrameNumber = getNextFailingInputLocked(&currentTs); // fixed: same mojibake repair
+                if (failingFrameNumber >= 0) {
+                    // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
+                    // possible for two internal stream buffers to fail. In such scenario the
+                    // composite stream should notify the client about a stream buffer error only
+                    // once and this information is kept within 'errorNotified'.
+                    // Any present failed input frames will be removed on a subsequent call to
+                    // 'releaseInputFramesLocked()'.
+                    releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
+                    currentTs = INT64_MAX;
+                }
+
+                auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
+                if (ret == TIMED_OUT) {
+                    return true; // nothing ready yet; run the loop again
+                } else if (ret != OK) {
+                    ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
+                            strerror(-ret), ret);
+                    return false;
+                }
+            }
+        }
+    }
+
+    auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]); // encode outside the lock
+    Mutex::Autolock l(mMutex);
+    if (res != OK) {
+        ALOGE("%s: Failed processing frame with timestamp: %" PRId64 ": %s (%d)", __FUNCTION__,
+                currentTs, strerror(-res), res); // fixed: PRIu64 -> PRId64 (currentTs is signed int64_t)
+        mPendingInputFrames[currentTs].error = true;
+    }
+
+    releaseInputFramesLocked(currentTs);
+
+    return true;
+}
+
+bool JpegRCompositeStream::isJpegRCompositeStream(const sp<Surface> &surface) { // True when the surface is BLOB-format with the Jpeg/R dataspace (and the feature is enabled).
+    if (CameraProviderManager::kFrameworkJpegRDisabled) {
+        return false; // framework-level Jpeg/R support compiled/configured out
+    }
+    ANativeWindow *anw = surface.get();
+    status_t err;
+    int format;
+    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+        ALOGE("%s: Failed to query Surface format: %s (%d)", __FUNCTION__, strerror(-err),
+                err);
+        return false;
+    }
+
+    int dataspace;
+    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
+        ALOGE("%s: Failed to query Surface dataspace: %s (%d)", __FUNCTION__, strerror(-err),
+                err);
+        return false;
+    }
+
+    if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == static_cast<int>(kJpegRDataSpace))) {
+        return true;
+    }
+
+    return false;
+}
+
+void JpegRCompositeStream::deriveDynamicRangeAndDataspace(int64_t dynamicProfile, // Map a requested dynamic-range profile to the internal P010 profile + dataspace.
+        int64_t* /*out*/dynamicRange, int64_t* /*out*/dataSpace) {
+    if ((dynamicRange == nullptr) || (dataSpace == nullptr)) {
+        return; // nothing to write into
+    }
+
+    switch (dynamicProfile) {
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS:
+            *dynamicRange = dynamicProfile; // HDR10/HDR10+ keep their profile, use PQ dataspace
+            *dataSpace = HAL_DATASPACE_BT2020_ITU_PQ;
+            break;
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_REF_PO:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM:
+        case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_DOLBY_VISION_10B_HDR_OEM_PO:
+            *dynamicRange = dynamicProfile; // 10-bit Dolby Vision variants map to HLG dataspace
+            *dataSpace = HAL_DATASPACE_BT2020_ITU_HLG;
+            break;
+        default:
+            *dynamicRange = kP010DefaultDynamicRange; // anything else falls back to the class defaults
+            *dataSpace = kP010DefaultDataSpace;
+    }
+
+}
+
+status_t JpegRCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
+ camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int /*streamSetId*/, bool /*isShared*/, int32_t colorSpace,
+ int64_t dynamicProfile, int64_t streamUseCase, bool useReadoutTimestamp) {
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (!device.get()) {
+ ALOGE("%s: Invalid camera device!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ deriveDynamicRangeAndDataspace(dynamicProfile, &mP010DynamicRange, &mP010DataSpace);
+ mSupportInternalJpeg = CameraProviderManager::isConcurrentDynamicRangeCaptureSupported(
+ mStaticInfo, mP010DynamicRange,
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD);
+
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mP010Consumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
+ mP010Consumer->setFrameAvailableListener(this);
+ mP010Consumer->setName(String8("Camera3-P010CompositeStream"));
+ mP010Surface = new Surface(producer);
+
+ auto ret = device->createStream(mP010Surface, width, height, kP010PixelFormat,
+ static_cast<android_dataspace>(mP010DataSpace), rotation,
+ id, physicalCameraId, sensorPixelModesUsed, surfaceIds,
+ camera3::CAMERA3_STREAM_SET_ID_INVALID, false /*isShared*/, false /*isMultiResolution*/,
+ GRALLOC_USAGE_SW_READ_OFTEN, mP010DynamicRange, streamUseCase,
+ OutputConfiguration::TIMESTAMP_BASE_DEFAULT, OutputConfiguration::MIRROR_MODE_AUTO,
+ ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED, useReadoutTimestamp);
+ if (ret == OK) {
+ mP010StreamId = *id;
+ mP010SurfaceId = (*surfaceIds)[0];
+ mOutputSurface = consumers[0];
+ } else {
+ return ret;
+ }
+
+ if (mSupportInternalJpeg) {
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
+ mBlobConsumer->setFrameAvailableListener(this);
+ mBlobConsumer->setName(String8("Camera3-JpegRCompositeStream"));
+ mBlobSurface = new Surface(producer);
+ std::vector<int> blobSurfaceId;
+ ret = device->createStream(mBlobSurface, width, height, format,
+ kJpegDataSpace, rotation, &mBlobStreamId, physicalCameraId, sensorPixelModesUsed,
+ &blobSurfaceId,
+            /*streamSetId*/ camera3::CAMERA3_STREAM_SET_ID_INVALID,
+ /*isShared*/ false,
+ /*isMultiResolution*/ false,
+ /*consumerUsage*/ GRALLOC_USAGE_SW_READ_OFTEN,
+ /*dynamicProfile*/ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD,
+ streamUseCase,
+ /*timestampBase*/ OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
+ /*mirrorMode*/ OutputConfiguration::MIRROR_MODE_AUTO,
+ /*colorSpace*/ colorSpace, useReadoutTimestamp);
+ if (ret == OK) {
+ mBlobSurfaceId = blobSurfaceId[0];
+ } else {
+ return ret;
+ }
+
+ ret = registerCompositeStreamListener(mBlobStreamId);
+ if (ret != OK) {
+ ALOGE("%s: Failed to register jpeg stream listener!", __FUNCTION__);
+ return ret;
+ }
+ }
+
+ ret = registerCompositeStreamListener(getStreamId());
+ if (ret != OK) {
+ ALOGE("%s: Failed to register P010 stream listener!", __FUNCTION__);
+ return ret;
+ }
+
+ mOutputColorSpace = colorSpace;
+ mBlobWidth = width;
+ mBlobHeight = height;
+
+ return ret;
+}
+
+status_t JpegRCompositeStream::configureStream() {
+ if (isRunning()) {
+ // Processing thread is already running, nothing more to do.
+ return NO_ERROR;
+ }
+
+ if (mOutputSurface.get() == nullptr) {
+ ALOGE("%s: No valid output surface set!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
+ if (res != OK) {
+ ALOGE("%s: Unable to connect to native window for stream %d",
+ __FUNCTION__, mP010StreamId);
+ return res;
+ }
+
+ if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
+ != OK) {
+ ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
+ mP010StreamId);
+ return res;
+ }
+
+ int maxProducerBuffers;
+ ANativeWindow *anw = mP010Surface.get();
+ if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mP010StreamId);
+ return res;
+ }
+
+ ANativeWindow *anwConsumer = mOutputSurface.get();
+ int maxConsumerBuffers;
+ if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+ &maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mP010StreamId);
+ return res;
+ }
+
+ if ((res = native_window_set_buffer_count(
+ anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mP010StreamId);
+ return res;
+ }
+
+ run("JpegRCompositeStreamProc");
+
+ return NO_ERROR;
+}
+
+status_t JpegRCompositeStream::deleteInternalStreams() {
+ // The 'CameraDeviceClient' parent will delete the P010 stream
+ requestExit();
+
+ auto ret = join();
+ if (ret != OK) {
+ ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
+ strerror(-ret), ret);
+ }
+
+ if (mBlobStreamId >= 0) {
+ // Camera devices may not be valid after switching to offline mode.
+ // In this case, all offline streams including internal composite streams
+ // are managed and released by the offline session.
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device.get() != nullptr) {
+ ret = device->deleteStream(mBlobStreamId);
+ }
+
+ mBlobStreamId = -1;
+ }
+
+ if (mOutputSurface != nullptr) {
+ mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
+ mOutputSurface.clear();
+ }
+
+ return ret;
+}
+
+void JpegRCompositeStream::onFrameAvailable(const BufferItem& item) {
+ if (item.mDataSpace == kJpegDataSpace) {
+ ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
+ __func__, ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState) {
+ mInputJpegBuffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else if (item.mDataSpace == static_cast<android_dataspace_t>(mP010DataSpace)) {
+ ALOGV("%s: P010 buffer with ts: %" PRIu64 " ms. arrived!", __func__,
+ ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState) {
+ mInputP010Buffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else {
+ ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
+ }
+}
+
+status_t JpegRCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
+ Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
+ if (outputStreamIds == nullptr) {
+ return BAD_VALUE;
+ }
+
+ if (outSurfaceMap->find(mP010StreamId) == outSurfaceMap->end()) {
+ outputStreamIds->push_back(mP010StreamId);
+ }
+ (*outSurfaceMap)[mP010StreamId].push_back(mP010SurfaceId);
+
+ if (mSupportInternalJpeg) {
+ if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
+ outputStreamIds->push_back(mBlobStreamId);
+ }
+ (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);
+ }
+
+ if (currentStreamId != nullptr) {
+ *currentStreamId = mP010StreamId;
+ }
+
+ return NO_ERROR;
+}
+
+status_t JpegRCompositeStream::insertCompositeStreamIds(
+ std::vector<int32_t>* compositeStreamIds /*out*/) {
+ if (compositeStreamIds == nullptr) {
+ return BAD_VALUE;
+ }
+
+ compositeStreamIds->push_back(mP010StreamId);
+ if (mSupportInternalJpeg) {
+ compositeStreamIds->push_back(mBlobStreamId);
+ }
+
+ return OK;
+}
+
+void JpegRCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
+ // Processing can continue even in case of result errors.
+ // At the moment Jpeg/R composite stream processing relies mainly on static camera
+ // characteristics data. The actual result data can be used for the jpeg quality but
+ // in case it is absent we can default to maximum.
+ eraseResult(resultExtras.frameNumber);
+}
+
+bool JpegRCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
+ bool ret = false;
+ // Buffer errors concerning internal composite streams should not be directly visible to
+ // camera clients. They must only receive a single buffer error with the public composite
+ // stream id.
+ if ((resultExtras.errorStreamId == mP010StreamId) ||
+ (resultExtras.errorStreamId == mBlobStreamId)) {
+ flagAnErrorFrameNumber(resultExtras.frameNumber);
+ ret = true;
+ }
+
+ return ret;
+}
+
+status_t JpegRCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& staticInfo,
+ std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
+ if (compositeOutput == nullptr) {
+ return BAD_VALUE;
+ }
+
+ int64_t dynamicRange, dataSpace;
+ deriveDynamicRangeAndDataspace(streamInfo.dynamicRangeProfile, &dynamicRange, &dataSpace);
+
+ compositeOutput->clear();
+ compositeOutput->push_back({});
+ (*compositeOutput)[0].width = streamInfo.width;
+ (*compositeOutput)[0].height = streamInfo.height;
+ (*compositeOutput)[0].format = kP010PixelFormat;
+ (*compositeOutput)[0].dataSpace = static_cast<android_dataspace_t>(dataSpace);
+ (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+ (*compositeOutput)[0].dynamicRangeProfile = dynamicRange;
+ (*compositeOutput)[0].colorSpace =
+ ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED;
+
+ if (CameraProviderManager::isConcurrentDynamicRangeCaptureSupported(staticInfo,
+ streamInfo.dynamicRangeProfile,
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD)) {
+ compositeOutput->push_back({});
+ (*compositeOutput)[1].width = streamInfo.width;
+ (*compositeOutput)[1].height = streamInfo.height;
+ (*compositeOutput)[1].format = HAL_PIXEL_FORMAT_BLOB;
+ (*compositeOutput)[1].dataSpace = kJpegDataSpace;
+ (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+ (*compositeOutput)[1].dynamicRangeProfile =
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+ (*compositeOutput)[1].colorSpace = streamInfo.colorSpace;
+ }
+
+ return NO_ERROR;
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/api2/JpegRCompositeStream.h b/services/camera/libcameraservice/api2/JpegRCompositeStream.h
new file mode 100644
index 0000000..4b462b5
--- /dev/null
+++ b/services/camera/libcameraservice/api2/JpegRCompositeStream.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_JPEG_R_COMPOSITE_STREAM_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_JPEG_R_COMPOSITE_STREAM_H
+
+#include <gui/CpuConsumer.h>
+#include "aidl/android/hardware/graphics/common/Dataspace.h"
+#include "system/graphics-base-v1.1.h"
+
+#include "api1/client2/JpegProcessor.h"
+
+#include "CompositeStream.h"
+
+namespace android {
+
+class CameraDeviceClient;
+class CameraMetadata;
+class Surface;
+
+namespace camera3 {
+
+class JpegRCompositeStream : public CompositeStream, public Thread,
+ public CpuConsumer::FrameAvailableListener {
+
+public:
+ JpegRCompositeStream(sp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb);
+ ~JpegRCompositeStream() override;
+
+ static bool isJpegRCompositeStream(const sp<Surface> &surface);
+
+ // CompositeStream overrides
+ status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ const std::unordered_set<int32_t> &sensorPixelModesUsed,
+ std::vector<int> *surfaceIds,
+ int streamSetId, bool isShared, int32_t colorSpace,
+ int64_t dynamicProfile, int64_t streamUseCase, bool useReadoutTimestamp) override;
+ status_t deleteInternalStreams() override;
+ status_t configureStream() override;
+ status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap, Vector<int32_t>* /*out*/outputStreamIds,
+ int32_t* /*out*/currentStreamId) override;
+ status_t insertCompositeStreamIds(std::vector<int32_t>* compositeStreamIds /*out*/) override;
+ int getStreamId() override { return mP010StreamId; }
+
+ // CpuConsumer listener implementation
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // Return stream information about the internal camera streams
+ static status_t getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/);
+
+protected:
+
+ bool threadLoop() override;
+ bool onStreamBufferError(const CaptureResultExtras& resultExtras) override;
+ void onResultError(const CaptureResultExtras& resultExtras) override;
+
+private:
+ struct InputFrame {
+ CpuConsumer::LockedBuffer p010Buffer;
+ CpuConsumer::LockedBuffer jpegBuffer;
+ CameraMetadata result;
+ bool error;
+ bool errorNotified;
+ int64_t frameNumber;
+ int32_t requestId;
+
+ InputFrame() : error(false), errorNotified(false), frameNumber(-1), requestId(-1) { }
+ };
+
+ status_t processInputFrame(nsecs_t ts, const InputFrame &inputFrame);
+
+ // Buffer/Results handling
+ void compilePendingInputLocked();
+ void releaseInputFrameLocked(InputFrame *inputFrame /*out*/);
+ void releaseInputFramesLocked(int64_t currentTs);
+
+ // Find first complete and valid frame with smallest timestamp
+ bool getNextReadyInputLocked(int64_t *currentTs /*inout*/);
+
+ // Find next failing frame number with smallest timestamp and return respective frame number
+ int64_t getNextFailingInputLocked(int64_t *currentTs /*inout*/);
+
+ static void deriveDynamicRangeAndDataspace(int64_t dynamicProfile, int64_t* /*out*/dynamicRange,
+ int64_t* /*out*/dataSpace);
+
+ static const nsecs_t kWaitDuration = 10000000; // 10 ms
+ static const auto kP010PixelFormat = HAL_PIXEL_FORMAT_YCBCR_P010;
+ static const auto kP010DefaultDataSpace = HAL_DATASPACE_BT2020_ITU_HLG;
+ static const auto kP010DefaultDynamicRange =
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10;
+ static const auto kJpegDataSpace = HAL_DATASPACE_V0_JFIF;
+ static const auto kJpegRDataSpace =
+ aidl::android::hardware::graphics::common::Dataspace::JPEG_R;
+
+ bool mSupportInternalJpeg = false;
+ int64_t mP010DataSpace = HAL_DATASPACE_BT2020_HLG;
+ int64_t mP010DynamicRange =
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10;
+ int mBlobStreamId, mBlobSurfaceId, mP010StreamId, mP010SurfaceId;
+ size_t mBlobWidth, mBlobHeight;
+ sp<CpuConsumer> mBlobConsumer, mP010Consumer;
+ bool mP010BufferAcquired, mBlobBufferAcquired;
+ sp<Surface> mP010Surface, mBlobSurface, mOutputSurface;
+ int32_t mOutputColorSpace;
+ sp<ProducerListener> mProducerListener;
+
+ ssize_t mMaxJpegBufferSize;
+ ssize_t mUHRMaxJpegBufferSize;
+
+ camera3::Size mDefaultMaxJpegSize;
+ camera3::Size mUHRMaxJpegSize;
+
+ // Keep all incoming P010 buffer timestamps pending further processing.
+ std::vector<int64_t> mInputP010Buffers;
+
+ // Keep all incoming Jpeg/Blob buffer timestamps pending further processing.
+ std::vector<int64_t> mInputJpegBuffers;
+
+ // Map of all input frames pending further processing.
+ std::unordered_map<int64_t, InputFrame> mPendingInputFrames;
+
+ const CameraMetadata mStaticInfo;
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 977ab7c..6c30606 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -193,7 +193,8 @@
int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
- int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED)
+ int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ bool useReadoutTimestamp = false)
= 0;
/**
@@ -216,7 +217,8 @@
int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
- int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED)
+ int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ bool useReadoutTimestamp = false)
= 0;
/**
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 89c7459..5b8e3a1 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "system/graphics-base-v1.0.h"
+#include "system/graphics-base-v1.1.h"
#define LOG_TAG "CameraProviderManager"
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0
@@ -40,6 +42,7 @@
#include <cutils/properties.h>
#include <hwbinder/IPCThreadState.h>
#include <utils/Trace.h>
+#include <ui/PublicFormat.h>
#include "api2/HeicCompositeStream.h"
#include "device3/ZoomRatioMapper.h"
@@ -59,6 +62,8 @@
} // anonymous namespace
const float CameraProviderManager::kDepthARTolerance = .1f;
+const bool CameraProviderManager::kFrameworkJpegRDisabled =
+ property_get_bool("ro.camera.disableJpegR", false);
CameraProviderManager::HidlServiceInteractionProxyImpl
CameraProviderManager::sHidlServiceInteractionProxy{};
@@ -1071,6 +1076,209 @@
}
}
+bool CameraProviderManager::isConcurrentDynamicRangeCaptureSupported(
+ const CameraMetadata& deviceInfo, int64_t profile, int64_t concurrentProfile) {
+ auto entry = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ if (entry.count == 0) {
+ return false;
+ }
+
+ const auto it = std::find(entry.data.u8, entry.data.u8 + entry.count,
+ ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT);
+ if (it == entry.data.u8 + entry.count) {
+ return false;
+ }
+
+ entry = deviceInfo.find(ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP);
+ if (entry.count == 0 || ((entry.count % 3) != 0)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < entry.count; i += 3) {
+ if (entry.data.i64[i] == profile) {
+ if (entry.data.i64[i+1] & concurrentProfile) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::deriveJpegRTags(bool maxResolution) {
+ if (kFrameworkJpegRDisabled) {
+ return OK;
+ }
+
+ const int32_t scalerSizesTag =
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, maxResolution);
+ const int32_t scalerMinFrameDurationsTag =
+ ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS;
+ const int32_t scalerStallDurationsTag =
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_SCALER_AVAILABLE_STALL_DURATIONS, maxResolution);
+
+ const int32_t jpegRSizesTag =
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS, maxResolution);
+ const int32_t jpegRStallDurationsTag =
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_STALL_DURATIONS, maxResolution);
+ const int32_t jpegRMinFrameDurationsTag =
+ SessionConfigurationUtils::getAppropriateModeTag(
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_MIN_FRAME_DURATIONS, maxResolution);
+
+ auto& c = mCameraCharacteristics;
+ std::vector<int32_t> supportedChTags;
+ auto chTags = c.find(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
+ if (chTags.count == 0) {
+ ALOGE("%s: No supported camera characteristics keys!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ std::vector<std::tuple<size_t, size_t>> supportedP010Sizes, supportedBlobSizes,
+ supportedDynamicDepthSizes, internalDepthSizes;
+ auto capabilities = c.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ if (capabilities.count == 0) {
+ ALOGE("%s: Supported camera capabilities is empty!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ auto end = capabilities.data.u8 + capabilities.count;
+ bool isTenBitOutputSupported = std::find(capabilities.data.u8, end,
+ ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DYNAMIC_RANGE_TEN_BIT) != end;
+ if (!isTenBitOutputSupported) {
+ // No 10-bit support, nothing more to do.
+ return OK;
+ }
+
+ if (!isConcurrentDynamicRangeCaptureSupported(c,
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10,
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD)) {
+        // Advertise Jpeg/R only in case 10-bit and 8-bit concurrent capture is supported.
+ // This can be removed when 10-bit to 8-bit tonemapping is available.
+ return OK;
+ }
+
+ getSupportedSizes(c, scalerSizesTag,
+ static_cast<android_pixel_format_t>(HAL_PIXEL_FORMAT_BLOB), &supportedBlobSizes);
+ getSupportedSizes(c, scalerSizesTag,
+ static_cast<android_pixel_format_t>(HAL_PIXEL_FORMAT_YCBCR_P010), &supportedP010Sizes);
+ auto it = supportedP010Sizes.begin();
+ while (it != supportedP010Sizes.end()) {
+ // Resolutions that don't align on 32 pixels are not supported by Jpeg/R.
+ // This can be removed as soon as the encoder restriction is lifted.
+ if ((std::find(supportedBlobSizes.begin(), supportedBlobSizes.end(), *it) ==
+ supportedBlobSizes.end()) || ((std::get<0>(*it) % 32) != 0)) {
+ it = supportedP010Sizes.erase(it);
+ } else {
+ it++;
+ }
+ }
+ if (supportedP010Sizes.empty()) {
+ // Nothing to do in this case.
+ return OK;
+ }
+
+ std::vector<int32_t> jpegREntries;
+ for (const auto& it : supportedP010Sizes) {
+ int32_t entry[4] = {HAL_PIXEL_FORMAT_BLOB, static_cast<int32_t> (std::get<0>(it)),
+ static_cast<int32_t> (std::get<1>(it)),
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS_OUTPUT };
+ jpegREntries.insert(jpegREntries.end(), entry, entry + 4);
+ }
+
+ std::vector<int64_t> blobMinDurations, blobStallDurations;
+ std::vector<int64_t> jpegRMinDurations, jpegRStallDurations;
+
+ // We use the jpeg stall and min frame durations to approximate the respective jpeg/r
+ // durations.
+ getSupportedDurations(c, scalerMinFrameDurationsTag, HAL_PIXEL_FORMAT_BLOB,
+ supportedP010Sizes, &blobMinDurations);
+ getSupportedDurations(c, scalerStallDurationsTag, HAL_PIXEL_FORMAT_BLOB,
+ supportedP010Sizes, &blobStallDurations);
+ if (blobStallDurations.empty() || blobMinDurations.empty() ||
+ (blobMinDurations.size() != blobStallDurations.size())) {
+ ALOGE("%s: Unexpected number of available blob durations! %zu vs. %zu",
+ __FUNCTION__, blobMinDurations.size(), blobStallDurations.size());
+ return BAD_VALUE;
+ }
+
+ auto itDuration = blobMinDurations.begin();
+ auto itSize = supportedP010Sizes.begin();
+ while (itDuration != blobMinDurations.end()) {
+ int64_t entry[4] = {HAL_PIXEL_FORMAT_BLOB, static_cast<int32_t> (std::get<0>(*itSize)),
+ static_cast<int32_t> (std::get<1>(*itSize)), *itDuration};
+ jpegRMinDurations.insert(jpegRMinDurations.end(), entry, entry + 4);
+ itDuration++; itSize++;
+ }
+
+ itDuration = blobStallDurations.begin();
+ itSize = supportedP010Sizes.begin();
+ while (itDuration != blobStallDurations.end()) {
+ int64_t entry[4] = {HAL_PIXEL_FORMAT_BLOB, static_cast<int32_t> (std::get<0>(*itSize)),
+ static_cast<int32_t> (std::get<1>(*itSize)), *itDuration};
+ jpegRStallDurations.insert(jpegRStallDurations.end(), entry, entry + 4);
+ itDuration++; itSize++;
+ }
+
+ supportedChTags.reserve(chTags.count + 3);
+ supportedChTags.insert(supportedChTags.end(), chTags.data.i32,
+ chTags.data.i32 + chTags.count);
+ supportedChTags.push_back(jpegRSizesTag);
+ supportedChTags.push_back(jpegRMinFrameDurationsTag);
+ supportedChTags.push_back(jpegRStallDurationsTag);
+ c.update(jpegRSizesTag, jpegREntries.data(), jpegREntries.size());
+ c.update(jpegRMinFrameDurationsTag, jpegRMinDurations.data(), jpegRMinDurations.size());
+ c.update(jpegRStallDurationsTag, jpegRStallDurations.data(), jpegRStallDurations.size());
+ c.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, supportedChTags.data(),
+ supportedChTags.size());
+
+ auto colorSpaces = c.find(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP);
+ if (colorSpaces.count > 0 && !maxResolution) {
+ bool displayP3Support = false;
+ int64_t dynamicRange = ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD;
+ for (size_t i = 0; i < colorSpaces.count; i += 3) {
+ auto colorSpace = colorSpaces.data.i64[i];
+ auto format = colorSpaces.data.i64[i+1];
+ bool formatMatch = (format == static_cast<int64_t>(PublicFormat::JPEG)) ||
+ (format == static_cast<int64_t>(PublicFormat::UNKNOWN));
+ bool colorSpaceMatch =
+ colorSpace == ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_DISPLAY_P3;
+ if (formatMatch && colorSpaceMatch) {
+ displayP3Support = true;
+ }
+
+ // Jpeg/R will support the same dynamic range profiles as P010
+ if (format == static_cast<int64_t>(PublicFormat::YCBCR_P010)) {
+ dynamicRange |= colorSpaces.data.i64[i+2];
+ }
+ }
+ if (displayP3Support) {
+ std::vector<int64_t> supportedColorSpaces;
+            // Jpeg/R must support the default system as well as display P3 color space
+ supportedColorSpaces.reserve(colorSpaces.count + 3*2);
+ supportedColorSpaces.insert(supportedColorSpaces.end(), colorSpaces.data.i64,
+ colorSpaces.data.i64 + colorSpaces.count);
+
+ supportedColorSpaces.push_back(static_cast<int64_t>(
+ ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_SRGB));
+ supportedColorSpaces.push_back(static_cast<int64_t>(PublicFormat::JPEG_R));
+ supportedColorSpaces.push_back(dynamicRange);
+
+ supportedColorSpaces.push_back(static_cast<int64_t>(
+ ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_DISPLAY_P3));
+ supportedColorSpaces.push_back(static_cast<int64_t>(PublicFormat::JPEG_R));
+ supportedColorSpaces.push_back(dynamicRange);
+ c.update(ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP,
+ supportedColorSpaces.data(), supportedColorSpaces.size());
+ }
+ }
+
+ return OK;
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::addDynamicDepthTags(
bool maxResolution) {
const int32_t depthExclTag = ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index ab1b389..acf511b 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -407,7 +407,11 @@
status_t notifyUsbDeviceEvent(int32_t eventId, const std::string &usbDeviceId);
+ static bool isConcurrentDynamicRangeCaptureSupported(const CameraMetadata& deviceInfo,
+ int64_t profile, int64_t concurrentProfile);
+
static const float kDepthARTolerance;
+ static const bool kFrameworkJpegRDisabled;
private:
// All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
mutable std::mutex mInterfaceMutex;
@@ -675,6 +679,7 @@
status_t fixupTorchStrengthTags();
status_t addDynamicDepthTags(bool maxResolution = false);
status_t deriveHeicTags(bool maxResolution = false);
+ status_t deriveJpegRTags(bool maxResolution = false);
status_t addRotateCropTags();
status_t addAutoframingTags();
status_t addPreCorrectionActiveArraySize();
diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
index 5a449b6..84fe3a5 100644
--- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
@@ -501,6 +501,11 @@
ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities: %s (%d)",
__FUNCTION__, strerror(-res), res);
}
+ res = deriveJpegRTags();
+ if (OK != res) {
+ ALOGE("%s: Unable to derive Jpeg/R tags based on camera and media capabilities: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ }
if (camera3::SessionConfigurationUtils::isUltraHighResolutionSensor(mCameraCharacteristics)) {
status_t status = addDynamicDepthTags(/*maxResolution*/true);
@@ -514,6 +519,12 @@
ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities for"
"maximum resolution mode: %s (%d)", __FUNCTION__, strerror(-status), status);
}
+
+ status = deriveJpegRTags(/*maxResolution*/true);
+ if (OK != status) {
+ ALOGE("%s: Unable to derive Jpeg/R tags based on camera and media capabilities for"
+ "maximum resolution mode: %s (%d)", __FUNCTION__, strerror(-status), status);
+ }
}
res = addRotateCropTags();
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 5e99389..28a150c 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1001,7 +1001,7 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
uint64_t consumerUsage, int64_t dynamicRangeProfile, int64_t streamUseCase,
- int timestampBase, int mirrorMode, int32_t colorSpace) {
+ int timestampBase, int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) {
ATRACE_CALL();
if (consumer == nullptr) {
@@ -1015,7 +1015,7 @@
return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
format, dataSpace, rotation, id, physicalCameraId, sensorPixelModesUsed, surfaceIds,
streamSetId, isShared, isMultiResolution, consumerUsage, dynamicRangeProfile,
- streamUseCase, timestampBase, mirrorMode, colorSpace);
+ streamUseCase, timestampBase, mirrorMode, colorSpace, useReadoutTimestamp);
}
static bool isRawFormat(int format) {
@@ -1036,7 +1036,7 @@
const String8& physicalCameraId, const std::unordered_set<int32_t> &sensorPixelModesUsed,
std::vector<int> *surfaceIds, int streamSetId, bool isShared, bool isMultiResolution,
uint64_t consumerUsage, int64_t dynamicRangeProfile, int64_t streamUseCase,
- int timestampBase, int mirrorMode, int32_t colorSpace) {
+ int timestampBase, int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
@@ -1045,10 +1045,11 @@
ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
" consumer usage %" PRIu64 ", isShared %d, physicalCameraId %s, isMultiResolution %d"
" dynamicRangeProfile 0x%" PRIx64 ", streamUseCase %" PRId64 ", timestampBase %d,"
- " mirrorMode %d colorSpace %d",
+ " mirrorMode %d, colorSpace %d, useReadoutTimestamp %d",
mId.string(), mNextStreamId, width, height, format, dataSpace, rotation,
consumerUsage, isShared, physicalCameraId.string(), isMultiResolution,
- dynamicRangeProfile, streamUseCase, timestampBase, mirrorMode, colorSpace);
+ dynamicRangeProfile, streamUseCase, timestampBase, mirrorMode, colorSpace,
+ useReadoutTimestamp);
status_t res;
bool wasActive = false;
@@ -1119,7 +1120,7 @@
width, height, blobBufferSize, format, dataSpace, rotation,
mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
isMultiResolution, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
- timestampBase, mirrorMode, colorSpace);
+ timestampBase, mirrorMode, colorSpace, useReadoutTimestamp);
} else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
bool maxResolution =
sensorPixelModesUsed.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
@@ -1134,25 +1135,25 @@
width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
isMultiResolution, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
- timestampBase, mirrorMode, colorSpace);
+ timestampBase, mirrorMode, colorSpace, useReadoutTimestamp);
} else if (isShared) {
newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
width, height, format, consumerUsage, dataSpace, rotation,
mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
mUseHalBufManager, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
- timestampBase, mirrorMode, colorSpace);
+ timestampBase, mirrorMode, colorSpace, useReadoutTimestamp);
} else if (consumers.size() == 0 && hasDeferredConsumer) {
newStream = new Camera3OutputStream(mNextStreamId,
width, height, format, consumerUsage, dataSpace, rotation,
mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
isMultiResolution, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
- timestampBase, mirrorMode, colorSpace);
+ timestampBase, mirrorMode, colorSpace, useReadoutTimestamp);
} else {
newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, format, dataSpace, rotation,
mTimestampOffset, physicalCameraId, sensorPixelModesUsed, transport, streamSetId,
isMultiResolution, dynamicRangeProfile, streamUseCase, mDeviceTimeBaseIsRealtime,
- timestampBase, mirrorMode, colorSpace);
+ timestampBase, mirrorMode, colorSpace, useReadoutTimestamp);
}
size_t consumerCount = consumers.size();
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 9b75ac5..990f556 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -153,7 +153,8 @@
int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
- int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED)
+ int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ bool useReadoutTimestamp = false)
override;
status_t createStream(const std::vector<sp<Surface>>& consumers,
@@ -170,7 +171,8 @@
int64_t streamUseCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
- int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED)
+ int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ bool useReadoutTimestamp = false)
override;
status_t createInputStream(
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 1abcd86..4d6ab3d 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -56,7 +56,7 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
- int mirrorMode, int32_t colorSpace) :
+ int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
/*maxSize*/0, format, dataSpace, rotation,
physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
@@ -67,7 +67,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
- mUseReadoutTime(false),
+ mUseReadoutTime(useReadoutTimestamp),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -91,7 +91,7 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
- int mirrorMode, int32_t colorSpace) :
+ int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height, maxSize,
format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
setId, isMultiResolution, dynamicRangeProfile, streamUseCase,
@@ -101,7 +101,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
- mUseReadoutTime(false),
+ mUseReadoutTime(useReadoutTimestamp),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -131,7 +131,7 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
int setId, bool isMultiResolution, int64_t dynamicRangeProfile,
int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
- int mirrorMode, int32_t colorSpace) :
+ int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) :
Camera3IOStreamBase(id, CAMERA_STREAM_OUTPUT, width, height,
/*maxSize*/0, format, dataSpace, rotation,
physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
@@ -142,7 +142,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
- mUseReadoutTime(false),
+ mUseReadoutTime(useReadoutTimestamp),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -180,7 +180,8 @@
int setId, bool isMultiResolution,
int64_t dynamicRangeProfile, int64_t streamUseCase,
bool deviceTimeBaseIsRealtime, int timestampBase,
- int mirrorMode, int32_t colorSpace) :
+ int mirrorMode, int32_t colorSpace,
+ bool useReadoutTimestamp) :
Camera3IOStreamBase(id, type, width, height,
/*maxSize*/0,
format, dataSpace, rotation,
@@ -191,7 +192,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
- mUseReadoutTime(false),
+ mUseReadoutTime(useReadoutTimestamp),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -467,7 +468,7 @@
}
}
- nsecs_t captureTime = (mUseReadoutTime && readoutTimestamp != 0 ?
+ nsecs_t captureTime = ((mUseReadoutTime || mSyncToDisplay) && readoutTimestamp != 0 ?
readoutTimestamp : timestamp) - mTimestampOffset;
if (mPreviewFrameSpacer != nullptr) {
nsecs_t readoutTime = (readoutTimestamp != 0 ? readoutTimestamp : timestamp)
@@ -719,16 +720,12 @@
mFrameCount = 0;
mLastTimestamp = 0;
- mUseReadoutTime =
- (timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR || mSyncToDisplay);
-
if (isDeviceTimeBaseRealtime()) {
if (isDefaultTimeBase && !isConsumedByHWComposer() && !isVideoStream()) {
// Default time base, but not hardware composer or video encoder
mTimestampOffset = 0;
} else if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME ||
- timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR ||
- timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR) {
+ timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR) {
mTimestampOffset = 0;
}
// If timestampBase is CHOREOGRAPHER SYNCED or MONOTONIC, leave
@@ -738,7 +735,7 @@
// Reverse offset for monotonicTime -> bootTime
mTimestampOffset = -mTimestampOffset;
} else {
- // If timestampBase is DEFAULT, MONOTONIC, SENSOR, READOUT_SENSOR or
+ // If timestampBase is DEFAULT, MONOTONIC, SENSOR or
// CHOREOGRAPHER_SYNCED, timestamp offset is 0.
mTimestampOffset = 0;
}
@@ -1523,7 +1520,8 @@
vsyncTime.deadlineTimestamp >= currentTime &&
((!cameraDisplayInSync && vsyncTime.expectedPresentationTime > minPresentT) ||
(cameraDisplayInSync && vsyncTime.expectedPresentationTime >
- mLastPresentTime + minInterval + biasForShortDelay * kTimelineThresholdNs))) {
+ mLastPresentTime + minInterval +
+ static_cast<nsecs_t>(biasForShortDelay * kTimelineThresholdNs)))) {
expectedPresentT = vsyncTime.expectedPresentationTime;
minDiff = std::abs(vsyncTime.expectedPresentationTime - idealPresentT);
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 0d758bc..a2f16d4 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -97,7 +97,8 @@
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
- int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
+ int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ bool useReadoutTimestamp = false);
/**
* Set up a stream for formats that have a variable buffer size for the same
* dimensions, such as compressed JPEG.
@@ -115,7 +116,8 @@
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
- int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
+ int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ bool useReadoutTimestamp = false);
/**
* Set up a stream with deferred consumer for formats that have 2 dimensions, such as
* RAW and YUV. The consumer must be set before using this stream for output. A valid
@@ -132,7 +134,8 @@
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
- int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
+ int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ bool useReadoutTimestamp = false);
virtual ~Camera3OutputStream();
@@ -277,7 +280,8 @@
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
- int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
+ int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ bool useReadoutTimestamp = false);
/**
* Note that we release the lock briefly in this function
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index da45227..f3a7359 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -35,12 +35,13 @@
const std::unordered_set<int32_t> &sensorPixelModesUsed, IPCTransport transport,
int setId, bool useHalBufManager, int64_t dynamicProfile,
int64_t streamUseCase, bool deviceTimeBaseIsRealtime, int timestampBase,
- int mirrorMode, int32_t colorSpace) :
+ int mirrorMode, int32_t colorSpace, bool useReadoutTimestamp) :
Camera3OutputStream(id, CAMERA_STREAM_OUTPUT, width, height,
format, dataSpace, rotation, physicalCameraId, sensorPixelModesUsed,
transport, consumerUsage, timestampOffset, setId,
/*isMultiResolution*/false, dynamicProfile, streamUseCase,
- deviceTimeBaseIsRealtime, timestampBase, mirrorMode, colorSpace),
+ deviceTimeBaseIsRealtime, timestampBase, mirrorMode, colorSpace,
+ useReadoutTimestamp),
mUseHalBufManager(useHalBufManager) {
size_t consumerCount = std::min(surfaces.size(), kMaxOutputs);
if (surfaces.size() > consumerCount) {
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
index 5167225..1102ecb 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -46,7 +46,8 @@
bool deviceTimeBaseIsRealtime = false,
int timestampBase = OutputConfiguration::TIMESTAMP_BASE_DEFAULT,
int mirrorMode = OutputConfiguration::MIRROR_MODE_AUTO,
- int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED);
+ int32_t colorSpace = ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_UNSPECIFIED,
+ bool useReadoutTimestamp = false);
virtual ~Camera3SharedOutputStream();
diff --git a/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h b/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h
index 8b9b92b..9098fe8 100644
--- a/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h
+++ b/services/camera/libcameraservice/hidl/VndkVersionMetadataTags.h
@@ -77,6 +77,12 @@
{34, {
ANDROID_CONTROL_AUTOFRAMING_AVAILABLE,
ANDROID_CONTROL_AVAILABLE_SETTINGS_OVERRIDES,
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_MIN_FRAME_DURATIONS,
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_STALL_DURATIONS,
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_STALL_DURATIONS_MAXIMUM_RESOLUTION,
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS,
+ ANDROID_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION,
ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP,
} },
};
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index f9afd41..f786b79 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -19,6 +19,8 @@
#include "SessionConfigurationUtils.h"
#include "../api2/DepthCompositeStream.h"
#include "../api2/HeicCompositeStream.h"
+#include "aidl/android/hardware/graphics/common/Dataspace.h"
+#include "api2/JpegRCompositeStream.h"
#include "common/CameraDeviceBase.h"
#include "common/HalConversionsTemplated.h"
#include "../CameraService.h"
@@ -26,6 +28,7 @@
#include "device3/hidl/HidlCamera3Device.h"
#include "device3/Camera3OutputStream.h"
#include "system/graphics-base-v1.1.h"
+#include <ui/PublicFormat.h>
using android::camera3::OutputStreamInfo;
using android::camera3::OutputStreamInfo;
@@ -209,11 +212,18 @@
}
//check if format is 10-bit compatible
-bool is10bitCompatibleFormat(int32_t format) {
+bool is10bitCompatibleFormat(int32_t format, android_dataspace_t dataSpace) {
switch(format) {
case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
case HAL_PIXEL_FORMAT_YCBCR_P010:
return true;
+ case HAL_PIXEL_FORMAT_BLOB:
+ if (dataSpace == static_cast<android_dataspace_t>(
+ ::aidl::android::hardware::graphics::common::Dataspace::JPEG_R)) {
+ return true;
+ }
+
+ return false;
default:
return false;
}
@@ -316,6 +326,10 @@
return false; // RAW_DEPTH, not applicable
} else if (format == HAL_PIXEL_FORMAT_RAW10 && dataSpace == HAL_DATASPACE_DEPTH) {
return false; // RAW_DEPTH10, not applicable
+ } else if (format == HAL_PIXEL_FORMAT_BLOB && dataSpace ==
+ static_cast<android_dataspace>(
+ ::aidl::android::hardware::graphics::common::Dataspace::JPEG_R)) {
+ format64 = static_cast<int64_t>(PublicFormat::JPEG_R);
}
camera_metadata_ro_entry_t entry =
@@ -499,7 +513,7 @@
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
if (SessionConfigurationUtils::is10bitDynamicRangeProfile(dynamicRangeProfile) &&
- !SessionConfigurationUtils::is10bitCompatibleFormat(format)) {
+ !SessionConfigurationUtils::is10bitCompatibleFormat(format, dataSpace)) {
String8 msg = String8::format("Camera %s: No 10-bit supported stream configurations with "
"format %#x defined and profile %" PRIx64 ", failed to create output stream",
logicalCameraId.string(), format, dynamicRangeProfile);
@@ -772,7 +786,9 @@
camera3::DepthCompositeStream::isDepthCompositeStream(surface);
bool isHeicCompositeStream =
camera3::HeicCompositeStream::isHeicCompositeStream(surface);
- if (isDepthCompositeStream || isHeicCompositeStream) {
+ bool isJpegRCompositeStream =
+ camera3::JpegRCompositeStream::isJpegRCompositeStream(surface);
+ if (isDepthCompositeStream || isHeicCompositeStream || isJpegRCompositeStream) {
// We need to take in to account that composite streams can have
// additional internal camera streams.
std::vector<OutputStreamInfo> compositeStreams;
@@ -780,10 +796,14 @@
// TODO: Take care of composite streams.
ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
deviceInfo, &compositeStreams);
- } else {
+ } else if (isHeicCompositeStream) {
ret = camera3::HeicCompositeStream::getCompositeStreamInfo(streamInfo,
deviceInfo, &compositeStreams);
+ } else {
+ ret = camera3::JpegRCompositeStream::getCompositeStreamInfo(streamInfo,
+ deviceInfo, &compositeStreams);
}
+
if (ret != OK) {
String8 msg = String8::format(
"Camera %s: Failed adding composite streams: %s (%d)",
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 264045e..b5654ac 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -102,7 +102,7 @@
int32_t colorSpace);
//check if format is 10-bit output compatible
-bool is10bitCompatibleFormat(int32_t format);
+bool is10bitCompatibleFormat(int32_t format, android_dataspace_t dataSpace);
// check if the dynamic range requires 10-bit output
bool is10bitDynamicRangeProfile(int64_t dynamicRangeProfile);
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
index 250ac63..28a22e1 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHost.cpp
@@ -49,6 +49,12 @@
return ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
case ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS:
return ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS:
+ return ANDROID_JPEGR_AVAILABLE_JPEG_R_STREAM_CONFIGURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_JPEGR_AVAILABLE_JPEG_R_MIN_FRAME_DURATIONS:
+ return ANDROID_JPEGR_AVAILABLE_JPEG_R_MIN_FRAME_DURATIONS_MAXIMUM_RESOLUTION;
+ case ANDROID_JPEGR_AVAILABLE_JPEG_R_STALL_DURATIONS:
+ return ANDROID_JPEGR_AVAILABLE_JPEG_R_STALL_DURATIONS_MAXIMUM_RESOLUTION;
case ANDROID_SENSOR_OPAQUE_RAW_SIZE:
return ANDROID_SENSOR_OPAQUE_RAW_SIZE_MAXIMUM_RESOLUTION;
case ANDROID_LENS_INTRINSIC_CALIBRATION:
@@ -97,4 +103,4 @@
} // namespace SessionConfigurationUtils
} // namespace camera3
-} // namespace android
\ No newline at end of file
+} // namespace android